// SPDX-License-Identifier: GPL-2.0+
/*
 * Hardware monitoring driver for STMicroelectronics digital controller PM6764TR
 */

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pmbus.h>
#include "pmbus.h"

#define PM6764TR_PMBUS_READ_VOUT	0xD4

static int pm6764tr_read_word_data(struct i2c_client *client, int page,
				   int phase, int reg)
{
	int ret;

	switch (reg) {
	case PMBUS_VIRT_READ_VMON:
		ret = pmbus_read_word_data(client, page, phase,
					   PM6764TR_PMBUS_READ_VOUT);
		break;
	default:
		ret = -ENODATA;
		break;
	}
	return ret;
}

static struct pmbus_driver_info pm6764tr_info = {
	.pages = 1,
	.format[PSC_VOLTAGE_IN] = linear,
	.format[PSC_VOLTAGE_OUT] = vid,
	.format[PSC_TEMPERATURE] = linear,
	.format[PSC_CURRENT_OUT] = linear,
	.format[PSC_POWER] = linear,
	.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
		   PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | PMBUS_HAVE_VMON |
		   PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_VOUT |
		   PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
	.read_word_data = pm6764tr_read_word_data,
};

static int pm6764tr_probe(struct i2c_client *client)
{
	return pmbus_do_probe(client, &pm6764tr_info);
}

static const struct i2c_device_id pm6764tr_id[] = {
	{"pm6764tr", 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, pm6764tr_id);

static const struct of_device_id __maybe_unused pm6764tr_of_match[] = {
	{.compatible = "st,pm6764tr"},
	{}
};

/* This is the driver that will be inserted */
static struct i2c_driver pm6764tr_driver = {
	.driver = {
		.name = "pm6764tr",
		.of_match_table = of_match_ptr(pm6764tr_of_match),
	},
	.probe = pm6764tr_probe,
	.id_table = pm6764tr_id,
};

module_i2c_driver(pm6764tr_driver);

MODULE_AUTHOR("Charles Hsu");
MODULE_DESCRIPTION("PMBus driver for ST PM6764TR");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PMBUS);
linux-master
drivers/hwmon/pmbus/pm6764tr.c
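Most of the telemetry classes in pm6764tr_info above are declared in the PMBus "linear" (LINEAR11) data format, which the pmbus core decodes on the driver's behalf. As a point of reference only, here is a minimal standalone sketch of that decoding; the helper name and the milli-unit scaling are illustrative assumptions, not part of the driver or of the pmbus core API.

/*
 * Illustrative sketch: decode a PMBus LINEAR11 word (e.g. READ_TEMPERATURE_1)
 * into milli-units. Bits 15:11 are a signed 5-bit exponent, bits 10:0 a
 * signed 11-bit mantissa; value = mantissa * 2^exponent. Assumes the usual
 * arithmetic right shift for signed integers.
 */
#include <stdint.h>

static long linear11_to_milli(uint16_t word)
{
	int exponent = (int16_t)word >> 11;          /* sign-extend bits 15:11 */
	int mantissa = ((int16_t)(word << 5)) >> 5;  /* sign-extend bits 10:0 */
	long val = (long)mantissa * 1000;            /* scale to milli-units */

	return exponent >= 0 ? val << exponent : val >> -exponent;
}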
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hardware monitoring driver for IR35221
 *
 * Copyright (C) IBM Corporation 2017.
 */

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "pmbus.h"

#define IR35221_MFR_VIN_PEAK		0xc5
#define IR35221_MFR_VOUT_PEAK		0xc6
#define IR35221_MFR_IOUT_PEAK		0xc7
#define IR35221_MFR_TEMP_PEAK		0xc8
#define IR35221_MFR_VIN_VALLEY		0xc9
#define IR35221_MFR_VOUT_VALLEY		0xca
#define IR35221_MFR_IOUT_VALLEY		0xcb
#define IR35221_MFR_TEMP_VALLEY		0xcc

static int ir35221_read_word_data(struct i2c_client *client, int page,
				  int phase, int reg)
{
	int ret;

	switch (reg) {
	case PMBUS_VIRT_READ_VIN_MAX:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_VIN_PEAK);
		break;
	case PMBUS_VIRT_READ_VOUT_MAX:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_VOUT_PEAK);
		break;
	case PMBUS_VIRT_READ_IOUT_MAX:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_IOUT_PEAK);
		break;
	case PMBUS_VIRT_READ_TEMP_MAX:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_TEMP_PEAK);
		break;
	case PMBUS_VIRT_READ_VIN_MIN:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_VIN_VALLEY);
		break;
	case PMBUS_VIRT_READ_VOUT_MIN:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_VOUT_VALLEY);
		break;
	case PMBUS_VIRT_READ_IOUT_MIN:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_IOUT_VALLEY);
		break;
	case PMBUS_VIRT_READ_TEMP_MIN:
		ret = pmbus_read_word_data(client, page, phase,
					   IR35221_MFR_TEMP_VALLEY);
		break;
	default:
		ret = -ENODATA;
		break;
	}

	return ret;
}

static int ir35221_probe(struct i2c_client *client)
{
	struct pmbus_driver_info *info;
	u8 buf[I2C_SMBUS_BLOCK_MAX];
	int ret;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_READ_BYTE_DATA |
				     I2C_FUNC_SMBUS_READ_WORD_DATA |
				     I2C_FUNC_SMBUS_READ_BLOCK_DATA))
		return -ENODEV;

	ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read PMBUS_MFR_ID\n");
		return ret;
	}
	if (ret != 2 || strncmp(buf, "RI", strlen("RI"))) {
		dev_err(&client->dev, "MFR_ID unrecognised\n");
		return -ENODEV;
	}

	ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read PMBUS_MFR_MODEL\n");
		return ret;
	}
	if (ret != 2 || !(buf[0] == 0x6c && buf[1] == 0x00)) {
		dev_err(&client->dev, "MFR_MODEL unrecognised\n");
		return -ENODEV;
	}

	info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->read_word_data = ir35221_read_word_data;

	info->pages = 2;
	info->format[PSC_VOLTAGE_IN] = linear;
	info->format[PSC_VOLTAGE_OUT] = linear;
	info->format[PSC_CURRENT_IN] = linear;
	info->format[PSC_CURRENT_OUT] = linear;
	info->format[PSC_POWER] = linear;
	info->format[PSC_TEMPERATURE] = linear;

	info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN |
		PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT |
		PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT |
		PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
		PMBUS_HAVE_STATUS_TEMP;
	info->func[1] = info->func[0];

	return pmbus_do_probe(client, info);
}

static const struct i2c_device_id ir35221_id[] = {
	{"ir35221", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, ir35221_id);

static struct i2c_driver ir35221_driver = {
	.driver = {
		.name = "ir35221",
	},
	.probe = ir35221_probe,
	.id_table = ir35221_id,
};

module_i2c_driver(ir35221_driver);

MODULE_AUTHOR("Samuel Mendoza-Jonas <[email protected]>");
MODULE_DESCRIPTION("PMBus driver for IR35221");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PMBUS);
linux-master
drivers/hwmon/pmbus/ir35221.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Delta AHE-50DC power shelf fan control module driver
 *
 * Copyright 2021 Zev Weiss <[email protected]>
 */

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pmbus.h>
#include "pmbus.h"

#define AHE50DC_PMBUS_READ_TEMP4 0xd0

static int ahe50dc_fan_write_byte(struct i2c_client *client, int page, u8 value)
{
	/*
	 * The CLEAR_FAULTS operation seems to sometimes (unpredictably, perhaps
	 * 5% of the time or so) trigger a problematic phenomenon in which the
	 * fan speeds surge momentarily and at least some (perhaps all?) of the
	 * system's power outputs experience a glitch.
	 *
	 * However, according to Delta it should be OK to simply not send any
	 * CLEAR_FAULTS commands (the device doesn't seem to be capable of
	 * reporting any faults anyway), so just blackhole them unconditionally.
	 */
	return value == PMBUS_CLEAR_FAULTS ? -EOPNOTSUPP : -ENODATA;
}

static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page,
				      int phase, int reg)
{
	/* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */
	if (page == 1) {
		if (reg == PMBUS_READ_TEMPERATURE_1)
			return i2c_smbus_read_word_data(client,
							AHE50DC_PMBUS_READ_TEMP4);
		return -EOPNOTSUPP;
	}

	/*
	 * There's a fairly limited set of commands this device actually
	 * supports, so here we block attempts to read anything else (which
	 * return 0xffff and would cause confusion elsewhere).
	 */
	switch (reg) {
	case PMBUS_STATUS_WORD:
	case PMBUS_FAN_COMMAND_1:
	case PMBUS_FAN_COMMAND_2:
	case PMBUS_FAN_COMMAND_3:
	case PMBUS_FAN_COMMAND_4:
	case PMBUS_STATUS_FAN_12:
	case PMBUS_STATUS_FAN_34:
	case PMBUS_READ_VIN:
	case PMBUS_READ_TEMPERATURE_1:
	case PMBUS_READ_TEMPERATURE_2:
	case PMBUS_READ_TEMPERATURE_3:
	case PMBUS_READ_FAN_SPEED_1:
	case PMBUS_READ_FAN_SPEED_2:
	case PMBUS_READ_FAN_SPEED_3:
	case PMBUS_READ_FAN_SPEED_4:
		return -ENODATA;
	default:
		return -EOPNOTSUPP;
	}
}

static struct pmbus_driver_info ahe50dc_fan_info = {
	.pages = 2,
	.format[PSC_FAN] = direct,
	.format[PSC_TEMPERATURE] = direct,
	.format[PSC_VOLTAGE_IN] = direct,
	.m[PSC_FAN] = 1,
	.b[PSC_FAN] = 0,
	.R[PSC_FAN] = 0,
	.m[PSC_TEMPERATURE] = 1,
	.b[PSC_TEMPERATURE] = 0,
	.R[PSC_TEMPERATURE] = 1,
	.m[PSC_VOLTAGE_IN] = 1,
	.b[PSC_VOLTAGE_IN] = 0,
	.R[PSC_VOLTAGE_IN] = 3,
	.func[0] = PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
		PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 |
		PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 |
		PMBUS_PAGE_VIRTUAL,
	.func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL,
	.write_byte = ahe50dc_fan_write_byte,
	.read_word_data = ahe50dc_fan_read_word_data,
};

/*
 * CAPABILITY returns 0xff, which appears to be this device's way indicating
 * it doesn't support something (and if we enable I2C_CLIENT_PEC on seeing bit
 * 7 being set it generates bad PECs, so let's not go there).
 */
static struct pmbus_platform_data ahe50dc_fan_data = {
	.flags = PMBUS_NO_CAPABILITY,
};

static int ahe50dc_fan_probe(struct i2c_client *client)
{
	client->dev.platform_data = &ahe50dc_fan_data;

	return pmbus_do_probe(client, &ahe50dc_fan_info);
}

static const struct i2c_device_id ahe50dc_fan_id[] = {
	{ "ahe50dc_fan" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ahe50dc_fan_id);

static const struct of_device_id __maybe_unused ahe50dc_fan_of_match[] = {
	{ .compatible = "delta,ahe50dc-fan" },
	{ }
};
MODULE_DEVICE_TABLE(of, ahe50dc_fan_of_match);

static struct i2c_driver ahe50dc_fan_driver = {
	.driver = {
		.name = "ahe50dc_fan",
		.of_match_table = of_match_ptr(ahe50dc_fan_of_match),
	},
	.probe = ahe50dc_fan_probe,
	.id_table = ahe50dc_fan_id,
};
module_i2c_driver(ahe50dc_fan_driver);

MODULE_AUTHOR("Zev Weiss <[email protected]>");
MODULE_DESCRIPTION("Driver for Delta AHE-50DC power shelf fan control module");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PMBUS);
linux-master
drivers/hwmon/pmbus/delta-ahe50dc-fan.c
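The ahe50dc_fan_info table above uses the PMBus "direct" data format, where a raw reading Y relates to the real-world value X by Y = (m * X + b) * 10^R. Below is a minimal standalone sketch of solving that for X, assuming the non-negative R values this driver declares; the helper names are illustrative, not kernel API.

/*
 * Illustrative sketch: invert the PMBus "direct" format, X = (Y * 10^-R - b) / m,
 * computed in milli-units to stay in integer arithmetic. With m = 1, b = 0 and
 * R = 0/1/3 as above, a raw fan word is RPM, a raw temperature word is tenths
 * of a degree C, and a raw VIN word is millivolts.
 */
static long pow10i(int r)
{
	long v = 1;

	while (r-- > 0)
		v *= 10;
	return v;
}

static long direct_to_milli(long y, long m, long b, int R)
{
	/* assumes R >= 0, which holds for all coefficients in this driver */
	return (y * 1000 / pow10i(R) - b * 1000) / m;
}

/* e.g. direct_to_milli(12345, 1, 0, 3) == 12345, i.e. VIN = 12.345 V */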
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2018-2021 Intel Corporation #include <linux/auxiliary_bus.h> #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/devm-helpers.h> #include <linux/hwmon.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/peci.h> #include <linux/peci-cpu.h> #include <linux/units.h> #include <linux/workqueue.h> #include "common.h" #define DIMM_MASK_CHECK_DELAY_JIFFIES msecs_to_jiffies(5000) /* Max number of channel ranks and DIMM index per channel */ #define CHAN_RANK_MAX_ON_HSX 8 #define DIMM_IDX_MAX_ON_HSX 3 #define CHAN_RANK_MAX_ON_BDX 4 #define DIMM_IDX_MAX_ON_BDX 3 #define CHAN_RANK_MAX_ON_BDXD 2 #define DIMM_IDX_MAX_ON_BDXD 2 #define CHAN_RANK_MAX_ON_SKX 6 #define DIMM_IDX_MAX_ON_SKX 2 #define CHAN_RANK_MAX_ON_ICX 8 #define DIMM_IDX_MAX_ON_ICX 2 #define CHAN_RANK_MAX_ON_ICXD 4 #define DIMM_IDX_MAX_ON_ICXD 2 #define CHAN_RANK_MAX_ON_SPR 8 #define DIMM_IDX_MAX_ON_SPR 2 #define CHAN_RANK_MAX CHAN_RANK_MAX_ON_HSX #define DIMM_IDX_MAX DIMM_IDX_MAX_ON_HSX #define DIMM_NUMS_MAX (CHAN_RANK_MAX * DIMM_IDX_MAX) #define CPU_SEG_MASK GENMASK(23, 16) #define GET_CPU_SEG(x) (((x) & CPU_SEG_MASK) >> 16) #define CPU_BUS_MASK GENMASK(7, 0) #define GET_CPU_BUS(x) ((x) & CPU_BUS_MASK) #define DIMM_TEMP_MAX GENMASK(15, 8) #define DIMM_TEMP_CRIT GENMASK(23, 16) #define GET_TEMP_MAX(x) (((x) & DIMM_TEMP_MAX) >> 8) #define GET_TEMP_CRIT(x) (((x) & DIMM_TEMP_CRIT) >> 16) #define NO_DIMM_RETRY_COUNT_MAX 5 struct peci_dimmtemp; struct dimm_info { int chan_rank_max; int dimm_idx_max; u8 min_peci_revision; int (*read_thresholds)(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data); }; struct peci_dimm_thresholds { long temp_max; long temp_crit; struct peci_sensor_state state; }; enum peci_dimm_threshold_type { temp_max_type, temp_crit_type, }; struct peci_dimmtemp { struct peci_device *peci_dev; struct device *dev; const char *name; const struct dimm_info *gen_info; struct delayed_work detect_work; struct { struct peci_sensor_data temp; struct peci_dimm_thresholds thresholds; } dimm[DIMM_NUMS_MAX]; char **dimmtemp_label; DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX); u8 no_dimm_retry_count; }; static u8 __dimm_temp(u32 reg, int dimm_order) { return (reg >> (dimm_order * 8)) & 0xff; } static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val) { int dimm_order = dimm_no % priv->gen_info->dimm_idx_max; int chan_rank = dimm_no / priv->gen_info->dimm_idx_max; int ret = 0; u32 data; mutex_lock(&priv->dimm[dimm_no].temp.state.lock); if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state)) goto skip_update; ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data); if (ret) goto unlock; priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE; peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state); skip_update: *val = priv->dimm[dimm_no].temp.value; unlock: mutex_unlock(&priv->dimm[dimm_no].temp.state.lock); return ret; } static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no) { int dimm_order = dimm_no % priv->gen_info->dimm_idx_max; int chan_rank = dimm_no / priv->gen_info->dimm_idx_max; u32 data; int ret; if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state)) return 0; ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data); if (ret == -ENODATA) /* Use default or previous value */ return 0; if (ret) return ret; priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE; 
priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE; peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state); return 0; } static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_threshold_type type, int dimm_no, long *val) { int ret; mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock); ret = update_thresholds(priv, dimm_no); if (ret) goto unlock; switch (type) { case temp_max_type: *val = priv->dimm[dimm_no].thresholds.temp_max; break; case temp_crit_type: *val = priv->dimm[dimm_no].thresholds.temp_crit; break; default: ret = -EOPNOTSUPP; break; } unlock: mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock); return ret; } static int dimmtemp_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { struct peci_dimmtemp *priv = dev_get_drvdata(dev); if (attr != hwmon_temp_label) return -EOPNOTSUPP; *str = (const char *)priv->dimmtemp_label[channel]; return 0; } static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct peci_dimmtemp *priv = dev_get_drvdata(dev); switch (attr) { case hwmon_temp_input: return get_dimm_temp(priv, channel, val); case hwmon_temp_max: return get_dimm_thresholds(priv, temp_max_type, channel, val); case hwmon_temp_crit: return get_dimm_thresholds(priv, temp_crit_type, channel, val); default: break; } return -EOPNOTSUPP; } static umode_t dimmtemp_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct peci_dimmtemp *priv = data; if (test_bit(channel, priv->dimm_mask)) return 0444; return 0; } static const struct hwmon_ops peci_dimmtemp_ops = { .is_visible = dimmtemp_is_visible, .read_string = dimmtemp_read_string, .read = dimmtemp_read, }; static int check_populated_dimms(struct peci_dimmtemp *priv) { int chan_rank_max = priv->gen_info->chan_rank_max; int dimm_idx_max = priv->gen_info->dimm_idx_max; DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX); DECLARE_BITMAP(chan_rank_empty, CHAN_RANK_MAX); int chan_rank, dimm_idx, ret, i; u32 pcs; if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) { WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d", chan_rank_max, dimm_idx_max); return -EINVAL; } bitmap_zero(dimm_mask, DIMM_NUMS_MAX); bitmap_zero(chan_rank_empty, CHAN_RANK_MAX); for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) { ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs); if (ret) { /* * Overall, we expect either success or -EINVAL in * order to determine whether DIMM is populated or not. * For anything else we fall back to deferring the * detection to be performed at a later point in time. */ if (ret == -EINVAL) { bitmap_set(chan_rank_empty, chan_rank, 1); continue; } return -EAGAIN; } for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++) if (__dimm_temp(pcs, dimm_idx)) bitmap_set(dimm_mask, chan_rank * dimm_idx_max + dimm_idx, 1); } /* * If we got all -EINVALs, it means that the CPU doesn't have any * DIMMs. Unfortunately, it may also happen at the very start of * host platform boot. Retrying a couple of times lets us make sure * that the state is persistent. */ if (bitmap_full(chan_rank_empty, chan_rank_max)) { if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) { priv->no_dimm_retry_count++; return -EAGAIN; } return -ENODEV; } /* * It's possible that memory training is not done yet. In this case we * defer the detection to be performed at a later point in time. 
*/ if (bitmap_empty(dimm_mask, DIMM_NUMS_MAX)) { priv->no_dimm_retry_count = 0; return -EAGAIN; } for_each_set_bit(i, dimm_mask, DIMM_NUMS_MAX) { dev_dbg(priv->dev, "Found DIMM%#x\n", i); } bitmap_copy(priv->dimm_mask, dimm_mask, DIMM_NUMS_MAX); return 0; } static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan) { int rank = chan / priv->gen_info->dimm_idx_max; int idx = chan % priv->gen_info->dimm_idx_max; priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL, "DIMM %c%d", 'A' + rank, idx + 1); if (!priv->dimmtemp_label[chan]) return -ENOMEM; return 0; } static const struct hwmon_channel_info * const peci_dimmtemp_temp_info[] = { HWMON_CHANNEL_INFO(temp, [0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT), NULL }; static const struct hwmon_chip_info peci_dimmtemp_chip_info = { .ops = &peci_dimmtemp_ops, .info = peci_dimmtemp_temp_info, }; static int create_dimm_temp_info(struct peci_dimmtemp *priv) { int ret, i, channels; struct device *dev; /* * We expect to either find populated DIMMs and carry on with creating * sensors, or find out that there are no DIMMs populated. * All other states mean that the platform never reached the state that * allows to check DIMM state - causing us to retry later on. */ ret = check_populated_dimms(priv); if (ret == -ENODEV) { dev_dbg(priv->dev, "No DIMMs found\n"); return 0; } else if (ret) { schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES); dev_dbg(priv->dev, "Deferred populating DIMM temp info\n"); return ret; } channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max; priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL); if (!priv->dimmtemp_label) return -ENOMEM; for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) { ret = create_dimm_temp_label(priv, i); if (ret) return ret; mutex_init(&priv->dimm[i].thresholds.state.lock); mutex_init(&priv->dimm[i].temp.state.lock); } dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv, &peci_dimmtemp_chip_info, NULL); if (IS_ERR(dev)) { dev_err(priv->dev, "Failed to register hwmon device\n"); return PTR_ERR(dev); } dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name); return 0; } static void create_dimm_temp_info_delayed(struct work_struct *work) { struct peci_dimmtemp *priv = container_of(to_delayed_work(work), struct peci_dimmtemp, detect_work); int ret; ret = create_dimm_temp_info(priv); if (ret && ret != -EAGAIN) dev_err(priv->dev, "Failed to populate DIMM temp info\n"); } static int peci_dimmtemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct device *dev = &adev->dev; struct peci_device *peci_dev = to_peci_device(dev->parent); struct peci_dimmtemp *priv; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d", peci_dev->info.socket_id); if (!priv->name) return -ENOMEM; priv->dev = dev; priv->peci_dev = peci_dev; priv->gen_info = (const struct dimm_info *)id->driver_data; /* * This is just a sanity check. Since we're using commands that are * guaranteed to be supported on a given platform, we should never see * revision lower than expected. 
*/ if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision) dev_warn(priv->dev, "Unexpected PECI revision %#x, some features may be unavailable\n", peci_dev->info.peci_revision); ret = devm_delayed_work_autocancel(priv->dev, &priv->detect_work, create_dimm_temp_info_delayed); if (ret) return ret; ret = create_dimm_temp_info(priv); if (ret && ret != -EAGAIN) { dev_err(dev, "Failed to populate DIMM temp info\n"); return ret; } return 0; } static int read_thresholds_hsx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data) { u8 dev, func; u16 reg; int ret; /* * Device 20, Function 0: IMC 0 channel 0 -> rank 0 * Device 20, Function 1: IMC 0 channel 1 -> rank 1 * Device 21, Function 0: IMC 0 channel 2 -> rank 2 * Device 21, Function 1: IMC 0 channel 3 -> rank 3 * Device 23, Function 0: IMC 1 channel 0 -> rank 4 * Device 23, Function 1: IMC 1 channel 1 -> rank 5 * Device 24, Function 0: IMC 1 channel 2 -> rank 6 * Device 24, Function 1: IMC 1 channel 3 -> rank 7 */ dev = 20 + chan_rank / 2 + chan_rank / 4; func = chan_rank % 2; reg = 0x120 + dimm_order * 4; ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data); if (ret) return ret; return 0; } static int read_thresholds_bdxd(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data) { u8 dev, func; u16 reg; int ret; /* * Device 10, Function 2: IMC 0 channel 0 -> rank 0 * Device 10, Function 6: IMC 0 channel 1 -> rank 1 * Device 12, Function 2: IMC 1 channel 0 -> rank 2 * Device 12, Function 6: IMC 1 channel 1 -> rank 3 */ dev = 10 + chan_rank / 2 * 2; func = (chan_rank % 2) ? 6 : 2; reg = 0x120 + dimm_order * 4; ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data); if (ret) return ret; return 0; } static int read_thresholds_skx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data) { u8 dev, func; u16 reg; int ret; /* * Device 10, Function 2: IMC 0 channel 0 -> rank 0 * Device 10, Function 6: IMC 0 channel 1 -> rank 1 * Device 11, Function 2: IMC 0 channel 2 -> rank 2 * Device 12, Function 2: IMC 1 channel 0 -> rank 3 * Device 12, Function 6: IMC 1 channel 1 -> rank 4 * Device 13, Function 2: IMC 1 channel 2 -> rank 5 */ dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0); func = chan_rank % 3 == 1 ? 
6 : 2; reg = 0x120 + dimm_order * 4; ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data); if (ret) return ret; return 0; } static int read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data) { u32 reg_val; u64 offset; int ret; u8 dev; ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val); if (ret || !(reg_val & BIT(31))) return -ENODATA; /* Use default or previous value */ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val); if (ret) return -ENODATA; /* Use default or previous value */ /* * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0 * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1 * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2 * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3 * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4 * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5 * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6 * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7 */ dev = 26 + chan_rank / 2; offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000; ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val), dev, 0, offset, data); if (ret) return ret; return 0; } static int read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data) { u32 reg_val; u64 offset; int ret; u8 dev; ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val); if (ret || !(reg_val & BIT(31))) return -ENODATA; /* Use default or previous value */ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val); if (ret) return -ENODATA; /* Use default or previous value */ /* * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0 * Device 26, Offset 299a8: IMC 0 channel 1 -> rank 1 * Device 27, Offset 219a8: IMC 1 channel 0 -> rank 2 * Device 27, Offset 299a8: IMC 1 channel 1 -> rank 3 * Device 28, Offset 219a8: IMC 2 channel 0 -> rank 4 * Device 28, Offset 299a8: IMC 2 channel 1 -> rank 5 * Device 29, Offset 219a8: IMC 3 channel 0 -> rank 6 * Device 29, Offset 299a8: IMC 3 channel 1 -> rank 7 */ dev = 26 + chan_rank / 2; offset = 0x219a8 + dimm_order * 4 + (chan_rank % 2) * 0x8000; ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val), dev, 0, offset, data); if (ret) return ret; return 0; } static const struct dimm_info dimm_hsx = { .chan_rank_max = CHAN_RANK_MAX_ON_HSX, .dimm_idx_max = DIMM_IDX_MAX_ON_HSX, .min_peci_revision = 0x33, .read_thresholds = &read_thresholds_hsx, }; static const struct dimm_info dimm_bdx = { .chan_rank_max = CHAN_RANK_MAX_ON_BDX, .dimm_idx_max = DIMM_IDX_MAX_ON_BDX, .min_peci_revision = 0x33, .read_thresholds = &read_thresholds_hsx, }; static const struct dimm_info dimm_bdxd = { .chan_rank_max = CHAN_RANK_MAX_ON_BDXD, .dimm_idx_max = DIMM_IDX_MAX_ON_BDXD, .min_peci_revision = 0x33, .read_thresholds = &read_thresholds_bdxd, }; static const struct dimm_info dimm_skx = { .chan_rank_max = CHAN_RANK_MAX_ON_SKX, .dimm_idx_max = DIMM_IDX_MAX_ON_SKX, .min_peci_revision = 0x33, .read_thresholds = &read_thresholds_skx, }; static const struct dimm_info dimm_icx = { .chan_rank_max = CHAN_RANK_MAX_ON_ICX, .dimm_idx_max = DIMM_IDX_MAX_ON_ICX, .min_peci_revision = 0x40, .read_thresholds = &read_thresholds_icx, }; static const struct dimm_info dimm_icxd = { .chan_rank_max = CHAN_RANK_MAX_ON_ICXD, .dimm_idx_max = DIMM_IDX_MAX_ON_ICXD, .min_peci_revision = 0x40, .read_thresholds = &read_thresholds_icx, }; static const struct dimm_info dimm_spr = { .chan_rank_max = 
CHAN_RANK_MAX_ON_SPR, .dimm_idx_max = DIMM_IDX_MAX_ON_SPR, .min_peci_revision = 0x40, .read_thresholds = &read_thresholds_spr, }; static const struct auxiliary_device_id peci_dimmtemp_ids[] = { { .name = "peci_cpu.dimmtemp.hsx", .driver_data = (kernel_ulong_t)&dimm_hsx, }, { .name = "peci_cpu.dimmtemp.bdx", .driver_data = (kernel_ulong_t)&dimm_bdx, }, { .name = "peci_cpu.dimmtemp.bdxd", .driver_data = (kernel_ulong_t)&dimm_bdxd, }, { .name = "peci_cpu.dimmtemp.skx", .driver_data = (kernel_ulong_t)&dimm_skx, }, { .name = "peci_cpu.dimmtemp.icx", .driver_data = (kernel_ulong_t)&dimm_icx, }, { .name = "peci_cpu.dimmtemp.icxd", .driver_data = (kernel_ulong_t)&dimm_icxd, }, { .name = "peci_cpu.dimmtemp.spr", .driver_data = (kernel_ulong_t)&dimm_spr, }, { } }; MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids); static struct auxiliary_driver peci_dimmtemp_driver = { .probe = peci_dimmtemp_probe, .id_table = peci_dimmtemp_ids, }; module_auxiliary_driver(peci_dimmtemp_driver); MODULE_AUTHOR("Jae Hyun Yoo <[email protected]>"); MODULE_AUTHOR("Iwona Winiarska <[email protected]>"); MODULE_DESCRIPTION("PECI dimmtemp driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(PECI_CPU);
linux-master
drivers/hwmon/peci/dimmtemp.c
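The driver above reads one PCS dword per memory channel rank and treats each byte of it as one DIMM slot's temperature in degrees C (see __dimm_temp() and create_dimm_temp_label()). The following standalone sketch mirrors that unpacking and labelling for reference only, under the same assumption the driver makes that a zero byte means the slot is unpopulated; it is not part of the driver.

/*
 * Illustrative sketch: unpack a DDR DIMM temperature PCS dword. Byte n holds
 * DIMM n's temperature in degrees C; the driver reports millidegrees and
 * labels the channel "DIMM <rank letter><slot>", so channel rank 0, index 1
 * becomes "DIMM A2".
 */
#include <stdint.h>
#include <stdio.h>

#define MILLIDEGREE_PER_DEGREE 1000

static void print_rank_temps(uint32_t pcs, int chan_rank, int dimm_idx_max)
{
	int idx;

	for (idx = 0; idx < dimm_idx_max; idx++) {
		uint8_t temp = (pcs >> (idx * 8)) & 0xff;

		if (!temp)	/* driver treats 0 as "no DIMM populated" */
			continue;
		printf("DIMM %c%d: %d millidegrees C\n", 'A' + chan_rank,
		       idx + 1, temp * MILLIDEGREE_PER_DEGREE);
	}
}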
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2018-2021 Intel Corporation #include <linux/auxiliary_bus.h> #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/hwmon.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/peci.h> #include <linux/peci-cpu.h> #include <linux/units.h> #include "common.h" #define CORE_NUMS_MAX 64 #define BASE_CHANNEL_NUMS 5 #define CPUTEMP_CHANNEL_NUMS (BASE_CHANNEL_NUMS + CORE_NUMS_MAX) #define TEMP_TARGET_FAN_TEMP_MASK GENMASK(15, 8) #define TEMP_TARGET_REF_TEMP_MASK GENMASK(23, 16) #define TEMP_TARGET_TJ_OFFSET_MASK GENMASK(29, 24) #define DTS_MARGIN_MASK GENMASK(15, 0) #define PCS_MODULE_TEMP_MASK GENMASK(15, 0) struct resolved_cores_reg { u8 bus; u8 dev; u8 func; u8 offset; }; struct cpu_info { struct resolved_cores_reg *reg; u8 min_peci_revision; s32 (*thermal_margin_to_millidegree)(u16 val); }; struct peci_temp_target { s32 tcontrol; s32 tthrottle; s32 tjmax; struct peci_sensor_state state; }; enum peci_temp_target_type { tcontrol_type, tthrottle_type, tjmax_type, crit_hyst_type, }; struct peci_cputemp { struct peci_device *peci_dev; struct device *dev; const char *name; const struct cpu_info *gen_info; struct { struct peci_temp_target target; struct peci_sensor_data die; struct peci_sensor_data dts; struct peci_sensor_data core[CORE_NUMS_MAX]; } temp; const char **coretemp_label; DECLARE_BITMAP(core_mask, CORE_NUMS_MAX); }; enum cputemp_channels { channel_die, channel_dts, channel_tcontrol, channel_tthrottle, channel_tjmax, channel_core, }; static const char * const cputemp_label[BASE_CHANNEL_NUMS] = { "Die", "DTS", "Tcontrol", "Tthrottle", "Tjmax", }; static int update_temp_target(struct peci_cputemp *priv) { s32 tthrottle_offset, tcontrol_margin; u32 pcs; int ret; if (!peci_sensor_need_update(&priv->temp.target.state)) return 0; ret = peci_pcs_read(priv->peci_dev, PECI_PCS_TEMP_TARGET, 0, &pcs); if (ret) return ret; priv->temp.target.tjmax = FIELD_GET(TEMP_TARGET_REF_TEMP_MASK, pcs) * MILLIDEGREE_PER_DEGREE; tcontrol_margin = FIELD_GET(TEMP_TARGET_FAN_TEMP_MASK, pcs); tcontrol_margin = sign_extend32(tcontrol_margin, 7) * MILLIDEGREE_PER_DEGREE; priv->temp.target.tcontrol = priv->temp.target.tjmax - tcontrol_margin; tthrottle_offset = FIELD_GET(TEMP_TARGET_TJ_OFFSET_MASK, pcs) * MILLIDEGREE_PER_DEGREE; priv->temp.target.tthrottle = priv->temp.target.tjmax - tthrottle_offset; peci_sensor_mark_updated(&priv->temp.target.state); return 0; } static int get_temp_target(struct peci_cputemp *priv, enum peci_temp_target_type type, long *val) { int ret; mutex_lock(&priv->temp.target.state.lock); ret = update_temp_target(priv); if (ret) goto unlock; switch (type) { case tcontrol_type: *val = priv->temp.target.tcontrol; break; case tthrottle_type: *val = priv->temp.target.tthrottle; break; case tjmax_type: *val = priv->temp.target.tjmax; break; case crit_hyst_type: *val = priv->temp.target.tjmax - priv->temp.target.tcontrol; break; default: ret = -EOPNOTSUPP; break; } unlock: mutex_unlock(&priv->temp.target.state.lock); return ret; } /* * Error codes: * 0x8000: General sensor error * 0x8001: Reserved * 0x8002: Underflow on reading value * 0x8003-0x81ff: Reserved */ static bool dts_valid(u16 val) { return val < 0x8000 || val > 0x81ff; } /* * Processors return a value of DTS reading in S10.6 fixed point format * (16 bits: 10-bit signed magnitude, 6-bit fraction). 
*/ static s32 dts_ten_dot_six_to_millidegree(u16 val) { return sign_extend32(val, 15) * MILLIDEGREE_PER_DEGREE / 64; } /* * For older processors, thermal margin reading is returned in S8.8 fixed * point format (16 bits: 8-bit signed magnitude, 8-bit fraction). */ static s32 dts_eight_dot_eight_to_millidegree(u16 val) { return sign_extend32(val, 15) * MILLIDEGREE_PER_DEGREE / 256; } static int get_die_temp(struct peci_cputemp *priv, long *val) { int ret = 0; long tjmax; u16 temp; mutex_lock(&priv->temp.die.state.lock); if (!peci_sensor_need_update(&priv->temp.die.state)) goto skip_update; ret = peci_temp_read(priv->peci_dev, &temp); if (ret) goto err_unlock; if (!dts_valid(temp)) { ret = -EIO; goto err_unlock; } ret = get_temp_target(priv, tjmax_type, &tjmax); if (ret) goto err_unlock; priv->temp.die.value = (s32)tjmax + dts_ten_dot_six_to_millidegree(temp); peci_sensor_mark_updated(&priv->temp.die.state); skip_update: *val = priv->temp.die.value; err_unlock: mutex_unlock(&priv->temp.die.state.lock); return ret; } static int get_dts(struct peci_cputemp *priv, long *val) { int ret = 0; u16 thermal_margin; long tcontrol; u32 pcs; mutex_lock(&priv->temp.dts.state.lock); if (!peci_sensor_need_update(&priv->temp.dts.state)) goto skip_update; ret = peci_pcs_read(priv->peci_dev, PECI_PCS_THERMAL_MARGIN, 0, &pcs); if (ret) goto err_unlock; thermal_margin = FIELD_GET(DTS_MARGIN_MASK, pcs); if (!dts_valid(thermal_margin)) { ret = -EIO; goto err_unlock; } ret = get_temp_target(priv, tcontrol_type, &tcontrol); if (ret) goto err_unlock; /* Note that the tcontrol should be available before calling it */ priv->temp.dts.value = (s32)tcontrol - priv->gen_info->thermal_margin_to_millidegree(thermal_margin); peci_sensor_mark_updated(&priv->temp.dts.state); skip_update: *val = priv->temp.dts.value; err_unlock: mutex_unlock(&priv->temp.dts.state.lock); return ret; } static int get_core_temp(struct peci_cputemp *priv, int core_index, long *val) { int ret = 0; u16 core_dts_margin; long tjmax; u32 pcs; mutex_lock(&priv->temp.core[core_index].state.lock); if (!peci_sensor_need_update(&priv->temp.core[core_index].state)) goto skip_update; ret = peci_pcs_read(priv->peci_dev, PECI_PCS_MODULE_TEMP, core_index, &pcs); if (ret) goto err_unlock; core_dts_margin = FIELD_GET(PCS_MODULE_TEMP_MASK, pcs); if (!dts_valid(core_dts_margin)) { ret = -EIO; goto err_unlock; } ret = get_temp_target(priv, tjmax_type, &tjmax); if (ret) goto err_unlock; /* Note that the tjmax should be available before calling it */ priv->temp.core[core_index].value = (s32)tjmax + dts_ten_dot_six_to_millidegree(core_dts_margin); peci_sensor_mark_updated(&priv->temp.core[core_index].state); skip_update: *val = priv->temp.core[core_index].value; err_unlock: mutex_unlock(&priv->temp.core[core_index].state.lock); return ret; } static int cputemp_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { struct peci_cputemp *priv = dev_get_drvdata(dev); if (attr != hwmon_temp_label) return -EOPNOTSUPP; *str = channel < channel_core ? 
cputemp_label[channel] : priv->coretemp_label[channel - channel_core]; return 0; } static int cputemp_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct peci_cputemp *priv = dev_get_drvdata(dev); switch (attr) { case hwmon_temp_input: switch (channel) { case channel_die: return get_die_temp(priv, val); case channel_dts: return get_dts(priv, val); case channel_tcontrol: return get_temp_target(priv, tcontrol_type, val); case channel_tthrottle: return get_temp_target(priv, tthrottle_type, val); case channel_tjmax: return get_temp_target(priv, tjmax_type, val); default: return get_core_temp(priv, channel - channel_core, val); } break; case hwmon_temp_max: return get_temp_target(priv, tcontrol_type, val); case hwmon_temp_crit: return get_temp_target(priv, tjmax_type, val); case hwmon_temp_crit_hyst: return get_temp_target(priv, crit_hyst_type, val); default: return -EOPNOTSUPP; } return 0; } static umode_t cputemp_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct peci_cputemp *priv = data; if (channel > CPUTEMP_CHANNEL_NUMS) return 0; if (channel < channel_core) return 0444; if (test_bit(channel - channel_core, priv->core_mask)) return 0444; return 0; } static int init_core_mask(struct peci_cputemp *priv) { struct peci_device *peci_dev = priv->peci_dev; struct resolved_cores_reg *reg = priv->gen_info->reg; u64 core_mask; u32 data; int ret; /* Get the RESOLVED_CORES register value */ switch (peci_dev->info.model) { case INTEL_FAM6_ICELAKE_X: case INTEL_FAM6_ICELAKE_D: case INTEL_FAM6_SAPPHIRERAPIDS_X: ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev, reg->func, reg->offset + 4, &data); if (ret) return ret; core_mask = (u64)data << 32; ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev, reg->func, reg->offset, &data); if (ret) return ret; core_mask |= data; break; default: ret = peci_pci_local_read(peci_dev, reg->bus, reg->dev, reg->func, reg->offset, &data); if (ret) return ret; core_mask = data; break; } if (!core_mask) return -EIO; bitmap_from_u64(priv->core_mask, core_mask); return 0; } static int create_temp_label(struct peci_cputemp *priv) { unsigned long core_max = find_last_bit(priv->core_mask, CORE_NUMS_MAX); int i; priv->coretemp_label = devm_kzalloc(priv->dev, (core_max + 1) * sizeof(char *), GFP_KERNEL); if (!priv->coretemp_label) return -ENOMEM; for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX) { priv->coretemp_label[i] = devm_kasprintf(priv->dev, GFP_KERNEL, "Core %d", i); if (!priv->coretemp_label[i]) return -ENOMEM; } return 0; } static void check_resolved_cores(struct peci_cputemp *priv) { /* * Failure to resolve cores is non-critical, we're still able to * provide other sensor data. 
*/ if (init_core_mask(priv)) return; if (create_temp_label(priv)) bitmap_zero(priv->core_mask, CORE_NUMS_MAX); } static void sensor_init(struct peci_cputemp *priv) { int i; mutex_init(&priv->temp.target.state.lock); mutex_init(&priv->temp.die.state.lock); mutex_init(&priv->temp.dts.state.lock); for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX) mutex_init(&priv->temp.core[i].state.lock); } static const struct hwmon_ops peci_cputemp_ops = { .is_visible = cputemp_is_visible, .read_string = cputemp_read_string, .read = cputemp_read, }; static const struct hwmon_channel_info * const peci_cputemp_info[] = { HWMON_CHANNEL_INFO(temp, /* Die temperature */ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_CRIT_HYST, /* DTS margin */ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_CRIT_HYST, /* Tcontrol temperature */ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_CRIT, /* Tthrottle temperature */ HWMON_T_LABEL | HWMON_T_INPUT, /* Tjmax temperature */ HWMON_T_LABEL | HWMON_T_INPUT, /* Core temperature - for all core channels */ [channel_core ... CPUTEMP_CHANNEL_NUMS - 1] = HWMON_T_LABEL | HWMON_T_INPUT), NULL }; static const struct hwmon_chip_info peci_cputemp_chip_info = { .ops = &peci_cputemp_ops, .info = peci_cputemp_info, }; static int peci_cputemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct device *dev = &adev->dev; struct peci_device *peci_dev = to_peci_device(dev->parent); struct peci_cputemp *priv; struct device *hwmon_dev; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_cputemp.cpu%d", peci_dev->info.socket_id); if (!priv->name) return -ENOMEM; priv->dev = dev; priv->peci_dev = peci_dev; priv->gen_info = (const struct cpu_info *)id->driver_data; /* * This is just a sanity check. Since we're using commands that are * guaranteed to be supported on a given platform, we should never see * revision lower than expected. */ if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision) dev_warn(priv->dev, "Unexpected PECI revision %#x, some features may be unavailable\n", peci_dev->info.peci_revision); check_resolved_cores(priv); sensor_init(priv); hwmon_dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv, &peci_cputemp_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } /* * RESOLVED_CORES PCI configuration register may have different location on * different platforms. 
*/ static struct resolved_cores_reg resolved_cores_reg_hsx = { .bus = 1, .dev = 30, .func = 3, .offset = 0xb4, }; static struct resolved_cores_reg resolved_cores_reg_icx = { .bus = 14, .dev = 30, .func = 3, .offset = 0xd0, }; static struct resolved_cores_reg resolved_cores_reg_spr = { .bus = 31, .dev = 30, .func = 6, .offset = 0x80, }; static const struct cpu_info cpu_hsx = { .reg = &resolved_cores_reg_hsx, .min_peci_revision = 0x33, .thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree, }; static const struct cpu_info cpu_skx = { .reg = &resolved_cores_reg_hsx, .min_peci_revision = 0x33, .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree, }; static const struct cpu_info cpu_icx = { .reg = &resolved_cores_reg_icx, .min_peci_revision = 0x40, .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree, }; static const struct cpu_info cpu_spr = { .reg = &resolved_cores_reg_spr, .min_peci_revision = 0x40, .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree, }; static const struct auxiliary_device_id peci_cputemp_ids[] = { { .name = "peci_cpu.cputemp.hsx", .driver_data = (kernel_ulong_t)&cpu_hsx, }, { .name = "peci_cpu.cputemp.bdx", .driver_data = (kernel_ulong_t)&cpu_hsx, }, { .name = "peci_cpu.cputemp.bdxd", .driver_data = (kernel_ulong_t)&cpu_hsx, }, { .name = "peci_cpu.cputemp.skx", .driver_data = (kernel_ulong_t)&cpu_skx, }, { .name = "peci_cpu.cputemp.icx", .driver_data = (kernel_ulong_t)&cpu_icx, }, { .name = "peci_cpu.cputemp.icxd", .driver_data = (kernel_ulong_t)&cpu_icx, }, { .name = "peci_cpu.cputemp.spr", .driver_data = (kernel_ulong_t)&cpu_spr, }, { } }; MODULE_DEVICE_TABLE(auxiliary, peci_cputemp_ids); static struct auxiliary_driver peci_cputemp_driver = { .probe = peci_cputemp_probe, .id_table = peci_cputemp_ids, }; module_auxiliary_driver(peci_cputemp_driver); MODULE_AUTHOR("Jae Hyun Yoo <[email protected]>"); MODULE_AUTHOR("Iwona Winiarska <[email protected]>"); MODULE_DESCRIPTION("PECI cputemp driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(PECI_CPU);
linux-master
drivers/hwmon/peci/cputemp.c
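The conversions above treat PECI thermal readings as fixed-point margins: S10.6 on Skylake and later (S8.8 on the older Haswell/Broadwell parts), added to Tjmax for the die temperature or subtracted from Tcontrol for the DTS channel. Here is a minimal sketch of the die-temperature arithmetic, mirroring dts_ten_dot_six_to_millidegree() and get_die_temp(); the helper names and example values are illustrative only.

/*
 * Illustrative sketch: convert a raw PECI GetTemp() word (S10.6 fixed point,
 * a negative margin from Tjmax) into an absolute die temperature in
 * millidegrees C.
 */
#include <stdint.h>

#define MILLIDEGREE_PER_DEGREE 1000

static int32_t s10_6_to_millidegree(uint16_t val)
{
	/* sign-extend the 16-bit word, then divide by 64 for the 6-bit fraction */
	return (int16_t)val * MILLIDEGREE_PER_DEGREE / 64;
}

static int32_t die_temp_millidegree(int32_t tjmax_millidegree, uint16_t raw_margin)
{
	/* e.g. Tjmax = 100000 and raw_margin = 0xFB00 (-20.0 C) gives 80000 */
	return tjmax_millidegree + s10_6_to_millidegree(raw_margin);
}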
// SPDX-License-Identifier: GPL-2.0+ // Copyright IBM Corp 2019 #include <linux/device.h> #include <linux/export.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/property.h> #include <linux/sysfs.h> #include <asm/unaligned.h> #include "common.h" #define EXTN_FLAG_SENSOR_ID BIT(7) #define OCC_ERROR_COUNT_THRESHOLD 2 /* required by OCC spec */ #define OCC_STATE_SAFE 4 #define OCC_SAFE_TIMEOUT msecs_to_jiffies(60000) /* 1 min */ #define OCC_UPDATE_FREQUENCY msecs_to_jiffies(1000) #define OCC_TEMP_SENSOR_FAULT 0xFF #define OCC_FRU_TYPE_VRM 3 /* OCC sensor type and version definitions */ struct temp_sensor_1 { u16 sensor_id; u16 value; } __packed; struct temp_sensor_2 { u32 sensor_id; u8 fru_type; u8 value; } __packed; struct temp_sensor_10 { u32 sensor_id; u8 fru_type; u8 value; u8 throttle; u8 reserved; } __packed; struct freq_sensor_1 { u16 sensor_id; u16 value; } __packed; struct freq_sensor_2 { u32 sensor_id; u16 value; } __packed; struct power_sensor_1 { u16 sensor_id; u32 update_tag; u32 accumulator; u16 value; } __packed; struct power_sensor_2 { u32 sensor_id; u8 function_id; u8 apss_channel; u16 reserved; u32 update_tag; u64 accumulator; u16 value; } __packed; struct power_sensor_data { u16 value; u32 update_tag; u64 accumulator; } __packed; struct power_sensor_data_and_time { u16 update_time; u16 value; u32 update_tag; u64 accumulator; } __packed; struct power_sensor_a0 { u32 sensor_id; struct power_sensor_data_and_time system; u32 reserved; struct power_sensor_data_and_time proc; struct power_sensor_data vdd; struct power_sensor_data vdn; } __packed; struct caps_sensor_2 { u16 cap; u16 system_power; u16 n_cap; u16 max; u16 min; u16 user; u8 user_source; } __packed; struct caps_sensor_3 { u16 cap; u16 system_power; u16 n_cap; u16 max; u16 hard_min; u16 soft_min; u16 user; u8 user_source; } __packed; struct extended_sensor { union { u8 name[4]; u32 sensor_id; }; u8 flags; u8 reserved; u8 data[6]; } __packed; static int occ_poll(struct occ *occ) { int rc; u8 cmd[7]; struct occ_poll_response_header *header; /* big endian */ cmd[0] = 0; /* sequence number */ cmd[1] = 0; /* cmd type */ cmd[2] = 0; /* data length msb */ cmd[3] = 1; /* data length lsb */ cmd[4] = occ->poll_cmd_data; /* data */ cmd[5] = 0; /* checksum msb */ cmd[6] = 0; /* checksum lsb */ /* mutex should already be locked if necessary */ rc = occ->send_cmd(occ, cmd, sizeof(cmd), &occ->resp, sizeof(occ->resp)); if (rc) { occ->last_error = rc; if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD) occ->error = rc; goto done; } /* clear error since communication was successful */ occ->error_count = 0; occ->last_error = 0; occ->error = 0; /* check for safe state */ header = (struct occ_poll_response_header *)occ->resp.data; if (header->occ_state == OCC_STATE_SAFE) { if (occ->last_safe) { if (time_after(jiffies, occ->last_safe + OCC_SAFE_TIMEOUT)) occ->error = -EHOSTDOWN; } else { occ->last_safe = jiffies; } } else { occ->last_safe = 0; } done: occ_sysfs_poll_done(occ); return rc; } static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap) { int rc; u8 cmd[8]; u8 resp[8]; __be16 user_power_cap_be = cpu_to_be16(user_power_cap); cmd[0] = 0; /* sequence number */ cmd[1] = 0x22; /* cmd type */ cmd[2] = 0; /* data length msb */ cmd[3] = 2; /* data length lsb */ memcpy(&cmd[4], &user_power_cap_be, 2); cmd[6] = 0; /* checksum msb */ cmd[7] = 0; /* checksum lsb */ rc = 
mutex_lock_interruptible(&occ->lock); if (rc) return rc; rc = occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp)); mutex_unlock(&occ->lock); return rc; } int occ_update_response(struct occ *occ) { int rc = mutex_lock_interruptible(&occ->lock); if (rc) return rc; /* limit the maximum rate of polling the OCC */ if (time_after(jiffies, occ->next_update)) { rc = occ_poll(occ); occ->next_update = jiffies + OCC_UPDATE_FREQUENCY; } else { rc = occ->last_error; } mutex_unlock(&occ->lock); return rc; } static ssize_t occ_show_temp_1(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u32 val = 0; struct temp_sensor_1 *temp; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; temp = ((struct temp_sensor_1 *)sensors->temp.data) + sattr->index; switch (sattr->nr) { case 0: val = get_unaligned_be16(&temp->sensor_id); break; case 1: /* * If a sensor reading has expired and couldn't be refreshed, * OCC returns 0xFFFF for that sensor. */ if (temp->value == 0xFFFF) return -EREMOTEIO; val = get_unaligned_be16(&temp->value) * 1000; break; default: return -EINVAL; } return sysfs_emit(buf, "%u\n", val); } static ssize_t occ_show_temp_2(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u32 val = 0; struct temp_sensor_2 *temp; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; temp = ((struct temp_sensor_2 *)sensors->temp.data) + sattr->index; switch (sattr->nr) { case 0: val = get_unaligned_be32(&temp->sensor_id); break; case 1: val = temp->value; if (val == OCC_TEMP_SENSOR_FAULT) return -EREMOTEIO; /* * VRM doesn't return temperature, only alarm bit. 
This * attribute maps to tempX_alarm instead of tempX_input for * VRM */ if (temp->fru_type != OCC_FRU_TYPE_VRM) { /* sensor not ready */ if (val == 0) return -EAGAIN; val *= 1000; } break; case 2: val = temp->fru_type; break; case 3: val = temp->value == OCC_TEMP_SENSOR_FAULT; break; default: return -EINVAL; } return sysfs_emit(buf, "%u\n", val); } static ssize_t occ_show_temp_10(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u32 val = 0; struct temp_sensor_10 *temp; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; temp = ((struct temp_sensor_10 *)sensors->temp.data) + sattr->index; switch (sattr->nr) { case 0: val = get_unaligned_be32(&temp->sensor_id); break; case 1: val = temp->value; if (val == OCC_TEMP_SENSOR_FAULT) return -EREMOTEIO; /* sensor not ready */ if (val == 0) return -EAGAIN; val *= 1000; break; case 2: val = temp->fru_type; break; case 3: val = temp->value == OCC_TEMP_SENSOR_FAULT; break; case 4: val = temp->throttle * 1000; break; default: return -EINVAL; } return sysfs_emit(buf, "%u\n", val); } static ssize_t occ_show_freq_1(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u16 val = 0; struct freq_sensor_1 *freq; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; freq = ((struct freq_sensor_1 *)sensors->freq.data) + sattr->index; switch (sattr->nr) { case 0: val = get_unaligned_be16(&freq->sensor_id); break; case 1: val = get_unaligned_be16(&freq->value); break; default: return -EINVAL; } return sysfs_emit(buf, "%u\n", val); } static ssize_t occ_show_freq_2(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u32 val = 0; struct freq_sensor_2 *freq; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; freq = ((struct freq_sensor_2 *)sensors->freq.data) + sattr->index; switch (sattr->nr) { case 0: val = get_unaligned_be32(&freq->sensor_id); break; case 1: val = get_unaligned_be16(&freq->value); break; default: return -EINVAL; } return sysfs_emit(buf, "%u\n", val); } static ssize_t occ_show_power_1(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u64 val = 0; struct power_sensor_1 *power; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; power = ((struct power_sensor_1 *)sensors->power.data) + sattr->index; switch (sattr->nr) { case 0: val = get_unaligned_be16(&power->sensor_id); break; case 1: val = get_unaligned_be32(&power->accumulator) / get_unaligned_be32(&power->update_tag); val *= 1000000ULL; break; case 2: val = (u64)get_unaligned_be32(&power->update_tag) * occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->value) * 1000000ULL; break; default: return -EINVAL; } return sysfs_emit(buf, "%llu\n", val); } static u64 occ_get_powr_avg(u64 *accum, u32 *samples) { u64 divisor = get_unaligned_be32(samples); return (divisor == 0) ? 
0 : div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor); } static ssize_t occ_show_power_2(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u64 val = 0; struct power_sensor_2 *power; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; power = ((struct power_sensor_2 *)sensors->power.data) + sattr->index; switch (sattr->nr) { case 0: return sysfs_emit(buf, "%u_%u_%u\n", get_unaligned_be32(&power->sensor_id), power->function_id, power->apss_channel); case 1: val = occ_get_powr_avg(&power->accumulator, &power->update_tag); break; case 2: val = (u64)get_unaligned_be32(&power->update_tag) * occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->value) * 1000000ULL; break; default: return -EINVAL; } return sysfs_emit(buf, "%llu\n", val); } static ssize_t occ_show_power_a0(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u64 val = 0; struct power_sensor_a0 *power; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; power = ((struct power_sensor_a0 *)sensors->power.data) + sattr->index; switch (sattr->nr) { case 0: return sysfs_emit(buf, "%u_system\n", get_unaligned_be32(&power->sensor_id)); case 1: val = occ_get_powr_avg(&power->system.accumulator, &power->system.update_tag); break; case 2: val = (u64)get_unaligned_be32(&power->system.update_tag) * occ->powr_sample_time_us; break; case 3: val = get_unaligned_be16(&power->system.value) * 1000000ULL; break; case 4: return sysfs_emit(buf, "%u_proc\n", get_unaligned_be32(&power->sensor_id)); case 5: val = occ_get_powr_avg(&power->proc.accumulator, &power->proc.update_tag); break; case 6: val = (u64)get_unaligned_be32(&power->proc.update_tag) * occ->powr_sample_time_us; break; case 7: val = get_unaligned_be16(&power->proc.value) * 1000000ULL; break; case 8: return sysfs_emit(buf, "%u_vdd\n", get_unaligned_be32(&power->sensor_id)); case 9: val = occ_get_powr_avg(&power->vdd.accumulator, &power->vdd.update_tag); break; case 10: val = (u64)get_unaligned_be32(&power->vdd.update_tag) * occ->powr_sample_time_us; break; case 11: val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; break; case 12: return sysfs_emit(buf, "%u_vdn\n", get_unaligned_be32(&power->sensor_id)); case 13: val = occ_get_powr_avg(&power->vdn.accumulator, &power->vdn.update_tag); break; case 14: val = (u64)get_unaligned_be32(&power->vdn.update_tag) * occ->powr_sample_time_us; break; case 15: val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; break; default: return -EINVAL; } return sysfs_emit(buf, "%llu\n", val); } static ssize_t occ_show_caps_1_2(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u64 val = 0; struct caps_sensor_2 *caps; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; caps = ((struct caps_sensor_2 *)sensors->caps.data) + sattr->index; switch (sattr->nr) { case 0: return sysfs_emit(buf, "system\n"); case 1: val = get_unaligned_be16(&caps->cap) * 1000000ULL; break; case 2: val = get_unaligned_be16(&caps->system_power) * 1000000ULL; break; case 3: val = get_unaligned_be16(&caps->n_cap) * 1000000ULL; break; case 4: 
val = get_unaligned_be16(&caps->max) * 1000000ULL; break; case 5: val = get_unaligned_be16(&caps->min) * 1000000ULL; break; case 6: val = get_unaligned_be16(&caps->user) * 1000000ULL; break; case 7: if (occ->sensors.caps.version == 1) return -EINVAL; val = caps->user_source; break; default: return -EINVAL; } return sysfs_emit(buf, "%llu\n", val); } static ssize_t occ_show_caps_3(struct device *dev, struct device_attribute *attr, char *buf) { int rc; u64 val = 0; struct caps_sensor_3 *caps; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; caps = ((struct caps_sensor_3 *)sensors->caps.data) + sattr->index; switch (sattr->nr) { case 0: return sysfs_emit(buf, "system\n"); case 1: val = get_unaligned_be16(&caps->cap) * 1000000ULL; break; case 2: val = get_unaligned_be16(&caps->system_power) * 1000000ULL; break; case 3: val = get_unaligned_be16(&caps->n_cap) * 1000000ULL; break; case 4: val = get_unaligned_be16(&caps->max) * 1000000ULL; break; case 5: val = get_unaligned_be16(&caps->hard_min) * 1000000ULL; break; case 6: val = get_unaligned_be16(&caps->user) * 1000000ULL; break; case 7: val = caps->user_source; break; case 8: val = get_unaligned_be16(&caps->soft_min) * 1000000ULL; break; default: return -EINVAL; } return sysfs_emit(buf, "%llu\n", val); } static ssize_t occ_store_caps_user(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; u16 user_power_cap; unsigned long long value; struct occ *occ = dev_get_drvdata(dev); rc = kstrtoull(buf, 0, &value); if (rc) return rc; user_power_cap = div64_u64(value, 1000000ULL); /* microwatt to watt */ rc = occ_set_user_power_cap(occ, user_power_cap); if (rc) return rc; return count; } static ssize_t occ_show_extended(struct device *dev, struct device_attribute *attr, char *buf) { int rc; struct extended_sensor *extn; struct occ *occ = dev_get_drvdata(dev); struct occ_sensors *sensors = &occ->sensors; struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); rc = occ_update_response(occ); if (rc) return rc; extn = ((struct extended_sensor *)sensors->extended.data) + sattr->index; switch (sattr->nr) { case 0: if (extn->flags & EXTN_FLAG_SENSOR_ID) { rc = sysfs_emit(buf, "%u", get_unaligned_be32(&extn->sensor_id)); } else { rc = sysfs_emit(buf, "%4phN\n", extn->name); } break; case 1: rc = sysfs_emit(buf, "%02x\n", extn->flags); break; case 2: rc = sysfs_emit(buf, "%6phN\n", extn->data); break; default: return -EINVAL; } return rc; } /* * Some helper macros to make it easier to define an occ_attribute. Since these * are dynamically allocated, we shouldn't use the existing kernel macros which * stringify the name argument. */ #define ATTR_OCC(_name, _mode, _show, _store) { \ .attr = { \ .name = _name, \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode), \ }, \ .show = _show, \ .store = _store, \ } #define SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index) { \ .dev_attr = ATTR_OCC(_name, _mode, _show, _store), \ .index = _index, \ .nr = _nr, \ } #define OCC_INIT_ATTR(_name, _mode, _show, _store, _nr, _index) \ ((struct sensor_device_attribute_2) \ SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index)) /* * Allocate and instatiate sensor_device_attribute_2s. It's most efficient to * use our own instead of the built-in hwmon attribute types. 
*/ static int occ_setup_sensor_attrs(struct occ *occ) { unsigned int i, s, num_attrs = 0; struct device *dev = occ->bus_dev; struct occ_sensors *sensors = &occ->sensors; struct occ_attribute *attr; struct temp_sensor_2 *temp; ssize_t (*show_temp)(struct device *, struct device_attribute *, char *) = occ_show_temp_1; ssize_t (*show_freq)(struct device *, struct device_attribute *, char *) = occ_show_freq_1; ssize_t (*show_power)(struct device *, struct device_attribute *, char *) = occ_show_power_1; ssize_t (*show_caps)(struct device *, struct device_attribute *, char *) = occ_show_caps_1_2; switch (sensors->temp.version) { case 1: num_attrs += (sensors->temp.num_sensors * 2); break; case 2: num_attrs += (sensors->temp.num_sensors * 4); show_temp = occ_show_temp_2; break; case 0x10: num_attrs += (sensors->temp.num_sensors * 5); show_temp = occ_show_temp_10; break; default: sensors->temp.num_sensors = 0; } switch (sensors->freq.version) { case 2: show_freq = occ_show_freq_2; fallthrough; case 1: num_attrs += (sensors->freq.num_sensors * 2); break; default: sensors->freq.num_sensors = 0; } switch (sensors->power.version) { case 2: show_power = occ_show_power_2; fallthrough; case 1: num_attrs += (sensors->power.num_sensors * 4); break; case 0xA0: num_attrs += (sensors->power.num_sensors * 16); show_power = occ_show_power_a0; break; default: sensors->power.num_sensors = 0; } switch (sensors->caps.version) { case 1: num_attrs += (sensors->caps.num_sensors * 7); break; case 2: num_attrs += (sensors->caps.num_sensors * 8); break; case 3: show_caps = occ_show_caps_3; num_attrs += (sensors->caps.num_sensors * 9); break; default: sensors->caps.num_sensors = 0; } switch (sensors->extended.version) { case 1: num_attrs += (sensors->extended.num_sensors * 3); break; default: sensors->extended.num_sensors = 0; } occ->attrs = devm_kzalloc(dev, sizeof(*occ->attrs) * num_attrs, GFP_KERNEL); if (!occ->attrs) return -ENOMEM; /* null-terminated list */ occ->group.attrs = devm_kzalloc(dev, sizeof(*occ->group.attrs) * num_attrs + 1, GFP_KERNEL); if (!occ->group.attrs) return -ENOMEM; attr = occ->attrs; for (i = 0; i < sensors->temp.num_sensors; ++i) { s = i + 1; temp = ((struct temp_sensor_2 *)sensors->temp.data) + i; snprintf(attr->name, sizeof(attr->name), "temp%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, 0, i); attr++; if (sensors->temp.version == 2 && temp->fru_type == OCC_FRU_TYPE_VRM) { snprintf(attr->name, sizeof(attr->name), "temp%d_alarm", s); } else { snprintf(attr->name, sizeof(attr->name), "temp%d_input", s); } attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, 1, i); attr++; if (sensors->temp.version > 1) { snprintf(attr->name, sizeof(attr->name), "temp%d_fru_type", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, 2, i); attr++; snprintf(attr->name, sizeof(attr->name), "temp%d_fault", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, 3, i); attr++; if (sensors->temp.version == 0x10) { snprintf(attr->name, sizeof(attr->name), "temp%d_max", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL, 4, i); attr++; } } } for (i = 0; i < sensors->freq.num_sensors; ++i) { s = i + 1; snprintf(attr->name, sizeof(attr->name), "freq%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL, 0, i); attr++; snprintf(attr->name, sizeof(attr->name), "freq%d_input", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL, 1, i); attr++; } if (sensors->power.version == 0xA0) { /* * Special case 
for many-attribute power sensor. Split it into * a sensor number per power type, emulating several sensors. */ for (i = 0; i < sensors->power.num_sensors; ++i) { unsigned int j; unsigned int nr = 0; s = (i * 4) + 1; for (j = 0; j < 4; ++j) { snprintf(attr->name, sizeof(attr->name), "power%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, nr++, i); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_average", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, nr++, i); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_average_interval", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, nr++, i); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_input", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, nr++, i); attr++; s++; } } s = (sensors->power.num_sensors * 4) + 1; } else { for (i = 0; i < sensors->power.num_sensors; ++i) { s = i + 1; snprintf(attr->name, sizeof(attr->name), "power%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, 0, i); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_average", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, 1, i); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_average_interval", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, 2, i); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_input", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_power, NULL, 3, i); attr++; } s = sensors->power.num_sensors + 1; } if (sensors->caps.num_sensors >= 1) { snprintf(attr->name, sizeof(attr->name), "power%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 0, 0); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_cap", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 1, 0); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_input", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 2, 0); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_cap_not_redundant", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 3, 0); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_cap_max", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 4, 0); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_cap_min", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 5, 0); attr++; snprintf(attr->name, sizeof(attr->name), "power%d_cap_user", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0644, show_caps, occ_store_caps_user, 6, 0); attr++; if (sensors->caps.version > 1) { snprintf(attr->name, sizeof(attr->name), "power%d_cap_user_source", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 7, 0); attr++; if (sensors->caps.version > 2) { snprintf(attr->name, sizeof(attr->name), "power%d_cap_min_soft", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 8, 0); attr++; } } } for (i = 0; i < sensors->extended.num_sensors; ++i) { s = i + 1; snprintf(attr->name, sizeof(attr->name), "extn%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, occ_show_extended, NULL, 0, i); attr++; snprintf(attr->name, sizeof(attr->name), "extn%d_flags", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, occ_show_extended, NULL, 1, i); attr++; snprintf(attr->name, sizeof(attr->name), "extn%d_input", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, occ_show_extended, NULL, 2, i); attr++; } /* put the sensors in the group */ 
for (i = 0; i < num_attrs; ++i) { sysfs_attr_init(&occ->attrs[i].sensor.dev_attr.attr); occ->group.attrs[i] = &occ->attrs[i].sensor.dev_attr.attr; } return 0; } /* only need to do this once at startup, as OCC won't change sensors on us */ static void occ_parse_poll_response(struct occ *occ) { unsigned int i, old_offset, offset = 0, size = 0; struct occ_sensor *sensor; struct occ_sensors *sensors = &occ->sensors; struct occ_response *resp = &occ->resp; struct occ_poll_response *poll = (struct occ_poll_response *)&resp->data[0]; struct occ_poll_response_header *header = &poll->header; struct occ_sensor_data_block *block = &poll->block; dev_info(occ->bus_dev, "OCC found, code level: %.16s\n", header->occ_code_level); for (i = 0; i < header->num_sensor_data_blocks; ++i) { block = (struct occ_sensor_data_block *)((u8 *)block + offset); old_offset = offset; offset = (block->header.num_sensors * block->header.sensor_length) + sizeof(block->header); size += offset; /* validate all the length/size fields */ if ((size + sizeof(*header)) >= OCC_RESP_DATA_BYTES) { dev_warn(occ->bus_dev, "exceeded response buffer\n"); return; } dev_dbg(occ->bus_dev, " %04x..%04x: %.4s (%d sensors)\n", old_offset, offset - 1, block->header.eye_catcher, block->header.num_sensors); /* match sensor block type */ if (strncmp(block->header.eye_catcher, "TEMP", 4) == 0) sensor = &sensors->temp; else if (strncmp(block->header.eye_catcher, "FREQ", 4) == 0) sensor = &sensors->freq; else if (strncmp(block->header.eye_catcher, "POWR", 4) == 0) sensor = &sensors->power; else if (strncmp(block->header.eye_catcher, "CAPS", 4) == 0) sensor = &sensors->caps; else if (strncmp(block->header.eye_catcher, "EXTN", 4) == 0) sensor = &sensors->extended; else { dev_warn(occ->bus_dev, "sensor not supported %.4s\n", block->header.eye_catcher); continue; } sensor->num_sensors = block->header.num_sensors; sensor->version = block->header.sensor_format; sensor->data = &block->data; } dev_dbg(occ->bus_dev, "Max resp size: %u+%zd=%zd\n", size, sizeof(*header), size + sizeof(*header)); } int occ_active(struct occ *occ, bool active) { int rc = mutex_lock_interruptible(&occ->lock); if (rc) return rc; if (active) { if (occ->active) { rc = -EALREADY; goto unlock; } occ->error_count = 0; occ->last_safe = 0; rc = occ_poll(occ); if (rc < 0) { dev_err(occ->bus_dev, "failed to get OCC poll response=%02x: %d\n", occ->resp.return_status, rc); goto unlock; } occ->active = true; occ->next_update = jiffies + OCC_UPDATE_FREQUENCY; occ_parse_poll_response(occ); rc = occ_setup_sensor_attrs(occ); if (rc) { dev_err(occ->bus_dev, "failed to setup sensor attrs: %d\n", rc); goto unlock; } occ->hwmon = hwmon_device_register_with_groups(occ->bus_dev, "occ", occ, occ->groups); if (IS_ERR(occ->hwmon)) { rc = PTR_ERR(occ->hwmon); occ->hwmon = NULL; dev_err(occ->bus_dev, "failed to register hwmon device: %d\n", rc); goto unlock; } } else { if (!occ->active) { rc = -EALREADY; goto unlock; } if (occ->hwmon) hwmon_device_unregister(occ->hwmon); occ->active = false; occ->hwmon = NULL; } unlock: mutex_unlock(&occ->lock); return rc; } int occ_setup(struct occ *occ) { int rc; mutex_init(&occ->lock); occ->groups[0] = &occ->group; rc = occ_setup_sysfs(occ); if (rc) { dev_err(occ->bus_dev, "failed to setup sysfs: %d\n", rc); return rc; } if (!device_property_read_bool(occ->bus_dev, "ibm,no-poll-on-init")) { rc = occ_active(occ, true); if (rc) occ_shutdown_sysfs(occ); } return rc; } EXPORT_SYMBOL_GPL(occ_setup); void occ_shutdown(struct occ *occ) { mutex_lock(&occ->lock); 
occ_shutdown_sysfs(occ); if (occ->hwmon) hwmon_device_unregister(occ->hwmon); occ->hwmon = NULL; mutex_unlock(&occ->lock); } EXPORT_SYMBOL_GPL(occ_shutdown); MODULE_AUTHOR("Eddie James <[email protected]>"); MODULE_DESCRIPTION("Common OCC hwmon code"); MODULE_LICENSE("GPL");
linux-master
drivers/hwmon/occ/common.c
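occ_active() in drivers/hwmon/occ/common.c above registers a hwmon device named "occ", and occ_setup_sensor_attrs() generates its attribute files (temp%d_label/temp%d_input, freq%d_*, power%d_*, ...). A minimal userspace sketch of reading them, assuming only the standard /sys/class/hwmon layout; the hwmon index varies per system, the attribute set differs for some sensor versions (e.g. temp%d_alarm instead of temp%d_input for VRM FRUs), and all helper names here are illustrative:

/* Build: cc -o occ_read occ_read.c  (userspace, not kernel code) */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

#define HWMON_ROOT "/sys/class/hwmon"

/* Read one sysfs attribute into buf, stripping the trailing newline. */
static int read_attr(const char *dir, const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s/%s", HWMON_ROOT, dir, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	DIR *d = opendir(HWMON_ROOT);
	struct dirent *de;
	char name[64], label[64], val[64];
	int i;

	if (!d)
		return 1;
	while ((de = readdir(d))) {
		if (read_attr(de->d_name, "name", name, sizeof(name)))
			continue;
		if (strcmp(name, "occ"))	/* name passed to hwmon_device_register_with_groups() */
			continue;
		/* temp%d_label / temp%d_input as built by occ_setup_sensor_attrs() */
		for (i = 1; ; i++) {
			char attr[32];

			snprintf(attr, sizeof(attr), "temp%d_label", i);
			if (read_attr(de->d_name, attr, label, sizeof(label)))
				break;
			snprintf(attr, sizeof(attr), "temp%d_input", i);
			if (read_attr(de->d_name, attr, val, sizeof(val)))
				continue;	/* VRM sensors expose temp%d_alarm instead */
			/* hwmon sysfs convention: temperatures in millidegrees C */
			printf("%s: %s millidegrees C\n", label, val);
		}
	}
	closedir(d);
	return 0;
}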
// SPDX-License-Identifier: GPL-2.0+ // Copyright IBM Corp 2019 #include <linux/bitops.h> #include <linux/device.h> #include <linux/export.h> #include <linux/hwmon-sysfs.h> #include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/sysfs.h> #include "common.h" /* OCC status register */ #define OCC_STAT_MASTER BIT(7) /* OCC extended status register */ #define OCC_EXT_STAT_DVFS_OT BIT(7) #define OCC_EXT_STAT_DVFS_POWER BIT(6) #define OCC_EXT_STAT_MEM_THROTTLE BIT(5) #define OCC_EXT_STAT_QUICK_DROP BIT(4) #define OCC_EXT_STAT_DVFS_VDD BIT(3) #define OCC_EXT_STAT_GPU_THROTTLE GENMASK(2, 0) static ssize_t occ_active_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; bool active; struct occ *occ = dev_get_drvdata(dev); rc = kstrtobool(buf, &active); if (rc) return rc; rc = occ_active(occ, active); if (rc) return rc; return count; } static ssize_t occ_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) { int rc; int val = 0; struct occ *occ = dev_get_drvdata(dev); struct occ_poll_response_header *header; struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); if (occ->active) { rc = occ_update_response(occ); if (rc) return rc; header = (struct occ_poll_response_header *)occ->resp.data; switch (sattr->index) { case 0: val = !!(header->status & OCC_STAT_MASTER); break; case 1: val = 1; break; case 2: val = !!(header->ext_status & OCC_EXT_STAT_DVFS_OT); break; case 3: val = !!(header->ext_status & OCC_EXT_STAT_DVFS_POWER); break; case 4: val = !!(header->ext_status & OCC_EXT_STAT_MEM_THROTTLE); break; case 5: val = !!(header->ext_status & OCC_EXT_STAT_QUICK_DROP); break; case 6: val = header->occ_state; break; case 7: if (header->status & OCC_STAT_MASTER) val = hweight8(header->occs_present); else val = 1; break; case 8: val = header->ips_status; break; case 9: val = header->mode; break; case 10: val = !!(header->ext_status & OCC_EXT_STAT_DVFS_VDD); break; case 11: val = header->ext_status & OCC_EXT_STAT_GPU_THROTTLE; break; default: return -EINVAL; } } else { if (sattr->index == 1) val = 0; else if (sattr->index <= 11) val = -ENODATA; else return -EINVAL; } return sysfs_emit(buf, "%d\n", val); } static ssize_t occ_error_show(struct device *dev, struct device_attribute *attr, char *buf) { struct occ *occ = dev_get_drvdata(dev); occ_update_response(occ); return sysfs_emit(buf, "%d\n", occ->error); } static SENSOR_DEVICE_ATTR(occ_master, 0444, occ_sysfs_show, NULL, 0); static SENSOR_DEVICE_ATTR(occ_active, 0644, occ_sysfs_show, occ_active_store, 1); static SENSOR_DEVICE_ATTR(occ_dvfs_overtemp, 0444, occ_sysfs_show, NULL, 2); static SENSOR_DEVICE_ATTR(occ_dvfs_power, 0444, occ_sysfs_show, NULL, 3); static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4); static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5); static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6); static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7); static SENSOR_DEVICE_ATTR(occ_ips_status, 0444, occ_sysfs_show, NULL, 8); static SENSOR_DEVICE_ATTR(occ_mode, 0444, occ_sysfs_show, NULL, 9); static SENSOR_DEVICE_ATTR(occ_dvfs_vdd, 0444, occ_sysfs_show, NULL, 10); static SENSOR_DEVICE_ATTR(occ_gpu_throttle, 0444, occ_sysfs_show, NULL, 11); static DEVICE_ATTR_RO(occ_error); static struct attribute *occ_attributes[] = { &sensor_dev_attr_occ_master.dev_attr.attr, &sensor_dev_attr_occ_active.dev_attr.attr, &sensor_dev_attr_occ_dvfs_overtemp.dev_attr.attr, 
&sensor_dev_attr_occ_dvfs_power.dev_attr.attr, &sensor_dev_attr_occ_mem_throttle.dev_attr.attr, &sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr, &sensor_dev_attr_occ_state.dev_attr.attr, &sensor_dev_attr_occs_present.dev_attr.attr, &sensor_dev_attr_occ_ips_status.dev_attr.attr, &sensor_dev_attr_occ_mode.dev_attr.attr, &sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr, &sensor_dev_attr_occ_gpu_throttle.dev_attr.attr, &dev_attr_occ_error.attr, NULL }; static const struct attribute_group occ_sysfs = { .attrs = occ_attributes, }; void occ_sysfs_poll_done(struct occ *occ) { const char *name; struct occ_poll_response_header *header = (struct occ_poll_response_header *)occ->resp.data; /* * On the first poll response, we haven't yet created the sysfs * attributes, so don't make any notify calls. */ if (!occ->active) goto done; if ((header->status & OCC_STAT_MASTER) != (occ->prev_stat & OCC_STAT_MASTER)) { name = sensor_dev_attr_occ_master.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->ext_status & OCC_EXT_STAT_DVFS_OT) != (occ->prev_ext_stat & OCC_EXT_STAT_DVFS_OT)) { name = sensor_dev_attr_occ_dvfs_overtemp.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->ext_status & OCC_EXT_STAT_DVFS_POWER) != (occ->prev_ext_stat & OCC_EXT_STAT_DVFS_POWER)) { name = sensor_dev_attr_occ_dvfs_power.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->ext_status & OCC_EXT_STAT_MEM_THROTTLE) != (occ->prev_ext_stat & OCC_EXT_STAT_MEM_THROTTLE)) { name = sensor_dev_attr_occ_mem_throttle.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->ext_status & OCC_EXT_STAT_QUICK_DROP) != (occ->prev_ext_stat & OCC_EXT_STAT_QUICK_DROP)) { name = sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->ext_status & OCC_EXT_STAT_DVFS_VDD) != (occ->prev_ext_stat & OCC_EXT_STAT_DVFS_VDD)) { name = sensor_dev_attr_occ_dvfs_vdd.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->ext_status & OCC_EXT_STAT_GPU_THROTTLE) != (occ->prev_ext_stat & OCC_EXT_STAT_GPU_THROTTLE)) { name = sensor_dev_attr_occ_gpu_throttle.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if ((header->status & OCC_STAT_MASTER) && header->occs_present != occ->prev_occs_present) { name = sensor_dev_attr_occs_present.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if (header->ips_status != occ->prev_ips_status) { name = sensor_dev_attr_occ_ips_status.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if (header->mode != occ->prev_mode) { name = sensor_dev_attr_occ_mode.dev_attr.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } if (occ->error && occ->error != occ->prev_error) { name = dev_attr_occ_error.attr.name; sysfs_notify(&occ->bus_dev->kobj, NULL, name); } /* no notifications for OCC state; doesn't indicate error condition */ done: occ->prev_error = occ->error; occ->prev_stat = header->status; occ->prev_ext_stat = header->ext_status; occ->prev_occs_present = header->occs_present; occ->prev_ips_status = header->ips_status; occ->prev_mode = header->mode; } int occ_setup_sysfs(struct occ *occ) { return sysfs_create_group(&occ->bus_dev->kobj, &occ_sysfs); } void occ_shutdown_sysfs(struct occ *occ) { sysfs_remove_group(&occ->bus_dev->kobj, &occ_sysfs); }
linux-master
drivers/hwmon/occ/sysfs.c
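occ_sysfs_poll_done() in drivers/hwmon/occ/sysfs.c above calls sysfs_notify() whenever one of the status attributes it created on the bus device changes (occ_master, occ_dvfs_overtemp, occ_error, ...), so userspace can block on them with poll(). A small sketch of that standard sysfs polling pattern; the device path in the comment is only an example and differs between the FSI (P9) and I2C (P8) back ends:

/* Build: cc -o occ_watch occ_watch.c  (userspace, not kernel code) */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* e.g. /sys/bus/platform/devices/occ-hwmon.1/occ_dvfs_overtemp
	 * (illustrative path; locate the real device under /sys/bus/...) */
	const char *path = argv[1];
	char buf[16];
	struct pollfd pfd;
	ssize_t n;

	if (argc < 2) {
		fprintf(stderr, "usage: occ_watch <sysfs attribute>\n");
		return 1;
	}
	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI | POLLERR;

	for (;;) {
		/* reading the attribute arms the notification... */
		n = pread(pfd.fd, buf, sizeof(buf) - 1, 0);
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("%s = %s", path, buf);
		/* ...then sysfs_notify() from occ_sysfs_poll_done() wakes us */
		if (poll(&pfd, 1, -1) < 0)
			break;
	}
	close(pfd.fd);
	return 0;
}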
// SPDX-License-Identifier: GPL-2.0+ // Copyright IBM Corp 2019 #include <linux/device.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/fsi-occ.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/string.h> #include <linux/sysfs.h> #include "common.h" #define OCC_CHECKSUM_RETRIES 3 struct p9_sbe_occ { struct occ occ; bool sbe_error; void *ffdc; size_t ffdc_len; size_t ffdc_size; struct mutex sbe_error_lock; /* lock access to ffdc data */ struct device *sbe; }; #define to_p9_sbe_occ(x) container_of((x), struct p9_sbe_occ, occ) static ssize_t ffdc_read(struct file *filp, struct kobject *kobj, struct bin_attribute *battr, char *buf, loff_t pos, size_t count) { ssize_t rc = 0; struct occ *occ = dev_get_drvdata(kobj_to_dev(kobj)); struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ); mutex_lock(&ctx->sbe_error_lock); if (ctx->sbe_error) { rc = memory_read_from_buffer(buf, count, &pos, ctx->ffdc, ctx->ffdc_len); if (pos >= ctx->ffdc_len) ctx->sbe_error = false; } mutex_unlock(&ctx->sbe_error_lock); return rc; } static BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4); static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp, size_t resp_len) { bool notify = false; mutex_lock(&ctx->sbe_error_lock); if (!ctx->sbe_error) { if (resp_len > ctx->ffdc_size) { kvfree(ctx->ffdc); ctx->ffdc = kvmalloc(resp_len, GFP_KERNEL); if (!ctx->ffdc) { ctx->ffdc_len = 0; ctx->ffdc_size = 0; goto done; } ctx->ffdc_size = resp_len; } notify = true; ctx->sbe_error = true; ctx->ffdc_len = resp_len; memcpy(ctx->ffdc, resp, resp_len); } done: mutex_unlock(&ctx->sbe_error_lock); return notify; } static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len, void *resp, size_t resp_len) { size_t original_resp_len = resp_len; struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ); int rc, i; for (i = 0; i < OCC_CHECKSUM_RETRIES; ++i) { rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len); if (rc >= 0) break; if (resp_len) { if (p9_sbe_occ_save_ffdc(ctx, resp, resp_len)) sysfs_notify(&occ->bus_dev->kobj, NULL, bin_attr_ffdc.attr.name); return rc; } if (rc != -EBADE) return rc; resp_len = original_resp_len; } switch (((struct occ_response *)resp)->return_status) { case OCC_RESP_CMD_IN_PRG: rc = -ETIMEDOUT; break; case OCC_RESP_SUCCESS: rc = 0; break; case OCC_RESP_CMD_INVAL: case OCC_RESP_CMD_LEN_INVAL: case OCC_RESP_DATA_INVAL: case OCC_RESP_CHKSUM_ERR: rc = -EINVAL; break; case OCC_RESP_INT_ERR: case OCC_RESP_BAD_STATE: case OCC_RESP_CRIT_EXCEPT: case OCC_RESP_CRIT_INIT: case OCC_RESP_CRIT_WATCHDOG: case OCC_RESP_CRIT_OCB: case OCC_RESP_CRIT_HW: rc = -EREMOTEIO; break; default: rc = -EPROTO; } return rc; } static int p9_sbe_occ_probe(struct platform_device *pdev) { int rc; struct occ *occ; struct p9_sbe_occ *ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mutex_init(&ctx->sbe_error_lock); ctx->sbe = pdev->dev.parent; occ = &ctx->occ; occ->bus_dev = &pdev->dev; platform_set_drvdata(pdev, occ); occ->powr_sample_time_us = 500; occ->poll_cmd_data = 0x20; /* P9 OCC poll data */ occ->send_cmd = p9_sbe_occ_send_cmd; rc = occ_setup(occ); if (rc == -ESHUTDOWN) rc = -ENODEV; /* Host is shutdown, don't spew errors */ if (!rc) { rc = device_create_bin_file(occ->bus_dev, &bin_attr_ffdc); if (rc) { dev_warn(occ->bus_dev, "failed to create SBE error ffdc file\n"); rc = 0; } } return rc; } static int p9_sbe_occ_remove(struct platform_device *pdev) { struct occ *occ = 
platform_get_drvdata(pdev); struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ); device_remove_bin_file(occ->bus_dev, &bin_attr_ffdc); ctx->sbe = NULL; occ_shutdown(occ); kvfree(ctx->ffdc); return 0; } static const struct of_device_id p9_sbe_occ_of_match[] = { { .compatible = "ibm,p9-occ-hwmon" }, { .compatible = "ibm,p10-occ-hwmon" }, {} }; MODULE_DEVICE_TABLE(of, p9_sbe_occ_of_match); static struct platform_driver p9_sbe_occ_driver = { .driver = { .name = "occ-hwmon", .of_match_table = p9_sbe_occ_of_match, }, .probe = p9_sbe_occ_probe, .remove = p9_sbe_occ_remove, }; module_platform_driver(p9_sbe_occ_driver); MODULE_AUTHOR("Eddie James <[email protected]>"); MODULE_DESCRIPTION("BMC P9 OCC hwmon driver"); MODULE_LICENSE("GPL");
linux-master
drivers/hwmon/occ/p9_sbe.c
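When fsi_occ_submit() fails with response data, p9_sbe_occ_save_ffdc() in drivers/hwmon/occ/p9_sbe.c above latches the raw SBE FFDC and notifies the "ffdc" binary attribute. A minimal sketch of draining it from userspace; the sysfs path is an assumption (the attribute lives on the parent platform device), and a real tool would first wait for the notification with poll() as in the previous sketch:

/* Build: cc -o occ_ffdc occ_ffdc.c  (userspace, not kernel code) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* e.g. /sys/bus/platform/devices/occ-hwmon.1/ffdc (illustrative path) */
	const char *path = argc > 1 ? argv[1] : "ffdc";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	/*
	 * ffdc_read() only returns data while an SBE error is latched, and a
	 * read that reaches the end of the captured buffer clears the latch,
	 * so consume it in one pass and store it somewhere safe.
	 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}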
// SPDX-License-Identifier: GPL-2.0+ // Copyright IBM Corp 2019 #include <linux/device.h> #include <linux/errno.h> #include <linux/fsi-occ.h> #include <linux/i2c.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/sched.h> #include <asm/unaligned.h> #include "common.h" #define OCC_TIMEOUT_MS 1000 #define OCC_CMD_IN_PRG_WAIT_MS 50 /* OCB (on-chip control bridge - interface to OCC) registers */ #define OCB_DATA1 0x6B035 #define OCB_ADDR 0x6B070 #define OCB_DATA3 0x6B075 /* OCC SRAM address space */ #define OCC_SRAM_ADDR_CMD 0xFFFF6000 #define OCC_SRAM_ADDR_RESP 0xFFFF7000 #define OCC_DATA_ATTN 0x20010000 struct p8_i2c_occ { struct occ occ; struct i2c_client *client; }; #define to_p8_i2c_occ(x) container_of((x), struct p8_i2c_occ, occ) static int p8_i2c_occ_getscom(struct i2c_client *client, u32 address, u8 *data) { ssize_t rc; __be64 buf; struct i2c_msg msgs[2]; /* p8 i2c slave requires shift */ address <<= 1; msgs[0].addr = client->addr; msgs[0].flags = client->flags & I2C_M_TEN; msgs[0].len = sizeof(u32); /* address is a scom address; bus-endian */ msgs[0].buf = (char *)&address; /* data from OCC is big-endian */ msgs[1].addr = client->addr; msgs[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; msgs[1].len = sizeof(u64); msgs[1].buf = (char *)&buf; rc = i2c_transfer(client->adapter, msgs, 2); if (rc < 0) return rc; *(u64 *)data = be64_to_cpu(buf); return 0; } static int p8_i2c_occ_putscom(struct i2c_client *client, u32 address, u8 *data) { u32 buf[3]; ssize_t rc; /* p8 i2c slave requires shift */ address <<= 1; /* address is bus-endian; data passed through from user as-is */ buf[0] = address; memcpy(&buf[1], &data[4], sizeof(u32)); memcpy(&buf[2], data, sizeof(u32)); rc = i2c_master_send(client, (const char *)buf, sizeof(buf)); if (rc < 0) return rc; else if (rc != sizeof(buf)) return -EIO; return 0; } static int p8_i2c_occ_putscom_u32(struct i2c_client *client, u32 address, u32 data0, u32 data1) { u8 buf[8]; memcpy(buf, &data0, 4); memcpy(buf + 4, &data1, 4); return p8_i2c_occ_putscom(client, address, buf); } static int p8_i2c_occ_putscom_be(struct i2c_client *client, u32 address, u8 *data, size_t len) { __be32 data0 = 0, data1 = 0; memcpy(&data0, data, min_t(size_t, len, 4)); if (len > 4) { len -= 4; memcpy(&data1, data + 4, min_t(size_t, len, 4)); } return p8_i2c_occ_putscom_u32(client, address, be32_to_cpu(data0), be32_to_cpu(data1)); } static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len, void *resp, size_t resp_len) { int i, rc; unsigned long start; u16 data_length; const unsigned long timeout = msecs_to_jiffies(OCC_TIMEOUT_MS); const long wait_time = msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS); struct p8_i2c_occ *ctx = to_p8_i2c_occ(occ); struct i2c_client *client = ctx->client; struct occ_response *or = (struct occ_response *)resp; start = jiffies; /* set sram address for command */ rc = p8_i2c_occ_putscom_u32(client, OCB_ADDR, OCC_SRAM_ADDR_CMD, 0); if (rc) return rc; /* write command (expected to already be BE), we need bus-endian... 
*/ rc = p8_i2c_occ_putscom_be(client, OCB_DATA3, cmd, len); if (rc) return rc; /* trigger OCC attention */ rc = p8_i2c_occ_putscom_u32(client, OCB_DATA1, OCC_DATA_ATTN, 0); if (rc) return rc; do { /* set sram address for response */ rc = p8_i2c_occ_putscom_u32(client, OCB_ADDR, OCC_SRAM_ADDR_RESP, 0); if (rc) return rc; rc = p8_i2c_occ_getscom(client, OCB_DATA3, (u8 *)resp); if (rc) return rc; /* wait for OCC */ if (or->return_status == OCC_RESP_CMD_IN_PRG) { rc = -EALREADY; if (time_after(jiffies, start + timeout)) break; set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(wait_time); } } while (rc); /* check the OCC response */ switch (or->return_status) { case OCC_RESP_CMD_IN_PRG: rc = -ETIMEDOUT; break; case OCC_RESP_SUCCESS: rc = 0; break; case OCC_RESP_CMD_INVAL: case OCC_RESP_CMD_LEN_INVAL: case OCC_RESP_DATA_INVAL: case OCC_RESP_CHKSUM_ERR: rc = -EINVAL; break; case OCC_RESP_INT_ERR: case OCC_RESP_BAD_STATE: case OCC_RESP_CRIT_EXCEPT: case OCC_RESP_CRIT_INIT: case OCC_RESP_CRIT_WATCHDOG: case OCC_RESP_CRIT_OCB: case OCC_RESP_CRIT_HW: rc = -EREMOTEIO; break; default: rc = -EPROTO; } if (rc < 0) return rc; data_length = get_unaligned_be16(&or->data_length); if ((data_length + 7) > resp_len) return -EMSGSIZE; /* fetch the rest of the response data */ for (i = 8; i < data_length + 7; i += 8) { rc = p8_i2c_occ_getscom(client, OCB_DATA3, ((u8 *)resp) + i); if (rc) return rc; } return 0; } static int p8_i2c_occ_probe(struct i2c_client *client) { struct occ *occ; struct p8_i2c_occ *ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->client = client; occ = &ctx->occ; occ->bus_dev = &client->dev; dev_set_drvdata(&client->dev, occ); occ->powr_sample_time_us = 250; occ->poll_cmd_data = 0x10; /* P8 OCC poll data */ occ->send_cmd = p8_i2c_occ_send_cmd; return occ_setup(occ); } static void p8_i2c_occ_remove(struct i2c_client *client) { struct occ *occ = dev_get_drvdata(&client->dev); occ_shutdown(occ); } static const struct of_device_id p8_i2c_occ_of_match[] = { { .compatible = "ibm,p8-occ-hwmon" }, {} }; MODULE_DEVICE_TABLE(of, p8_i2c_occ_of_match); static struct i2c_driver p8_i2c_occ_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "occ-hwmon", .of_match_table = p8_i2c_occ_of_match, }, .probe = p8_i2c_occ_probe, .remove = p8_i2c_occ_remove, }; module_i2c_driver(p8_i2c_occ_driver); MODULE_AUTHOR("Eddie James <[email protected]>"); MODULE_DESCRIPTION("BMC P8 OCC hwmon driver"); MODULE_LICENSE("GPL");
linux-master
drivers/hwmon/occ/p8_i2c.c
// SPDX-License-Identifier: GPL-2.0-only /* * SCSI RDMA (SRP) transport class * * Copyright (C) 2007 FUJITA Tomonori <[email protected]> */ #include <linux/init.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_srp.h> #include "scsi_priv.h" struct srp_host_attrs { atomic_t next_port_id; }; #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) #define SRP_HOST_ATTRS 0 #define SRP_RPORT_ATTRS 8 struct srp_internal { struct scsi_transport_template t; struct srp_function_template *f; struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1]; struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1]; struct transport_container rport_attr_cont; }; static int scsi_is_srp_rport(const struct device *dev); #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) #define dev_to_rport(d) container_of(d, struct srp_rport, dev) #define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) { return dev_to_shost(r->dev.parent); } static int find_child_rport(struct device *dev, void *data) { struct device **child = data; if (scsi_is_srp_rport(dev)) { WARN_ON_ONCE(*child); *child = dev; } return 0; } static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) { struct device *child = NULL; WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, find_child_rport) < 0); return child ? dev_to_rport(child) : NULL; } /** * srp_tmo_valid() - check timeout combination validity * @reconnect_delay: Reconnect delay in seconds. * @fast_io_fail_tmo: Fast I/O fail timeout in seconds. * @dev_loss_tmo: Device loss timeout in seconds. * * The combination of the timeout parameters must be such that SCSI commands * are finished in a reasonable time. Hence do not allow the fast I/O fail * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to * exceed that limit if failing I/O fast has been disabled. Furthermore, these * parameters must be such that multipath can detect failed paths timely. * Hence do not allow all three parameters to be disabled simultaneously. 
*/ int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo) { if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) return -EINVAL; if (reconnect_delay == 0) return -EINVAL; if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) return -EINVAL; if (fast_io_fail_tmo < 0 && dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) return -EINVAL; if (dev_loss_tmo >= LONG_MAX / HZ) return -EINVAL; if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && fast_io_fail_tmo >= dev_loss_tmo) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(srp_tmo_valid); static int srp_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); atomic_set(&srp_host->next_port_id, 0); return 0; } static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup, NULL, NULL); static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports", NULL, NULL, NULL); static ssize_t show_srp_rport_id(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); return sprintf(buf, "%16phC\n", rport->port_id); } static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL); static const struct { u32 value; char *name; } srp_rport_role_names[] = { {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"}, {SRP_RPORT_ROLE_TARGET, "SRP Target"}, }; static ssize_t show_srp_rport_roles(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++) if (srp_rport_role_names[i].value == rport->roles) { name = srp_rport_role_names[i].name; break; } return sprintf(buf, "%s\n", name ? : "unknown"); } static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL); static ssize_t store_srp_rport_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct srp_rport *rport = transport_class_to_srp_rport(dev); struct Scsi_Host *shost = dev_to_shost(dev); struct srp_internal *i = to_srp_internal(shost->transportt); if (i->f->rport_delete) { i->f->rport_delete(rport); return count; } else { return -ENOSYS; } } static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); static ssize_t show_srp_rport_state(struct device *dev, struct device_attribute *attr, char *buf) { static const char *const state_name[] = { [SRP_RPORT_RUNNING] = "running", [SRP_RPORT_BLOCKED] = "blocked", [SRP_RPORT_FAIL_FAST] = "fail-fast", [SRP_RPORT_LOST] = "lost", }; struct srp_rport *rport = transport_class_to_srp_rport(dev); enum srp_rport_state state = rport->state; return sprintf(buf, "%s\n", (unsigned)state < ARRAY_SIZE(state_name) ? state_name[state] : "???"); } static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); static ssize_t srp_show_tmo(char *buf, int tmo) { return tmo >= 0 ? 
sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); } int srp_parse_tmo(int *tmo, const char *buf) { int res = 0; if (strncmp(buf, "off", 3) != 0) res = kstrtoint(buf, 0, tmo); else *tmo = -1; return res; } EXPORT_SYMBOL(srp_parse_tmo); static ssize_t show_reconnect_delay(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); return srp_show_tmo(buf, rport->reconnect_delay); } static ssize_t store_reconnect_delay(struct device *dev, struct device_attribute *attr, const char *buf, const size_t count) { struct srp_rport *rport = transport_class_to_srp_rport(dev); int res, delay; res = srp_parse_tmo(&delay, buf); if (res) goto out; res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, rport->dev_loss_tmo); if (res) goto out; if (rport->reconnect_delay <= 0 && delay > 0 && rport->state != SRP_RPORT_RUNNING) { queue_delayed_work(system_long_wq, &rport->reconnect_work, delay * HZ); } else if (delay <= 0) { cancel_delayed_work(&rport->reconnect_work); } rport->reconnect_delay = delay; res = count; out: return res; } static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, store_reconnect_delay); static ssize_t show_failed_reconnects(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); return sprintf(buf, "%d\n", rport->failed_reconnects); } static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); return srp_show_tmo(buf, rport->fast_io_fail_tmo); } static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct srp_rport *rport = transport_class_to_srp_rport(dev); int res; int fast_io_fail_tmo; res = srp_parse_tmo(&fast_io_fail_tmo, buf); if (res) goto out; res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, rport->dev_loss_tmo); if (res) goto out; rport->fast_io_fail_tmo = fast_io_fail_tmo; res = count; out: return res; } static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, show_srp_rport_fast_io_fail_tmo, store_srp_rport_fast_io_fail_tmo); static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_rport *rport = transport_class_to_srp_rport(dev); return srp_show_tmo(buf, rport->dev_loss_tmo); } static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct srp_rport *rport = transport_class_to_srp_rport(dev); int res; int dev_loss_tmo; res = srp_parse_tmo(&dev_loss_tmo, buf); if (res) goto out; res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, dev_loss_tmo); if (res) goto out; rport->dev_loss_tmo = dev_loss_tmo; res = count; out: return res; } static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, show_srp_rport_dev_loss_tmo, store_srp_rport_dev_loss_tmo); static int srp_rport_set_state(struct srp_rport *rport, enum srp_rport_state new_state) { enum srp_rport_state old_state = rport->state; lockdep_assert_held(&rport->mutex); switch (new_state) { case SRP_RPORT_RUNNING: switch (old_state) { case SRP_RPORT_LOST: goto invalid; default: break; } break; case SRP_RPORT_BLOCKED: switch (old_state) { case SRP_RPORT_RUNNING: break; default: goto invalid; } break; case SRP_RPORT_FAIL_FAST: switch (old_state) { case 
SRP_RPORT_LOST: goto invalid; default: break; } break; case SRP_RPORT_LOST: break; } rport->state = new_state; return 0; invalid: return -EINVAL; } /** * srp_reconnect_work() - reconnect and schedule a new attempt if necessary * @work: Work structure used for scheduling this operation. */ static void srp_reconnect_work(struct work_struct *work) { struct srp_rport *rport = container_of(to_delayed_work(work), struct srp_rport, reconnect_work); struct Scsi_Host *shost = rport_to_shost(rport); int delay, res; res = srp_reconnect_rport(rport); if (res != 0) { shost_printk(KERN_ERR, shost, "reconnect attempt %d failed (%d)\n", ++rport->failed_reconnects, res); delay = rport->reconnect_delay * min(100, max(1, rport->failed_reconnects - 10)); if (delay > 0) queue_delayed_work(system_long_wq, &rport->reconnect_work, delay * HZ); } } /* * scsi_block_targets() must have been called before this function is * called to guarantee that no .queuecommand() calls are in progress. */ static void __rport_fail_io_fast(struct srp_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); struct srp_internal *i; lockdep_assert_held(&rport->mutex); if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) return; scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); /* Involve the LLD if possible to terminate all I/O on the rport. */ i = to_srp_internal(shost->transportt); if (i->f->terminate_rport_io) i->f->terminate_rport_io(rport); } /** * rport_fast_io_fail_timedout() - fast I/O failure timeout handler * @work: Work structure used for scheduling this operation. */ static void rport_fast_io_fail_timedout(struct work_struct *work) { struct srp_rport *rport = container_of(to_delayed_work(work), struct srp_rport, fast_io_fail_work); struct Scsi_Host *shost = rport_to_shost(rport); pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", dev_name(&rport->dev), dev_name(&shost->shost_gendev)); mutex_lock(&rport->mutex); if (rport->state == SRP_RPORT_BLOCKED) __rport_fail_io_fast(rport); mutex_unlock(&rport->mutex); } /** * rport_dev_loss_timedout() - device loss timeout handler * @work: Work structure used for scheduling this operation. 
*/ static void rport_dev_loss_timedout(struct work_struct *work) { struct srp_rport *rport = container_of(to_delayed_work(work), struct srp_rport, dev_loss_work); struct Scsi_Host *shost = rport_to_shost(rport); struct srp_internal *i = to_srp_internal(shost->transportt); pr_info("dev_loss_tmo expired for SRP %s / %s.\n", dev_name(&rport->dev), dev_name(&shost->shost_gendev)); mutex_lock(&rport->mutex); WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); mutex_unlock(&rport->mutex); i->f->rport_delete(rport); } static void __srp_start_tl_fail_timers(struct srp_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); int delay, fast_io_fail_tmo, dev_loss_tmo; lockdep_assert_held(&rport->mutex); delay = rport->reconnect_delay; fast_io_fail_tmo = rport->fast_io_fail_tmo; dev_loss_tmo = rport->dev_loss_tmo; pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev), rport->state); if (rport->state == SRP_RPORT_LOST) return; if (delay > 0) queue_delayed_work(system_long_wq, &rport->reconnect_work, 1UL * delay * HZ); if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) && srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), rport->state); scsi_block_targets(shost, &shost->shost_gendev); if (fast_io_fail_tmo >= 0) queue_delayed_work(system_long_wq, &rport->fast_io_fail_work, 1UL * fast_io_fail_tmo * HZ); if (dev_loss_tmo >= 0) queue_delayed_work(system_long_wq, &rport->dev_loss_work, 1UL * dev_loss_tmo * HZ); } } /** * srp_start_tl_fail_timers() - start the transport layer failure timers * @rport: SRP target port. * * Start the transport layer fast I/O failure and device loss timers. Do not * modify a timer that was already started. */ void srp_start_tl_fail_timers(struct srp_rport *rport) { mutex_lock(&rport->mutex); __srp_start_tl_fail_timers(rport); mutex_unlock(&rport->mutex); } EXPORT_SYMBOL(srp_start_tl_fail_timers); /** * srp_reconnect_rport() - reconnect to an SRP target port * @rport: SRP target port. * * Blocks SCSI command queueing before invoking reconnect() such that * queuecommand() won't be invoked concurrently with reconnect() from outside * the SCSI EH. This is important since a reconnect() implementation may * reallocate resources needed by queuecommand(). * * Notes: * - This function neither waits until outstanding requests have finished nor * tries to abort these. It is the responsibility of the reconnect() * function to finish outstanding commands before reconnecting to the target * port. * - It is the responsibility of the caller to ensure that the resources * reallocated by the reconnect() function won't be used while this function * is in progress. One possible strategy is to invoke this function from * the context of the SCSI EH thread only. Another possible strategy is to * lock the rport mutex inside each SCSI LLD callback that can be invoked by * the SCSI EH (the scsi_host_template.eh_*() functions and also the * scsi_host_template.queuecommand() function). 
*/ int srp_reconnect_rport(struct srp_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); struct srp_internal *i = to_srp_internal(shost->transportt); struct scsi_device *sdev; int res; pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) /* * sdev state must be SDEV_TRANSPORT_OFFLINE, transition * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() * later is ok though, scsi_internal_device_unblock_nowait() * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. */ scsi_block_targets(shost, &shost->shost_gendev); res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; pr_debug("%s (state %d): transport.reconnect() returned %d\n", dev_name(&shost->shost_gendev), rport->state, res); if (res == 0) { cancel_delayed_work(&rport->fast_io_fail_work); cancel_delayed_work(&rport->dev_loss_work); rport->failed_reconnects = 0; srp_rport_set_state(rport, SRP_RPORT_RUNNING); scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); /* * If the SCSI error handler has offlined one or more devices, * invoking scsi_target_unblock() won't change the state of * these devices into running so do that explicitly. */ shost_for_each_device(sdev, shost) { mutex_lock(&sdev->state_mutex); if (sdev->sdev_state == SDEV_OFFLINE) sdev->sdev_state = SDEV_RUNNING; mutex_unlock(&sdev->state_mutex); } } else if (rport->state == SRP_RPORT_RUNNING) { /* * srp_reconnect_rport() has been invoked with fast_io_fail * and dev_loss off. Mark the port as failed and start the TL * failure timers if these had not yet been started. */ __rport_fail_io_fast(rport); __srp_start_tl_fail_timers(rport); } else if (rport->state != SRP_RPORT_BLOCKED) { scsi_target_unblock(&shost->shost_gendev, SDEV_TRANSPORT_OFFLINE); } mutex_unlock(&rport->mutex); out: return res; } EXPORT_SYMBOL(srp_reconnect_rport); /** * srp_timed_out() - SRP transport intercept of the SCSI timeout EH * @scmd: SCSI command. * * If a timeout occurs while an rport is in the blocked state, ask the SCSI * EH to continue waiting (SCSI_EH_RESET_TIMER). Otherwise let the SCSI core * handle the timeout (SCSI_EH_NOT_HANDLED). * * Note: This function is called from soft-IRQ context and with the request * queue lock held. */ enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; struct srp_internal *i = to_srp_internal(shost->transportt); struct srp_rport *rport = shost_to_rport(shost); pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); return rport && rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 
SCSI_EH_RESET_TIMER : SCSI_EH_NOT_HANDLED; } EXPORT_SYMBOL(srp_timed_out); static void srp_rport_release(struct device *dev) { struct srp_rport *rport = dev_to_rport(dev); put_device(dev->parent); kfree(rport); } static int scsi_is_srp_rport(const struct device *dev) { return dev->release == srp_rport_release; } static int srp_rport_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct srp_internal *i; if (!scsi_is_srp_rport(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &srp_host_class.class) return 0; i = to_srp_internal(shost->transportt); return &i->rport_attr_cont.ac == cont; } static int srp_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct srp_internal *i; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &srp_host_class.class) return 0; i = to_srp_internal(shost->transportt); return &i->t.host_attrs.ac == cont; } /** * srp_rport_get() - increment rport reference count * @rport: SRP target port. */ void srp_rport_get(struct srp_rport *rport) { get_device(&rport->dev); } EXPORT_SYMBOL(srp_rport_get); /** * srp_rport_put() - decrement rport reference count * @rport: SRP target port. */ void srp_rport_put(struct srp_rport *rport) { put_device(&rport->dev); } EXPORT_SYMBOL(srp_rport_put); /** * srp_rport_add - add a SRP remote port to the device hierarchy * @shost: scsi host the remote port is connected to. * @ids: The port id for the remote port. * * Publishes a port to the rest of the system. */ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, struct srp_rport_identifiers *ids) { struct srp_rport *rport; struct device *parent = &shost->shost_gendev; struct srp_internal *i = to_srp_internal(shost->transportt); int id, ret; rport = kzalloc(sizeof(*rport), GFP_KERNEL); if (!rport) return ERR_PTR(-ENOMEM); mutex_init(&rport->mutex); device_initialize(&rport->dev); rport->dev.parent = get_device(parent); rport->dev.release = srp_rport_release; memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); rport->roles = ids->roles; if (i->f->reconnect) rport->reconnect_delay = i->f->reconnect_delay ? *i->f->reconnect_delay : 10; INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? *i->f->fast_io_fail_tmo : 15; rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60; INIT_DELAYED_WORK(&rport->fast_io_fail_work, rport_fast_io_fail_timedout); INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); transport_setup_device(&rport->dev); ret = device_add(&rport->dev); if (ret) { transport_destroy_device(&rport->dev); put_device(&rport->dev); return ERR_PTR(ret); } transport_add_device(&rport->dev); transport_configure_device(&rport->dev); return rport; } EXPORT_SYMBOL_GPL(srp_rport_add); /** * srp_rport_del - remove a SRP remote port * @rport: SRP remote port to remove * * Removes the specified SRP remote port. 
*/ void srp_rport_del(struct srp_rport *rport) { struct device *dev = &rport->dev; transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL_GPL(srp_rport_del); static int do_srp_rport_del(struct device *dev, void *data) { if (scsi_is_srp_rport(dev)) srp_rport_del(dev_to_rport(dev)); return 0; } /** * srp_remove_host - tear down a Scsi_Host's SRP data structures * @shost: Scsi Host that is torn down * * Removes all SRP remote ports for a given Scsi_Host. * Must be called just before scsi_remove_host for SRP HBAs. */ void srp_remove_host(struct Scsi_Host *shost) { device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del); } EXPORT_SYMBOL_GPL(srp_remove_host); /** * srp_stop_rport_timers - stop the transport layer recovery timers * @rport: SRP remote port for which to stop the timers. * * Must be called after srp_remove_host() and scsi_remove_host(). The caller * must hold a reference on the rport (rport->dev) and on the SCSI host * (rport->dev.parent). */ void srp_stop_rport_timers(struct srp_rport *rport) { mutex_lock(&rport->mutex); if (rport->state == SRP_RPORT_BLOCKED) __rport_fail_io_fast(rport); srp_rport_set_state(rport, SRP_RPORT_LOST); mutex_unlock(&rport->mutex); cancel_delayed_work_sync(&rport->reconnect_work); cancel_delayed_work_sync(&rport->fast_io_fail_work); cancel_delayed_work_sync(&rport->dev_loss_work); } EXPORT_SYMBOL_GPL(srp_stop_rport_timers); /** * srp_attach_transport - instantiate SRP transport template * @ft: SRP transport class function template */ struct scsi_transport_template * srp_attach_transport(struct srp_function_template *ft) { int count; struct srp_internal *i; i = kzalloc(sizeof(*i), GFP_KERNEL); if (!i) return NULL; i->t.host_size = sizeof(struct srp_host_attrs); i->t.host_attrs.ac.attrs = &i->host_attrs[0]; i->t.host_attrs.ac.class = &srp_host_class.class; i->t.host_attrs.ac.match = srp_host_match; i->host_attrs[0] = NULL; transport_container_register(&i->t.host_attrs); i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; i->rport_attr_cont.ac.class = &srp_rport_class.class; i->rport_attr_cont.ac.match = srp_rport_match; count = 0; i->rport_attrs[count++] = &dev_attr_port_id; i->rport_attrs[count++] = &dev_attr_roles; if (ft->has_rport_state) { i->rport_attrs[count++] = &dev_attr_state; i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; } if (ft->reconnect) { i->rport_attrs[count++] = &dev_attr_reconnect_delay; i->rport_attrs[count++] = &dev_attr_failed_reconnects; } if (ft->rport_delete) i->rport_attrs[count++] = &dev_attr_delete; i->rport_attrs[count++] = NULL; BUG_ON(count > ARRAY_SIZE(i->rport_attrs)); transport_container_register(&i->rport_attr_cont); i->f = ft; return &i->t; } EXPORT_SYMBOL_GPL(srp_attach_transport); /** * srp_release_transport - release SRP transport template instance * @t: transport template instance */ void srp_release_transport(struct scsi_transport_template *t) { struct srp_internal *i = to_srp_internal(t); transport_container_unregister(&i->t.host_attrs); transport_container_unregister(&i->rport_attr_cont); kfree(i); } EXPORT_SYMBOL_GPL(srp_release_transport); static __init int srp_transport_init(void) { int ret; ret = transport_class_register(&srp_host_class); if (ret) return ret; ret = transport_class_register(&srp_rport_class); if (ret) goto unregister_host_class; return 0; unregister_host_class: transport_class_unregister(&srp_host_class); return ret; } static void __exit srp_transport_exit(void) { 
transport_class_unregister(&srp_host_class); transport_class_unregister(&srp_rport_class); } MODULE_AUTHOR("FUJITA Tomonori"); MODULE_DESCRIPTION("SRP Transport Attributes"); MODULE_LICENSE("GPL"); module_init(srp_transport_init); module_exit(srp_transport_exit);
linux-master
drivers/scsi/scsi_transport_srp.c
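drivers/scsi/scsi_transport_srp.c above is consumed by SRP low-level drivers through srp_attach_transport() and srp_rport_add(). The sketch below shows how such a driver might wire it up, using only the srp_function_template fields and srp_* calls that appear in the code above; the my_* names and callback bodies are placeholders, the timeout defaults mirror those applied in srp_rport_add() (10 s reconnect, 15 s fast_io_fail, 60 s dev_loss), and the authoritative structure layout is in include/scsi/scsi_transport_srp.h:

/* Skeleton SRP initiator glue (sketch, not a real driver). */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

static int my_srp_reconnect(struct srp_rport *rport)
{
	/* Re-establish the RDMA connection; per the srp_reconnect_rport()
	 * contract, finish outstanding commands before reconnecting. */
	return 0;
}

static void my_srp_terminate_io(struct srp_rport *rport)
{
	/* Fail all outstanding I/O on this rport (fast_io_fail expired). */
}

static void my_srp_rport_delete(struct srp_rport *rport)
{
	/* Tear the target down; reached via dev_loss_tmo or the "delete" attr. */
}

static int my_reconnect_delay = 10;
static int my_fast_io_fail_tmo = 15;
static int my_dev_loss_tmo = 60;

static struct srp_function_template my_srp_ft = {
	.has_rport_state	= true,
	.reset_timer_if_blocked	= true,
	.reconnect_delay	= &my_reconnect_delay,
	.fast_io_fail_tmo	= &my_fast_io_fail_tmo,
	.dev_loss_tmo		= &my_dev_loss_tmo,
	.reconnect		= my_srp_reconnect,
	.terminate_rport_io	= my_srp_terminate_io,
	.rport_delete		= my_srp_rport_delete,
};

static struct scsi_transport_template *my_srp_tt;

/* Called from the LLD's discovery/login path once a target port is known. */
static int my_add_target(struct Scsi_Host *shost, const u8 *port_id)
{
	struct srp_rport_identifiers ids = {};
	struct srp_rport *rport;

	memcpy(ids.port_id, port_id, sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(shost, &ids);	/* creates port-<host>:<n> */
	return PTR_ERR_OR_ZERO(rport);
}

static int __init my_srp_init(void)
{
	my_srp_tt = srp_attach_transport(&my_srp_ft);
	if (!my_srp_tt)
		return -ENOMEM;
	/* the LLD sets shost->transportt = my_srp_tt before scsi_add_host() */
	return 0;
}

static void __exit my_srp_exit(void)
{
	/* srp_remove_host()/scsi_remove_host() per host happen before this */
	srp_release_transport(my_srp_tt);
}

module_init(my_srp_init);
module_exit(my_srp_exit);
MODULE_LICENSE("GPL");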
// SPDX-License-Identifier: GPL-2.0-only /* * Sun3 SCSI stuff by Erik Verbruggen ([email protected]) * * Sun3 DMA routines added by Sam Creasey ([email protected]) * * VME support added by Sam Creasey * * TODO: modify this driver to support multiple Sun3 SCSI VME boards * * Adapted from mac_scsinew.c: */ /* * Generic Macintosh NCR5380 driver * * Copyright 1998, Michael Schmitz <[email protected]> * * derived in part from: */ /* * Generic Generic NCR5380 driver * * Copyright 1995, Russell King */ #include <linux/types.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/dvma.h> #include <scsi/scsi_host.h> /* minimum number of bytes to do dma on */ #define DMA_MIN_SIZE 129 /* Definitions for the core NCR5380 driver. */ #define NCR5380_implementation_fields /* none */ #define NCR5380_read(reg) in_8(hostdata->io + (reg)) #define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value) #define NCR5380_queue_command sun3scsi_queue_command #define NCR5380_host_reset sun3scsi_host_reset #define NCR5380_abort sun3scsi_abort #define NCR5380_info sun3scsi_info #define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len #define NCR5380_dma_recv_setup sun3scsi_dma_count #define NCR5380_dma_send_setup sun3scsi_dma_count #define NCR5380_dma_residual sun3scsi_dma_residual #include "NCR5380.h" /* dma regs start at regbase + 8, directly after the NCR regs */ struct sun3_dma_regs { unsigned short dma_addr_hi; /* vme only */ unsigned short dma_addr_lo; /* vme only */ unsigned short dma_count_hi; /* vme only */ unsigned short dma_count_lo; /* vme only */ unsigned short udc_data; /* udc dma data reg (obio only) */ unsigned short udc_addr; /* uda dma addr reg (obio only) */ unsigned short fifo_data; /* fifo data reg, * holds extra byte on odd dma reads */ unsigned short fifo_count; unsigned short csr; /* control/status reg */ unsigned short bpack_hi; /* vme only */ unsigned short bpack_lo; /* vme only */ unsigned short ivect; /* vme only */ unsigned short fifo_count_hi; /* vme only */ }; /* ucd chip specific regs - live in dvma space */ struct sun3_udc_regs { unsigned short rsel; /* select regs to load */ unsigned short addr_hi; /* high word of addr */ unsigned short addr_lo; /* low word */ unsigned short count; /* words to be xfer'd */ unsigned short mode_hi; /* high word of channel mode */ unsigned short mode_lo; /* low word of channel mode */ }; /* addresses of the udc registers */ #define UDC_MODE 0x38 #define UDC_CSR 0x2e /* command/status */ #define UDC_CHN_HI 0x26 /* chain high word */ #define UDC_CHN_LO 0x22 /* chain lo word */ #define UDC_CURA_HI 0x1a /* cur reg A high */ #define UDC_CURA_LO 0x0a /* cur reg A low */ #define UDC_CURB_HI 0x12 /* cur reg B high */ #define UDC_CURB_LO 0x02 /* cur reg B low */ #define UDC_MODE_HI 0x56 /* mode reg high */ #define UDC_MODE_LO 0x52 /* mode reg low */ #define UDC_COUNT 0x32 /* words to xfer */ /* some udc commands */ #define UDC_RESET 0 #define UDC_CHN_START 0xa0 /* start chain */ #define UDC_INT_ENABLE 0x32 /* channel 1 int on */ /* udc mode words */ #define UDC_MODE_HIWORD 0x40 #define UDC_MODE_LSEND 0xc2 #define UDC_MODE_LRECV 0xd2 /* udc reg selections */ #define UDC_RSEL_SEND 0x282 #define UDC_RSEL_RECV 0x182 /* bits in csr reg */ #define CSR_DMA_ACTIVE 0x8000 #define CSR_DMA_CONFLICT 0x4000 #define CSR_DMA_BUSERR 0x2000 #define CSR_FIFO_EMPTY 0x400 /* fifo flushed? 
*/ #define CSR_SDB_INT 0x200 /* sbc interrupt pending */ #define CSR_DMA_INT 0x100 /* dma interrupt pending */ #define CSR_LEFT 0xc0 #define CSR_LEFT_3 0xc0 #define CSR_LEFT_2 0x80 #define CSR_LEFT_1 0x40 #define CSR_PACK_ENABLE 0x20 #define CSR_DMA_ENABLE 0x10 #define CSR_SEND 0x8 /* 1 = send 0 = recv */ #define CSR_FIFO 0x2 /* reset fifo */ #define CSR_INTR 0x4 /* interrupt enable */ #define CSR_SCSI 0x1 #define VME_DATA24 0x3d00 extern int sun3_map_test(unsigned long, char *); static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); /* ms to wait after hitting dma regs */ #define SUN3_DMA_DELAY 10 /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ #define SUN3_DVMA_BUFSIZE 0xe000 static struct scsi_cmnd *sun3_dma_setup_done; static volatile struct sun3_dma_regs *dregs; static struct sun3_udc_regs *udc_regs; static unsigned char *sun3_dma_orig_addr; static unsigned long sun3_dma_orig_count; static int sun3_dma_active; static unsigned long last_residual; #ifndef SUN3_SCSI_VME /* dma controller register access functions */ static inline unsigned short sun3_udc_read(unsigned char reg) { unsigned short ret; dregs->udc_addr = UDC_CSR; udelay(SUN3_DMA_DELAY); ret = dregs->udc_data; udelay(SUN3_DMA_DELAY); return ret; } static inline void sun3_udc_write(unsigned short val, unsigned char reg) { dregs->udc_addr = reg; udelay(SUN3_DMA_DELAY); dregs->udc_data = val; udelay(SUN3_DMA_DELAY); } #endif // safe bits for the CSR #define CSR_GOOD 0x060f static irqreturn_t scsi_sun3_intr(int irq, void *dev) { struct Scsi_Host *instance = dev; unsigned short csr = dregs->csr; int handled = 0; #ifdef SUN3_SCSI_VME dregs->csr &= ~CSR_DMA_ENABLE; #endif if(csr & ~CSR_GOOD) { if (csr & CSR_DMA_BUSERR) shost_printk(KERN_ERR, instance, "bus error in DMA\n"); if (csr & CSR_DMA_CONFLICT) shost_printk(KERN_ERR, instance, "DMA conflict\n"); handled = 1; } if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { NCR5380_intr(irq, dev); handled = 1; } return IRQ_RETVAL(handled); } /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata, unsigned char *data, int count, int write_flag) { void *addr; if(sun3_dma_orig_addr != NULL) dvma_unmap(sun3_dma_orig_addr); #ifdef SUN3_SCSI_VME addr = (void *)dvma_map_vme((unsigned long) data, count); #else addr = (void *)dvma_map((unsigned long) data, count); #endif sun3_dma_orig_addr = addr; sun3_dma_orig_count = count; #ifndef SUN3_SCSI_VME dregs->fifo_count = 0; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; #endif /* set direction */ if(write_flag) dregs->csr |= CSR_SEND; else dregs->csr &= ~CSR_SEND; #ifdef SUN3_SCSI_VME dregs->csr |= CSR_PACK_ENABLE; dregs->dma_addr_hi = ((unsigned long)addr >> 16); dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); dregs->dma_count_hi = 0; dregs->dma_count_lo = 0; dregs->fifo_count_hi = 0; dregs->fifo_count = 0; #else /* byte count for fifo */ dregs->fifo_count = count; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; if(dregs->fifo_count != count) { shost_printk(KERN_ERR, hostdata->host, "FIFO mismatch %04x not %04x\n", dregs->fifo_count, (unsigned int) count); NCR5380_dprint(NDEBUG_DMA, hostdata->host); } /* 
setup udc */ udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); udc_regs->count = count/2; /* count in words */ udc_regs->mode_hi = UDC_MODE_HIWORD; if(write_flag) { if(count & 1) udc_regs->count++; udc_regs->mode_lo = UDC_MODE_LSEND; udc_regs->rsel = UDC_RSEL_SEND; } else { udc_regs->mode_lo = UDC_MODE_LRECV; udc_regs->rsel = UDC_RSEL_RECV; } /* announce location of regs block */ sun3_udc_write(((dvma_vtob(udc_regs) & 0xff0000) >> 8), UDC_CHN_HI); sun3_udc_write((dvma_vtob(udc_regs) & 0xffff), UDC_CHN_LO); /* set dma master on */ sun3_udc_write(0xd, UDC_MODE); /* interrupt enable */ sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); #endif return count; } static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata, unsigned char *data, int count) { return count; } static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata, unsigned char *data, int count) { return sun3scsi_dma_setup(hostdata, data, count, 0); } static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata, unsigned char *data, int count) { return sun3scsi_dma_setup(hostdata, data, count, 1); } static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata) { return last_residual; } static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { int wanted_len = NCR5380_to_ncmd(cmd)->this_residual; if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) return 0; return wanted_len; } static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) { #ifdef SUN3_SCSI_VME unsigned short csr; csr = dregs->csr; dregs->dma_count_hi = (sun3_dma_orig_count >> 16); dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); dregs->fifo_count = (sun3_dma_orig_count & 0xffff); /* if(!(csr & CSR_DMA_ENABLE)) * dregs->csr |= CSR_DMA_ENABLE; */ #else sun3_udc_write(UDC_CHN_START, UDC_CSR); #endif return 0; } /* clean up after our dma is done */ static int sun3scsi_dma_finish(enum dma_data_direction data_dir) { const bool write_flag = data_dir == DMA_TO_DEVICE; unsigned short __maybe_unused count; unsigned short fifo; int ret = 0; sun3_dma_active = 0; #ifdef SUN3_SCSI_VME dregs->csr &= ~CSR_DMA_ENABLE; fifo = dregs->fifo_count; if (write_flag) { if ((fifo > 0) && (fifo < sun3_dma_orig_count)) fifo++; } last_residual = fifo; /* empty bytes from the fifo which didn't make it */ if ((!write_flag) && (dregs->csr & CSR_LEFT)) { unsigned char *vaddr; vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); vaddr += (sun3_dma_orig_count - fifo); vaddr--; switch (dregs->csr & CSR_LEFT) { case CSR_LEFT_3: *vaddr = (dregs->bpack_lo & 0xff00) >> 8; vaddr--; fallthrough; case CSR_LEFT_2: *vaddr = (dregs->bpack_hi & 0x00ff); vaddr--; fallthrough; case CSR_LEFT_1: *vaddr = (dregs->bpack_hi & 0xff00) >> 8; break; } } #else // check to empty the fifo on a read if(!write_flag) { int tmo = 20000; /* .2 sec */ while(1) { if(dregs->csr & CSR_FIFO_EMPTY) break; if(--tmo <= 0) { printk("sun3scsi: fifo failed to empty!\n"); return 1; } udelay(10); } } dregs->udc_addr = 0x32; udelay(SUN3_DMA_DELAY); count = 2 * dregs->udc_data; udelay(SUN3_DMA_DELAY); fifo = dregs->fifo_count; last_residual = fifo; /* empty bytes from the fifo which didn't make it */ if((!write_flag) && (count - fifo) == 2) { unsigned short data; unsigned char *vaddr; data = dregs->fifo_data; vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr); vaddr += (sun3_dma_orig_count - fifo); 
vaddr[-2] = (data & 0xff00) >> 8; vaddr[-1] = (data & 0xff); } #endif dvma_unmap(sun3_dma_orig_addr); sun3_dma_orig_addr = NULL; #ifdef SUN3_SCSI_VME dregs->dma_addr_hi = 0; dregs->dma_addr_lo = 0; dregs->dma_count_hi = 0; dregs->dma_count_lo = 0; dregs->fifo_count = 0; dregs->fifo_count_hi = 0; dregs->csr &= ~CSR_SEND; /* dregs->csr |= CSR_DMA_ENABLE; */ #else sun3_udc_write(UDC_RESET, UDC_CSR); dregs->fifo_count = 0; dregs->csr &= ~CSR_SEND; /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; #endif sun3_dma_setup_done = NULL; return ret; } #include "NCR5380.c" #ifdef SUN3_SCSI_VME #define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" #define DRV_MODULE_NAME "sun3_scsi_vme" #else #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" #define DRV_MODULE_NAME "sun3_scsi" #endif #define PFX DRV_MODULE_NAME ": " static struct scsi_host_template sun3_scsi_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = SUN3_SCSI_NAME, .info = sun3scsi_info, .queuecommand = sun3scsi_queue_command, .eh_abort_handler = sun3scsi_abort, .eh_host_reset_handler = sun3scsi_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = 1, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), }; static int __init sun3_scsi_probe(struct platform_device *pdev) { struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; int error; struct resource *irq, *mem; void __iomem *ioaddr; int host_flags = 0; #ifdef SUN3_SCSI_VME int i; #endif if (setup_can_queue > 0) sun3_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun; if (setup_sg_tablesize > 0) sun3_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) sun3_scsi_template.this_id = setup_hostid & 7; #ifdef SUN3_SCSI_VME ioaddr = NULL; for (i = 0; i < 2; i++) { unsigned char x; irq = platform_get_resource(pdev, IORESOURCE_IRQ, i); mem = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!irq || !mem) break; ioaddr = sun3_ioremap(mem->start, resource_size(mem), SUN3_PAGE_TYPE_VME16); dregs = (struct sun3_dma_regs *)(ioaddr + 8); if (sun3_map_test((unsigned long)dregs, &x)) { unsigned short oldcsr; oldcsr = dregs->csr; dregs->csr = 0; udelay(SUN3_DMA_DELAY); if (dregs->csr == 0x1400) break; dregs->csr = oldcsr; } iounmap(ioaddr); ioaddr = NULL; } if (!ioaddr) return -ENODEV; #else irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!irq || !mem) return -ENODEV; ioaddr = ioremap(mem->start, resource_size(mem)); dregs = (struct sun3_dma_regs *)(ioaddr + 8); udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs)); if (!udc_regs) { pr_err(PFX "couldn't allocate DVMA memory!\n"); iounmap(ioaddr); return -ENOMEM; } #endif instance = scsi_host_alloc(&sun3_scsi_template, sizeof(struct NCR5380_hostdata)); if (!instance) { error = -ENOMEM; goto fail_alloc; } instance->irq = irq->start; hostdata = shost_priv(instance); hostdata->base = mem->start; hostdata->io = ioaddr; error = NCR5380_init(instance, host_flags); if (error) goto fail_init; error = request_irq(instance->irq, scsi_sun3_intr, 0, "NCR5380", instance); if (error) { pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", instance->host_no, instance->irq); goto fail_irq; } dregs->csr = 0; udelay(SUN3_DMA_DELAY); dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; udelay(SUN3_DMA_DELAY); dregs->fifo_count = 0; #ifdef SUN3_SCSI_VME dregs->fifo_count_hi = 0; dregs->dma_addr_hi = 0; dregs->dma_addr_lo = 0; dregs->dma_count_hi = 0; 
dregs->dma_count_lo = 0; dregs->ivect = VME_DATA24 | (instance->irq & 0xff); #endif NCR5380_maybe_reset_bus(instance); error = scsi_add_host(instance, NULL); if (error) goto fail_host; platform_set_drvdata(pdev, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); fail_init: scsi_host_put(instance); fail_alloc: if (udc_regs) dvma_free(udc_regs); iounmap(ioaddr); return error; } static int __exit sun3_scsi_remove(struct platform_device *pdev) { struct Scsi_Host *instance = platform_get_drvdata(pdev); struct NCR5380_hostdata *hostdata = shost_priv(instance); void __iomem *ioaddr = hostdata->io; scsi_remove_host(instance); free_irq(instance->irq, instance); NCR5380_exit(instance); scsi_host_put(instance); if (udc_regs) dvma_free(udc_regs); iounmap(ioaddr); return 0; } static struct platform_driver sun3_scsi_driver = { .remove = __exit_p(sun3_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, }; module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe); MODULE_ALIAS("platform:" DRV_MODULE_NAME); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/sun3_scsi.c
// SPDX-License-Identifier: GPL-2.0 /* * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers * * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <[email protected]> * * Based on the original DAC960 driver, * Copyright 1998-2001 by Leonard N. Zubkoff <[email protected]> * Portions Copyright 2002 by Mylex (An IBM Business Unit) * */ #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/raid_class.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include "myrb.h" static struct raid_template *myrb_raid_template; static void myrb_monitor(struct work_struct *work); static inline void myrb_translate_devstate(void *DeviceState); static inline int myrb_logical_channel(struct Scsi_Host *shost) { return shost->max_channel - 1; } static struct myrb_devstate_name_entry { enum myrb_devstate state; const char *name; } myrb_devstate_name_list[] = { { MYRB_DEVICE_DEAD, "Dead" }, { MYRB_DEVICE_WO, "WriteOnly" }, { MYRB_DEVICE_ONLINE, "Online" }, { MYRB_DEVICE_CRITICAL, "Critical" }, { MYRB_DEVICE_STANDBY, "Standby" }, { MYRB_DEVICE_OFFLINE, "Offline" }, }; static const char *myrb_devstate_name(enum myrb_devstate state) { struct myrb_devstate_name_entry *entry = myrb_devstate_name_list; int i; for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) { if (entry[i].state == state) return entry[i].name; } return "Unknown"; } static struct myrb_raidlevel_name_entry { enum myrb_raidlevel level; const char *name; } myrb_raidlevel_name_list[] = { { MYRB_RAID_LEVEL0, "RAID0" }, { MYRB_RAID_LEVEL1, "RAID1" }, { MYRB_RAID_LEVEL3, "RAID3" }, { MYRB_RAID_LEVEL5, "RAID5" }, { MYRB_RAID_LEVEL6, "RAID6" }, { MYRB_RAID_JBOD, "JBOD" }, }; static const char *myrb_raidlevel_name(enum myrb_raidlevel level) { struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list; int i; for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) { if (entry[i].level == level) return entry[i].name; } return NULL; } /* * myrb_create_mempools - allocates auxiliary data structures * * Return: true on success, false otherwise. */ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb) { size_t elem_size, elem_align; elem_align = sizeof(struct myrb_sge); elem_size = cb->host->sg_tablesize * elem_align; cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev, elem_size, elem_align, 0); if (cb->sg_pool == NULL) { shost_printk(KERN_ERR, cb->host, "Failed to allocate SG pool\n"); return false; } cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev, sizeof(struct myrb_dcdb), sizeof(unsigned int), 0); if (!cb->dcdb_pool) { dma_pool_destroy(cb->sg_pool); cb->sg_pool = NULL; shost_printk(KERN_ERR, cb->host, "Failed to allocate DCDB pool\n"); return false; } snprintf(cb->work_q_name, sizeof(cb->work_q_name), "myrb_wq_%d", cb->host->host_no); cb->work_q = create_singlethread_workqueue(cb->work_q_name); if (!cb->work_q) { dma_pool_destroy(cb->dcdb_pool); cb->dcdb_pool = NULL; dma_pool_destroy(cb->sg_pool); cb->sg_pool = NULL; shost_printk(KERN_ERR, cb->host, "Failed to create workqueue\n"); return false; } /* * Initialize the Monitoring Timer. 
*/ INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor); queue_delayed_work(cb->work_q, &cb->monitor_work, 1); return true; } /* * myrb_destroy_mempools - tears down the memory pools for the controller */ static void myrb_destroy_mempools(struct myrb_hba *cb) { cancel_delayed_work_sync(&cb->monitor_work); destroy_workqueue(cb->work_q); dma_pool_destroy(cb->sg_pool); dma_pool_destroy(cb->dcdb_pool); } /* * myrb_reset_cmd - reset command block */ static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk) { union myrb_cmd_mbox *mbox = &cmd_blk->mbox; memset(mbox, 0, sizeof(union myrb_cmd_mbox)); cmd_blk->status = 0; } /* * myrb_qcmd - queues command block for execution */ static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) { void __iomem *base = cb->io_base; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox; cb->write_cmd_mbox(next_mbox, mbox); if (cb->prev_cmd_mbox1->words[0] == 0 || cb->prev_cmd_mbox2->words[0] == 0) cb->get_cmd_mbox(base); cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1; cb->prev_cmd_mbox1 = next_mbox; if (++next_mbox > cb->last_cmd_mbox) next_mbox = cb->first_cmd_mbox; cb->next_cmd_mbox = next_mbox; } /* * myrb_exec_cmd - executes command block and waits for completion. * * Return: command status */ static unsigned short myrb_exec_cmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) { DECLARE_COMPLETION_ONSTACK(cmpl); unsigned long flags; cmd_blk->completion = &cmpl; spin_lock_irqsave(&cb->queue_lock, flags); cb->qcmd(cb, cmd_blk); spin_unlock_irqrestore(&cb->queue_lock, flags); wait_for_completion(&cmpl); return cmd_blk->status; } /* * myrb_exec_type3 - executes a type 3 command and waits for completion. * * Return: command status */ static unsigned short myrb_exec_type3(struct myrb_hba *cb, enum myrb_cmd_opcode op, dma_addr_t addr) { struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; unsigned short status; mutex_lock(&cb->dcmd_mutex); myrb_reset_cmd(cmd_blk); mbox->type3.id = MYRB_DCMD_TAG; mbox->type3.opcode = op; mbox->type3.addr = addr; status = myrb_exec_cmd(cb, cmd_blk); mutex_unlock(&cb->dcmd_mutex); return status; } /* * myrb_exec_type3D - executes a type 3D command and waits for completion. 
* * Return: command status */ static unsigned short myrb_exec_type3D(struct myrb_hba *cb, enum myrb_cmd_opcode op, struct scsi_device *sdev, struct myrb_pdev_state *pdev_info) { struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; unsigned short status; dma_addr_t pdev_info_addr; pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info, sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE); if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr)) return MYRB_STATUS_SUBSYS_FAILED; mutex_lock(&cb->dcmd_mutex); myrb_reset_cmd(cmd_blk); mbox->type3D.id = MYRB_DCMD_TAG; mbox->type3D.opcode = op; mbox->type3D.channel = sdev->channel; mbox->type3D.target = sdev->id; mbox->type3D.addr = pdev_info_addr; status = myrb_exec_cmd(cb, cmd_blk); mutex_unlock(&cb->dcmd_mutex); dma_unmap_single(&cb->pdev->dev, pdev_info_addr, sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE); if (status == MYRB_STATUS_SUCCESS && mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD) myrb_translate_devstate(pdev_info); return status; } static char *myrb_event_msg[] = { "killed because write recovery failed", "killed because of SCSI bus reset failure", "killed because of double check condition", "killed because it was removed", "killed because of gross error on SCSI chip", "killed because of bad tag returned from drive", "killed because of timeout on SCSI command", "killed because of reset SCSI command issued from system", "killed because busy or parity error count exceeded limit", "killed because of 'kill drive' command from system", "killed because of selection timeout", "killed due to SCSI phase sequence error", "killed due to unknown status", }; /** * myrb_get_event - get event log from HBA * @cb: pointer to the hba structure * @event: number of the event * * Execute a type 3E command and logs the event message */ static void myrb_get_event(struct myrb_hba *cb, unsigned int event) { struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; struct myrb_log_entry *ev_buf; dma_addr_t ev_addr; unsigned short status; ev_buf = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry), &ev_addr, GFP_KERNEL); if (!ev_buf) return; myrb_reset_cmd(cmd_blk); mbox->type3E.id = MYRB_MCMD_TAG; mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION; mbox->type3E.optype = DAC960_V1_GetEventLogEntry; mbox->type3E.opqual = 1; mbox->type3E.ev_seq = event; mbox->type3E.addr = ev_addr; status = myrb_exec_cmd(cb, cmd_blk); if (status != MYRB_STATUS_SUCCESS) shost_printk(KERN_INFO, cb->host, "Failed to get event log %d, status %04x\n", event, status); else if (ev_buf->seq_num == event) { struct scsi_sense_hdr sshdr; memset(&sshdr, 0, sizeof(sshdr)); scsi_normalize_sense(ev_buf->sense, 32, &sshdr); if (sshdr.sense_key == VENDOR_SPECIFIC && sshdr.asc == 0x80 && sshdr.ascq < ARRAY_SIZE(myrb_event_msg)) shost_printk(KERN_CRIT, cb->host, "Physical drive %d:%d: %s\n", ev_buf->channel, ev_buf->target, myrb_event_msg[sshdr.ascq]); else shost_printk(KERN_CRIT, cb->host, "Physical drive %d:%d: Sense: %X/%02X/%02X\n", ev_buf->channel, ev_buf->target, sshdr.sense_key, sshdr.asc, sshdr.ascq); } dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry), ev_buf, ev_addr); } /* * myrb_get_errtable - retrieves the error table from the controller * * Executes a type 3 command and logs the error table from the controller. 
*/ static void myrb_get_errtable(struct myrb_hba *cb) { struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; unsigned short status; struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS]; memcpy(&old_table, cb->err_table, sizeof(old_table)); myrb_reset_cmd(cmd_blk); mbox->type3.id = MYRB_MCMD_TAG; mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE; mbox->type3.addr = cb->err_table_addr; status = myrb_exec_cmd(cb, cmd_blk); if (status == MYRB_STATUS_SUCCESS) { struct myrb_error_entry *table = cb->err_table; struct myrb_error_entry *new, *old; size_t err_table_offset; struct scsi_device *sdev; shost_for_each_device(sdev, cb->host) { if (sdev->channel >= myrb_logical_channel(cb->host)) continue; err_table_offset = sdev->channel * MYRB_MAX_TARGETS + sdev->id; new = table + err_table_offset; old = &old_table[err_table_offset]; if (new->parity_err == old->parity_err && new->soft_err == old->soft_err && new->hard_err == old->hard_err && new->misc_err == old->misc_err) continue; sdev_printk(KERN_CRIT, sdev, "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n", new->parity_err, new->soft_err, new->hard_err, new->misc_err); } } } /* * myrb_get_ldev_info - retrieves the logical device table from the controller * * Executes a type 3 command and updates the logical device table. * * Return: command status */ static unsigned short myrb_get_ldev_info(struct myrb_hba *cb) { unsigned short status; int ldev_num, ldev_cnt = cb->enquiry->ldev_count; struct Scsi_Host *shost = cb->host; status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO, cb->ldev_info_addr); if (status != MYRB_STATUS_SUCCESS) return status; for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) { struct myrb_ldev_info *old = NULL; struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num; struct scsi_device *sdev; sdev = scsi_device_lookup(shost, myrb_logical_channel(shost), ldev_num, 0); if (!sdev) { if (new->state == MYRB_DEVICE_OFFLINE) continue; shost_printk(KERN_INFO, shost, "Adding Logical Drive %d in state %s\n", ldev_num, myrb_devstate_name(new->state)); scsi_add_device(shost, myrb_logical_channel(shost), ldev_num, 0); continue; } old = sdev->hostdata; if (new->state != old->state) shost_printk(KERN_INFO, shost, "Logical Drive %d is now %s\n", ldev_num, myrb_devstate_name(new->state)); if (new->wb_enabled != old->wb_enabled) sdev_printk(KERN_INFO, sdev, "Logical Drive is now WRITE %s\n", (new->wb_enabled ? "BACK" : "THRU")); memcpy(old, new, sizeof(*new)); scsi_device_put(sdev); } return status; } /* * myrb_get_rbld_progress - get rebuild progress information * * Executes a type 3 command and returns the rebuild progress * information. 
* * Return: command status */ static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb, struct myrb_rbld_progress *rbld) { struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; struct myrb_rbld_progress *rbld_buf; dma_addr_t rbld_addr; unsigned short status; rbld_buf = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), &rbld_addr, GFP_KERNEL); if (!rbld_buf) return MYRB_STATUS_RBLD_NOT_CHECKED; myrb_reset_cmd(cmd_blk); mbox->type3.id = MYRB_MCMD_TAG; mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS; mbox->type3.addr = rbld_addr; status = myrb_exec_cmd(cb, cmd_blk); if (rbld) memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress)); dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), rbld_buf, rbld_addr); return status; } /* * myrb_update_rbld_progress - updates the rebuild status * * Updates the rebuild status for the attached logical devices. */ static void myrb_update_rbld_progress(struct myrb_hba *cb) { struct myrb_rbld_progress rbld_buf; unsigned short status; status = myrb_get_rbld_progress(cb, &rbld_buf); if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS && cb->last_rbld_status == MYRB_STATUS_SUCCESS) status = MYRB_STATUS_RBLD_SUCCESS; if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) { unsigned int blocks_done = rbld_buf.ldev_size - rbld_buf.blocks_left; struct scsi_device *sdev; sdev = scsi_device_lookup(cb->host, myrb_logical_channel(cb->host), rbld_buf.ldev_num, 0); if (!sdev) return; switch (status) { case MYRB_STATUS_SUCCESS: sdev_printk(KERN_INFO, sdev, "Rebuild in Progress, %d%% completed\n", (100 * (blocks_done >> 7)) / (rbld_buf.ldev_size >> 7)); break; case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE: sdev_printk(KERN_INFO, sdev, "Rebuild Failed due to Logical Drive Failure\n"); break; case MYRB_STATUS_RBLD_FAILED_BADBLOCKS: sdev_printk(KERN_INFO, sdev, "Rebuild Failed due to Bad Blocks on Other Drives\n"); break; case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED: sdev_printk(KERN_INFO, sdev, "Rebuild Failed due to Failure of Drive Being Rebuilt\n"); break; case MYRB_STATUS_RBLD_SUCCESS: sdev_printk(KERN_INFO, sdev, "Rebuild Completed Successfully\n"); break; case MYRB_STATUS_RBLD_SUCCESS_TERMINATED: sdev_printk(KERN_INFO, sdev, "Rebuild Successfully Terminated\n"); break; default: break; } scsi_device_put(sdev); } cb->last_rbld_status = status; } /* * myrb_get_cc_progress - retrieve the rebuild status * * Execute a type 3 Command and fetch the rebuild / consistency check * status. 
*/ static void myrb_get_cc_progress(struct myrb_hba *cb) { struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; struct myrb_rbld_progress *rbld_buf; dma_addr_t rbld_addr; unsigned short status; rbld_buf = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), &rbld_addr, GFP_KERNEL); if (!rbld_buf) { cb->need_cc_status = true; return; } myrb_reset_cmd(cmd_blk); mbox->type3.id = MYRB_MCMD_TAG; mbox->type3.opcode = MYRB_CMD_REBUILD_STAT; mbox->type3.addr = rbld_addr; status = myrb_exec_cmd(cb, cmd_blk); if (status == MYRB_STATUS_SUCCESS) { unsigned int ldev_num = rbld_buf->ldev_num; unsigned int ldev_size = rbld_buf->ldev_size; unsigned int blocks_done = ldev_size - rbld_buf->blocks_left; struct scsi_device *sdev; sdev = scsi_device_lookup(cb->host, myrb_logical_channel(cb->host), ldev_num, 0); if (sdev) { sdev_printk(KERN_INFO, sdev, "Consistency Check in Progress: %d%% completed\n", (100 * (blocks_done >> 7)) / (ldev_size >> 7)); scsi_device_put(sdev); } } dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), rbld_buf, rbld_addr); } /* * myrb_bgi_control - updates background initialisation status * * Executes a type 3B command and updates the background initialisation status */ static void myrb_bgi_control(struct myrb_hba *cb) { struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; struct myrb_bgi_status *bgi, *last_bgi; dma_addr_t bgi_addr; struct scsi_device *sdev = NULL; unsigned short status; bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), &bgi_addr, GFP_KERNEL); if (!bgi) { shost_printk(KERN_ERR, cb->host, "Failed to allocate bgi memory\n"); return; } myrb_reset_cmd(cmd_blk); mbox->type3B.id = MYRB_DCMD_TAG; mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL; mbox->type3B.optype = 0x20; mbox->type3B.addr = bgi_addr; status = myrb_exec_cmd(cb, cmd_blk); last_bgi = &cb->bgi_status; sdev = scsi_device_lookup(cb->host, myrb_logical_channel(cb->host), bgi->ldev_num, 0); switch (status) { case MYRB_STATUS_SUCCESS: switch (bgi->status) { case MYRB_BGI_INVALID: break; case MYRB_BGI_STARTED: if (!sdev) break; sdev_printk(KERN_INFO, sdev, "Background Initialization Started\n"); break; case MYRB_BGI_INPROGRESS: if (!sdev) break; if (bgi->blocks_done == last_bgi->blocks_done && bgi->ldev_num == last_bgi->ldev_num) break; sdev_printk(KERN_INFO, sdev, "Background Initialization in Progress: %d%% completed\n", (100 * (bgi->blocks_done >> 7)) / (bgi->ldev_size >> 7)); break; case MYRB_BGI_SUSPENDED: if (!sdev) break; sdev_printk(KERN_INFO, sdev, "Background Initialization Suspended\n"); break; case MYRB_BGI_CANCELLED: if (!sdev) break; sdev_printk(KERN_INFO, sdev, "Background Initialization Cancelled\n"); break; } memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status)); break; case MYRB_STATUS_BGI_SUCCESS: if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) sdev_printk(KERN_INFO, sdev, "Background Initialization Completed Successfully\n"); cb->bgi_status.status = MYRB_BGI_INVALID; break; case MYRB_STATUS_BGI_ABORTED: if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) sdev_printk(KERN_INFO, sdev, "Background Initialization Aborted\n"); fallthrough; case MYRB_STATUS_NO_BGI_INPROGRESS: cb->bgi_status.status = MYRB_BGI_INVALID; break; } if (sdev) scsi_device_put(sdev); dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), bgi, bgi_addr); } /* * myrb_hba_enquiry - updates the controller status * * Executes a DAC_V1_Enquiry command and updates the controller 
status. * * Return: command status */ static unsigned short myrb_hba_enquiry(struct myrb_hba *cb) { struct myrb_enquiry old, *new; unsigned short status; memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry)); status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr); if (status != MYRB_STATUS_SUCCESS) return status; new = cb->enquiry; if (new->ldev_count > old.ldev_count) { int ldev_num = old.ldev_count - 1; while (++ldev_num < new->ldev_count) shost_printk(KERN_CRIT, cb->host, "Logical Drive %d Now Exists\n", ldev_num); } if (new->ldev_count < old.ldev_count) { int ldev_num = new->ldev_count - 1; while (++ldev_num < old.ldev_count) shost_printk(KERN_CRIT, cb->host, "Logical Drive %d No Longer Exists\n", ldev_num); } if (new->status.deferred != old.status.deferred) shost_printk(KERN_CRIT, cb->host, "Deferred Write Error Flag is now %s\n", (new->status.deferred ? "TRUE" : "FALSE")); if (new->ev_seq != old.ev_seq) { cb->new_ev_seq = new->ev_seq; cb->need_err_info = true; shost_printk(KERN_INFO, cb->host, "Event log %d/%d (%d/%d) available\n", cb->old_ev_seq, cb->new_ev_seq, old.ev_seq, new->ev_seq); } if ((new->ldev_critical > 0 && new->ldev_critical != old.ldev_critical) || (new->ldev_offline > 0 && new->ldev_offline != old.ldev_offline) || (new->ldev_count != old.ldev_count)) { shost_printk(KERN_INFO, cb->host, "Logical drive count changed (%d/%d/%d)\n", new->ldev_critical, new->ldev_offline, new->ldev_count); cb->need_ldev_info = true; } if (new->pdev_dead > 0 || new->pdev_dead != old.pdev_dead || time_after_eq(jiffies, cb->secondary_monitor_time + MYRB_SECONDARY_MONITOR_INTERVAL)) { cb->need_bgi_status = cb->bgi_status_supported; cb->secondary_monitor_time = jiffies; } if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS || new->rbld == MYRB_BG_RBLD_IN_PROGRESS || old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS || old.rbld == MYRB_BG_RBLD_IN_PROGRESS) { cb->need_rbld = true; cb->rbld_first = (new->ldev_critical < old.ldev_critical); } if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS) switch (new->rbld) { case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS: shost_printk(KERN_INFO, cb->host, "Consistency Check Completed Successfully\n"); break; case MYRB_STDBY_RBLD_IN_PROGRESS: case MYRB_BG_RBLD_IN_PROGRESS: break; case MYRB_BG_CHECK_IN_PROGRESS: cb->need_cc_status = true; break; case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR: shost_printk(KERN_INFO, cb->host, "Consistency Check Completed with Error\n"); break; case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED: shost_printk(KERN_INFO, cb->host, "Consistency Check Failed - Physical Device Failed\n"); break; case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED: shost_printk(KERN_INFO, cb->host, "Consistency Check Failed - Logical Drive Failed\n"); break; case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER: shost_printk(KERN_INFO, cb->host, "Consistency Check Failed - Other Causes\n"); break; case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED: shost_printk(KERN_INFO, cb->host, "Consistency Check Successfully Terminated\n"); break; } else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS) cb->need_cc_status = true; return MYRB_STATUS_SUCCESS; } /* * myrb_set_pdev_state - sets the device state for a physical device * * Return: command status */ static unsigned short myrb_set_pdev_state(struct myrb_hba *cb, struct scsi_device *sdev, enum myrb_devstate state) { struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; unsigned short status; mutex_lock(&cb->dcmd_mutex); mbox->type3D.opcode = MYRB_CMD_START_DEVICE; mbox->type3D.id = MYRB_DCMD_TAG; mbox->type3D.channel = 
sdev->channel; mbox->type3D.target = sdev->id; mbox->type3D.state = state & 0x1F; status = myrb_exec_cmd(cb, cmd_blk); mutex_unlock(&cb->dcmd_mutex); return status; } /* * myrb_enable_mmio - enables the Memory Mailbox Interface * * PD and P controller types have no memory mailbox, but still need the * other dma mapped memory. * * Return: true on success, false otherwise. */ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) { void __iomem *base = cb->io_base; struct pci_dev *pdev = cb->pdev; size_t err_table_size; size_t ldev_info_size; union myrb_cmd_mbox *cmd_mbox_mem; struct myrb_stat_mbox *stat_mbox_mem; union myrb_cmd_mbox mbox; unsigned short status; memset(&mbox, 0, sizeof(union myrb_cmd_mbox)); if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "DMA mask out of range\n"); return false; } cb->enquiry = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry), &cb->enquiry_addr, GFP_KERNEL); if (!cb->enquiry) return false; err_table_size = sizeof(struct myrb_error_entry) * MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS; cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size, &cb->err_table_addr, GFP_KERNEL); if (!cb->err_table) return false; ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS; cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size, &cb->ldev_info_addr, GFP_KERNEL); if (!cb->ldev_info_buf) return false; /* * Skip mailbox initialisation for PD and P Controllers */ if (!mmio_init_fn) return true; /* These are the base addresses for the command memory mailbox array */ cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox); cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev, cb->cmd_mbox_size, &cb->cmd_mbox_addr, GFP_KERNEL); if (!cb->first_cmd_mbox) return false; cmd_mbox_mem = cb->first_cmd_mbox; cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1; cb->last_cmd_mbox = cmd_mbox_mem; cb->next_cmd_mbox = cb->first_cmd_mbox; cb->prev_cmd_mbox1 = cb->last_cmd_mbox; cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1; /* These are the base addresses for the status memory mailbox array */ cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT * sizeof(struct myrb_stat_mbox); cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev, cb->stat_mbox_size, &cb->stat_mbox_addr, GFP_KERNEL); if (!cb->first_stat_mbox) return false; stat_mbox_mem = cb->first_stat_mbox; stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1; cb->last_stat_mbox = stat_mbox_mem; cb->next_stat_mbox = cb->first_stat_mbox; /* Enable the Memory Mailbox Interface. */ cb->dual_mode_interface = true; mbox.typeX.opcode = 0x2B; mbox.typeX.id = 0; mbox.typeX.opcode2 = 0x14; mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr; mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr; status = mmio_init_fn(pdev, base, &mbox); if (status != MYRB_STATUS_SUCCESS) { cb->dual_mode_interface = false; mbox.typeX.opcode2 = 0x10; status = mmio_init_fn(pdev, base, &mbox); if (status != MYRB_STATUS_SUCCESS) { dev_err(&pdev->dev, "Failed to enable mailbox, status %02X\n", status); return false; } } return true; } /* * myrb_get_hba_config - reads the configuration information * * Reads the configuration information from the controller and * initializes the controller structure. 
* * Return: 0 on success, errno otherwise */ static int myrb_get_hba_config(struct myrb_hba *cb) { struct myrb_enquiry2 *enquiry2; dma_addr_t enquiry2_addr; struct myrb_config2 *config2; dma_addr_t config2_addr; struct Scsi_Host *shost = cb->host; struct pci_dev *pdev = cb->pdev; int pchan_max = 0, pchan_cur = 0; unsigned short status; int ret = -ENODEV, memsize = 0; enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), &enquiry2_addr, GFP_KERNEL); if (!enquiry2) { shost_printk(KERN_ERR, cb->host, "Failed to allocate V1 enquiry2 memory\n"); return -ENOMEM; } config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2), &config2_addr, GFP_KERNEL); if (!config2) { shost_printk(KERN_ERR, cb->host, "Failed to allocate V1 config2 memory\n"); dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), enquiry2, enquiry2_addr); return -ENOMEM; } mutex_lock(&cb->dma_mutex); status = myrb_hba_enquiry(cb); mutex_unlock(&cb->dma_mutex); if (status != MYRB_STATUS_SUCCESS) { shost_printk(KERN_WARNING, cb->host, "Failed to issue V1 Enquiry\n"); goto out_free; } status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr); if (status != MYRB_STATUS_SUCCESS) { shost_printk(KERN_WARNING, cb->host, "Failed to issue V1 Enquiry2\n"); goto out_free; } status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr); if (status != MYRB_STATUS_SUCCESS) { shost_printk(KERN_WARNING, cb->host, "Failed to issue ReadConfig2\n"); goto out_free; } status = myrb_get_ldev_info(cb); if (status != MYRB_STATUS_SUCCESS) { shost_printk(KERN_WARNING, cb->host, "Failed to get logical drive information\n"); goto out_free; } /* * Initialize the Controller Model Name and Full Model Name fields. */ switch (enquiry2->hw.sub_model) { case DAC960_V1_P_PD_PU: if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA) strcpy(cb->model_name, "DAC960PU"); else strcpy(cb->model_name, "DAC960PD"); break; case DAC960_V1_PL: strcpy(cb->model_name, "DAC960PL"); break; case DAC960_V1_PG: strcpy(cb->model_name, "DAC960PG"); break; case DAC960_V1_PJ: strcpy(cb->model_name, "DAC960PJ"); break; case DAC960_V1_PR: strcpy(cb->model_name, "DAC960PR"); break; case DAC960_V1_PT: strcpy(cb->model_name, "DAC960PT"); break; case DAC960_V1_PTL0: strcpy(cb->model_name, "DAC960PTL0"); break; case DAC960_V1_PRL: strcpy(cb->model_name, "DAC960PRL"); break; case DAC960_V1_PTL1: strcpy(cb->model_name, "DAC960PTL1"); break; case DAC960_V1_1164P: strcpy(cb->model_name, "eXtremeRAID 1100"); break; default: shost_printk(KERN_WARNING, cb->host, "Unknown Model %X\n", enquiry2->hw.sub_model); goto out; } /* * Initialize the Controller Firmware Version field and verify that it * is a supported firmware version. * The supported firmware versions are: * * DAC1164P 5.06 and above * DAC960PTL/PRL/PJ/PG 4.06 and above * DAC960PU/PD/PL 3.51 and above * DAC960PU/PD/PL/P 2.73 and above */ #if defined(CONFIG_ALPHA) /* * DEC Alpha machines were often equipped with DAC960 cards that were * OEMed from Mylex, and had their own custom firmware. Version 2.70, * the last custom FW revision to be released by DEC for these older * controllers, appears to work quite well with this driver. 
* * Cards tested successfully were several versions each of the PD and * PU, called by DEC the KZPSC and KZPAC, respectively, and having * the Manufacturer Numbers (from Mylex), usually on a sticker on the * back of the board, of: * * KZPSC: D040347 (1-channel) or D040348 (2-channel) * or D040349 (3-channel) * KZPAC: D040395 (1-channel) or D040396 (2-channel) * or D040397 (3-channel) */ # define FIRMWARE_27X "2.70" #else # define FIRMWARE_27X "2.73" #endif if (enquiry2->fw.major_version == 0) { enquiry2->fw.major_version = cb->enquiry->fw_major_version; enquiry2->fw.minor_version = cb->enquiry->fw_minor_version; enquiry2->fw.firmware_type = '0'; enquiry2->fw.turn_id = 0; } snprintf(cb->fw_version, sizeof(cb->fw_version), "%u.%02u-%c-%02u", enquiry2->fw.major_version, enquiry2->fw.minor_version, enquiry2->fw.firmware_type, enquiry2->fw.turn_id); if (!((enquiry2->fw.major_version == 5 && enquiry2->fw.minor_version >= 6) || (enquiry2->fw.major_version == 4 && enquiry2->fw.minor_version >= 6) || (enquiry2->fw.major_version == 3 && enquiry2->fw.minor_version >= 51) || (enquiry2->fw.major_version == 2 && strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) { shost_printk(KERN_WARNING, cb->host, "Firmware Version '%s' unsupported\n", cb->fw_version); goto out; } /* * Initialize the Channels, Targets, Memory Size, and SAF-TE * Enclosure Management Enabled fields. */ switch (enquiry2->hw.model) { case MYRB_5_CHANNEL_BOARD: pchan_max = 5; break; case MYRB_3_CHANNEL_BOARD: case MYRB_3_CHANNEL_ASIC_DAC: pchan_max = 3; break; case MYRB_2_CHANNEL_BOARD: pchan_max = 2; break; default: pchan_max = enquiry2->cfg_chan; break; } pchan_cur = enquiry2->cur_chan; if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT) cb->bus_width = 32; else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT) cb->bus_width = 16; else cb->bus_width = 8; cb->ldev_block_size = enquiry2->ldev_block_size; shost->max_channel = pchan_cur; shost->max_id = enquiry2->max_targets; memsize = enquiry2->mem_size >> 20; cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE); /* * Initialize the Controller Queue Depth, Driver Queue Depth, * Logical Drive Count, Maximum Blocks per Command, Controller * Scatter/Gather Limit, and Driver Scatter/Gather Limit. * The Driver Queue Depth must be at most one less than the * Controller Queue Depth to allow for an automatic drive * rebuild operation. */ shost->can_queue = cb->enquiry->max_tcq; if (shost->can_queue < 3) shost->can_queue = enquiry2->max_cmds; if (shost->can_queue < 3) /* Play safe and disable TCQ */ shost->can_queue = 1; if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2) shost->can_queue = MYRB_CMD_MBOX_COUNT - 2; shost->max_sectors = enquiry2->max_sectors; shost->sg_tablesize = enquiry2->max_sge; if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT) shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT; /* * Initialize the Stripe Size, Segment Size, and Geometry Translation. */ cb->stripe_size = config2->blocks_per_stripe * config2->block_factor >> (10 - MYRB_BLKSIZE_BITS); cb->segment_size = config2->blocks_per_cacheline * config2->block_factor >> (10 - MYRB_BLKSIZE_BITS); /* Assume 255/63 translation */ cb->ldev_geom_heads = 255; cb->ldev_geom_sectors = 63; if (config2->drive_geometry) { cb->ldev_geom_heads = 128; cb->ldev_geom_sectors = 32; } /* * Initialize the Background Initialization Status. 
*/ if ((cb->fw_version[0] == '4' && strcmp(cb->fw_version, "4.08") >= 0) || (cb->fw_version[0] == '5' && strcmp(cb->fw_version, "5.08") >= 0)) { cb->bgi_status_supported = true; myrb_bgi_control(cb); } cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS; ret = 0; out: shost_printk(KERN_INFO, cb->host, "Configuring %s PCI RAID Controller\n", cb->model_name); shost_printk(KERN_INFO, cb->host, " Firmware Version: %s, Memory Size: %dMB\n", cb->fw_version, memsize); if (cb->io_addr == 0) shost_printk(KERN_INFO, cb->host, " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n", (unsigned long)cb->pci_addr, cb->irq); else shost_printk(KERN_INFO, cb->host, " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n", (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr, cb->irq); shost_printk(KERN_INFO, cb->host, " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n", cb->host->can_queue, cb->host->max_sectors); shost_printk(KERN_INFO, cb->host, " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n", cb->host->can_queue, cb->host->sg_tablesize, MYRB_SCATTER_GATHER_LIMIT); shost_printk(KERN_INFO, cb->host, " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n", cb->stripe_size, cb->segment_size, cb->ldev_geom_heads, cb->ldev_geom_sectors, cb->safte_enabled ? " SAF-TE Enclosure Management Enabled" : ""); shost_printk(KERN_INFO, cb->host, " Physical: %d/%d channels %d/%d/%d devices\n", pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead, cb->host->max_id); shost_printk(KERN_INFO, cb->host, " Logical: 1/1 channels, %d/%d disks\n", cb->enquiry->ldev_count, MYRB_MAX_LDEVS); out_free: dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2), enquiry2, enquiry2_addr); dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2), config2, config2_addr); return ret; } /* * myrb_unmap - unmaps controller structures */ static void myrb_unmap(struct myrb_hba *cb) { if (cb->ldev_info_buf) { size_t ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS; dma_free_coherent(&cb->pdev->dev, ldev_info_size, cb->ldev_info_buf, cb->ldev_info_addr); cb->ldev_info_buf = NULL; } if (cb->err_table) { size_t err_table_size = sizeof(struct myrb_error_entry) * MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS; dma_free_coherent(&cb->pdev->dev, err_table_size, cb->err_table, cb->err_table_addr); cb->err_table = NULL; } if (cb->enquiry) { dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry), cb->enquiry, cb->enquiry_addr); cb->enquiry = NULL; } if (cb->first_stat_mbox) { dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size, cb->first_stat_mbox, cb->stat_mbox_addr); cb->first_stat_mbox = NULL; } if (cb->first_cmd_mbox) { dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size, cb->first_cmd_mbox, cb->cmd_mbox_addr); cb->first_cmd_mbox = NULL; } } /* * myrb_cleanup - cleanup controller structures */ static void myrb_cleanup(struct myrb_hba *cb) { struct pci_dev *pdev = cb->pdev; /* Free the memory mailbox, status, and related structures */ myrb_unmap(cb); if (cb->mmio_base) { if (cb->disable_intr) cb->disable_intr(cb->io_base); iounmap(cb->mmio_base); } if (cb->irq) free_irq(cb->irq, cb); if (cb->io_addr) release_region(cb->io_addr, 0x80); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); scsi_host_put(cb->host); } static int myrb_host_reset(struct scsi_cmnd *scmd) { struct Scsi_Host *shost = scmd->device->host; struct myrb_hba *cb = shost_priv(shost); cb->reset(cb->io_base); return SUCCESS; } static int myrb_pthru_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { 
struct request *rq = scsi_cmd_to_rq(scmd); struct myrb_hba *cb = shost_priv(shost); struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); union myrb_cmd_mbox *mbox = &cmd_blk->mbox; struct myrb_dcdb *dcdb; dma_addr_t dcdb_addr; struct scsi_device *sdev = scmd->device; struct scatterlist *sgl; unsigned long flags; int nsge; myrb_reset_cmd(cmd_blk); dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr); if (!dcdb) return SCSI_MLQUEUE_HOST_BUSY; nsge = scsi_dma_map(scmd); if (nsge > 1) { dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr); scmd->result = (DID_ERROR << 16); scsi_done(scmd); return 0; } mbox->type3.opcode = MYRB_CMD_DCDB; mbox->type3.id = rq->tag + 3; mbox->type3.addr = dcdb_addr; dcdb->channel = sdev->channel; dcdb->target = sdev->id; switch (scmd->sc_data_direction) { case DMA_NONE: dcdb->data_xfer = MYRB_DCDB_XFER_NONE; break; case DMA_TO_DEVICE: dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE; break; case DMA_FROM_DEVICE: dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM; break; default: dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL; break; } dcdb->early_status = false; if (rq->timeout <= 10) dcdb->timeout = MYRB_DCDB_TMO_10_SECS; else if (rq->timeout <= 60) dcdb->timeout = MYRB_DCDB_TMO_60_SECS; else if (rq->timeout <= 600) dcdb->timeout = MYRB_DCDB_TMO_10_MINS; else dcdb->timeout = MYRB_DCDB_TMO_24_HRS; dcdb->no_autosense = false; dcdb->allow_disconnect = true; sgl = scsi_sglist(scmd); dcdb->dma_addr = sg_dma_address(sgl); if (sg_dma_len(sgl) > USHRT_MAX) { dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff; dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16; } else { dcdb->xfer_len_lo = sg_dma_len(sgl); dcdb->xfer_len_hi4 = 0; } dcdb->cdb_len = scmd->cmd_len; dcdb->sense_len = sizeof(dcdb->sense); memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len); spin_lock_irqsave(&cb->queue_lock, flags); cb->qcmd(cb, cmd_blk); spin_unlock_irqrestore(&cb->queue_lock, flags); return 0; } static void myrb_inquiry(struct myrb_hba *cb, struct scsi_cmnd *scmd) { unsigned char inq[36] = { 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00, 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, }; if (cb->bus_width > 16) inq[7] |= 1 << 6; if (cb->bus_width > 8) inq[7] |= 1 << 5; memcpy(&inq[16], cb->model_name, 16); memcpy(&inq[32], cb->fw_version, 1); memcpy(&inq[33], &cb->fw_version[2], 2); memcpy(&inq[35], &cb->fw_version[7], 1); scsi_sg_copy_from_buffer(scmd, (void *)inq, 36); } static void myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd, struct myrb_ldev_info *ldev_info) { unsigned char modes[32], *mode_pg; bool dbd; size_t mode_len; dbd = (scmd->cmnd[1] & 0x08) == 0x08; if (dbd) { mode_len = 24; mode_pg = &modes[4]; } else { mode_len = 32; mode_pg = &modes[12]; } memset(modes, 0, sizeof(modes)); modes[0] = mode_len - 1; if (!dbd) { unsigned char *block_desc = &modes[4]; modes[3] = 8; put_unaligned_be32(ldev_info->size, &block_desc[0]); put_unaligned_be32(cb->ldev_block_size, &block_desc[5]); } mode_pg[0] = 0x08; mode_pg[1] = 0x12; if (ldev_info->wb_enabled) mode_pg[2] |= 0x04; if (cb->segment_size) { mode_pg[2] |= 0x08; put_unaligned_be16(cb->segment_size, &mode_pg[14]); } scsi_sg_copy_from_buffer(scmd, modes, mode_len); } static void myrb_request_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd) { scsi_build_sense(scmd, 0, NO_SENSE, 0, 0); scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); } static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd, 
struct myrb_ldev_info *ldev_info) { unsigned char data[8]; dev_dbg(&scmd->device->sdev_gendev, "Capacity %u, blocksize %u\n", ldev_info->size, cb->ldev_block_size); put_unaligned_be32(ldev_info->size - 1, &data[0]); put_unaligned_be32(cb->ldev_block_size, &data[4]); scsi_sg_copy_from_buffer(scmd, data, 8); } static int myrb_ldev_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct myrb_hba *cb = shost_priv(shost); struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); union myrb_cmd_mbox *mbox = &cmd_blk->mbox; struct myrb_ldev_info *ldev_info; struct scsi_device *sdev = scmd->device; struct scatterlist *sgl; unsigned long flags; u64 lba; u32 block_cnt; int nsge; ldev_info = sdev->hostdata; if (ldev_info->state != MYRB_DEVICE_ONLINE && ldev_info->state != MYRB_DEVICE_WO) { dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n", sdev->id, ldev_info ? ldev_info->state : 0xff); scmd->result = (DID_BAD_TARGET << 16); scsi_done(scmd); return 0; } switch (scmd->cmnd[0]) { case TEST_UNIT_READY: scmd->result = (DID_OK << 16); scsi_done(scmd); return 0; case INQUIRY: if (scmd->cmnd[1] & 1) { /* Illegal request, invalid field in CDB */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); } else { myrb_inquiry(cb, scmd); scmd->result = (DID_OK << 16); } scsi_done(scmd); return 0; case SYNCHRONIZE_CACHE: scmd->result = (DID_OK << 16); scsi_done(scmd); return 0; case MODE_SENSE: if ((scmd->cmnd[2] & 0x3F) != 0x3F && (scmd->cmnd[2] & 0x3F) != 0x08) { /* Illegal request, invalid field in CDB */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); } else { myrb_mode_sense(cb, scmd, ldev_info); scmd->result = (DID_OK << 16); } scsi_done(scmd); return 0; case READ_CAPACITY: if ((scmd->cmnd[1] & 1) || (scmd->cmnd[8] & 1)) { /* Illegal request, invalid field in CDB */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); scsi_done(scmd); return 0; } lba = get_unaligned_be32(&scmd->cmnd[2]); if (lba) { /* Illegal request, invalid field in CDB */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); scsi_done(scmd); return 0; } myrb_read_capacity(cb, scmd, ldev_info); scsi_done(scmd); return 0; case REQUEST_SENSE: myrb_request_sense(cb, scmd); scmd->result = (DID_OK << 16); return 0; case SEND_DIAGNOSTIC: if (scmd->cmnd[1] != 0x04) { /* Illegal request, invalid field in CDB */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); } else { /* Assume good status */ scmd->result = (DID_OK << 16); } scsi_done(scmd); return 0; case READ_6: if (ldev_info->state == MYRB_DEVICE_WO) { /* Data protect, attempt to read invalid data */ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); scsi_done(scmd); return 0; } fallthrough; case WRITE_6: lba = (((scmd->cmnd[1] & 0x1F) << 16) | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); block_cnt = scmd->cmnd[4]; break; case READ_10: if (ldev_info->state == MYRB_DEVICE_WO) { /* Data protect, attempt to read invalid data */ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); scsi_done(scmd); return 0; } fallthrough; case WRITE_10: case VERIFY: /* 0x2F */ case WRITE_VERIFY: /* 0x2E */ lba = get_unaligned_be32(&scmd->cmnd[2]); block_cnt = get_unaligned_be16(&scmd->cmnd[7]); break; case READ_12: if (ldev_info->state == MYRB_DEVICE_WO) { /* Data protect, attempt to read invalid data */ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); scsi_done(scmd); return 0; } fallthrough; case WRITE_12: case VERIFY_12: /* 0xAF */ case WRITE_VERIFY_12: /* 0xAE */ lba = get_unaligned_be32(&scmd->cmnd[2]); block_cnt = get_unaligned_be32(&scmd->cmnd[6]); break; default: /* Illegal request, 
invalid opcode */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0); scsi_done(scmd); return 0; } myrb_reset_cmd(cmd_blk); mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3; if (scmd->sc_data_direction == DMA_NONE) goto submit; nsge = scsi_dma_map(scmd); if (nsge == 1) { sgl = scsi_sglist(scmd); if (scmd->sc_data_direction == DMA_FROM_DEVICE) mbox->type5.opcode = MYRB_CMD_READ; else mbox->type5.opcode = MYRB_CMD_WRITE; mbox->type5.ld.xfer_len = block_cnt; mbox->type5.ld.ldev_num = sdev->id; mbox->type5.lba = lba; mbox->type5.addr = (u32)sg_dma_address(sgl); } else { struct myrb_sge *hw_sgl; dma_addr_t hw_sgl_addr; int i; hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr); if (!hw_sgl) return SCSI_MLQUEUE_HOST_BUSY; cmd_blk->sgl = hw_sgl; cmd_blk->sgl_addr = hw_sgl_addr; if (scmd->sc_data_direction == DMA_FROM_DEVICE) mbox->type5.opcode = MYRB_CMD_READ_SG; else mbox->type5.opcode = MYRB_CMD_WRITE_SG; mbox->type5.ld.xfer_len = block_cnt; mbox->type5.ld.ldev_num = sdev->id; mbox->type5.lba = lba; mbox->type5.addr = hw_sgl_addr; mbox->type5.sg_count = nsge; scsi_for_each_sg(scmd, sgl, nsge, i) { hw_sgl->sge_addr = (u32)sg_dma_address(sgl); hw_sgl->sge_count = (u32)sg_dma_len(sgl); hw_sgl++; } } submit: spin_lock_irqsave(&cb->queue_lock, flags); cb->qcmd(cb, cmd_blk); spin_unlock_irqrestore(&cb->queue_lock, flags); return 0; } static int myrb_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct scsi_device *sdev = scmd->device; if (sdev->channel > myrb_logical_channel(shost)) { scmd->result = (DID_BAD_TARGET << 16); scsi_done(scmd); return 0; } if (sdev->channel == myrb_logical_channel(shost)) return myrb_ldev_queuecommand(shost, scmd); return myrb_pthru_queuecommand(shost, scmd); } static int myrb_ldev_slave_alloc(struct scsi_device *sdev) { struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_ldev_info *ldev_info; unsigned short ldev_num = sdev->id; enum raid_level level; ldev_info = cb->ldev_info_buf + ldev_num; if (!ldev_info) return -ENXIO; sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL); if (!sdev->hostdata) return -ENOMEM; dev_dbg(&sdev->sdev_gendev, "slave alloc ldev %d state %x\n", ldev_num, ldev_info->state); memcpy(sdev->hostdata, ldev_info, sizeof(*ldev_info)); switch (ldev_info->raid_level) { case MYRB_RAID_LEVEL0: level = RAID_LEVEL_LINEAR; break; case MYRB_RAID_LEVEL1: level = RAID_LEVEL_1; break; case MYRB_RAID_LEVEL3: level = RAID_LEVEL_3; break; case MYRB_RAID_LEVEL5: level = RAID_LEVEL_5; break; case MYRB_RAID_LEVEL6: level = RAID_LEVEL_6; break; case MYRB_RAID_JBOD: level = RAID_LEVEL_JBOD; break; default: level = RAID_LEVEL_UNKNOWN; break; } raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level); return 0; } static int myrb_pdev_slave_alloc(struct scsi_device *sdev) { struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_pdev_state *pdev_info; unsigned short status; if (sdev->id > MYRB_MAX_TARGETS) return -ENXIO; pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL); if (!pdev_info) return -ENOMEM; status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, sdev, pdev_info); if (status != MYRB_STATUS_SUCCESS) { dev_dbg(&sdev->sdev_gendev, "Failed to get device state, status %x\n", status); kfree(pdev_info); return -ENXIO; } if (!pdev_info->present) { dev_dbg(&sdev->sdev_gendev, "device not present, skip\n"); kfree(pdev_info); return -ENXIO; } dev_dbg(&sdev->sdev_gendev, "slave alloc pdev %d:%d state %x\n", sdev->channel, sdev->id, pdev_info->state); sdev->hostdata = pdev_info; return 0; } static int myrb_slave_alloc(struct 
scsi_device *sdev) { if (sdev->channel > myrb_logical_channel(sdev->host)) return -ENXIO; if (sdev->lun > 0) return -ENXIO; if (sdev->channel == myrb_logical_channel(sdev->host)) return myrb_ldev_slave_alloc(sdev); return myrb_pdev_slave_alloc(sdev); } static int myrb_slave_configure(struct scsi_device *sdev) { struct myrb_ldev_info *ldev_info; if (sdev->channel > myrb_logical_channel(sdev->host)) return -ENXIO; if (sdev->channel < myrb_logical_channel(sdev->host)) { sdev->no_uld_attach = 1; return 0; } if (sdev->lun != 0) return -ENXIO; ldev_info = sdev->hostdata; if (!ldev_info) return -ENXIO; if (ldev_info->state != MYRB_DEVICE_ONLINE) sdev_printk(KERN_INFO, sdev, "Logical drive is %s\n", myrb_devstate_name(ldev_info->state)); sdev->tagged_supported = 1; return 0; } static void myrb_slave_destroy(struct scsi_device *sdev) { kfree(sdev->hostdata); } static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { struct myrb_hba *cb = shost_priv(sdev->host); geom[0] = cb->ldev_geom_heads; geom[1] = cb->ldev_geom_sectors; geom[2] = sector_div(capacity, geom[0] * geom[1]); return 0; } static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = shost_priv(sdev->host); int ret; if (!sdev->hostdata) return snprintf(buf, 16, "Unknown\n"); if (sdev->channel == myrb_logical_channel(sdev->host)) { struct myrb_ldev_info *ldev_info = sdev->hostdata; const char *name; name = myrb_devstate_name(ldev_info->state); if (name) ret = snprintf(buf, 32, "%s\n", name); else ret = snprintf(buf, 32, "Invalid (%02X)\n", ldev_info->state); } else { struct myrb_pdev_state *pdev_info = sdev->hostdata; unsigned short status; const char *name; status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, sdev, pdev_info); if (status != MYRB_STATUS_SUCCESS) sdev_printk(KERN_INFO, sdev, "Failed to get device state, status %x\n", status); if (!pdev_info->present) name = "Removed"; else name = myrb_devstate_name(pdev_info->state); if (name) ret = snprintf(buf, 32, "%s\n", name); else ret = snprintf(buf, 32, "Invalid (%02X)\n", pdev_info->state); } return ret; } static ssize_t raid_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_pdev_state *pdev_info; enum myrb_devstate new_state; unsigned short status; if (!strncmp(buf, "kill", 4) || !strncmp(buf, "offline", 7)) new_state = MYRB_DEVICE_DEAD; else if (!strncmp(buf, "online", 6)) new_state = MYRB_DEVICE_ONLINE; else if (!strncmp(buf, "standby", 7)) new_state = MYRB_DEVICE_STANDBY; else return -EINVAL; pdev_info = sdev->hostdata; if (!pdev_info) { sdev_printk(KERN_INFO, sdev, "Failed - no physical device information\n"); return -ENXIO; } if (!pdev_info->present) { sdev_printk(KERN_INFO, sdev, "Failed - device not present\n"); return -ENXIO; } if (pdev_info->state == new_state) return count; status = myrb_set_pdev_state(cb, sdev, new_state); switch (status) { case MYRB_STATUS_SUCCESS: break; case MYRB_STATUS_START_DEVICE_FAILED: sdev_printk(KERN_INFO, sdev, "Failed - Unable to Start Device\n"); count = -EAGAIN; break; case MYRB_STATUS_NO_DEVICE: sdev_printk(KERN_INFO, sdev, "Failed - No Device at Address\n"); count = -ENODEV; break; case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET: sdev_printk(KERN_INFO, sdev, "Failed - Invalid Channel or Target or Modifier\n"); count = -EINVAL; 
break; case MYRB_STATUS_CHANNEL_BUSY: sdev_printk(KERN_INFO, sdev, "Failed - Channel Busy\n"); count = -EBUSY; break; default: sdev_printk(KERN_INFO, sdev, "Failed - Unexpected Status %04X\n", status); count = -EIO; break; } return count; } static DEVICE_ATTR_RW(raid_state); static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); if (sdev->channel == myrb_logical_channel(sdev->host)) { struct myrb_ldev_info *ldev_info = sdev->hostdata; const char *name; if (!ldev_info) return -ENXIO; name = myrb_raidlevel_name(ldev_info->raid_level); if (!name) return snprintf(buf, 32, "Invalid (%02X)\n", ldev_info->state); return snprintf(buf, 32, "%s\n", name); } return snprintf(buf, 32, "Physical Drive\n"); } static DEVICE_ATTR_RO(raid_level); static ssize_t rebuild_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_rbld_progress rbld_buf; unsigned char status; if (sdev->channel < myrb_logical_channel(sdev->host)) return snprintf(buf, 32, "physical device - not rebuilding\n"); status = myrb_get_rbld_progress(cb, &rbld_buf); if (rbld_buf.ldev_num != sdev->id || status != MYRB_STATUS_SUCCESS) return snprintf(buf, 32, "not rebuilding\n"); return snprintf(buf, 32, "rebuilding block %u of %u\n", rbld_buf.ldev_size - rbld_buf.blocks_left, rbld_buf.ldev_size); } static ssize_t rebuild_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_cmdblk *cmd_blk; union myrb_cmd_mbox *mbox; unsigned short status; int rc, start; const char *msg; rc = kstrtoint(buf, 0, &start); if (rc) return rc; if (sdev->channel >= myrb_logical_channel(sdev->host)) return -ENXIO; status = myrb_get_rbld_progress(cb, NULL); if (start) { if (status == MYRB_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Rebuild Not Initiated; already in progress\n"); return -EALREADY; } mutex_lock(&cb->dcmd_mutex); cmd_blk = &cb->dcmd_blk; myrb_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC; mbox->type3D.id = MYRB_DCMD_TAG; mbox->type3D.channel = sdev->channel; mbox->type3D.target = sdev->id; status = myrb_exec_cmd(cb, cmd_blk); mutex_unlock(&cb->dcmd_mutex); } else { struct pci_dev *pdev = cb->pdev; unsigned char *rate; dma_addr_t rate_addr; if (status != MYRB_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Rebuild Not Cancelled; not in progress\n"); return 0; } rate = dma_alloc_coherent(&pdev->dev, sizeof(char), &rate_addr, GFP_KERNEL); if (rate == NULL) { sdev_printk(KERN_INFO, sdev, "Cancellation of Rebuild Failed - Out of Memory\n"); return -ENOMEM; } mutex_lock(&cb->dcmd_mutex); cmd_blk = &cb->dcmd_blk; myrb_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; mbox->type3R.id = MYRB_DCMD_TAG; mbox->type3R.rbld_rate = 0xFF; mbox->type3R.addr = rate_addr; status = myrb_exec_cmd(cb, cmd_blk); dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); mutex_unlock(&cb->dcmd_mutex); } if (status == MYRB_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", start ? 
"Initiated" : "Cancelled"); return count; } if (!start) { sdev_printk(KERN_INFO, sdev, "Rebuild Not Cancelled, status 0x%x\n", status); return -EIO; } switch (status) { case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: msg = "Attempt to Rebuild Online or Unresponsive Drive"; break; case MYRB_STATUS_RBLD_NEW_DISK_FAILED: msg = "New Disk Failed During Rebuild"; break; case MYRB_STATUS_INVALID_ADDRESS: msg = "Invalid Device Address"; break; case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: msg = "Already in Progress"; break; default: msg = NULL; break; } if (msg) sdev_printk(KERN_INFO, sdev, "Rebuild Failed - %s\n", msg); else sdev_printk(KERN_INFO, sdev, "Rebuild Failed, status 0x%x\n", status); return -EIO; } static DEVICE_ATTR_RW(rebuild); static ssize_t consistency_check_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_rbld_progress rbld_buf; struct myrb_cmdblk *cmd_blk; union myrb_cmd_mbox *mbox; unsigned short ldev_num = 0xFFFF; unsigned short status; int rc, start; const char *msg; rc = kstrtoint(buf, 0, &start); if (rc) return rc; if (sdev->channel < myrb_logical_channel(sdev->host)) return -ENXIO; status = myrb_get_rbld_progress(cb, &rbld_buf); if (start) { if (status == MYRB_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Check Consistency Not Initiated; already in progress\n"); return -EALREADY; } mutex_lock(&cb->dcmd_mutex); cmd_blk = &cb->dcmd_blk; myrb_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC; mbox->type3C.id = MYRB_DCMD_TAG; mbox->type3C.ldev_num = sdev->id; mbox->type3C.auto_restore = true; status = myrb_exec_cmd(cb, cmd_blk); mutex_unlock(&cb->dcmd_mutex); } else { struct pci_dev *pdev = cb->pdev; unsigned char *rate; dma_addr_t rate_addr; if (ldev_num != sdev->id) { sdev_printk(KERN_INFO, sdev, "Check Consistency Not Cancelled; not in progress\n"); return 0; } rate = dma_alloc_coherent(&pdev->dev, sizeof(char), &rate_addr, GFP_KERNEL); if (rate == NULL) { sdev_printk(KERN_INFO, sdev, "Cancellation of Check Consistency Failed - Out of Memory\n"); return -ENOMEM; } mutex_lock(&cb->dcmd_mutex); cmd_blk = &cb->dcmd_blk; myrb_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL; mbox->type3R.id = MYRB_DCMD_TAG; mbox->type3R.rbld_rate = 0xFF; mbox->type3R.addr = rate_addr; status = myrb_exec_cmd(cb, cmd_blk); dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr); mutex_unlock(&cb->dcmd_mutex); } if (status == MYRB_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n", start ? 
"Initiated" : "Cancelled"); return count; } if (!start) { sdev_printk(KERN_INFO, sdev, "Check Consistency Not Cancelled, status 0x%x\n", status); return -EIO; } switch (status) { case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE: msg = "Dependent Physical Device is DEAD"; break; case MYRB_STATUS_RBLD_NEW_DISK_FAILED: msg = "New Disk Failed During Rebuild"; break; case MYRB_STATUS_INVALID_ADDRESS: msg = "Invalid or Nonredundant Logical Drive"; break; case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS: msg = "Already in Progress"; break; default: msg = NULL; break; } if (msg) sdev_printk(KERN_INFO, sdev, "Check Consistency Failed - %s\n", msg); else sdev_printk(KERN_INFO, sdev, "Check Consistency Failed, status 0x%x\n", status); return -EIO; } static ssize_t consistency_check_show(struct device *dev, struct device_attribute *attr, char *buf) { return rebuild_show(dev, attr, buf); } static DEVICE_ATTR_RW(consistency_check); static ssize_t ctlr_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrb_hba *cb = shost_priv(shost); return snprintf(buf, 20, "%u\n", cb->ctlr_num); } static DEVICE_ATTR_RO(ctlr_num); static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrb_hba *cb = shost_priv(shost); return snprintf(buf, 16, "%s\n", cb->fw_version); } static DEVICE_ATTR_RO(firmware); static ssize_t model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrb_hba *cb = shost_priv(shost); return snprintf(buf, 16, "%s\n", cb->model_name); } static DEVICE_ATTR_RO(model); static ssize_t flush_cache_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct myrb_hba *cb = shost_priv(shost); unsigned short status; status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); if (status == MYRB_STATUS_SUCCESS) { shost_printk(KERN_INFO, shost, "Cache Flush Completed\n"); return count; } shost_printk(KERN_INFO, shost, "Cache Flush Failed, status %x\n", status); return -EIO; } static DEVICE_ATTR_WO(flush_cache); static struct attribute *myrb_sdev_attrs[] = { &dev_attr_rebuild.attr, &dev_attr_consistency_check.attr, &dev_attr_raid_state.attr, &dev_attr_raid_level.attr, NULL, }; ATTRIBUTE_GROUPS(myrb_sdev); static struct attribute *myrb_shost_attrs[] = { &dev_attr_ctlr_num.attr, &dev_attr_model.attr, &dev_attr_firmware.attr, &dev_attr_flush_cache.attr, NULL, }; ATTRIBUTE_GROUPS(myrb_shost); static const struct scsi_host_template myrb_template = { .module = THIS_MODULE, .name = "DAC960", .proc_name = "myrb", .queuecommand = myrb_queuecommand, .eh_host_reset_handler = myrb_host_reset, .slave_alloc = myrb_slave_alloc, .slave_configure = myrb_slave_configure, .slave_destroy = myrb_slave_destroy, .bios_param = myrb_biosparam, .cmd_size = sizeof(struct myrb_cmdblk), .shost_groups = myrb_shost_groups, .sdev_groups = myrb_sdev_groups, .this_id = -1, }; /** * myrb_is_raid - return boolean indicating device is raid volume * @dev: the device struct object */ static int myrb_is_raid(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); return sdev->channel == myrb_logical_channel(sdev->host); } /** * myrb_get_resync - get raid volume resync percent complete * @dev: the device struct object */ static void myrb_get_resync(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = 
shost_priv(sdev->host); struct myrb_rbld_progress rbld_buf; unsigned int percent_complete = 0; unsigned short status; unsigned int ldev_size = 0, remaining = 0; if (sdev->channel < myrb_logical_channel(sdev->host)) return; status = myrb_get_rbld_progress(cb, &rbld_buf); if (status == MYRB_STATUS_SUCCESS) { if (rbld_buf.ldev_num == sdev->id) { ldev_size = rbld_buf.ldev_size; remaining = rbld_buf.blocks_left; } } if (remaining && ldev_size) percent_complete = (ldev_size - remaining) * 100 / ldev_size; raid_set_resync(myrb_raid_template, dev, percent_complete); } /** * myrb_get_state - get raid volume status * @dev: the device struct object */ static void myrb_get_state(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct myrb_hba *cb = shost_priv(sdev->host); struct myrb_ldev_info *ldev_info = sdev->hostdata; enum raid_state state = RAID_STATE_UNKNOWN; unsigned short status; if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info) state = RAID_STATE_UNKNOWN; else { status = myrb_get_rbld_progress(cb, NULL); if (status == MYRB_STATUS_SUCCESS) state = RAID_STATE_RESYNCING; else { switch (ldev_info->state) { case MYRB_DEVICE_ONLINE: state = RAID_STATE_ACTIVE; break; case MYRB_DEVICE_WO: case MYRB_DEVICE_CRITICAL: state = RAID_STATE_DEGRADED; break; default: state = RAID_STATE_OFFLINE; } } } raid_set_state(myrb_raid_template, dev, state); } static struct raid_function_template myrb_raid_functions = { .cookie = &myrb_template, .is_raid = myrb_is_raid, .get_resync = myrb_get_resync, .get_state = myrb_get_state, }; static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk, struct scsi_cmnd *scmd) { unsigned short status; if (!cmd_blk) return; scsi_dma_unmap(scmd); if (cmd_blk->dcdb) { memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64); dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb, cmd_blk->dcdb_addr); cmd_blk->dcdb = NULL; } if (cmd_blk->sgl) { dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr); cmd_blk->sgl = NULL; cmd_blk->sgl_addr = 0; } status = cmd_blk->status; switch (status) { case MYRB_STATUS_SUCCESS: case MYRB_STATUS_DEVICE_BUSY: scmd->result = (DID_OK << 16) | status; break; case MYRB_STATUS_BAD_DATA: dev_dbg(&scmd->device->sdev_gendev, "Bad Data Encountered\n"); if (scmd->sc_data_direction == DMA_FROM_DEVICE) /* Unrecovered read error */ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0); else /* Write error */ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0); break; case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR: scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n"); if (scmd->sc_data_direction == DMA_FROM_DEVICE) /* Unrecovered read error, auto-reallocation failed */ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04); else /* Write error, auto-reallocation failed */ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02); break; case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE: dev_dbg(&scmd->device->sdev_gendev, "Logical Drive Nonexistent or Offline"); scmd->result = (DID_BAD_TARGET << 16); break; case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV: dev_dbg(&scmd->device->sdev_gendev, "Attempt to Access Beyond End of Logical Drive"); /* Logical block address out of range */ scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0); break; case MYRB_STATUS_DEVICE_NONRESPONSIVE: dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n"); scmd->result = (DID_BAD_TARGET << 16); break; default: scmd_printk(KERN_ERR, scmd, "Unexpected Error Status %04X", status); scmd->result = (DID_ERROR << 16); break; } scsi_done(scmd); } static void 
myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) { if (!cmd_blk) return; if (cmd_blk->completion) { complete(cmd_blk->completion); cmd_blk->completion = NULL; } } static void myrb_monitor(struct work_struct *work) { struct myrb_hba *cb = container_of(work, struct myrb_hba, monitor_work.work); struct Scsi_Host *shost = cb->host; unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL; dev_dbg(&shost->shost_gendev, "monitor tick\n"); if (cb->new_ev_seq > cb->old_ev_seq) { int event = cb->old_ev_seq; dev_dbg(&shost->shost_gendev, "get event log no %d/%d\n", cb->new_ev_seq, event); myrb_get_event(cb, event); cb->old_ev_seq = event + 1; interval = 10; } else if (cb->need_err_info) { cb->need_err_info = false; dev_dbg(&shost->shost_gendev, "get error table\n"); myrb_get_errtable(cb); interval = 10; } else if (cb->need_rbld && cb->rbld_first) { cb->need_rbld = false; dev_dbg(&shost->shost_gendev, "get rebuild progress\n"); myrb_update_rbld_progress(cb); interval = 10; } else if (cb->need_ldev_info) { cb->need_ldev_info = false; dev_dbg(&shost->shost_gendev, "get logical drive info\n"); myrb_get_ldev_info(cb); interval = 10; } else if (cb->need_rbld) { cb->need_rbld = false; dev_dbg(&shost->shost_gendev, "get rebuild progress\n"); myrb_update_rbld_progress(cb); interval = 10; } else if (cb->need_cc_status) { cb->need_cc_status = false; dev_dbg(&shost->shost_gendev, "get consistency check progress\n"); myrb_get_cc_progress(cb); interval = 10; } else if (cb->need_bgi_status) { cb->need_bgi_status = false; dev_dbg(&shost->shost_gendev, "get background init status\n"); myrb_bgi_control(cb); interval = 10; } else { dev_dbg(&shost->shost_gendev, "new enquiry\n"); mutex_lock(&cb->dma_mutex); myrb_hba_enquiry(cb); mutex_unlock(&cb->dma_mutex); if ((cb->new_ev_seq - cb->old_ev_seq > 0) || cb->need_err_info || cb->need_rbld || cb->need_ldev_info || cb->need_cc_status || cb->need_bgi_status) { dev_dbg(&shost->shost_gendev, "reschedule monitor\n"); interval = 0; } } if (interval > 1) cb->primary_monitor_time = jiffies; queue_delayed_work(cb->work_q, &cb->monitor_work, interval); } /* * myrb_err_status - reports controller BIOS messages * * Controller BIOS messages are passed through the Error Status Register * when the driver performs the BIOS handshaking. * * Return: true for fatal errors and false otherwise. 
*/ static bool myrb_err_status(struct myrb_hba *cb, unsigned char error, unsigned char parm0, unsigned char parm1) { struct pci_dev *pdev = cb->pdev; switch (error) { case 0x00: dev_info(&pdev->dev, "Physical Device %d:%d Not Responding\n", parm1, parm0); break; case 0x08: dev_notice(&pdev->dev, "Spinning Up Drives\n"); break; case 0x30: dev_notice(&pdev->dev, "Configuration Checksum Error\n"); break; case 0x60: dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n"); break; case 0x70: dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n"); break; case 0x90: dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n", parm1, parm0); break; case 0xA0: dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n"); break; case 0xB0: dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n"); break; case 0xD0: dev_notice(&pdev->dev, "New Controller Configuration Found\n"); break; case 0xF0: dev_err(&pdev->dev, "Fatal Memory Parity Error\n"); return true; default: dev_err(&pdev->dev, "Unknown Initialization Error %02X\n", error); return true; } return false; } /* * Hardware-specific functions */ /* * DAC960 LA Series Controllers */ static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base) { writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET); } static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base) { writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET); } static inline void DAC960_LA_reset_ctrl(void __iomem *base) { writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET); } static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base) { writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET); } static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base) { unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET); return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY); } static inline bool DAC960_LA_init_in_progress(void __iomem *base) { unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET); return !(idb & DAC960_LA_IDB_INIT_DONE); } static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base) { writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET); } static inline void DAC960_LA_ack_intr(void __iomem *base) { writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET); } static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base) { unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET); return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL; } static inline void DAC960_LA_enable_intr(void __iomem *base) { unsigned char odb = 0xFF; odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ; writeb(odb, base + DAC960_LA_IRQMASK_OFFSET); } static inline void DAC960_LA_disable_intr(void __iomem *base) { unsigned char odb = 0xFF; odb |= DAC960_LA_IRQMASK_DISABLE_IRQ; writeb(odb, base + DAC960_LA_IRQMASK_OFFSET); } static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox, union myrb_cmd_mbox *mbox) { mem_mbox->words[1] = mbox->words[1]; mem_mbox->words[2] = mbox->words[2]; mem_mbox->words[3] = mbox->words[3]; /* Memory barrier to prevent reordering */ wmb(); mem_mbox->words[0] = mbox->words[0]; /* Memory barrier to force PCI access */ mb(); } static inline void DAC960_LA_write_hw_mbox(void __iomem *base, union myrb_cmd_mbox *mbox) { writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET); writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET); writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET); writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET); } static inline unsigned 
short DAC960_LA_read_status(void __iomem *base) { return readw(base + DAC960_LA_STS_OFFSET); } static inline bool DAC960_LA_read_error_status(void __iomem *base, unsigned char *error, unsigned char *param0, unsigned char *param1) { unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET); if (!(errsts & DAC960_LA_ERRSTS_PENDING)) return false; errsts &= ~DAC960_LA_ERRSTS_PENDING; *error = errsts; *param0 = readb(base + DAC960_LA_CMDOP_OFFSET); *param1 = readb(base + DAC960_LA_CMDID_OFFSET); writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET); return true; } static inline unsigned short DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base, union myrb_cmd_mbox *mbox) { unsigned short status; int timeout = 0; while (timeout < MYRB_MAILBOX_TIMEOUT) { if (!DAC960_LA_hw_mbox_is_full(base)) break; udelay(10); timeout++; } if (DAC960_LA_hw_mbox_is_full(base)) { dev_err(&pdev->dev, "Timeout waiting for empty mailbox\n"); return MYRB_STATUS_SUBSYS_TIMEOUT; } DAC960_LA_write_hw_mbox(base, mbox); DAC960_LA_hw_mbox_new_cmd(base); timeout = 0; while (timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_LA_hw_mbox_status_available(base)) break; udelay(10); timeout++; } if (!DAC960_LA_hw_mbox_status_available(base)) { dev_err(&pdev->dev, "Timeout waiting for mailbox status\n"); return MYRB_STATUS_SUBSYS_TIMEOUT; } status = DAC960_LA_read_status(base); DAC960_LA_ack_hw_mbox_intr(base); DAC960_LA_ack_hw_mbox_status(base); return status; } static int DAC960_LA_hw_init(struct pci_dev *pdev, struct myrb_hba *cb, void __iomem *base) { int timeout = 0; unsigned char error, parm0, parm1; DAC960_LA_disable_intr(base); DAC960_LA_ack_hw_mbox_status(base); udelay(1000); while (DAC960_LA_init_in_progress(base) && timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_LA_read_error_status(base, &error, &parm0, &parm1) && myrb_err_status(cb, error, parm0, parm1)) return -ENODEV; udelay(10); timeout++; } if (timeout == MYRB_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) { dev_err(&pdev->dev, "Unable to Enable Memory Mailbox Interface\n"); DAC960_LA_reset_ctrl(base); return -ENODEV; } DAC960_LA_enable_intr(base); cb->qcmd = myrb_qcmd; cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox; if (cb->dual_mode_interface) cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd; else cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd; cb->disable_intr = DAC960_LA_disable_intr; cb->reset = DAC960_LA_reset_ctrl; return 0; } static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg) { struct myrb_hba *cb = arg; void __iomem *base = cb->io_base; struct myrb_stat_mbox *next_stat_mbox; unsigned long flags; spin_lock_irqsave(&cb->queue_lock, flags); DAC960_LA_ack_intr(base); next_stat_mbox = cb->next_stat_mbox; while (next_stat_mbox->valid) { unsigned char id = next_stat_mbox->id; struct scsi_cmnd *scmd = NULL; struct myrb_cmdblk *cmd_blk = NULL; if (id == MYRB_DCMD_TAG) cmd_blk = &cb->dcmd_blk; else if (id == MYRB_MCMD_TAG) cmd_blk = &cb->mcmd_blk; else { scmd = scsi_host_find_tag(cb->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) cmd_blk->status = next_stat_mbox->status; else dev_err(&cb->pdev->dev, "Unhandled command completion %d\n", id); memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); if (++next_stat_mbox > cb->last_stat_mbox) next_stat_mbox = cb->first_stat_mbox; if (cmd_blk) { if (id < 3) myrb_handle_cmdblk(cb, cmd_blk); else myrb_handle_scsi(cb, cmd_blk, scmd); } } cb->next_stat_mbox = next_stat_mbox; 
spin_unlock_irqrestore(&cb->queue_lock, flags); return IRQ_HANDLED; } static struct myrb_privdata DAC960_LA_privdata = { .hw_init = DAC960_LA_hw_init, .irq_handler = DAC960_LA_intr_handler, .mmio_size = DAC960_LA_mmio_size, }; /* * DAC960 PG Series Controllers */ static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base) { writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET); } static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base) { writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET); } static inline void DAC960_PG_reset_ctrl(void __iomem *base) { writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET); } static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base) { writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET); } static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base) { unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET); return idb & DAC960_PG_IDB_HWMBOX_FULL; } static inline bool DAC960_PG_init_in_progress(void __iomem *base) { unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET); return idb & DAC960_PG_IDB_INIT_IN_PROGRESS; } static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base) { writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET); } static inline void DAC960_PG_ack_intr(void __iomem *base) { writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET); } static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base) { unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET); return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL; } static inline void DAC960_PG_enable_intr(void __iomem *base) { unsigned int imask = (unsigned int)-1; imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ; writel(imask, base + DAC960_PG_IRQMASK_OFFSET); } static inline void DAC960_PG_disable_intr(void __iomem *base) { unsigned int imask = (unsigned int)-1; writel(imask, base + DAC960_PG_IRQMASK_OFFSET); } static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox, union myrb_cmd_mbox *mbox) { mem_mbox->words[1] = mbox->words[1]; mem_mbox->words[2] = mbox->words[2]; mem_mbox->words[3] = mbox->words[3]; /* Memory barrier to prevent reordering */ wmb(); mem_mbox->words[0] = mbox->words[0]; /* Memory barrier to force PCI access */ mb(); } static inline void DAC960_PG_write_hw_mbox(void __iomem *base, union myrb_cmd_mbox *mbox) { writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET); writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET); writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET); writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET); } static inline unsigned short DAC960_PG_read_status(void __iomem *base) { return readw(base + DAC960_PG_STS_OFFSET); } static inline bool DAC960_PG_read_error_status(void __iomem *base, unsigned char *error, unsigned char *param0, unsigned char *param1) { unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET); if (!(errsts & DAC960_PG_ERRSTS_PENDING)) return false; errsts &= ~DAC960_PG_ERRSTS_PENDING; *error = errsts; *param0 = readb(base + DAC960_PG_CMDOP_OFFSET); *param1 = readb(base + DAC960_PG_CMDID_OFFSET); writeb(0, base + DAC960_PG_ERRSTS_OFFSET); return true; } static inline unsigned short DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base, union myrb_cmd_mbox *mbox) { unsigned short status; int timeout = 0; while (timeout < MYRB_MAILBOX_TIMEOUT) { if (!DAC960_PG_hw_mbox_is_full(base)) break; udelay(10); timeout++; } if (DAC960_PG_hw_mbox_is_full(base)) { dev_err(&pdev->dev, "Timeout waiting 
for empty mailbox\n"); return MYRB_STATUS_SUBSYS_TIMEOUT; } DAC960_PG_write_hw_mbox(base, mbox); DAC960_PG_hw_mbox_new_cmd(base); timeout = 0; while (timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_PG_hw_mbox_status_available(base)) break; udelay(10); timeout++; } if (!DAC960_PG_hw_mbox_status_available(base)) { dev_err(&pdev->dev, "Timeout waiting for mailbox status\n"); return MYRB_STATUS_SUBSYS_TIMEOUT; } status = DAC960_PG_read_status(base); DAC960_PG_ack_hw_mbox_intr(base); DAC960_PG_ack_hw_mbox_status(base); return status; } static int DAC960_PG_hw_init(struct pci_dev *pdev, struct myrb_hba *cb, void __iomem *base) { int timeout = 0; unsigned char error, parm0, parm1; DAC960_PG_disable_intr(base); DAC960_PG_ack_hw_mbox_status(base); udelay(1000); while (DAC960_PG_init_in_progress(base) && timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_PG_read_error_status(base, &error, &parm0, &parm1) && myrb_err_status(cb, error, parm0, parm1)) return -EIO; udelay(10); timeout++; } if (timeout == MYRB_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) { dev_err(&pdev->dev, "Unable to Enable Memory Mailbox Interface\n"); DAC960_PG_reset_ctrl(base); return -ENODEV; } DAC960_PG_enable_intr(base); cb->qcmd = myrb_qcmd; cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox; if (cb->dual_mode_interface) cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd; else cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd; cb->disable_intr = DAC960_PG_disable_intr; cb->reset = DAC960_PG_reset_ctrl; return 0; } static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg) { struct myrb_hba *cb = arg; void __iomem *base = cb->io_base; struct myrb_stat_mbox *next_stat_mbox; unsigned long flags; spin_lock_irqsave(&cb->queue_lock, flags); DAC960_PG_ack_intr(base); next_stat_mbox = cb->next_stat_mbox; while (next_stat_mbox->valid) { unsigned char id = next_stat_mbox->id; struct scsi_cmnd *scmd = NULL; struct myrb_cmdblk *cmd_blk = NULL; if (id == MYRB_DCMD_TAG) cmd_blk = &cb->dcmd_blk; else if (id == MYRB_MCMD_TAG) cmd_blk = &cb->mcmd_blk; else { scmd = scsi_host_find_tag(cb->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) cmd_blk->status = next_stat_mbox->status; else dev_err(&cb->pdev->dev, "Unhandled command completion %d\n", id); memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox)); if (++next_stat_mbox > cb->last_stat_mbox) next_stat_mbox = cb->first_stat_mbox; if (id < 3) myrb_handle_cmdblk(cb, cmd_blk); else myrb_handle_scsi(cb, cmd_blk, scmd); } cb->next_stat_mbox = next_stat_mbox; spin_unlock_irqrestore(&cb->queue_lock, flags); return IRQ_HANDLED; } static struct myrb_privdata DAC960_PG_privdata = { .hw_init = DAC960_PG_hw_init, .irq_handler = DAC960_PG_intr_handler, .mmio_size = DAC960_PG_mmio_size, }; /* * DAC960 PD Series Controllers */ static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base) { writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET); } static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base) { writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET); } static inline void DAC960_PD_reset_ctrl(void __iomem *base) { writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET); } static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base) { unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET); return idb & DAC960_PD_IDB_HWMBOX_FULL; } static inline bool DAC960_PD_init_in_progress(void __iomem *base) { unsigned char idb = readb(base + 
DAC960_PD_IDB_OFFSET); return idb & DAC960_PD_IDB_INIT_IN_PROGRESS; } static inline void DAC960_PD_ack_intr(void __iomem *base) { writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET); } static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base) { unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET); return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL; } static inline void DAC960_PD_enable_intr(void __iomem *base) { writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET); } static inline void DAC960_PD_disable_intr(void __iomem *base) { writeb(0, base + DAC960_PD_IRQEN_OFFSET); } static inline void DAC960_PD_write_cmd_mbox(void __iomem *base, union myrb_cmd_mbox *mbox) { writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET); writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET); writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET); writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET); } static inline unsigned char DAC960_PD_read_status_cmd_ident(void __iomem *base) { return readb(base + DAC960_PD_STSID_OFFSET); } static inline unsigned short DAC960_PD_read_status(void __iomem *base) { return readw(base + DAC960_PD_STS_OFFSET); } static inline bool DAC960_PD_read_error_status(void __iomem *base, unsigned char *error, unsigned char *param0, unsigned char *param1) { unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET); if (!(errsts & DAC960_PD_ERRSTS_PENDING)) return false; errsts &= ~DAC960_PD_ERRSTS_PENDING; *error = errsts; *param0 = readb(base + DAC960_PD_CMDOP_OFFSET); *param1 = readb(base + DAC960_PD_CMDID_OFFSET); writeb(0, base + DAC960_PD_ERRSTS_OFFSET); return true; } static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) { void __iomem *base = cb->io_base; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; while (DAC960_PD_hw_mbox_is_full(base)) udelay(1); DAC960_PD_write_cmd_mbox(base, mbox); DAC960_PD_hw_mbox_new_cmd(base); } static int DAC960_PD_hw_init(struct pci_dev *pdev, struct myrb_hba *cb, void __iomem *base) { int timeout = 0; unsigned char error, parm0, parm1; if (!request_region(cb->io_addr, 0x80, "myrb")) { dev_err(&pdev->dev, "IO port 0x%lx busy\n", (unsigned long)cb->io_addr); return -EBUSY; } DAC960_PD_disable_intr(base); DAC960_PD_ack_hw_mbox_status(base); udelay(1000); while (DAC960_PD_init_in_progress(base) && timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_PD_read_error_status(base, &error, &parm0, &parm1) && myrb_err_status(cb, error, parm0, parm1)) return -EIO; udelay(10); timeout++; } if (timeout == MYRB_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrb_enable_mmio(cb, NULL)) { dev_err(&pdev->dev, "Unable to Enable Memory Mailbox Interface\n"); DAC960_PD_reset_ctrl(base); return -ENODEV; } DAC960_PD_enable_intr(base); cb->qcmd = DAC960_PD_qcmd; cb->disable_intr = DAC960_PD_disable_intr; cb->reset = DAC960_PD_reset_ctrl; return 0; } static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg) { struct myrb_hba *cb = arg; void __iomem *base = cb->io_base; unsigned long flags; spin_lock_irqsave(&cb->queue_lock, flags); while (DAC960_PD_hw_mbox_status_available(base)) { unsigned char id = DAC960_PD_read_status_cmd_ident(base); struct scsi_cmnd *scmd = NULL; struct myrb_cmdblk *cmd_blk = NULL; if (id == MYRB_DCMD_TAG) cmd_blk = &cb->dcmd_blk; else if (id == MYRB_MCMD_TAG) cmd_blk = &cb->mcmd_blk; else { scmd = scsi_host_find_tag(cb->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) cmd_blk->status = 
DAC960_PD_read_status(base); else dev_err(&cb->pdev->dev, "Unhandled command completion %d\n", id); DAC960_PD_ack_intr(base); DAC960_PD_ack_hw_mbox_status(base); if (id < 3) myrb_handle_cmdblk(cb, cmd_blk); else myrb_handle_scsi(cb, cmd_blk, scmd); } spin_unlock_irqrestore(&cb->queue_lock, flags); return IRQ_HANDLED; } static struct myrb_privdata DAC960_PD_privdata = { .hw_init = DAC960_PD_hw_init, .irq_handler = DAC960_PD_intr_handler, .mmio_size = DAC960_PD_mmio_size, }; /* * DAC960 P Series Controllers * * Similar to the DAC960 PD Series Controllers, but some commands have * to be translated. */ static inline void myrb_translate_enquiry(void *enq) { memcpy(enq + 132, enq + 36, 64); memset(enq + 36, 0, 96); } static inline void myrb_translate_devstate(void *state) { memcpy(state + 2, state + 3, 1); memmove(state + 4, state + 5, 2); memmove(state + 6, state + 8, 4); } static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk) { union myrb_cmd_mbox *mbox = &cmd_blk->mbox; int ldev_num = mbox->type5.ld.ldev_num; mbox->bytes[3] &= 0x7; mbox->bytes[3] |= mbox->bytes[7] << 6; mbox->bytes[7] = ldev_num; } static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk) { union myrb_cmd_mbox *mbox = &cmd_blk->mbox; int ldev_num = mbox->bytes[7]; mbox->bytes[7] = mbox->bytes[3] >> 6; mbox->bytes[3] &= 0x7; mbox->bytes[3] |= ldev_num << 3; } static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) { void __iomem *base = cb->io_base; union myrb_cmd_mbox *mbox = &cmd_blk->mbox; switch (mbox->common.opcode) { case MYRB_CMD_ENQUIRY: mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD; break; case MYRB_CMD_GET_DEVICE_STATE: mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD; break; case MYRB_CMD_READ: mbox->common.opcode = MYRB_CMD_READ_OLD; myrb_translate_to_rw_command(cmd_blk); break; case MYRB_CMD_WRITE: mbox->common.opcode = MYRB_CMD_WRITE_OLD; myrb_translate_to_rw_command(cmd_blk); break; case MYRB_CMD_READ_SG: mbox->common.opcode = MYRB_CMD_READ_SG_OLD; myrb_translate_to_rw_command(cmd_blk); break; case MYRB_CMD_WRITE_SG: mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD; myrb_translate_to_rw_command(cmd_blk); break; default: break; } while (DAC960_PD_hw_mbox_is_full(base)) udelay(1); DAC960_PD_write_cmd_mbox(base, mbox); DAC960_PD_hw_mbox_new_cmd(base); } static int DAC960_P_hw_init(struct pci_dev *pdev, struct myrb_hba *cb, void __iomem *base) { int timeout = 0; unsigned char error, parm0, parm1; if (!request_region(cb->io_addr, 0x80, "myrb")) { dev_err(&pdev->dev, "IO port 0x%lx busy\n", (unsigned long)cb->io_addr); return -EBUSY; } DAC960_PD_disable_intr(base); DAC960_PD_ack_hw_mbox_status(base); udelay(1000); while (DAC960_PD_init_in_progress(base) && timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_PD_read_error_status(base, &error, &parm0, &parm1) && myrb_err_status(cb, error, parm0, parm1)) return -EAGAIN; udelay(10); timeout++; } if (timeout == MYRB_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrb_enable_mmio(cb, NULL)) { dev_err(&pdev->dev, "Unable to allocate DMA mapped memory\n"); DAC960_PD_reset_ctrl(base); return -ETIMEDOUT; } DAC960_PD_enable_intr(base); cb->qcmd = DAC960_P_qcmd; cb->disable_intr = DAC960_PD_disable_intr; cb->reset = DAC960_PD_reset_ctrl; return 0; } static irqreturn_t DAC960_P_intr_handler(int irq, void *arg) { struct myrb_hba *cb = arg; void __iomem *base = cb->io_base; unsigned long flags; spin_lock_irqsave(&cb->queue_lock, flags); while 
(DAC960_PD_hw_mbox_status_available(base)) { unsigned char id = DAC960_PD_read_status_cmd_ident(base); struct scsi_cmnd *scmd = NULL; struct myrb_cmdblk *cmd_blk = NULL; union myrb_cmd_mbox *mbox; enum myrb_cmd_opcode op; if (id == MYRB_DCMD_TAG) cmd_blk = &cb->dcmd_blk; else if (id == MYRB_MCMD_TAG) cmd_blk = &cb->mcmd_blk; else { scmd = scsi_host_find_tag(cb->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) cmd_blk->status = DAC960_PD_read_status(base); else dev_err(&cb->pdev->dev, "Unhandled command completion %d\n", id); DAC960_PD_ack_intr(base); DAC960_PD_ack_hw_mbox_status(base); if (!cmd_blk) continue; mbox = &cmd_blk->mbox; op = mbox->common.opcode; switch (op) { case MYRB_CMD_ENQUIRY_OLD: mbox->common.opcode = MYRB_CMD_ENQUIRY; myrb_translate_enquiry(cb->enquiry); break; case MYRB_CMD_READ_OLD: mbox->common.opcode = MYRB_CMD_READ; myrb_translate_from_rw_command(cmd_blk); break; case MYRB_CMD_WRITE_OLD: mbox->common.opcode = MYRB_CMD_WRITE; myrb_translate_from_rw_command(cmd_blk); break; case MYRB_CMD_READ_SG_OLD: mbox->common.opcode = MYRB_CMD_READ_SG; myrb_translate_from_rw_command(cmd_blk); break; case MYRB_CMD_WRITE_SG_OLD: mbox->common.opcode = MYRB_CMD_WRITE_SG; myrb_translate_from_rw_command(cmd_blk); break; default: break; } if (id < 3) myrb_handle_cmdblk(cb, cmd_blk); else myrb_handle_scsi(cb, cmd_blk, scmd); } spin_unlock_irqrestore(&cb->queue_lock, flags); return IRQ_HANDLED; } static struct myrb_privdata DAC960_P_privdata = { .hw_init = DAC960_P_hw_init, .irq_handler = DAC960_P_intr_handler, .mmio_size = DAC960_PD_mmio_size, }; static struct myrb_hba *myrb_detect(struct pci_dev *pdev, const struct pci_device_id *entry) { struct myrb_privdata *privdata = (struct myrb_privdata *)entry->driver_data; irq_handler_t irq_handler = privdata->irq_handler; unsigned int mmio_size = privdata->mmio_size; struct Scsi_Host *shost; struct myrb_hba *cb = NULL; shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba)); if (!shost) { dev_err(&pdev->dev, "Unable to allocate Controller\n"); return NULL; } shost->max_cmd_len = 12; shost->max_lun = 256; cb = shost_priv(shost); mutex_init(&cb->dcmd_mutex); mutex_init(&cb->dma_mutex); cb->pdev = pdev; cb->host = shost; if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); scsi_host_put(shost); return NULL; } if (privdata->hw_init == DAC960_PD_hw_init || privdata->hw_init == DAC960_P_hw_init) { cb->io_addr = pci_resource_start(pdev, 0); cb->pci_addr = pci_resource_start(pdev, 1); } else cb->pci_addr = pci_resource_start(pdev, 0); pci_set_drvdata(pdev, cb); spin_lock_init(&cb->queue_lock); if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size); if (cb->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); goto failure; } cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK); if (privdata->hw_init(pdev, cb, cb->io_base)) goto failure; if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) { dev_err(&pdev->dev, "Unable to acquire IRQ Channel %d\n", pdev->irq); goto failure; } cb->irq = pdev->irq; return cb; failure: dev_err(&pdev->dev, "Failed to initialize Controller\n"); myrb_cleanup(cb); return NULL; } static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry) { struct myrb_hba *cb; int ret; cb = myrb_detect(dev, entry); if (!cb) return -ENODEV; ret = myrb_get_hba_config(cb); if (ret < 0) { myrb_cleanup(cb); return ret; } if (!myrb_create_mempools(dev, cb)) { 
ret = -ENOMEM; goto failed; } ret = scsi_add_host(cb->host, &dev->dev); if (ret) { dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret); myrb_destroy_mempools(cb); goto failed; } scsi_scan_host(cb->host); return 0; failed: myrb_cleanup(cb); return ret; } static void myrb_remove(struct pci_dev *pdev) { struct myrb_hba *cb = pci_get_drvdata(pdev); shost_printk(KERN_NOTICE, cb->host, "Flushing Cache..."); myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); myrb_cleanup(cb); myrb_destroy_mempools(cb); } static const struct pci_device_id myrb_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_MYLEX_DAC960_LA), .driver_data = (unsigned long) &DAC960_LA_privdata, }, { PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata), }, { PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata), }, { PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata), }, {0, }, }; MODULE_DEVICE_TABLE(pci, myrb_id_table); static struct pci_driver myrb_pci_driver = { .name = "myrb", .id_table = myrb_id_table, .probe = myrb_probe, .remove = myrb_remove, }; static int __init myrb_init_module(void) { int ret; myrb_raid_template = raid_class_attach(&myrb_raid_functions); if (!myrb_raid_template) return -ENODEV; ret = pci_register_driver(&myrb_pci_driver); if (ret) raid_class_release(myrb_raid_template); return ret; } static void __exit myrb_cleanup_module(void) { pci_unregister_driver(&myrb_pci_driver); raid_class_release(myrb_raid_template); } module_init(myrb_init_module); module_exit(myrb_cleanup_module); MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)"); MODULE_AUTHOR("Hannes Reinecke <[email protected]>"); MODULE_LICENSE("GPL");
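/*
 * Illustrative sketch only, not part of the original driver: the DAC960
 * LA/PG interrupt handlers above drain completions from a fixed array of
 * status mailboxes, treating it as a ring that wraps from last_stat_mbox
 * back to first_stat_mbox.  The helper name below is hypothetical and
 * struct myrb_stat_mbox is assumed to come from the driver's myrb.h.
 */
static inline struct myrb_stat_mbox *
myrb_next_stat_mbox_sketch(struct myrb_stat_mbox *pos,
			   struct myrb_stat_mbox *first,
			   struct myrb_stat_mbox *last)
{
	/* advance one slot, wrapping from the last mailbox back to the first */
	if (++pos > last)
		pos = first;
	return pos;
}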
linux-master
drivers/scsi/myrb.c
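A minimal sketch of the resync-percentage arithmetic used by myrb_get_resync in the file above; the function name, the zero-size guard, and the sample block counts here are illustrative assumptions, not part of the driver.

/* Mirrors percent = (size - remaining) * 100 / size from myrb_get_resync. */
static unsigned int resync_percent(unsigned int ldev_size,
				   unsigned int blocks_left)
{
	if (!ldev_size)
		return 0;	/* avoid dividing by zero on an empty drive */
	return (ldev_size - blocks_left) * 100 / ldev_size;
}

/* Example: 2097152 total blocks with 524288 still to rebuild -> 75. */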
/* * dc395x.c * * Device Driver for Tekram DC395(U/UW/F), DC315(U) * PCI SCSI Bus Master Host Adapter * (SCSI chip set used Tekram ASIC TRM-S1040) * * Authors: * C.L. Huang <[email protected]> * Erich Chen <[email protected]> * (C) Copyright 1995-1999 Tekram Technology Co., Ltd. * * Kurt Garloff <[email protected]> * (C) 1999-2000 Kurt Garloff * * Oliver Neukum <[email protected]> * Ali Akcaagac <[email protected]> * Jamie Lenehan <[email protected]> * (C) 2003 * * License: GNU GPL * ************************************************************************* * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ************************************************************************ */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_spi.h> #include "dc395x.h" #define DC395X_NAME "dc395x" #define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040" #define DC395X_VERSION "v2.05, 2004/03/08" /*--------------------------------------------------------------------------- Features ---------------------------------------------------------------------------*/ /* * Set to disable parts of the driver */ /*#define DC395x_NO_DISCONNECT*/ /*#define DC395x_NO_TAGQ*/ /*#define DC395x_NO_SYNC*/ /*#define DC395x_NO_WIDE*/ /*--------------------------------------------------------------------------- Debugging ---------------------------------------------------------------------------*/ /* * Types of debugging that can be enabled and disabled */ #define DBG_KG 0x0001 #define DBG_0 0x0002 #define DBG_1 0x0004 #define DBG_SG 0x0020 #define DBG_FIFO 0x0040 #define DBG_PIO 0x0080 /* * Set set of things to output debugging for. 
 * Undefine to remove all debugging
 */
/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
/*#define DEBUG_MASK DBG_0*/

/*
 * Output a kernel message at the specified level and prepend the
 * driver name and a ": " to the message
 */
#define dprintkl(level, format, arg...) \
	printk(level DC395X_NAME ": " format , ## arg)

#ifdef DEBUG_MASK
/*
 * print a debug message - this is formatted with KERN_DEBUG, then the
 * driver name followed by a ": " and then the message is output.
 * This also checks that the specified debug level is enabled before
 * outputting the message
 */
#define dprintkdbg(type, format, arg...) \
	do { \
		if ((type) & (DEBUG_MASK)) \
			dprintkl(KERN_DEBUG , format , ## arg); \
	} while (0)

/*
 * Check if the specified type of debugging is enabled
 */
#define debug_enabled(type)	((DEBUG_MASK) & (type))

#else
/*
 * No debugging. Do nothing
 */
#define dprintkdbg(type, format, arg...) \
	do {} while (0)
#define debug_enabled(type)	(0)

#endif

#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM	0x1DE1	/* Vendor ID */
#endif
#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
#define PCI_DEVICE_ID_TEKRAM_TRMS1040	0x0391	/* Device ID */
#endif

#define DC395x_LOCK_IO(dev,flags)	spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
#define DC395x_UNLOCK_IO(dev,flags)	spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)

#define DC395x_read8(acb,address)	(u8)(inb(acb->io_port_base + (address)))
#define DC395x_read16(acb,address)	(u16)(inw(acb->io_port_base + (address)))
#define DC395x_read32(acb,address)	(u32)(inl(acb->io_port_base + (address)))
#define DC395x_write8(acb,address,value)	outb((value), acb->io_port_base + (address))
#define DC395x_write16(acb,address,value)	outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value)	outl((value), acb->io_port_base + (address))

#define TAG_NONE 255

/*
 * srb->segment_x is the hw sg list. It is always allocated as a
 * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
 * cross a page boundary.
 */
#define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)

struct SGentry {
	u32 address;		/* bus! address */
	u32 length;
};

/* The SEEPROM structure for TRM_S1040 */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0 */
	u8 period;		/* Target period */
	u8 cfg2;		/* Target configuration byte 2 */
	u8 cfg3;		/* Target configuration byte 3 */
};

struct NvRamType {
	u8 sub_vendor_id[2];	/* 0,1 Sub Vendor ID */
	u8 sub_sys_id[2];	/* 2,3 Sub System ID */
	u8 sub_class;		/* 4 Sub Class */
	u8 vendor_id[2];	/* 5,6 Vendor ID */
	u8 device_id[2];	/* 7,8 Device ID */
	u8 reserved;		/* 9 Reserved */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
						/** 10,11,12,13
						 ** 14,15,16,17
						 ** ....
						 ** ....
** 70,71,72,73 */ u8 scsi_id; /* 74 Host Adapter SCSI ID */ u8 channel_cfg; /* 75 Channel configuration */ u8 delay_time; /* 76 Power on delay time */ u8 max_tag; /* 77 Maximum tags */ u8 reserved0; /* 78 */ u8 boot_target; /* 79 */ u8 boot_lun; /* 80 */ u8 reserved1; /* 81 */ u16 reserved2[22]; /* 82,..125 */ u16 cksum; /* 126,127 */ }; struct ScsiReqBlk { struct list_head list; /* next/prev ptrs for srb lists */ struct DeviceCtlBlk *dcb; struct scsi_cmnd *cmd; struct SGentry *segment_x; /* Linear array of hw sg entries (up to 64 entries) */ dma_addr_t sg_bus_addr; /* Bus address of sg list (ie, of segment_x) */ u8 sg_count; /* No of HW sg entries for this request */ u8 sg_index; /* Index of HW sg entry for this request */ size_t total_xfer_length; /* Total number of bytes remaining to be transferred */ size_t request_length; /* Total number of bytes in this request */ /* * The sense buffer handling function, request_sense, uses * the first hw sg entry (segment_x[0]) and the transfer * length (total_xfer_length). While doing this it stores the * original values into the last sg hw list * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the * total_xfer_length in xferred. These values are restored in * pci_unmap_srb_sense. This is the only place xferred is used. */ size_t xferred; /* Saved copy of total_xfer_length */ u16 state; u8 msgin_buf[6]; u8 msgout_buf[6]; u8 adapter_status; u8 target_status; u8 msg_count; u8 end_message; u8 tag_number; u8 status; u8 retry_count; u8 flag; u8 scsi_phase; }; struct DeviceCtlBlk { struct list_head list; /* next/prev ptrs for the dcb list */ struct AdapterCtlBlk *acb; struct list_head srb_going_list; /* head of going srb list */ struct list_head srb_waiting_list; /* head of waiting srb list */ struct ScsiReqBlk *active_srb; u32 tag_mask; u16 max_command; u8 target_id; /* SCSI Target ID (SCSI Only) */ u8 target_lun; /* SCSI Log. Unit (SCSI Only) */ u8 identify_msg; u8 dev_mode; u8 inquiry7; /* To store Inquiry flags */ u8 sync_mode; /* 0:async mode */ u8 min_nego_period; /* for nego. */ u8 sync_period; /* for reg. */ u8 sync_offset; /* for reg. 
and nego.(low nibble) */ u8 flag; u8 dev_type; u8 init_tcq_flag; }; struct AdapterCtlBlk { struct Scsi_Host *scsi_host; unsigned long io_port_base; unsigned long io_port_len; struct list_head dcb_list; /* head of going dcb list */ struct DeviceCtlBlk *dcb_run_robin; struct DeviceCtlBlk *active_dcb; struct list_head srb_free_list; /* head of free srb list */ struct ScsiReqBlk *tmp_srb; struct timer_list waiting_timer; struct timer_list selto_timer; unsigned long last_reset; u16 srb_count; u8 sel_timeout; unsigned int irq_level; u8 tag_max_num; u8 acb_flag; u8 gmode2; u8 config; u8 lun_chk; u8 scan_devices; u8 hostid_bit; u8 dcb_map[DC395x_MAX_SCSI_ID]; struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32]; struct pci_dev *dev; u8 msg_len; struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT]; struct ScsiReqBlk srb; struct NvRamType eeprom; /* eeprom settings for this adapter */ }; /*--------------------------------------------------------------------------- Forward declarations ---------------------------------------------------------------------------*/ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status); static void set_basic_config(struct AdapterCtlBlk *acb); static void cleanup_after_transfer(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb); static void reset_scsi_bus(struct AdapterCtlBlk *acb); static void data_io_transfer(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 io_dir); static void disconnect(struct AdapterCtlBlk *acb); static void reselect(struct AdapterCtlBlk *acb); static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb); static inline void enable_msgout_abort(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb); static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb); static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code, struct scsi_cmnd *cmd, u8 force); static void scsi_reset_detect(struct AdapterCtlBlk *acb); static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb); static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb); static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb); static void request_sense(struct AdapterCtlBlk 
		*acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb);
static void set_xfer_rate(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb);
static void waiting_timeout(struct timer_list *t);


/*---------------------------------------------------------------------------
                                 Static Data
 ---------------------------------------------------------------------------*/

static u16 current_sync_offset = 0;

static void *dc395x_scsi_phase0[] = {
	data_out_phase0,/* phase:0 */
	data_in_phase0,	/* phase:1 */
	command_phase0,	/* phase:2 */
	status_phase0,	/* phase:3 */
	nop0,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop0,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase0,	/* phase:6 */
	msgin_phase0,	/* phase:7 */
};

static void *dc395x_scsi_phase1[] = {
	data_out_phase1,/* phase:0 */
	data_in_phase1,	/* phase:1 */
	command_phase1,	/* phase:2 */
	status_phase1,	/* phase:3 */
	nop1,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop1,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase1,	/* phase:6 */
	msgin_phase1,	/* phase:7 */
};

/*
 *Fast20:	000	 50ns, 20.0 MHz
 *		001	 75ns, 13.3 MHz
 *		010	100ns, 10.0 MHz
 *		011	125ns,  8.0 MHz
 *		100	150ns,  6.6 MHz
 *		101	175ns,  5.7 MHz
 *		110	200ns,  5.0 MHz
 *		111	250ns,  4.0 MHz
 *
 *Fast40(LVDS):	000	 25ns, 40.0 MHz
 *		001	 50ns, 20.0 MHz
 *		010	 75ns, 13.3 MHz
 *		011	100ns, 10.0 MHz
 *		100	125ns,  8.0 MHz
 *		101	150ns,  6.6 MHz
 *		110	175ns,  5.7 MHz
 *		111	200ns,  5.0 MHz
 */
/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };


/*---------------------------------------------------------------------------
                                Configuration
 ---------------------------------------------------------------------------*/

/*
 * Module/boot parameters currently affect *all* instances of the
 * card in the system.
 */

/*
 * Command line parameters are stored in a structure below.
 * These are the indices into the structure for the various
 * command line options.
 */
#define CFG_ADAPTER_ID		0
#define CFG_MAX_SPEED		1
#define CFG_DEV_MODE		2
#define CFG_ADAPTER_MODE	3
#define CFG_TAGS		4
#define CFG_RESET_DELAY		5

#define CFG_NUM			6	/* number of configuration items */

/*
 * Value used to indicate that a command line override
 * hasn't been used to modify the value.
 */
#define CFG_PARAM_UNSET -1

/*
 * Hold command line parameters.
 */
struct ParameterData {
	int value;		/* value of this setting */
	int min;		/* minimum value */
	int max;		/* maximum value */
	int def;		/* default value */
	int safe;		/* safe value */
};

static struct ParameterData cfg_data[] = {
	{ /* adapter id */
		CFG_PARAM_UNSET,
		0,
		15,
		7,
		7
	},
	{ /* max speed */
		CFG_PARAM_UNSET,
		0,
		7,
		1,	/* 13.3MHz */
		4,	/*  6.7MHz */
	},
	{ /* dev mode */
		CFG_PARAM_UNSET,
		0,
		0x3f,
		NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
			NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
			NTC_DO_SEND_START,
		NTC_DO_PARITY_CHK | NTC_DO_SEND_START
	},
	{ /* adapter mode */
		CFG_PARAM_UNSET,
		0,
		0x2f,
		NAC_SCANLUN |
			NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
			/*| NAC_ACTIVE_NEG*/,
		NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
	},
	{ /* tags */
		CFG_PARAM_UNSET,
		0,
		5,
		3,	/* 16 tags (??) */
		2,
	},
	{ /* reset delay */
		CFG_PARAM_UNSET,
		0,
		180,
		1,	/* 1 second */
		10,	/* 10 seconds */
	}
};

/*
 * Safe settings. If set to zero the BIOS/default values with
 * command line overrides will be used. If set to 1 then safe and
 * slow settings will be used.
*/ static bool use_safe_settings = 0; module_param_named(safe, use_safe_settings, bool, 0); MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false"); module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0); MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)"); module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0); MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz"); module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0); MODULE_PARM_DESC(dev_mode, "Device mode."); module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0); MODULE_PARM_DESC(adapter_mode, "Adapter mode."); module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0); MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)"); module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0); MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)"); /** * set_safe_settings - if the use_safe_settings option is set then * set all values to the safe and slow values. **/ static void set_safe_settings(void) { if (use_safe_settings) { int i; dprintkl(KERN_INFO, "Using safe settings.\n"); for (i = 0; i < CFG_NUM; i++) { cfg_data[i].value = cfg_data[i].safe; } } } /** * fix_settings - reset any boot parameters which are out of range * back to the default values. **/ static void fix_settings(void) { int i; dprintkdbg(DBG_1, "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x " "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n", cfg_data[CFG_ADAPTER_ID].value, cfg_data[CFG_MAX_SPEED].value, cfg_data[CFG_DEV_MODE].value, cfg_data[CFG_ADAPTER_MODE].value, cfg_data[CFG_TAGS].value, cfg_data[CFG_RESET_DELAY].value); for (i = 0; i < CFG_NUM; i++) { if (cfg_data[i].value < cfg_data[i].min || cfg_data[i].value > cfg_data[i].max) cfg_data[i].value = cfg_data[i].def; } } /* * Mapping from the eeprom delay index value (index into this array) * to the number of actual seconds that the delay should be for. */ static char eeprom_index_to_delay_map[] = { 1, 3, 5, 10, 16, 30, 60, 120 }; /** * eeprom_index_to_delay - Take the eeprom delay setting and convert it * into a number of seconds. * * @eeprom: The eeprom structure in which we find the delay index to map. **/ static void eeprom_index_to_delay(struct NvRamType *eeprom) { eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time]; } /** * delay_to_eeprom_index - Take a delay in seconds and return the * closest eeprom index which will delay for at least that amount of * seconds. * * @delay: The delay, in seconds, to find the eeprom index for. **/ static int delay_to_eeprom_index(int delay) { u8 idx = 0; while (idx < 7 && eeprom_index_to_delay_map[idx] < delay) idx++; return idx; } /** * eeprom_override - Override the eeprom settings, in the provided * eeprom structure, with values that have been set on the command * line. * * @eeprom: The eeprom data to override with command line options. 
 **/
static void eeprom_override(struct NvRamType *eeprom)
{
	u8 id;

	/* Adapter Settings */
	if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
		eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;

	if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
		eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;

	if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
		eeprom->delay_time = delay_to_eeprom_index(
					cfg_data[CFG_RESET_DELAY].value);

	if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
		eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;

	/* Device Settings */
	for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
		if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
			eeprom->target[id].cfg0 =
				(u8)cfg_data[CFG_DEV_MODE].value;

		if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
			eeprom->target[id].period =
				(u8)cfg_data[CFG_MAX_SPEED].value;
	}
}


/*---------------------------------------------------------------------------
 ---------------------------------------------------------------------------*/

static unsigned int list_size(struct list_head *head)
{
	unsigned int count = 0;
	struct list_head *pos;
	list_for_each(pos, head)
		count++;
	return count;
}

static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
		struct DeviceCtlBlk *pos)
{
	int use_next = 0;
	struct DeviceCtlBlk* next = NULL;
	struct DeviceCtlBlk* i;

	if (list_empty(head))
		return NULL;

	/* find supplied dcb and then select the next one */
	list_for_each_entry(i, head, list)
		if (use_next) {
			next = i;
			break;
		} else if (i == pos) {
			use_next = 1;
		}
	/* if no next one take the head one (ie, wraparound) */
	if (!next)
		list_for_each_entry(i, head, list) {
			next = i;
			break;
		}

	return next;
}

static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	if (srb->tag_number < 255) {
		dcb->tag_mask &= ~(1 << srb->tag_number);	/* free tag mask */
		srb->tag_number = 255;
	}
}


/* Find cmd in SRB list */
static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
		struct list_head *head)
{
	struct ScsiReqBlk *i;
	list_for_each_entry(i, head, list)
		if (i->cmd == cmd)
			return i;
	return NULL;
}

/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	if (timer_pending(&acb->waiting_timer))
		return;
	if (time_before(jiffies + to, acb->last_reset - HZ / 2))
		acb->waiting_timer.expires = acb->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}

/* Send the next command from the waiting list to the bus */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}


	/*
	 * Loop over the dcb, but we start somewhere (potentially) in
	 * the middle of the loop so we need to manually do this.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* Make sure another device gets scheduled next ...
*/ acb->dcb_run_robin = dcb_get_next(dcb_list_head, acb->dcb_run_robin); if (list_empty(waiting_list_head) || pos->max_command <= list_size(&pos->srb_going_list)) { /* move to next dcb */ pos = dcb_get_next(dcb_list_head, pos); } else { srb = list_entry(waiting_list_head->next, struct ScsiReqBlk, list); /* Try to send to the bus */ if (!start_scsi(acb, pos, srb)) list_move(&srb->list, &pos->srb_going_list); else waiting_set_timer(acb, HZ/50); break; } } while (pos != start); } /* Wake up waiting queue */ static void waiting_timeout(struct timer_list *t) { unsigned long flags; struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer); dprintkdbg(DBG_1, "waiting_timeout: Queue woken up by timer. acb=%p\n", acb); DC395x_LOCK_IO(acb->scsi_host, flags); waiting_process_next(acb); DC395x_UNLOCK_IO(acb->scsi_host, flags); } /* Get the DCB for a given ID/LUN combination */ static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun) { return acb->children[id][lun]; } /* Send SCSI Request Block (srb) to adapter (acb) */ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; if (dcb->max_command <= list_size(&dcb->srb_going_list) || acb->active_dcb || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) { list_add_tail(&srb->list, &dcb->srb_waiting_list); waiting_process_next(acb); return; } if (!start_scsi(acb, dcb, srb)) { list_add_tail(&srb->list, &dcb->srb_going_list); } else { list_add(&srb->list, &dcb->srb_waiting_list); waiting_set_timer(acb, HZ / 50); } } /* Prepare SRB for being sent to Device DCB w/ command *cmd */ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { int nseg; enum dma_data_direction dir = cmd->sc_data_direction; dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n", cmd, dcb->target_id, dcb->target_lun); srb->dcb = dcb; srb->cmd = cmd; srb->sg_count = 0; srb->total_xfer_length = 0; srb->sg_bus_addr = 0; srb->sg_index = 0; srb->adapter_status = 0; srb->target_status = 0; srb->msg_count = 0; srb->status = 0; srb->flag = 0; srb->state = 0; srb->retry_count = 0; srb->tag_number = TAG_NONE; srb->scsi_phase = PH_BUS_FREE; /* initial phase */ srb->end_message = 0; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (dir == DMA_NONE || !nseg) { dprintkdbg(DBG_0, "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n", cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd), srb->segment_x[0].address); } else { int i; u32 reqlen = scsi_bufflen(cmd); struct scatterlist *sg; struct SGentry *sgp = srb->segment_x; srb->sg_count = nseg; dprintkdbg(DBG_0, "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n", reqlen, scsi_sglist(cmd), scsi_sg_count(cmd), srb->sg_count); scsi_for_each_sg(cmd, sg, srb->sg_count, i) { u32 busaddr = (u32)sg_dma_address(sg); u32 seglen = (u32)sg->length; sgp[i].address = busaddr; sgp[i].length = seglen; srb->total_xfer_length += seglen; } sgp += srb->sg_count - 1; /* * adjust last page if too big as it is allocated * on even page boundaries */ if (srb->total_xfer_length > reqlen) { sgp->length -= (srb->total_xfer_length - reqlen); srb->total_xfer_length = reqlen; } /* Fixup for WIDE padding - make sure length is even */ if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2) { srb->total_xfer_length++; sgp->length++; } srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev, srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE); dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); } 
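	/*
	 * request_length keeps the originally mapped transfer length so the
	 * PIO paths can derive scatterlist offsets (request_length - left_io)
	 * even after total_xfer_length has been decremented during transfer.
	 */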
srb->request_length = srb->total_xfer_length; } /** * dc395x_queue_command_lck - queue scsi command passed from the mid * layer, invoke 'done' on completion * * @cmd: pointer to scsi command object * * Returns 1 if the adapter (host) is busy, else returns 0. One * reason for an adapter to be busy is that the number * of outstanding queued commands is already equal to * struct Scsi_Host::can_queue . * * Required: if struct Scsi_Host::can_queue is ever non-zero * then this function is required. * * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave") * and is expected to be held on return. * */ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) { void (*done)(struct scsi_cmnd *) = scsi_done; struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]); /* Assume BAD_TARGET; will be cleared later */ set_host_byte(cmd, DID_BAD_TARGET); /* ignore invalid targets */ if (cmd->device->id >= acb->scsi_host->max_id || cmd->device->lun >= acb->scsi_host->max_lun || cmd->device->lun >31) { goto complete; } /* does the specified lun on the specified device exist */ if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) { dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n", cmd->device->id, (u8)cmd->device->lun); goto complete; } /* do we have a DCB for the device */ dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); if (!dcb) { /* should never happen */ dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>", cmd->device->id, (u8)cmd->device->lun); goto complete; } set_host_byte(cmd, DID_OK); set_status_byte(cmd, SAM_STAT_GOOD); srb = list_first_entry_or_null(&acb->srb_free_list, struct ScsiReqBlk, list); if (!srb) { /* * Return 1 since we are unable to queue this command at this * point in time. */ dprintkdbg(DBG_0, "queue_command: No free srb's\n"); return 1; } list_del(&srb->list); build_srb(cmd, dcb, srb); if (!list_empty(&dcb->srb_waiting_list)) { /* append to waiting queue */ list_add_tail(&srb->list, &dcb->srb_waiting_list); waiting_process_next(acb); } else { /* process immediately */ send_srb(acb, srb); } dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd); return 0; complete: /* * Complete the command immediatey, and then return 0 to * indicate that we have handled the command. This is usually * done when the commad is for things like non existent * devices. */ done(cmd); return 0; } static DEF_SCSI_QCMD(dc395x_queue_command) static void dump_register_info(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { u16 pstat; struct pci_dev *dev = acb->dev; pci_read_config_word(dev, PCI_STATUS, &pstat); if (!dcb) dcb = acb->active_dcb; if (!srb && dcb) srb = dcb->active_srb; if (srb) { if (!srb->cmd) dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", srb, srb->cmd); else dprintkl(KERN_INFO, "dump: srb=%p cmd=%p " "cmnd=0x%02x <%02i-%i>\n", srb, srb->cmd, srb->cmd->cmnd[0], srb->cmd->device->id, (u8)srb->cmd->device->lun); printk(" sglist=%p cnt=%i idx=%i len=%zu\n", srb->segment_x, srb->sg_count, srb->sg_index, srb->total_xfer_length); printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n", srb->state, srb->status, srb->scsi_phase, (acb->active_dcb) ? 
"" : "not"); } dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x " "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x " "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x " "config2=0x%02x cmd=0x%02x selto=0x%02x}\n", DC395x_read16(acb, TRM_S1040_SCSI_STATUS), DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL), DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS), DC395x_read8(acb, TRM_S1040_SCSI_SYNC), DC395x_read8(acb, TRM_S1040_SCSI_TARGETID), DC395x_read8(acb, TRM_S1040_SCSI_IDMSG), DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), DC395x_read8(acb, TRM_S1040_SCSI_INTEN), DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0), DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2), DC395x_read8(acb, TRM_S1040_SCSI_COMMAND), DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT)); dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x " "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x " "ctctr=0x%08x addr=0x%08x:0x%08x}\n", DC395x_read16(acb, TRM_S1040_DMA_COMMAND), DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), DC395x_read8(acb, TRM_S1040_DMA_STATUS), DC395x_read8(acb, TRM_S1040_DMA_INTEN), DC395x_read16(acb, TRM_S1040_DMA_CONFIG), DC395x_read32(acb, TRM_S1040_DMA_XCNT), DC395x_read32(acb, TRM_S1040_DMA_CXCNT), DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR), DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR)); dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} " "pci{status=0x%04x}\n", DC395x_read8(acb, TRM_S1040_GEN_CONTROL), DC395x_read8(acb, TRM_S1040_GEN_STATUS), DC395x_read8(acb, TRM_S1040_GEN_TIMER), pstat); } static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt) { #if debug_enabled(DBG_FIFO) u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); if (!(fifocnt & 0x40)) dprintkdbg(DBG_FIFO, "clear_fifo: (%i bytes) on phase %02x in %s\n", fifocnt & 0x3f, lines, txt); #endif DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO); } static void reset_dev_param(struct AdapterCtlBlk *acb) { struct DeviceCtlBlk *dcb; struct NvRamType *eeprom = &acb->eeprom; dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb); list_for_each_entry(dcb, &acb->dcb_list, list) { u8 period_index; dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE); dcb->sync_period = 0; dcb->sync_offset = 0; dcb->dev_mode = eeprom->target[dcb->target_id].cfg0; period_index = eeprom->target[dcb->target_id].period & 0x07; dcb->min_nego_period = clock_period[period_index]; if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO) || !(acb->config & HCC_WIDE_CARD)) dcb->sync_mode &= ~WIDE_NEGO_ENABLE; } } /* * perform a hard reset on the SCSI bus * @cmd - some command for this host (for fetching hooks) * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003). */ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; dprintkl(KERN_INFO, "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n", cmd, cmd->device->id, (u8)cmd->device->lun, cmd); if (timer_pending(&acb->waiting_timer)) del_timer(&acb->waiting_timer); /* * disable interrupt */ DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00); DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00); DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); reset_scsi_bus(acb); udelay(500); /* We may be in serious trouble. 
Wait some seconds */ acb->last_reset = jiffies + 3 * HZ / 2 + HZ * acb->eeprom.delay_time; /* * re-enable interrupt */ /* Clear SCSI FIFO */ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); clear_fifo(acb, "eh_bus_reset"); /* Delete pending IRQ */ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); set_basic_config(acb); reset_dev_param(acb); doing_srb_done(acb, DID_RESET, cmd, 0); acb->active_dcb = NULL; acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE ,RESET_DEV */ waiting_process_next(acb); return SUCCESS; } static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd) { int rc; spin_lock_irq(cmd->device->host->host_lock); rc = __dc395x_eh_bus_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); return rc; } /* * abort an errant SCSI command * @cmd - command to be aborted * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003). */ static int dc395x_eh_abort(struct scsi_cmnd *cmd) { /* * Look into our command queues: If it has not been sent already, * we remove it and return success. Otherwise fail. */ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", cmd, cmd->device->id, (u8)cmd->device->lun, cmd); dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); if (!dcb) { dprintkl(KERN_DEBUG, "eh_abort: No such device\n"); return FAILED; } srb = find_cmd(cmd, &dcb->srb_waiting_list); if (srb) { list_del(&srb->list); pci_unmap_srb_sense(acb, srb); pci_unmap_srb(acb, srb); free_tag(dcb, srb); list_add_tail(&srb->list, &acb->srb_free_list); dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n"); set_host_byte(cmd, DID_ABORT); return SUCCESS; } srb = find_cmd(cmd, &dcb->srb_going_list); if (srb) { dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n"); /* XXX: Should abort the command here */ } else { dprintkl(KERN_DEBUG, "eh_abort: Command not found\n"); } return FAILED; } /* SDTR */ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { u8 *ptr = srb->msgout_buf + srb->msg_count; if (srb->msg_count > 1) { dprintkl(KERN_INFO, "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n", srb->msg_count, srb->msgout_buf[0], srb->msgout_buf[1]); return; } if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) { dcb->sync_offset = 0; dcb->min_nego_period = 200 >> 2; } else if (dcb->sync_offset == 0) dcb->sync_offset = SYNC_NEGO_OFFSET; srb->msg_count += spi_populate_sync_msg(ptr, dcb->min_nego_period, dcb->sync_offset); srb->state |= SRB_DO_SYNC_NEGO; } /* WDTR */ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) & (acb->config & HCC_WIDE_CARD)) ? 
1 : 0; u8 *ptr = srb->msgout_buf + srb->msg_count; if (srb->msg_count > 1) { dprintkl(KERN_INFO, "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n", srb->msg_count, srb->msgout_buf[0], srb->msgout_buf[1]); return; } srb->msg_count += spi_populate_width_msg(ptr, wide); srb->state |= SRB_DO_WIDE_NEGO; } #if 0 /* Timer to work around chip flaw: When selecting and the bus is * busy, we sometimes miss a Selection timeout IRQ */ void selection_timeout_missed(unsigned long ptr); /* Sets the timer to wake us up */ static void selto_timer(struct AdapterCtlBlk *acb) { if (timer_pending(&acb->selto_timer)) return; acb->selto_timer.function = selection_timeout_missed; acb->selto_timer.data = (unsigned long) acb; if (time_before (jiffies + HZ, acb->last_reset + HZ / 2)) acb->selto_timer.expires = acb->last_reset + HZ / 2 + 1; else acb->selto_timer.expires = jiffies + HZ + 1; add_timer(&acb->selto_timer); } void selection_timeout_missed(unsigned long ptr) { unsigned long flags; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr; struct ScsiReqBlk *srb; dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n"); if (!acb->active_dcb || !acb->active_dcb->active_srb) { dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n"); return; } DC395x_LOCK_IO(acb->scsi_host, flags); srb = acb->active_dcb->active_srb; disconnect(acb); DC395x_UNLOCK_IO(acb->scsi_host, flags); } #endif static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, struct ScsiReqBlk* srb) { u16 __maybe_unused s_stat2, return_code; u8 s_stat, scsicommand, i, identify_message; u8 *ptr; dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n", dcb->target_id, dcb->target_lun, srb); srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); s_stat2 = 0; s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); #if 1 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) { dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n", s_stat, s_stat2); /* * Try anyway? * * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed! * (This is likely to be a bug in the hardware. Obviously, most people * only have one initiator per SCSI bus.) * Instead let this fail and have the timer make sure the command is * tried again after a short time */ /*selto_timer (acb); */ return 1; } #endif if (acb->active_dcb) { dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a" "command while another command (0x%p) is active.", srb->cmd, acb->active_dcb->active_srb ? acb->active_dcb->active_srb->cmd : 0); return 1; } if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd); return 1; } /* Allow starting of SCSI commands half a second before we allow the mid-level * to queue them again after a reset */ if (time_before(jiffies, acb->last_reset - HZ / 2)) { dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); return 1; } /* Flush FIFO */ clear_fifo(acb, "start_scsi"); DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); srb->scsi_phase = PH_BUS_FREE; /* initial phase */ identify_message = dcb->identify_msg; /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */ /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! 
*/ if (srb->flag & AUTO_REQSENSE) identify_message &= 0xBF; if (((srb->cmd->cmnd[0] == INQUIRY) || (srb->cmd->cmnd[0] == REQUEST_SENSE) || (srb->flag & AUTO_REQSENSE)) && (((dcb->sync_mode & WIDE_NEGO_ENABLE) && !(dcb->sync_mode & WIDE_NEGO_DONE)) || ((dcb->sync_mode & SYNC_NEGO_ENABLE) && !(dcb->sync_mode & SYNC_NEGO_DONE))) && (dcb->target_lun == 0)) { srb->msgout_buf[0] = identify_message; srb->msg_count = 1; scsicommand = SCMD_SEL_ATNSTOP; srb->state = SRB_MSGOUT; #ifndef SYNC_FIRST if (dcb->sync_mode & WIDE_NEGO_ENABLE && dcb->inquiry7 & SCSI_INQ_WBUS16) { build_wdtr(acb, dcb, srb); goto no_cmd; } #endif if (dcb->sync_mode & SYNC_NEGO_ENABLE && dcb->inquiry7 & SCSI_INQ_SYNC) { build_sdtr(acb, dcb, srb); goto no_cmd; } if (dcb->sync_mode & WIDE_NEGO_ENABLE && dcb->inquiry7 & SCSI_INQ_WBUS16) { build_wdtr(acb, dcb, srb); goto no_cmd; } srb->msg_count = 0; } /* Send identify message */ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message); scsicommand = SCMD_SEL_ATN; srb->state = SRB_START_; #ifndef DC395x_NO_TAGQ if ((dcb->sync_mode & EN_TAG_QUEUEING) && (identify_message & 0xC0)) { /* Send Tag message */ u32 tag_mask = 1; u8 tag_number = 0; while (tag_mask & dcb->tag_mask && tag_number < dcb->max_command) { tag_mask = tag_mask << 1; tag_number++; } if (tag_number >= dcb->max_command) { dprintkl(KERN_WARNING, "start_scsi: (0x%p) " "Out of tags target=<%02i-%i>)\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->state = SRB_READY; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); return 1; } /* Send Tag id */ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SIMPLE_QUEUE_TAG); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number); dcb->tag_mask |= tag_mask; srb->tag_number = tag_number; scsicommand = SCMD_SEL_ATN3; srb->state = SRB_START_; } #endif /*polling:*/ /* Send CDB ..command block ......... */ dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, srb->cmd->cmnd[0], srb->tag_number); if (srb->flag & AUTO_REQSENSE) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); } else { ptr = (u8 *)srb->cmd->cmnd; for (i = 0; i < srb->cmd->cmd_len; i++) DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++); } no_cmd: DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { /* * If start_scsi return 1: * we caught an interrupt (must be reset or reselection ... ) * : Let's process it first! 
*/ dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n", srb->cmd, dcb->target_id, dcb->target_lun); srb->state = SRB_READY; free_tag(dcb, srb); srb->msg_count = 0; return_code = 1; /* This IRQ should NOT get lost, as we did not acknowledge it */ } else { /* * If start_scsi returns 0: * we know that the SCSI processor is free */ srb->scsi_phase = PH_BUS_FREE; /* initial phase */ dcb->active_srb = srb; acb->active_dcb = dcb; return_code = 0; /* it's important for atn stop */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH | DO_HWRESELECT); /* SCSI command */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand); } return return_code; } #define DC395x_ENABLE_MSGOUT \ DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \ srb->state |= SRB_MSGOUT /* abort command */ static inline void enable_msgout_abort(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { srb->msgout_buf[0] = ABORT; srb->msg_count = 1; DC395x_ENABLE_MSGOUT; srb->state &= ~SRB_MSGIN; srb->state |= SRB_MSGOUT; } /** * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to * have been triggered for this card. * * @acb: a pointer to the adpter control block * @scsi_status: the status return when we checked the card **/ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb, u16 scsi_status) { struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; u16 phase; u8 scsi_intstatus; unsigned long flags; void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *, u16 *); DC395x_LOCK_IO(acb->scsi_host, flags); /* This acknowledges the IRQ */ scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); if ((scsi_status & 0x2007) == 0x2002) dprintkl(KERN_DEBUG, "COP after COP completed? %04x\n", scsi_status); if (debug_enabled(DBG_KG)) { if (scsi_intstatus & INT_SELTIMEOUT) dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n"); } /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */ if (timer_pending(&acb->selto_timer)) del_timer(&acb->selto_timer); if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) { disconnect(acb); /* bus free interrupt */ goto out_unlock; } if (scsi_intstatus & INT_RESELECTED) { reselect(acb); goto out_unlock; } if (scsi_intstatus & INT_SELECT) { dprintkl(KERN_INFO, "Host does not support target mode!\n"); goto out_unlock; } if (scsi_intstatus & INT_SCSIRESET) { scsi_reset_detect(acb); goto out_unlock; } if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) { dcb = acb->active_dcb; if (!dcb) { dprintkl(KERN_DEBUG, "Oops: BusService (%04x %02x) w/o ActiveDCB!\n", scsi_status, scsi_intstatus); goto out_unlock; } srb = dcb->active_srb; if (dcb->flag & ABORT_DEV_) { dprintkdbg(DBG_0, "MsgOut Abort Device.....\n"); enable_msgout_abort(acb, srb); } /* software sequential machine */ phase = (u16)srb->scsi_phase; /* * 62037 or 62137 * call dc395x_scsi_phase0[]... "phase entry" * handle every phase before start transfer */ /* data_out_phase0, phase:0 */ /* data_in_phase0, phase:1 */ /* command_phase0, phase:2 */ /* status_phase0, phase:3 */ /* nop0, phase:4 PH_BUS_FREE .. initial phase */ /* nop0, phase:5 PH_BUS_FREE .. initial phase */ /* msgout_phase0, phase:6 */ /* msgin_phase0, phase:7 */ dc395x_statev = dc395x_scsi_phase0[phase]; dc395x_statev(acb, srb, &scsi_status); /* * if there were any exception occurred scsi_status * will be modify to bus free phase new scsi_status * transfer out from ... 
previous dc395x_statev */ srb->scsi_phase = scsi_status & PHASEMASK; phase = (u16)scsi_status & PHASEMASK; /* * call dc395x_scsi_phase1[]... "phase entry" handle * every phase to do transfer */ /* data_out_phase1, phase:0 */ /* data_in_phase1, phase:1 */ /* command_phase1, phase:2 */ /* status_phase1, phase:3 */ /* nop1, phase:4 PH_BUS_FREE .. initial phase */ /* nop1, phase:5 PH_BUS_FREE .. initial phase */ /* msgout_phase1, phase:6 */ /* msgin_phase1, phase:7 */ dc395x_statev = dc395x_scsi_phase1[phase]; dc395x_statev(acb, srb, &scsi_status); } out_unlock: DC395x_UNLOCK_IO(acb->scsi_host, flags); } static irqreturn_t dc395x_interrupt(int irq, void *dev_id) { struct AdapterCtlBlk *acb = dev_id; u16 scsi_status; u8 dma_status; irqreturn_t handled = IRQ_NONE; /* * Check for pending interrupt */ scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS); if (scsi_status & SCSIINTERRUPT) { /* interrupt pending - let's process it! */ dc395x_handle_interrupt(acb, scsi_status); handled = IRQ_HANDLED; } else if (dma_status & 0x20) { /* Error from the DMA engine */ dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status); #if 0 dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n"); if (acb->active_dcb) { acb->active_dcb-> flag |= ABORT_DEV_; if (acb->active_dcb->active_srb) enable_msgout_abort(acb, acb->active_dcb->active_srb); } DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO); #else dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n"); acb = NULL; #endif handled = IRQ_HANDLED; } return handled; } static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd); if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) *pscsi_status = PH_BUS_FREE; /*.. initial phase */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ srb->state &= ~SRB_MSGOUT; } static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { u16 i; u8 *ptr; dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgout_phase1"); if (!(srb->state & SRB_MSGOUT)) { srb->state |= SRB_MSGOUT; dprintkl(KERN_DEBUG, "msgout_phase1: (0x%p) Phase unexpected\n", srb->cmd); /* So what ? 
*/ } if (!srb->msg_count) { dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n", srb->cmd); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); return; } ptr = (u8 *)srb->msgout_buf; for (i = 0; i < srb->msg_count; i++) DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++); srb->msg_count = 0; if (srb->msgout_buf[0] == ABORT_TASK_SET) srb->state = SRB_ABORT_SENT; DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); } static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); } static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { struct DeviceCtlBlk *dcb; u8 *ptr; u16 i; dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "command_phase1"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); if (!(srb->flag & AUTO_REQSENSE)) { ptr = (u8 *)srb->cmd->cmnd; for (i = 0; i < srb->cmd->cmd_len; i++) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr); ptr++; } } else { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); dcb = acb->active_dcb; /* target id */ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); } srb->state |= SRB_COMMAND; /* it's important for atn stop */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* SCSI command */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); } /* * Verify that the remaining space in the hw sg lists is the same as * the count of remaining bytes in srb->total_xfer_length */ static void sg_verify_length(struct ScsiReqBlk *srb) { if (debug_enabled(DBG_SG)) { unsigned len = 0; unsigned idx = srb->sg_index; struct SGentry *psge = srb->segment_x + idx; for (; idx < srb->sg_count; psge++, idx++) len += psge->length; if (len != srb->total_xfer_length) dprintkdbg(DBG_SG, "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n", srb->total_xfer_length, len); } } /* * Compute the next Scatter Gather list index and adjust its length * and address if necessary */ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) { u8 idx; u32 xferred = srb->total_xfer_length - left; /* bytes transferred */ struct SGentry *psge = srb->segment_x + srb->sg_index; dprintkdbg(DBG_0, "sg_update_list: Transferred %i of %i bytes, %i remain\n", xferred, srb->total_xfer_length, left); if (xferred == 0) { /* nothing to update since we did not transfer any data */ return; } sg_verify_length(srb); srb->total_xfer_length = left; /* update remaining count */ for (idx = srb->sg_index; idx < srb->sg_count; idx++) { if (xferred >= psge->length) { /* Complete SG entries done */ xferred -= psge->length; } else { /* Partial SG entry done */ dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN, DMA_TO_DEVICE); psge->length -= xferred; psge->address += xferred; srb->sg_index = idx; dma_sync_single_for_device(&srb->dcb->acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN, DMA_TO_DEVICE); break; } psge++; } sg_verify_length(srb); } /* * We have transferred a single byte (PIO mode?) 
and need to update * the count of bytes remaining (total_xfer_length) and update the sg * entry to either point to next byte in the current sg entry, or of * already at the end to point to the start of the next sg entry */ static void sg_subtract_one(struct ScsiReqBlk *srb) { sg_update_list(srb, srb->total_xfer_length - 1); } /* * cleanup_after_transfer * * Makes sure, DMA and SCSI engine are empty, after the transfer has finished * KG: Currently called from StatusPhase1 () * Should probably also be called from other places * Best might be to call it in DataXXPhase0, if new phase will differ */ static void cleanup_after_transfer(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { /*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */ if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) { /* read */ if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40)) clear_fifo(acb, "cleanup/in"); if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); } else { /* write */ if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40)) clear_fifo(acb, "cleanup/out"); } DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); } /* * Those no of bytes will be transferred w/ PIO through the SCSI FIFO * Seems to be needed for unknown reasons; could be a hardware bug :-( */ #define DC395x_LASTPIO 4 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { struct DeviceCtlBlk *dcb = srb->dcb; u16 scsi_status = *pscsi_status; u32 d_left_counter = 0; dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); /* * KG: We need to drain the buffers before we draw any conclusions! * This means telling the DMA to push the rest into SCSI, telling * SCSI to push the rest to the bus. * However, the device might have been the one to stop us (phase * change), and the data in transit just needs to be accounted so * it can be retransmitted.) */ /* * KG: Stop DMA engine pushing more data into the SCSI FIFO * If we need more data, the DMA SG list will be freshly set up, anyway */ dprintkdbg(DBG_PIO, "data_out_phase0: " "DMA{fifocnt=0x%02x fifostat=0x%02x} " "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n", DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status, srb->total_xfer_length); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO); if (!(srb->state & SRB_XFERPAD)) { if (scsi_status & PARITYERROR) srb->status |= PARITY_ERROR; /* * KG: Right, we can't just rely on the SCSI_COUNTER, because this * is the no of bytes it got from the DMA engine not the no it * transferred successfully to the device. (And the difference could * be as much as the FIFO size, I guess ...) */ if (!(scsi_status & SCSIXFERDONE)) { /* * when data transfer from DMA FIFO to SCSI FIFO * if there was some data left in SCSI FIFO */ d_left_counter = (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x1F); if (dcb->sync_period & WIDE_SYNC) d_left_counter <<= 1; dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n" "SCSI{fifocnt=0x%02x cnt=0x%08x} " "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n", DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), (dcb->sync_period & WIDE_SYNC) ? 
"words" : "bytes", DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), DC395x_read32(acb, TRM_S1040_DMA_CXCNT)); } /* * calculate all the residue data that not yet tranfered * SCSI transfer counter + left in SCSI FIFO data * * .....TRM_S1040_SCSI_COUNTER (24bits) * The counter always decrement by one for every SCSI byte transfer. * .....TRM_S1040_SCSI_FIFOCNT ( 5bits) * The counter is SCSI FIFO offset counter (in units of bytes or! words) */ if (srb->total_xfer_length > DC395x_LASTPIO) d_left_counter += DC395x_read32(acb, TRM_S1040_SCSI_COUNTER); /* Is this a good idea? */ /*clear_fifo(acb, "DOP1"); */ /* KG: What is this supposed to be useful for? WIDE padding stuff? */ if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC && scsi_bufflen(srb->cmd) % 2) { d_left_counter = 0; dprintkl(KERN_INFO, "data_out_phase0: Discard 1 byte (0x%02x)\n", scsi_status); } /* * KG: Oops again. Same thinko as above: The SCSI might have been * faster than the DMA engine, so that it ran out of data. * In that case, we have to do just nothing! * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or? */ /* * KG: This is nonsense: We have been WRITING data to the bus * If the SCSI engine has no bytes left, how should the DMA engine? */ if (d_left_counter == 0) { srb->total_xfer_length = 0; } else { /* * if transfer not yet complete * there were some data residue in SCSI FIFO or * SCSI transfer counter not empty */ long oldxferred = srb->total_xfer_length - d_left_counter; const int diff = (dcb->sync_period & WIDE_SYNC) ? 2 : 1; sg_update_list(srb, d_left_counter); /* KG: Most ugly hack! Apparently, this works around a chip bug */ if ((srb->segment_x[srb->sg_index].length == diff && scsi_sg_count(srb->cmd)) || ((oldxferred & ~PAGE_MASK) == (PAGE_SIZE - diff)) ) { dprintkl(KERN_INFO, "data_out_phase0: " "Work around chip bug (%i)?\n", diff); d_left_counter = srb->total_xfer_length - diff; sg_update_list(srb, d_left_counter); /*srb->total_xfer_length -= diff; */ /*srb->virt_addr += diff; */ /*if (srb->cmd->use_sg) */ /* srb->sg_index++; */ } } } if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) { cleanup_after_transfer(acb, srb); } } static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); clear_fifo(acb, "data_out_phase1"); /* do prepare before transfer when data out phase */ data_io_transfer(acb, srb, XFERDATAOUT); } static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { u16 scsi_status = *pscsi_status; dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); /* * KG: DataIn is much more tricky than DataOut. When the device is finished * and switches to another phase, the SCSI engine should be finished too. * But: There might still be bytes left in its FIFO to be fetched by the DMA * engine and transferred to memory. * We should wait for the FIFOs to be emptied by that (is there any way to * enforce this?) and then stop the DMA engine, because it might think, that * there are more bytes to follow. Yes, the device might disconnect prior to * having all bytes transferred! * Also we should make sure that all data from the DMA engine buffer's really * made its way to the system memory! 
Some documentation on this would not * seem to be a bad idea, actually. */ if (!(srb->state & SRB_XFERPAD)) { u32 d_left_counter; unsigned int sc, fc; if (scsi_status & PARITYERROR) { dprintkl(KERN_INFO, "data_in_phase0: (0x%p) " "Parity Error\n", srb->cmd); srb->status |= PARITY_ERROR; } /* * KG: We should wait for the DMA FIFO to be empty ... * but: it would be better to wait first for the SCSI FIFO and then the * the DMA FIFO to become empty? How do we know, that the device not already * sent data to the FIFO in a MsgIn phase, eg.? */ if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) { #if 0 int ctr = 6000000; dprintkl(KERN_DEBUG, "DIP0: Wait for DMA FIFO to flush ...\n"); /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */ /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */ /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */ while (! (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80) && --ctr); if (ctr < 6000000 - 1) dprintkl(KERN_DEBUG "DIP0: Had to wait for DMA ...\n"); if (!ctr) dprintkl(KERN_ERR, "Deadlock in DIP0 waiting for DMA FIFO empty!!\n"); /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */ #endif dprintkdbg(DBG_KG, "data_in_phase0: " "DMA{fifocnt=0x%02x fifostat=0x%02x}\n", DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT)); } /* Now: Check remainig data: The SCSI counters should tell us ... */ sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER); fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); d_left_counter = sc + ((fc & 0x1f) << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 : 0)); dprintkdbg(DBG_KG, "data_in_phase0: " "SCSI{fifocnt=0x%02x%s ctr=0x%08x} " "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} " "Remain{totxfer=%i scsi_fifo+ctr=%i}\n", fc, (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", sc, fc, DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), DC395x_read32(acb, TRM_S1040_DMA_CXCNT), srb->total_xfer_length, d_left_counter); #if DC395x_LASTPIO /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */ if (d_left_counter && srb->total_xfer_length <= DC395x_LASTPIO) { size_t left_io = srb->total_xfer_length; /*u32 addr = (srb->segment_x[srb->sg_index].address); */ /*sg_update_list (srb, d_left_counter); */ dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) " "for remaining %i bytes:", fc & 0x1f, (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", srb->total_xfer_length); if (srb->dcb->sync_period & WIDE_SYNC) DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, CFG2_WIDEFIFO); while (left_io) { unsigned char *virt, *base = NULL; unsigned long flags = 0; size_t len = left_io; size_t offset = srb->request_length - left_io; local_irq_save(flags); /* Assumption: it's inside one page as it's at most 4 bytes and I just assume it's on a 4-byte boundary */ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd), srb->sg_count, &offset, &len); virt = base + offset; left_io -= len; while (len) { u8 byte; byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); *virt++ = byte; if (debug_enabled(DBG_PIO)) printk(" %02x", byte); d_left_counter--; sg_subtract_one(srb); len--; fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); if (fc == 0x40) { left_io = 0; break; } } WARN_ON((fc != 0x40) == !d_left_counter); if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) { /* Read the last byte ... 
*/ if (srb->total_xfer_length > 0) { u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); *virt++ = byte; srb->total_xfer_length--; if (debug_enabled(DBG_PIO)) printk(" %02x", byte); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); } scsi_kunmap_atomic_sg(base); local_irq_restore(flags); } /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */ /*srb->total_xfer_length = 0; */ if (debug_enabled(DBG_PIO)) printk("\n"); } #endif /* DC395x_LASTPIO */ #if 0 /* * KG: This was in DATAOUT. Does it also belong here? * Nobody seems to know what counter and fifo_cnt count exactly ... */ if (!(scsi_status & SCSIXFERDONE)) { /* * when data transfer from DMA FIFO to SCSI FIFO * if there was some data left in SCSI FIFO */ d_left_counter = (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x1F); if (srb->dcb->sync_period & WIDE_SYNC) d_left_counter <<= 1; /* * if WIDE scsi SCSI FIFOCNT unit is word !!! * so need to *= 2 * KG: Seems to be correct ... */ } #endif /* KG: This should not be needed any more! */ if (d_left_counter == 0 || (scsi_status & SCSIXFERCNT_2_ZERO)) { #if 0 int ctr = 6000000; u8 TempDMAstatus; do { TempDMAstatus = DC395x_read8(acb, TRM_S1040_DMA_STATUS); } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr); if (!ctr) dprintkl(KERN_ERR, "Deadlock in DataInPhase0 waiting for DMA!!\n"); srb->total_xfer_length = 0; #endif srb->total_xfer_length = d_left_counter; } else { /* phase changed */ /* * parsing the case: * when a transfer not yet complete * but be disconnected by target * if transfer not yet complete * there were some data residue in SCSI FIFO or * SCSI transfer counter not empty */ sg_update_list(srb, d_left_counter); } } /* KG: The target may decide to disconnect: Empty FIFO before! */ if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) { cleanup_after_transfer(acb, srb); } } static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); data_io_transfer(acb, srb, XFERDATAIN); } static void data_io_transfer(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 io_dir) { struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; dprintkdbg(DBG_0, "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, ((io_dir & DMACMD_DIR) ? 'r' : 'w'), srb->total_xfer_length, srb->sg_index, srb->sg_count); if (srb == acb->tmp_srb) dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n"); if (srb->sg_index >= srb->sg_count) { /* can't happen? out of bounds error */ return; } if (srb->total_xfer_length > DC395x_LASTPIO) { u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS); /* * KG: What should we do: Use SCSI Cmd 0x90/0x92? * Maybe, even ABORTXFER would be appropriate */ if (dma_status & XFERPENDING) { dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! 
" "Expect trouble!\n"); dump_register_info(acb, dcb, srb); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); } /* clear_fifo(acb, "IO"); */ /* * load what physical address of Scatter/Gather list table * want to be transfer */ srb->state |= SRB_DATA_XFER; DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0); if (scsi_sg_count(srb->cmd)) { /* with S/G */ io_dir |= DMACMD_SG; DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR, srb->sg_bus_addr + sizeof(struct SGentry) * srb->sg_index); /* load how many bytes in the sg list table */ DC395x_write32(acb, TRM_S1040_DMA_XCNT, ((u32)(srb->sg_count - srb->sg_index) << 3)); } else { /* without S/G */ io_dir &= ~DMACMD_SG; DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR, srb->segment_x[0].address); DC395x_write32(acb, TRM_S1040_DMA_XCNT, srb->segment_x[0].length); } /* load total transfer length (24bits) max value 16Mbyte */ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, srb->total_xfer_length); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ if (io_dir & DMACMD_DIR) { /* read */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir); } else { DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir); DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_DMA_OUT); } } #if DC395x_LASTPIO else if (srb->total_xfer_length > 0) { /* The last four bytes: Do PIO */ /* * load what physical address of Scatter/Gather list table * want to be transfer */ srb->state |= SRB_DATA_XFER; /* load total transfer length (24bits) max value 16Mbyte */ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, srb->total_xfer_length); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ if (io_dir & DMACMD_DIR) { /* read */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN); } else { /* write */ int ln = srb->total_xfer_length; size_t left_io = srb->total_xfer_length; if (srb->dcb->sync_period & WIDE_SYNC) DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, CFG2_WIDEFIFO); while (left_io) { unsigned char *virt, *base = NULL; unsigned long flags = 0; size_t len = left_io; size_t offset = srb->request_length - left_io; local_irq_save(flags); /* Again, max 4 bytes */ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd), srb->sg_count, &offset, &len); virt = base + offset; left_io -= len; while (len--) { if (debug_enabled(DBG_PIO)) printk(" %02x", *virt); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++); sg_subtract_one(srb); } scsi_kunmap_atomic_sg(base); local_irq_restore(flags); } if (srb->dcb->sync_period & WIDE_SYNC) { if (ln % 2) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); if (debug_enabled(DBG_PIO)) printk(" |00"); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); } /*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */ if (debug_enabled(DBG_PIO)) printk("\n"); DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); } } #endif /* DC395x_LASTPIO */ else { /* xfer pad */ if (srb->sg_count) { srb->adapter_status = H_OVER_UNDER_RUN; srb->status |= OVER_RUN; } /* * KG: despite the fact that we are using 16 bits I/O ops * the SCSI FIFO is only 8 bits according to the docs * (we can set bit 1 in 0x8f to serialize FIFO access ...) 
*/ if (dcb->sync_period & WIDE_SYNC) { DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2); DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, CFG2_WIDEFIFO); if (io_dir & DMACMD_DIR) { DC395x_read8(acb, TRM_S1040_SCSI_FIFO); DC395x_read8(acb, TRM_S1040_SCSI_FIFO); } else { /* Danger, Robinson: If you find KGs * scattered over the wide disk, the driver * or chip is to blame :-( */ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K'); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G'); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); } else { DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); /* Danger, Robinson: If you find a collection of Ks on your disk * something broke :-( */ if (io_dir & DMACMD_DIR) DC395x_read8(acb, TRM_S1040_SCSI_FIFO); else DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K'); } srb->state |= SRB_XFERPAD; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ /* SCSI command */ bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT; DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval); } } static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ srb->state = SRB_COMPLETED; *pscsi_status = PH_BUS_FREE; /*.. initial phase */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT); } static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->state = SRB_STATUS; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); } /* Check if the message is complete */ static inline u8 msgin_completed(u8 * msgbuf, u32 len) { if (*msgbuf == EXTENDED_MESSAGE) { if (len < 2) return 0; if (len < msgbuf[1] + 2) return 0; } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */ if (len < 2) return 0; return 1; } /* reject_msg */ static inline void msgin_reject(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { srb->msgout_buf[0] = MESSAGE_REJECT; srb->msg_count = 1; DC395x_ENABLE_MSGOUT; srb->state &= ~SRB_MSGIN; srb->state |= SRB_MSGOUT; dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n", srb->msgin_buf[0], srb->dcb->target_id, srb->dcb->target_lun); } static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, u8 tag) { struct ScsiReqBlk *srb = NULL; struct ScsiReqBlk *i; dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n", srb->cmd, tag, srb); if (!(dcb->tag_mask & (1 << tag))) dprintkl(KERN_DEBUG, "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n", dcb->tag_mask, tag); if (list_empty(&dcb->srb_going_list)) goto mingx0; list_for_each_entry(i, &dcb->srb_going_list, list) { if (i->tag_number == tag) { srb = i; break; } } if (!srb) goto mingx0; dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n", srb->cmd, srb->dcb->target_id, srb->dcb->target_lun); if (dcb->flag & ABORT_DEV_) { /*srb->state = SRB_ABORT_SENT; */ enable_msgout_abort(acb, srb); } if (!(srb->state & SRB_DISCONNECT)) goto mingx0; memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len); srb->state |= dcb->active_srb->state; 
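	/*
	 * Adopt the message bytes and state accumulated so far by the
	 * placeholder SRB, then let this tagged command take over as the
	 * device's active request for the data transfer that follows.
	 */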
srb->state |= SRB_DATA_XFER; dcb->active_srb = srb; /* How can we make the DORS happy? */ return srb; mingx0: srb = acb->tmp_srb; srb->state = SRB_UNEXPECT_RESEL; dcb->active_srb = srb; srb->msgout_buf[0] = ABORT_TASK; srb->msg_count = 1; DC395x_ENABLE_MSGOUT; dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag); return srb; } static inline void reprogram_regs(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); set_xfer_rate(acb, dcb); } /* set async transfer mode */ static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n", dcb->target_id, dcb->target_lun); dcb->sync_mode &= ~(SYNC_NEGO_ENABLE); dcb->sync_mode |= SYNC_NEGO_DONE; /*dcb->sync_period &= 0; */ dcb->sync_offset = 0; dcb->min_nego_period = 200 >> 2; /* 200ns <=> 5 MHz */ srb->state &= ~SRB_DO_SYNC_NEGO; reprogram_regs(acb, dcb); if ((dcb->sync_mode & WIDE_NEGO_ENABLE) && !(dcb->sync_mode & WIDE_NEGO_DONE)) { build_wdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n"); } } /* set sync transfer mode */ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; int fact; dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins " "(%02i.%01i MHz) Offset %i\n", dcb->target_id, srb->msgin_buf[3] << 2, (250 / srb->msgin_buf[3]), ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3], srb->msgin_buf[4]); if (srb->msgin_buf[4] > 15) srb->msgin_buf[4] = 15; if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) dcb->sync_offset = 0; else if (dcb->sync_offset == 0) dcb->sync_offset = srb->msgin_buf[4]; if (srb->msgin_buf[4] > dcb->sync_offset) srb->msgin_buf[4] = dcb->sync_offset; else dcb->sync_offset = srb->msgin_buf[4]; bval = 0; while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval] || dcb->min_nego_period > clock_period[bval])) bval++; if (srb->msgin_buf[3] < clock_period[bval]) dprintkl(KERN_INFO, "msgin_set_sync: Increase sync nego period to %ins\n", clock_period[bval] << 2); srb->msgin_buf[3] = clock_period[bval]; dcb->sync_period &= 0xf0; dcb->sync_period |= ALT_SYNC | bval; dcb->min_nego_period = srb->msgin_buf[3]; if (dcb->sync_period & WIDE_SYNC) fact = 500; else fact = 250; dprintkl(KERN_INFO, "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n", dcb->target_id, (fact == 500) ? 
"Wide16" : "", dcb->min_nego_period << 2, dcb->sync_offset, (fact / dcb->min_nego_period), ((fact % dcb->min_nego_period) * 10 + dcb->min_nego_period / 2) / dcb->min_nego_period); if (!(srb->state & SRB_DO_SYNC_NEGO)) { /* Reply with corrected SDTR Message */ dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n", srb->msgin_buf[3] << 2, srb->msgin_buf[4]); memcpy(srb->msgout_buf, srb->msgin_buf, 5); srb->msg_count = 5; DC395x_ENABLE_MSGOUT; dcb->sync_mode |= SYNC_NEGO_DONE; } else { if ((dcb->sync_mode & WIDE_NEGO_ENABLE) && !(dcb->sync_mode & WIDE_NEGO_DONE)) { build_wdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n"); } } srb->state &= ~SRB_DO_SYNC_NEGO; dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE; reprogram_regs(acb, dcb); } static inline void msgin_set_nowide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id); dcb->sync_period &= ~WIDE_SYNC; dcb->sync_mode &= ~(WIDE_NEGO_ENABLE); dcb->sync_mode |= WIDE_NEGO_DONE; srb->state &= ~SRB_DO_WIDE_NEGO; reprogram_regs(acb, dcb); if ((dcb->sync_mode & SYNC_NEGO_ENABLE) && !(dcb->sync_mode & SYNC_NEGO_DONE)) { build_sdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n"); } } static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO && acb->config & HCC_WIDE_CARD) ? 1 : 0; dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id); if (srb->msgin_buf[3] > wide) srb->msgin_buf[3] = wide; /* Completed */ if (!(srb->state & SRB_DO_WIDE_NEGO)) { dprintkl(KERN_DEBUG, "msgin_set_wide: Wide nego initiated <%02i>\n", dcb->target_id); memcpy(srb->msgout_buf, srb->msgin_buf, 4); srb->msg_count = 4; srb->state |= SRB_DO_WIDE_NEGO; DC395x_ENABLE_MSGOUT; } dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE); if (srb->msgin_buf[3] > 0) dcb->sync_period |= WIDE_SYNC; else dcb->sync_period &= ~WIDE_SYNC; srb->state &= ~SRB_DO_WIDE_NEGO; /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */ dprintkdbg(DBG_1, "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n", (8 << srb->msgin_buf[3]), dcb->target_id); reprogram_regs(acb, dcb); if ((dcb->sync_mode & SYNC_NEGO_ENABLE) && !(dcb->sync_mode & SYNC_NEGO_DONE)) { build_sdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n"); } } /* * extended message codes: * * code description * * 02h Reserved * 00h MODIFY DATA POINTER * 01h SYNCHRONOUS DATA TRANSFER REQUEST * 03h WIDE DATA TRANSFER REQUEST * 04h - 7Fh Reserved * 80h - FFh Vendor specific */ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { struct DeviceCtlBlk *dcb = acb->active_dcb; dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd); srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); if (msgin_completed(srb->msgin_buf, acb->msg_len)) { /* Now eval the msg */ switch (srb->msgin_buf[0]) { case DISCONNECT: srb->state = SRB_DISCONNECT; break; case SIMPLE_QUEUE_TAG: case HEAD_OF_QUEUE_TAG: case ORDERED_QUEUE_TAG: srb = msgin_qtag(acb, dcb, srb->msgin_buf[1]); break; case MESSAGE_REJECT: DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN | DO_DATALATCH); /* A sync nego message was rejected ! */ if (srb->state & SRB_DO_SYNC_NEGO) { msgin_set_async(acb, srb); break; } /* A wide nego message was rejected ! 
*/ if (srb->state & SRB_DO_WIDE_NEGO) { msgin_set_nowide(acb, srb); break; } enable_msgout_abort(acb, srb); /*srb->state |= SRB_ABORT_SENT */ break; case EXTENDED_MESSAGE: /* SDTR */ if (srb->msgin_buf[1] == 3 && srb->msgin_buf[2] == EXTENDED_SDTR) { msgin_set_sync(acb, srb); break; } /* WDTR */ if (srb->msgin_buf[1] == 2 && srb->msgin_buf[2] == EXTENDED_WDTR && srb->msgin_buf[3] <= 2) { /* sanity check ... */ msgin_set_wide(acb, srb); break; } msgin_reject(acb, srb); break; case IGNORE_WIDE_RESIDUE: /* Discard wide residual */ dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n"); break; case COMMAND_COMPLETE: /* nothing has to be done */ break; case SAVE_POINTERS: /* * SAVE POINTER may be ignored as we have the struct * ScsiReqBlk* associated with the scsi command. */ dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " "SAVE POINTER rem=%i Ignore\n", srb->cmd, srb->total_xfer_length); break; case RESTORE_POINTERS: dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n"); break; case ABORT: dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " "<%02i-%i> ABORT msg\n", srb->cmd, dcb->target_id, dcb->target_lun); dcb->flag |= ABORT_DEV_; enable_msgout_abort(acb, srb); break; default: /* reject unknown messages */ if (srb->msgin_buf[0] & IDENTIFY_BASE) { dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n"); srb->msg_count = 1; srb->msgout_buf[0] = dcb->identify_msg; DC395x_ENABLE_MSGOUT; srb->state |= SRB_MSGOUT; /*break; */ } msgin_reject(acb, srb); } /* Clear counter and MsgIn state */ srb->state &= ~SRB_MSGIN; acb->msg_len = 0; } *pscsi_status = PH_BUS_FREE; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important ... you know! */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT); } static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgin_phase1"); DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); if (!(srb->state & SRB_MSGIN)) { srb->state &= ~SRB_DISCONNECT; srb->state |= SRB_MSGIN; } DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ /* SCSI command */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN); } static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { } static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { } static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { struct DeviceCtlBlk *i; /* set all lun device's period, offset */ if (dcb->identify_msg & 0x07) return; if (acb->scan_devices) { current_sync_offset = dcb->sync_offset; return; } list_for_each_entry(i, &acb->dcb_list, list) if (i->target_id == dcb->target_id) { i->sync_period = dcb->sync_period; i->sync_offset = dcb->sync_offset; i->sync_mode = dcb->sync_mode; i->min_nego_period = dcb->min_nego_period; } } static void disconnect(struct AdapterCtlBlk *acb) { struct DeviceCtlBlk *dcb = acb->active_dcb; struct ScsiReqBlk *srb; if (!dcb) { dprintkl(KERN_ERR, "disconnect: No such device\n"); udelay(500); /* Suspend queue for a while */ acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time; clear_fifo(acb, "disconnectEx"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); return; } srb = dcb->active_srb; acb->active_dcb = NULL; dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd); srb->scsi_phase = PH_BUS_FREE; /* initial phase */ clear_fifo(acb, "disconnect"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); if (srb->state & 
SRB_UNEXPECT_RESEL) { dprintkl(KERN_ERR, "disconnect: Unexpected reselection <%02i-%i>\n", dcb->target_id, dcb->target_lun); srb->state = 0; waiting_process_next(acb); } else if (srb->state & SRB_ABORT_SENT) { dcb->flag &= ~ABORT_DEV_; acb->last_reset = jiffies + HZ / 2 + 1; dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); doing_srb_done(acb, DID_ABORT, srb->cmd, 1); waiting_process_next(acb); } else { if ((srb->state & (SRB_START_ + SRB_MSGOUT)) || !(srb-> state & (SRB_DISCONNECT | SRB_COMPLETED))) { /* * Selection time out * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED) */ /* Unexp. Disc / Sel Timeout */ if (srb->state != SRB_START_ && srb->state != SRB_MSGOUT) { srb->state = SRB_READY; dprintkl(KERN_DEBUG, "disconnect: (0x%p) Unexpected\n", srb->cmd); srb->target_status = SCSI_STAT_SEL_TIMEOUT; goto disc1; } else { /* Normal selection timeout */ dprintkdbg(DBG_KG, "disconnect: (0x%p) " "<%02i-%i> SelTO\n", srb->cmd, dcb->target_id, dcb->target_lun); if (srb->retry_count++ > DC395x_MAX_RETRIES || acb->scan_devices) { srb->target_status = SCSI_STAT_SEL_TIMEOUT; goto disc1; } free_tag(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list); dprintkdbg(DBG_KG, "disconnect: (0x%p) Retry\n", srb->cmd); waiting_set_timer(acb, HZ / 20); } } else if (srb->state & SRB_DISCONNECT) { u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); /* * SRB_DISCONNECT (This is what we expect!) */ if (bval & 0x40) { dprintkdbg(DBG_0, "disconnect: SCSI bus stat " " 0x%02x: ACK set! Other controllers?\n", bval); /* It could come from another initiator, therefore don't do much ! */ } else waiting_process_next(acb); } else if (srb->state & SRB_COMPLETED) { disc1: /* ** SRB_COMPLETED */ free_tag(dcb, srb); dcb->active_srb = NULL; srb->state = SRB_FREE; srb_done(acb, dcb, srb); } } } static void reselect(struct AdapterCtlBlk *acb) { struct DeviceCtlBlk *dcb = acb->active_dcb; struct ScsiReqBlk *srb = NULL; u16 rsel_tar_lun_id; u8 id, lun; dprintkdbg(DBG_0, "reselect: acb=%p\n", acb); clear_fifo(acb, "reselect"); /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */ /* Read Reselected Target ID and LUN */ rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID); if (dcb) { /* Arbitration lost but Reselection win */ srb = dcb->active_srb; if (!srb) { dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, " "but active_srb == NULL\n"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ return; } /* Why the if ? */ if (!acb->scan_devices) { dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> " "Arb lost but Resel win rsel=%i stat=0x%04x\n", srb->cmd, dcb->target_id, dcb->target_lun, rsel_tar_lun_id, DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); /*srb->state |= SRB_DISCONNECT; */ srb->state = SRB_READY; free_tag(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list); waiting_set_timer(acb, HZ / 20); /* return; */ } } /* Read Reselected Target Id and LUN */ if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8))) dprintkl(KERN_DEBUG, "reselect: Expects identify msg. " "Got %i!\n", rsel_tar_lun_id); id = rsel_tar_lun_id & 0xff; lun = (rsel_tar_lun_id >> 8) & 7; dcb = find_dcb(acb, id, lun); if (!dcb) { dprintkl(KERN_ERR, "reselect: From non existent device " "<%02i-%i>\n", id, lun); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ return; } acb->active_dcb = dcb; if (!(dcb->dev_mode & NTC_DO_DISCONNECT)) dprintkl(KERN_DEBUG, "reselect: in spite of forbidden " "disconnection? 
<%02i-%i>\n", dcb->target_id, dcb->target_lun); if (dcb->sync_mode & EN_TAG_QUEUEING) { srb = acb->tmp_srb; dcb->active_srb = srb; } else { /* There can be only one! */ srb = dcb->active_srb; if (!srb || !(srb->state & SRB_DISCONNECT)) { /* * abort command */ dprintkl(KERN_DEBUG, "reselect: w/o disconnected cmds <%02i-%i>\n", dcb->target_id, dcb->target_lun); srb = acb->tmp_srb; srb->state = SRB_UNEXPECT_RESEL; dcb->active_srb = srb; enable_msgout_abort(acb, srb); } else { if (dcb->flag & ABORT_DEV_) { /*srb->state = SRB_ABORT_SENT; */ enable_msgout_abort(acb, srb); } else srb->state = SRB_DATA_XFER; } } srb->scsi_phase = PH_BUS_FREE; /* initial phase */ /* Program HA ID, target ID, period and offset */ dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id); DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */ DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */ DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); /* sync period, wide */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ /* SCSI command */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT); } static inline u8 tagq_blacklist(char *name) { #ifndef DC395x_NO_TAGQ #if 0 u8 i; for (i = 0; i < BADDEVCNT; i++) if (memcmp(name, DC395x_baddevname1[i], 28) == 0) return 1; #endif return 0; #else return 1; #endif } static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr) { /* Check for SCSI format (ANSI and Response data format) */ if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) { if ((ptr->Flags & SCSI_INQ_CMDQUEUE) && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) && /*(dcb->dev_mode & NTC_DO_DISCONNECT) */ /* ((dcb->dev_type == TYPE_DISK) || (dcb->dev_type == TYPE_MOD)) && */ !tagq_blacklist(((char *)ptr) + 8)) { if (dcb->max_command == 1) dcb->max_command = dcb->acb->tag_max_num; dcb->sync_mode |= EN_TAG_QUEUEING; /*dcb->tag_mask = 0; */ } else dcb->max_command = 1; } } static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr) { u8 bval1 = ptr->DevType & SCSI_DEVTYPE; dcb->dev_type = bval1; /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */ disc_tagq_set(dcb, ptr); } /* unmap mapped pci regions from SRB */ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct scsi_cmnd *cmd = srb->cmd; enum dma_data_direction dir = cmd->sc_data_direction; if (scsi_sg_count(cmd) && dir != DMA_NONE) { /* unmap DC395x SG list */ dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", srb->sg_bus_addr, SEGMENTX_LEN); dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN, DMA_TO_DEVICE); dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", scsi_sg_count(cmd), scsi_bufflen(cmd)); /* unmap the sg segments */ scsi_dma_unmap(cmd); } } /* unmap mapped pci sense buffer from SRB */ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { if (!(srb->flag & AUTO_REQSENSE)) return; /* Unmap sense buffer */ dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n", srb->segment_x[0].address); dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address, srb->segment_x[0].length, DMA_FROM_DEVICE); /* Restore SG stuff */ srb->total_xfer_length = srb->xferred; srb->segment_x[0].address = srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address; srb->segment_x[0].length = srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length; } /* * Complete execution of a SCSI 
command
 * Signal completion to the generic SCSI driver
 */
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 tempcnt, status;
	struct scsi_cmnd *cmd = srb->cmd;
	enum dma_data_direction dir = cmd->sc_data_direction;
	int ckc_only = 1;

	dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
		srb->cmd->device->id, (u8)srb->cmd->device->lun);
	dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
		   srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
		   scsi_sglist(cmd));
	status = srb->target_status;
	set_host_byte(cmd, DID_OK);
	set_status_byte(cmd, SAM_STAT_GOOD);
	if (srb->flag & AUTO_REQSENSE) {
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
		pci_unmap_srb_sense(acb, srb);
		/*
		 ** target status..........................
		 */
		srb->flag &= ~AUTO_REQSENSE;
		srb->adapter_status = 0;
		srb->target_status = SAM_STAT_CHECK_CONDITION;
		if (debug_enabled(DBG_1)) {
			switch (cmd->sense_buffer[2] & 0x0f) {
			case NOT_READY:
				dprintkl(KERN_DEBUG,
				    "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case UNIT_ATTENTION:
				dprintkl(KERN_DEBUG,
				    "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case ILLEGAL_REQUEST:
				dprintkl(KERN_DEBUG,
				    "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case MEDIUM_ERROR:
				dprintkl(KERN_DEBUG,
				    "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			case HARDWARE_ERROR:
				dprintkl(KERN_DEBUG,
				    "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
				    cmd->cmnd[0], dcb->target_id,
				    dcb->target_lun, status, acb->scan_devices);
				break;
			}
			if (cmd->sense_buffer[7] >= 6)
				printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
					"(0x%08x 0x%08x)\n",
					cmd->sense_buffer[2],
					cmd->sense_buffer[12],
					cmd->sense_buffer[13],
					*((unsigned int *)(cmd->sense_buffer + 3)),
					*((unsigned int *)(cmd->sense_buffer + 8)));
			else
				printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
					cmd->sense_buffer[2],
					*((unsigned int *)(cmd->sense_buffer + 3)));
		}
		if (status == SAM_STAT_CHECK_CONDITION) {
			set_host_byte(cmd, DID_BAD_TARGET);
			goto ckc_e;
		}
		dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");

		set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);

		goto ckc_e;
	}

	/*************************************************************/
	if (status) {
		/*
		 * target status..........................
		 */
		if (status == SAM_STAT_CHECK_CONDITION) {
			request_sense(acb, dcb, srb);
			return;
		} else if (status == SAM_STAT_TASK_SET_FULL) {
			tempcnt = (u8)list_size(&dcb->srb_going_list);
			dprintkl(KERN_INFO,
				"QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
				dcb->target_id, dcb->target_lun, tempcnt);
			if (tempcnt > 1)
				tempcnt--;
			dcb->max_command = tempcnt;
			free_tag(dcb, srb);
			list_move(&srb->list, &dcb->srb_waiting_list);
			waiting_set_timer(acb, HZ / 20);
			srb->adapter_status = 0;
			srb->target_status = 0;
			return;
		} else if (status == SCSI_STAT_SEL_TIMEOUT) {
			srb->adapter_status = H_SEL_TIMEOUT;
			srb->target_status = 0;
			set_host_byte(cmd, DID_NO_CONNECT);
		} else {
			srb->adapter_status = 0;
			set_host_byte(cmd, DID_ERROR);
			set_status_byte(cmd, status);
		}
	} else {
		/*
		 ** process initiator status..........................
*/ status = srb->adapter_status; if (status & H_OVER_UNDER_RUN) { srb->target_status = 0; scsi_msg_to_host_byte(cmd, srb->end_message); } else if (srb->status & PARITY_ERROR) { set_host_byte(cmd, DID_PARITY); } else { /* No error */ srb->adapter_status = 0; srb->target_status = 0; } } ckc_only = 0; /* Check Error Conditions */ ckc_e: pci_unmap_srb(acb, srb); if (cmd->cmnd[0] == INQUIRY) { unsigned char *base = NULL; struct ScsiInqData *ptr; unsigned long flags = 0; struct scatterlist* sg = scsi_sglist(cmd); size_t offset = 0, len = sizeof(struct ScsiInqData); local_irq_save(flags); base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len); ptr = (struct ScsiInqData *)(base + offset); if (!ckc_only && get_host_byte(cmd) == DID_OK && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8 && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2) dcb->inquiry7 = ptr->Flags; /*if( srb->cmd->cmnd[0] == INQUIRY && */ /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */ if ((get_host_byte(cmd) == DID_OK) || (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) { if (!dcb->init_tcq_flag) { add_dev(acb, dcb, ptr); dcb->init_tcq_flag = 1; } } scsi_kunmap_atomic_sg(base); local_irq_restore(flags); } /* Here is the info for Doug Gilbert's sg3 ... */ scsi_set_resid(cmd, srb->total_xfer_length); if (debug_enabled(DBG_KG)) { if (srb->total_xfer_length) dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " "cmnd=0x%02x Missed %i bytes\n", cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0], srb->total_xfer_length); } if (srb != acb->tmp_srb) { /* Add to free list */ dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", cmd, cmd->result); list_move_tail(&srb->list, &acb->srb_free_list); } else { dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n"); } scsi_done(cmd); waiting_process_next(acb); } /* abort all cmds in our queues */ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, struct scsi_cmnd *cmd, u8 force) { struct DeviceCtlBlk *dcb; dprintkl(KERN_INFO, "doing_srb_done: pids "); list_for_each_entry(dcb, &acb->dcb_list, list) { struct ScsiReqBlk *srb; struct ScsiReqBlk *tmp; struct scsi_cmnd *p; list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) { p = srb->cmd; printk("G:%p(%02i-%i) ", p, p->device->id, (u8)p->device->lun); list_del(&srb->list); free_tag(dcb, srb); list_add_tail(&srb->list, &acb->srb_free_list); set_host_byte(p, did_flag); set_status_byte(p, SAM_STAT_GOOD); pci_unmap_srb_sense(acb, srb); pci_unmap_srb(acb, srb); if (force) { /* For new EH, we normally don't need to give commands back, * as they all complete or all time out */ scsi_done(p); } } if (!list_empty(&dcb->srb_going_list)) dprintkl(KERN_DEBUG, "How could the ML send cmnds to the Going queue? 
<%02i-%i>\n", dcb->target_id, dcb->target_lun); if (dcb->tag_mask) dprintkl(KERN_DEBUG, "tag_mask for <%02i-%i> should be empty, is %08x!\n", dcb->target_id, dcb->target_lun, dcb->tag_mask); /* Waiting queue */ list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) { p = srb->cmd; printk("W:%p<%02i-%i>", p, p->device->id, (u8)p->device->lun); list_move_tail(&srb->list, &acb->srb_free_list); set_host_byte(p, did_flag); set_status_byte(p, SAM_STAT_GOOD); pci_unmap_srb_sense(acb, srb); pci_unmap_srb(acb, srb); if (force) { /* For new EH, we normally don't need to give commands back, * as they all complete or all time out */ scsi_done(cmd); } } if (!list_empty(&dcb->srb_waiting_list)) dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n", list_size(&dcb->srb_waiting_list), dcb->target_id, dcb->target_lun); dcb->flag &= ~ABORT_DEV_; } printk("\n"); } static void reset_scsi_bus(struct AdapterCtlBlk *acb) { dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb); acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET)) /* nothing */; } static void set_basic_config(struct AdapterCtlBlk *acb) { u8 bval; u16 wval; DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout); if (acb->config & HCC_PARITY) bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK; else bval = PHASELATCH | INITIATOR | BLOCKRST; DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval); /* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03); /* was 0x13: default */ /* program Host ID */ DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* set ansynchronous transfer */ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00); /* Turn LED control off */ wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F; DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval); /* DMA config */ wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL; wval |= DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ; DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval); /* Clear pending interrupt status */ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); /* Enable SCSI interrupt */ DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F); DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */ ); } static void scsi_reset_detect(struct AdapterCtlBlk *acb) { dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb); /* delay half a second */ if (timer_pending(&acb->waiting_timer)) del_timer(&acb->waiting_timer); DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */ udelay(500); /* Maybe we locked up the bus? Then lets wait even longer ... 
 */
	acb->last_reset =
	    jiffies + 5 * HZ / 2 + HZ * acb->eeprom.delay_time;
	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */
	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		acb->acb_flag |= RESET_DONE;
	} else {
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}

static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
		cmd, cmd->device->id, (u8)cmd->device->lun);

	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;
	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	/* Save some data */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
	srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
	/* Map sense buffer */
	srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
			cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
			DMA_FROM_DEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
	       cmd->sense_buffer, srb->segment_x[0].address,
	       SCSI_SENSE_BUFFERSIZE);
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		dprintkl(KERN_DEBUG,
			"request_sense: (0x%p) failed <%02i-%i>\n",
			srb->cmd, dcb->target_id, dcb->target_lun);
		list_move(&srb->list, &dcb->srb_waiting_list);
		waiting_set_timer(acb, HZ / 100);
	}
}

/**
 * device_alloc - Allocate a new device instance. This creates the
 * device instance and sets up all the data items. The adapter
 * instance is required to obtain configuration information for this
 * device. This does *not* add this device to the adapter's device
 * list.
 *
 * @acb: The adapter to obtain configuration information from.
 * @target: The target for the new device.
 * @lun: The lun for the new device.
 *
 * Return the new device if successful or NULL on failure.
**/ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, u8 target, u8 lun) { struct NvRamType *eeprom = &acb->eeprom; u8 period_index = eeprom->target[target].period & 0x07; struct DeviceCtlBlk *dcb; dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC); dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun); if (!dcb) return NULL; dcb->acb = NULL; INIT_LIST_HEAD(&dcb->srb_going_list); INIT_LIST_HEAD(&dcb->srb_waiting_list); dcb->active_srb = NULL; dcb->tag_mask = 0; dcb->max_command = 1; dcb->target_id = target; dcb->target_lun = lun; dcb->dev_mode = eeprom->target[target].cfg0; #ifndef DC395x_NO_DISCONNECT dcb->identify_msg = IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun); #else dcb->identify_msg = IDENTIFY(0, lun); #endif dcb->inquiry7 = 0; dcb->sync_mode = 0; dcb->min_nego_period = clock_period[period_index]; dcb->sync_period = 0; dcb->sync_offset = 0; dcb->flag = 0; #ifndef DC395x_NO_WIDE if ((dcb->dev_mode & NTC_DO_WIDE_NEGO) && (acb->config & HCC_WIDE_CARD)) dcb->sync_mode |= WIDE_NEGO_ENABLE; #endif #ifndef DC395x_NO_SYNC if (dcb->dev_mode & NTC_DO_SYNC_NEGO) if (!(lun) || current_sync_offset) dcb->sync_mode |= SYNC_NEGO_ENABLE; #endif if (dcb->target_lun != 0) { /* Copy settings */ struct DeviceCtlBlk *p = NULL, *iter; list_for_each_entry(iter, &acb->dcb_list, list) if (iter->target_id == dcb->target_id) { p = iter; break; } if (!p) { kfree(dcb); return NULL; } dprintkdbg(DBG_1, "device_alloc: <%02i-%i> copy from <%02i-%i>\n", dcb->target_id, dcb->target_lun, p->target_id, p->target_lun); dcb->sync_mode = p->sync_mode; dcb->sync_period = p->sync_period; dcb->min_nego_period = p->min_nego_period; dcb->sync_offset = p->sync_offset; dcb->inquiry7 = p->inquiry7; } return dcb; } /** * adapter_add_device - Adds the device instance to the adaptor instance. * * @acb: The adapter device to be updated * @dcb: A newly created and initialised device instance to add. **/ static void adapter_add_device(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { /* backpointer to adapter */ dcb->acb = acb; /* set run_robin to this device if it is currently empty */ if (list_empty(&acb->dcb_list)) acb->dcb_run_robin = dcb; /* add device to list */ list_add_tail(&dcb->list, &acb->dcb_list); /* update device maps */ acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun); acb->children[dcb->target_id][dcb->target_lun] = dcb; } /** * adapter_remove_device - Removes the device instance from the adaptor * instance. The device instance is not check in any way or freed by this. * The caller is expected to take care of that. This will simply remove the * device from the adapters data strcutures. * * @acb: The adapter device to be updated * @dcb: A device that has previously been added to the adapter. 
**/ static void adapter_remove_device(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { struct DeviceCtlBlk *i; struct DeviceCtlBlk *tmp; dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n", dcb->target_id, dcb->target_lun); /* fix up any pointers to this device that we have in the adapter */ if (acb->active_dcb == dcb) acb->active_dcb = NULL; if (acb->dcb_run_robin == dcb) acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb); /* unlink from list */ list_for_each_entry_safe(i, tmp, &acb->dcb_list, list) if (dcb == i) { list_del(&i->list); break; } /* clear map and children */ acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun); acb->children[dcb->target_id][dcb->target_lun] = NULL; dcb->acb = NULL; } /** * adapter_remove_and_free_device - Removes a single device from the adapter * and then frees the device information. * * @acb: The adapter device to be updated * @dcb: A device that has previously been added to the adapter. */ static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { if (list_size(&dcb->srb_going_list) > 1) { dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> " "Won't remove because of %i active requests.\n", dcb->target_id, dcb->target_lun, list_size(&dcb->srb_going_list)); return; } adapter_remove_device(acb, dcb); kfree(dcb); } /** * adapter_remove_and_free_all_devices - Removes and frees all of the * devices associated with the specified adapter. * * @acb: The adapter from which all devices should be removed. **/ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb) { struct DeviceCtlBlk *dcb; struct DeviceCtlBlk *tmp; dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n", list_size(&acb->dcb_list)); list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list) adapter_remove_and_free_device(acb, dcb); } /** * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new * scsi device that we need to deal with. We allocate a new device and then * insert that device into the adapters device list. * * @scsi_device: The new scsi device that we need to handle. **/ static int dc395x_slave_alloc(struct scsi_device *scsi_device) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata; struct DeviceCtlBlk *dcb; dcb = device_alloc(acb, scsi_device->id, scsi_device->lun); if (!dcb) return -ENOMEM; adapter_add_device(acb, dcb); return 0; } /** * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a * device that is going away. * * @scsi_device: The new scsi device that we need to handle. **/ static void dc395x_slave_destroy(struct scsi_device *scsi_device) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata; struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun); if (dcb) adapter_remove_and_free_device(acb, dcb); } /** * trms1040_wait_30us: wait for 30 us * * Waits for 30us (using the chip by the looks of it..) 
* * @io_port: base I/O address **/ static void trms1040_wait_30us(unsigned long io_port) { /* ScsiPortStallExecution(30); wait 30 us */ outb(5, io_port + TRM_S1040_GEN_TIMER); while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT)) /* nothing */ ; } /** * trms1040_write_cmd - write the secified command and address to * chip * * @io_port: base I/O address * @cmd: SB + op code (command) to send * @addr: address to send **/ static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr) { int i; u8 send_data; /* program SB + OP code */ for (i = 0; i < 3; i++, cmd <<= 1) { send_data = NVR_SELECT; if (cmd & 0x04) /* Start from bit 2 */ send_data |= NVR_BITOUT; outb(send_data, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); } /* send address */ for (i = 0; i < 7; i++, addr <<= 1) { send_data = NVR_SELECT; if (addr & 0x40) /* Start from bit 6 */ send_data |= NVR_BITOUT; outb(send_data, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); } outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); } /** * trms1040_set_data - store a single byte in the eeprom * * Called from write all to write a single byte into the SSEEPROM * Which is done one bit at a time. * * @io_port: base I/O address * @addr: offset into EEPROM * @byte: bytes to write **/ static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte) { int i; u8 send_data; /* Send write command & address */ trms1040_write_cmd(io_port, 0x05, addr); /* Write data */ for (i = 0; i < 8; i++, byte <<= 1) { send_data = NVR_SELECT; if (byte & 0x80) /* Start from bit 7 */ send_data |= NVR_BITOUT; outb(send_data, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); } outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); /* Disable chip select */ outb(0, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); /* Wait for write ready */ while (1) { outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN) break; } /* Disable chip select */ outb(0, io_port + TRM_S1040_GEN_NVRAM); } /** * trms1040_write_all - write 128 bytes to the eeprom * * Write the supplied 128 bytes to the chips SEEPROM * * @eeprom: the data to write * @io_port: the base io port **/ static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port) { u8 *b_eeprom = (u8 *)eeprom; u8 addr; /* Enable SEEPROM */ outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM), io_port + TRM_S1040_GEN_CONTROL); /* write enable */ trms1040_write_cmd(io_port, 0x04, 0xFF); outb(0, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); /* write */ for (addr = 0; addr < 128; addr++, b_eeprom++) trms1040_set_data(io_port, addr, *b_eeprom); /* write disable */ trms1040_write_cmd(io_port, 0x04, 0x00); outb(0, io_port + TRM_S1040_GEN_NVRAM); trms1040_wait_30us(io_port); /* Disable SEEPROM */ outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM), io_port + TRM_S1040_GEN_CONTROL); } /** * trms1040_get_data - get a single byte from the eeprom * * Called from read all to read a single byte 
from the SEEPROM,
 * which is done one bit at a time.
 *
 * @io_port: base I/O address
 * @addr: offset into SEEPROM
 *
 * Returns the byte read.
 **/
static u8 trms1040_get_data(unsigned long io_port, u8 addr)
{
	int i;
	u8 read_byte;
	u8 result = 0;

	/* Send read command & address */
	trms1040_write_cmd(io_port, 0x06, addr);

	/* read data */
	for (i = 0; i < 8; i++) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);

		/* Get data bit while falling edge */
		read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
		result <<= 1;
		if (read_byte & NVR_BITIN)
			result |= 1;

		trms1040_wait_30us(io_port);
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	return result;
}

/**
 * trms1040_read_all - read all bytes from the eeprom
 *
 * Read the 128 bytes from the SEEPROM.
 *
 * @eeprom: where to store the data
 * @io_port: the base io port
 **/
static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;
	u8 addr;

	/* Enable SEEPROM */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* read details */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		*b_eeprom = trms1040_get_data(io_port, addr);

	/* Disable SEEPROM */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}

/**
 * check_eeprom - get and check contents of the eeprom
 *
 * Read the 128 seeprom bytes into the memory provided in eeprom.
 * Checks the checksum and if it's not correct it uses a set of default
 * values.
 *
 * @eeprom: caller-allocated structure to read the eeprom data into
 * @io_port: io port to read from
 **/
static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	u16 *w_eeprom = (u16 *)eeprom;
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
* Load a set of defaults into the eeprom buffer */ dprintkl(KERN_WARNING, "EEProm checksum error: using default values and options.\n"); eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM; eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8); eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040; eeprom->sub_sys_id[1] = (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8); eeprom->sub_class = 0x00; eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM; eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8); eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040; eeprom->device_id[1] = (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8); eeprom->reserved = 0x00; for (d_addr = 0, d_eeprom = (u32 *)eeprom->target; d_addr < 16; d_addr++, d_eeprom++) *d_eeprom = 0x00000077; /* cfg3,cfg2,period,cfg0 */ *d_eeprom++ = 0x04000F07; /* max_tag,delay_time,channel_cfg,scsi_id */ *d_eeprom++ = 0x00000015; /* reserved1,boot_lun,boot_target,reserved0 */ for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++) *d_eeprom = 0x00; /* Now load defaults (maybe set by boot/module params) */ set_safe_settings(); fix_settings(); eeprom_override(eeprom); eeprom->cksum = 0x00; for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom; w_addr < 63; w_addr++, w_eeprom++) cksum += *w_eeprom; *w_eeprom = 0x1234 - cksum; trms1040_write_all(eeprom, io_port); eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value; } else { set_safe_settings(); eeprom_index_to_delay(eeprom); eeprom_override(eeprom); } } /** * print_eeprom_settings - output the eeprom settings * to the kernel log so people can see what they were. * * @eeprom: The eeprom data strucutre to show details for. **/ static void print_eeprom_settings(struct NvRamType *eeprom) { dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n", eeprom->scsi_id, eeprom->target[0].period, clock_speed[eeprom->target[0].period] / 10, clock_speed[eeprom->target[0].period] % 10, eeprom->target[0].cfg0); dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n", eeprom->channel_cfg, eeprom->max_tag, 1 << eeprom->max_tag, eeprom->delay_time); } /* Free SG tables */ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb) { int i; const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page) kfree(acb->srb_array[i].segment_x); } /* * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*) * should never cross a page boundary */ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) { const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1) *SEGMENTX_LEN; int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE; const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; int srb_idx = 0; unsigned i = 0; struct SGentry *ptr; for (i = 0; i < DC395x_MAX_SRB_CNT; i++) acb->srb_array[i].segment_x = NULL; dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); while (pages--) { ptr = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!ptr) { adapter_sg_tables_free(acb); return 1; } dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n", PAGE_SIZE, ptr, srb_idx); i = 0; while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT) acb->srb_array[srb_idx++].segment_x = ptr + (i++ * DC395x_MAX_SG_LISTENTRY); } if (i < srbs_per_page) acb->srb.segment_x = ptr + (i * DC395x_MAX_SG_LISTENTRY); else dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n"); return 0; } /** * adapter_print_config - print adapter connection and termination * config * * The io port in the adapter needs to have been set before 
calling * this function. * * @acb: The adapter to print the information for. **/ static void adapter_print_config(struct AdapterCtlBlk *acb) { u8 bval; bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS); dprintkl(KERN_INFO, "%sConnectors: ", ((bval & WIDESCSI) ? "(Wide) " : "")); if (!(bval & CON5068)) printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50"); if (!(bval & CON68)) printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)"); if (!(bval & CON50)) printk("int50 "); if ((bval & (CON5068 | CON50 | CON68)) == 0 /*(CON5068 | CON50 | CON68) */ ) printk(" Oops! (All 3?) "); bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL); printk(" Termination: "); if (bval & DIS_TERM) printk("Disabled\n"); else { if (bval & AUTOTERM) printk("Auto "); if (bval & LOW8TERM) printk("Low "); if (bval & UP8TERM) printk("High "); printk("\n"); } } /** * adapter_init_params - Initialize the various parameters in the * adapter structure. Note that the pointer to the scsi_host is set * early (when this instance is created) and the io_port and irq * values are set later after they have been reserved. This just gets * everything set to a good starting position. * * The eeprom structure in the adapter needs to have been set before * calling this function. * * @acb: The adapter to initialize. **/ static void adapter_init_params(struct AdapterCtlBlk *acb) { struct NvRamType *eeprom = &acb->eeprom; int i; /* NOTE: acb->scsi_host is set at scsi_host/acb creation time */ /* NOTE: acb->io_port_base is set at port registration time */ /* NOTE: acb->io_port_len is set at port registration time */ INIT_LIST_HEAD(&acb->dcb_list); acb->dcb_run_robin = NULL; acb->active_dcb = NULL; INIT_LIST_HEAD(&acb->srb_free_list); /* temp SRB for Q tag used or abort command used */ acb->tmp_srb = &acb->srb; timer_setup(&acb->waiting_timer, waiting_timeout, 0); timer_setup(&acb->selto_timer, NULL, 0); acb->srb_count = DC395x_MAX_SRB_CNT; acb->sel_timeout = DC395x_SEL_TIMEOUT; /* timeout=250ms */ /* NOTE: acb->irq_level is set at IRQ registration time */ acb->tag_max_num = 1 << eeprom->max_tag; if (acb->tag_max_num > 30) acb->tag_max_num = 30; acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE, RESET_DEV */ acb->gmode2 = eeprom->channel_cfg; acb->config = 0; /* NOTE: actually set in adapter_init_chip */ if (eeprom->channel_cfg & NAC_SCANLUN) acb->lun_chk = 1; acb->scan_devices = 1; acb->scsi_host->this_id = eeprom->scsi_id; acb->hostid_bit = (1 << acb->scsi_host->this_id); for (i = 0; i < DC395x_MAX_SCSI_ID; i++) acb->dcb_map[i] = 0; acb->msg_len = 0; /* link static array of srbs into the srb free list */ for (i = 0; i < acb->srb_count - 1; i++) list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list); } /** * adapter_init_scsi_host - Initialize the scsi host instance based on * values that we have already stored in the adapter instance. There's * some mention that a lot of these are deprecated, so we won't use * them (we'll use the ones in the adapter instance) but we'll fill * them in in case something else needs them. * * The eeprom structure, irq and io ports in the adapter need to have * been set before calling this function. * * @host: The scsi host instance to fill in the values for. 
**/ static void adapter_init_scsi_host(struct Scsi_Host *host) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata; struct NvRamType *eeprom = &acb->eeprom; host->max_cmd_len = 24; host->can_queue = DC395x_MAX_CMD_QUEUE; host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN; host->this_id = (int)eeprom->scsi_id; host->io_port = acb->io_port_base; host->n_io_port = acb->io_port_len; host->dma_channel = -1; host->unique_id = acb->io_port_base; host->irq = acb->irq_level; acb->last_reset = jiffies; host->max_id = 16; if (host->max_id - 1 == eeprom->scsi_id) host->max_id--; if (eeprom->channel_cfg & NAC_SCANLUN) host->max_lun = 8; else host->max_lun = 1; } /** * adapter_init_chip - Get the chip into a know state and figure out * some of the settings that apply to this adapter. * * The io port in the adapter needs to have been set before calling * this function. The config will be configured correctly on return. * * @acb: The adapter which we are to init. **/ static void adapter_init_chip(struct AdapterCtlBlk *acb) { struct NvRamType *eeprom = &acb->eeprom; /* Mask all the interrupt */ DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00); DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00); /* Reset SCSI module */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); /* Reset PCI/DMA module */ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); udelay(20); /* program configuration 0 */ acb->config = HCC_AUTOTERM | HCC_PARITY; if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI) acb->config |= HCC_WIDE_CARD; if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET) acb->config |= HCC_SCSI_RESET; if (acb->config & HCC_SCSI_RESET) { dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n"); DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); /*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */ /*spin_unlock_irq (&io_request_lock); */ udelay(500); acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time; /*spin_lock_irq (&io_request_lock); */ } } /** * adapter_init - Grab the resource for the card, setup the adapter * information, set the card into a known state, create the various * tables etc etc. This basically gets all adapter information all up * to date, initialised and gets the chip in sync with it. * * @acb: The adapter which we are to init. * @io_port: The base I/O port * @io_port_len: The I/O port size * @irq: IRQ * * Returns 0 if the initialization succeeds, any other value on * failure. 
**/ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, u32 io_port_len, unsigned int irq) { if (!request_region(io_port, io_port_len, DC395X_NAME)) { dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port); goto failed; } /* store port base to indicate we have registered it */ acb->io_port_base = io_port; acb->io_port_len = io_port_len; if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) { /* release the region we just claimed */ dprintkl(KERN_INFO, "Failed to register IRQ\n"); goto failed; } /* store irq to indicate we have registered it */ acb->irq_level = irq; /* get eeprom configuration information and command line settings etc */ check_eeprom(&acb->eeprom, io_port); print_eeprom_settings(&acb->eeprom); /* setup adapter control block */ adapter_init_params(acb); /* display card connectors/termination settings */ adapter_print_config(acb); if (adapter_sg_tables_alloc(acb)) { dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n"); goto failed; } adapter_init_scsi_host(acb->scsi_host); adapter_init_chip(acb); set_basic_config(acb); dprintkdbg(DBG_0, "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p " "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n", acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk), sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk)); return 0; failed: if (acb->irq_level) free_irq(acb->irq_level, acb); if (acb->io_port_base) release_region(acb->io_port_base, acb->io_port_len); adapter_sg_tables_free(acb); return 1; } /** * adapter_uninit_chip - cleanly shut down the scsi controller chip, * stopping all operations and disabling interrupt generation on the * card. * * @acb: The adapter which we are to shutdown. **/ static void adapter_uninit_chip(struct AdapterCtlBlk *acb) { /* disable interrupts */ DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0); DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0); /* reset the scsi bus */ if (acb->config & HCC_SCSI_RESET) reset_scsi_bus(acb); /* clear any pending interrupt state */ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); } /** * adapter_uninit - Shut down the chip and release any resources that * we had allocated. Once this returns the adapter should not be used * anymore. * * @acb: The adapter which we are to un-initialize. **/ static void adapter_uninit(struct AdapterCtlBlk *acb) { unsigned long flags; DC395x_LOCK_IO(acb->scsi_host, flags); /* remove timers */ if (timer_pending(&acb->waiting_timer)) del_timer(&acb->waiting_timer); if (timer_pending(&acb->selto_timer)) del_timer(&acb->selto_timer); adapter_uninit_chip(acb); adapter_remove_and_free_all_devices(acb); DC395x_UNLOCK_IO(acb->scsi_host, flags); if (acb->irq_level) free_irq(acb->irq_level, acb); if (acb->io_port_base) release_region(acb->io_port_base, acb->io_port_len); adapter_sg_tables_free(acb); } #undef YESNO #define YESNO(YN) \ if (YN) seq_printf(m, " Yes ");\ else seq_printf(m, " No ") static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata; int spd, spd1; struct DeviceCtlBlk *dcb; unsigned long flags; int dev; seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n" " Driver Version " DC395X_VERSION "\n"); DC395x_LOCK_IO(acb->scsi_host, flags); seq_printf(m, "SCSI Host Nr %i, ", host->host_no); seq_printf(m, "DC395U/UW/F DC315/U %s\n", (acb->config & HCC_WIDE_CARD) ? 
"Wide" : ""); seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base); seq_printf(m, "irq_level 0x%04x, ", acb->irq_level); seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); seq_printf(m, "AdapterID %i\n", host->this_id); seq_printf(m, "tag_max_num %i", acb->tag_max_num); /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */ seq_printf(m, ", FilterCfg 0x%02x", DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1)); seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time); /*seq_printf(m, "\n"); */ seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list)); seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]); seq_printf(m, " %8ph\n", &acb->dcb_map[8]); seq_puts(m, "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n"); dev = 0; list_for_each_entry(dcb, &acb->dcb_list, list) { int nego_period; seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id, dcb->target_lun); YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK); YESNO(dcb->sync_offset); YESNO(dcb->sync_period & WIDE_SYNC); YESNO(dcb->dev_mode & NTC_DO_DISCONNECT); YESNO(dcb->dev_mode & NTC_DO_SEND_START); YESNO(dcb->sync_mode & EN_TAG_QUEUEING); nego_period = clock_period[dcb->sync_period & 0x07] << 2; if (dcb->sync_offset) seq_printf(m, " %03i ns ", nego_period); else seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2)); if (dcb->sync_offset & 0x0f) { spd = 1000 / (nego_period); spd1 = 1000 % (nego_period); spd1 = (spd1 * 10 + nego_period / 2) / (nego_period); seq_printf(m, " %2i.%1i M %02i ", spd, spd1, (dcb->sync_offset & 0x0f)); } else seq_puts(m, " "); /* Add more info ... */ seq_printf(m, " %02i\n", dcb->max_command); dev++; } if (timer_pending(&acb->waiting_timer)) seq_puts(m, "Waiting queue timer running\n"); else seq_putc(m, '\n'); list_for_each_entry(dcb, &acb->dcb_list, list) { struct ScsiReqBlk *srb; if (!list_empty(&dcb->srb_waiting_list)) seq_printf(m, "DCB (%02i-%i): Waiting: %i:", dcb->target_id, dcb->target_lun, list_size(&dcb->srb_waiting_list)); list_for_each_entry(srb, &dcb->srb_waiting_list, list) seq_printf(m, " %p", srb->cmd); if (!list_empty(&dcb->srb_going_list)) seq_printf(m, "\nDCB (%02i-%i): Going : %i:", dcb->target_id, dcb->target_lun, list_size(&dcb->srb_going_list)); list_for_each_entry(srb, &dcb->srb_going_list, list) seq_printf(m, " %p", srb->cmd); if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) seq_putc(m, '\n'); } if (debug_enabled(DBG_1)) { seq_printf(m, "DCB list for ACB %p:\n", acb); list_for_each_entry(dcb, &acb->dcb_list, list) { seq_printf(m, "%p -> ", dcb); } seq_puts(m, "END\n"); } DC395x_UNLOCK_IO(acb->scsi_host, flags); return 0; } static const struct scsi_host_template dc395x_driver_template = { .module = THIS_MODULE, .proc_name = DC395X_NAME, .show_info = dc395x_show_info, .name = DC395X_BANNER " " DC395X_VERSION, .queuecommand = dc395x_queue_command, .slave_alloc = dc395x_slave_alloc, .slave_destroy = dc395x_slave_destroy, .can_queue = DC395x_MAX_CAN_QUEUE, .this_id = 7, .sg_tablesize = DC395x_MAX_SG_TABLESIZE, .cmd_per_lun = DC395x_MAX_CMD_PER_LUN, .eh_abort_handler = dc395x_eh_abort, .eh_bus_reset_handler = dc395x_eh_bus_reset, .dma_boundary = PAGE_SIZE - 1, }; /** * banner_display - Display banner on first instance of driver * initialized. 
**/ static void banner_display(void) { static int banner_done = 0; if (!banner_done) { dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION); banner_done = 1; } } /** * dc395x_init_one - Initialise a single instance of the adapter. * * The PCI layer will call this once for each instance of the adapter * that it finds in the system. The pci_dev strcuture indicates which * instance we are being called from. * * @dev: The PCI device to initialize. * @id: Looks like a pointer to the entry in our pci device table * that was actually matched by the PCI subsystem. * * Returns 0 on success, or an error code (-ve) on failure. **/ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct Scsi_Host *scsi_host = NULL; struct AdapterCtlBlk *acb = NULL; unsigned long io_port_base; unsigned int io_port_len; unsigned int irq; dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev)); banner_display(); if (pci_enable_device(dev)) { dprintkl(KERN_INFO, "PCI Enable device failed.\n"); return -ENODEV; } io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK; io_port_len = pci_resource_len(dev, 0); irq = dev->irq; dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq); /* allocate scsi host information (includes out adapter) */ scsi_host = scsi_host_alloc(&dc395x_driver_template, sizeof(struct AdapterCtlBlk)); if (!scsi_host) { dprintkl(KERN_INFO, "scsi_host_alloc failed\n"); goto fail; } acb = (struct AdapterCtlBlk*)scsi_host->hostdata; acb->scsi_host = scsi_host; acb->dev = dev; /* initialise the adapter and everything we need */ if (adapter_init(acb, io_port_base, io_port_len, irq)) { dprintkl(KERN_INFO, "adapter init failed\n"); acb = NULL; goto fail; } pci_set_master(dev); /* get the scsi mid level to scan for new devices on the bus */ if (scsi_add_host(scsi_host, &dev->dev)) { dprintkl(KERN_ERR, "scsi_add_host failed\n"); goto fail; } pci_set_drvdata(dev, scsi_host); scsi_scan_host(scsi_host); return 0; fail: if (acb != NULL) adapter_uninit(acb); if (scsi_host != NULL) scsi_host_put(scsi_host); pci_disable_device(dev); return -ENODEV; } /** * dc395x_remove_one - Called to remove a single instance of the * adapter. * * @dev: The PCI device to initialize. **/ static void dc395x_remove_one(struct pci_dev *dev) { struct Scsi_Host *scsi_host = pci_get_drvdata(dev); struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata); dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb); scsi_remove_host(scsi_host); adapter_uninit(acb); pci_disable_device(dev); scsi_host_put(scsi_host); } static struct pci_device_id dc395x_pci_table[] = { { .vendor = PCI_VENDOR_ID_TEKRAM, .device = PCI_DEVICE_ID_TEKRAM_TRMS1040, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, dc395x_pci_table); static struct pci_driver dc395x_driver = { .name = DC395X_NAME, .id_table = dc395x_pci_table, .probe = dc395x_init_one, .remove = dc395x_remove_one, }; module_pci_driver(dc395x_driver); MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff"); MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/dc395x.c
// SPDX-License-Identifier: GPL-2.0-only /* * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux. * * Based on work by Alan Hourihane * * Rewritten to use 53c700.c by Kars de Jong <[email protected]> */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <asm/mvme16xhw.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("Kars de Jong <[email protected]>"); MODULE_DESCRIPTION("MVME16x NCR53C710 driver"); MODULE_LICENSE("GPL"); static struct scsi_host_template mvme16x_scsi_driver_template = { .name = "MVME16x NCR53c710 SCSI", .proc_name = "MVME16x", .this_id = 7, .module = THIS_MODULE, }; static struct platform_device *mvme16x_scsi_device; static int mvme16x_probe(struct platform_device *dev) { struct Scsi_Host * host = NULL; struct NCR_700_Host_Parameters *hostdata; if (!MACH_IS_MVME16x) goto out; if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) { printk(KERN_INFO "mvme16x-scsi: detection disabled, " "SCSI chip not present\n"); goto out; } hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); if (hostdata == NULL) { printk(KERN_ERR "mvme16x-scsi: " "Failed to allocate host data\n"); goto out; } /* Fill in the required pieces of hostdata */ hostdata->base = (void __iomem *)0xfff47000UL; hostdata->clock = 50; /* XXX - depends on the CPU clock! */ hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; hostdata->dcntl_extra = EA_710; hostdata->ctest7_extra = CTEST7_TT1; /* and register the chip */ host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, &dev->dev); if (!host) { printk(KERN_ERR "mvme16x-scsi: No host detected; " "board configuration problem?\n"); goto out_free; } host->this_id = 7; host->base = 0xfff47000UL; host->irq = MVME16x_IRQ_SCSI; if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) { printk(KERN_ERR "mvme16x-scsi: request_irq failed\n"); goto out_put_host; } /* Enable scsi chip ints */ { volatile unsigned long v; /* Enable scsi interrupts at level 4 in PCCchip2 */ v = in_be32(0xfff4202c); v = (v & ~0xff) | 0x10 | 4; out_be32(0xfff4202c, v); } platform_set_drvdata(dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_free: kfree(hostdata); out: return -ENODEV; } static int mvme16x_device_remove(struct platform_device *dev) { struct Scsi_Host *host = platform_get_drvdata(dev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); /* Disable scsi chip ints */ { volatile unsigned long v; v = in_be32(0xfff4202c); v &= ~0x10; out_be32(0xfff4202c, v); } scsi_remove_host(host); NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); return 0; } static struct platform_driver mvme16x_scsi_driver = { .driver = { .name = "mvme16x-scsi", }, .probe = mvme16x_probe, .remove = mvme16x_device_remove, }; static int __init mvme16x_scsi_init(void) { int err; err = platform_driver_register(&mvme16x_scsi_driver); if (err) return err; mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi", -1, NULL, 0); if (IS_ERR(mvme16x_scsi_device)) { platform_driver_unregister(&mvme16x_scsi_driver); return PTR_ERR(mvme16x_scsi_device); } return 0; } static void __exit mvme16x_scsi_exit(void) { platform_device_unregister(mvme16x_scsi_device); platform_driver_unregister(&mvme16x_scsi_driver); } module_init(mvme16x_scsi_init); 
module_exit(mvme16x_scsi_exit);
linux-master
drivers/scsi/mvme16x_scsi.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/scsi/scsi_proc.c * * The functions in this file provide an interface between * the PROC file system and the SCSI device drivers * It is mainly used for debugging, statistics and to pass * information directly to the lowlevel driver. * * (c) 1995 Michael Neuffer [email protected] * Version: 0.99.8 last change: 95/09/13 * * generic command parser provided by: * Andreas Heilwagen <[email protected]> * * generic_proc_info() support of xxxx_info() by: * Michael A. Griffith <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include "scsi_priv.h" #include "scsi_logging.h" /* 4K page size, but our output routines, use some slack for overruns */ #define PROC_BLOCK_SIZE (3*1024) static struct proc_dir_entry *proc_scsi; /* Protects scsi_proc_list */ static DEFINE_MUTEX(global_host_template_mutex); static LIST_HEAD(scsi_proc_list); /** * struct scsi_proc_entry - (host template, SCSI proc dir) association * @entry: entry in scsi_proc_list. * @sht: SCSI host template associated with the procfs directory. * @proc_dir: procfs directory associated with the SCSI host template. * @present: Number of SCSI hosts instantiated for @sht. */ struct scsi_proc_entry { struct list_head entry; const struct scsi_host_template *sht; struct proc_dir_entry *proc_dir; unsigned int present; }; static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct Scsi_Host *shost = pde_data(file_inode(file)); ssize_t ret = -ENOMEM; char *page; if (count > PROC_BLOCK_SIZE) return -EOVERFLOW; if (!shost->hostt->write_info) return -EINVAL; page = (char *)__get_free_page(GFP_KERNEL); if (page) { ret = -EFAULT; if (copy_from_user(page, buf, count)) goto out; ret = shost->hostt->write_info(shost, page, count); } out: free_page((unsigned long)page); return ret; } static int proc_scsi_show(struct seq_file *m, void *v) { struct Scsi_Host *shost = m->private; return shost->hostt->show_info(m, shost); } static int proc_scsi_host_open(struct inode *inode, struct file *file) { return single_open_size(file, proc_scsi_show, pde_data(inode), 4 * PAGE_SIZE); } static struct scsi_proc_entry * __scsi_lookup_proc_entry(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; lockdep_assert_held(&global_host_template_mutex); list_for_each_entry(e, &scsi_proc_list, entry) if (e->sht == sht) return e; return NULL; } static struct scsi_proc_entry * scsi_lookup_proc_entry(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; mutex_lock(&global_host_template_mutex); e = __scsi_lookup_proc_entry(sht); mutex_unlock(&global_host_template_mutex); return e; } /** * scsi_template_proc_dir() - returns the procfs dir for a SCSI host template * @sht: SCSI host template pointer. */ struct proc_dir_entry * scsi_template_proc_dir(const struct scsi_host_template *sht) { struct scsi_proc_entry *e = scsi_lookup_proc_entry(sht); return e ? 
e->proc_dir : NULL; } EXPORT_SYMBOL_GPL(scsi_template_proc_dir); static const struct proc_ops proc_scsi_ops = { .proc_open = proc_scsi_host_open, .proc_release = single_release, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = proc_scsi_host_write }; /** * scsi_proc_hostdir_add - Create directory in /proc for a scsi host * @sht: owner of this directory * * Sets sht->proc_dir to the new directory. */ int scsi_proc_hostdir_add(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; int ret; if (!sht->show_info) return 0; mutex_lock(&global_host_template_mutex); e = __scsi_lookup_proc_entry(sht); if (!e) { e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) { ret = -ENOMEM; goto unlock; } } if (e->present++) goto success; e->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); if (!e->proc_dir) { printk(KERN_ERR "%s: proc_mkdir failed for %s\n", __func__, sht->proc_name); ret = -ENOMEM; goto unlock; } e->sht = sht; list_add_tail(&e->entry, &scsi_proc_list); success: e = NULL; ret = 0; unlock: mutex_unlock(&global_host_template_mutex); kfree(e); return ret; } /** * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host * @sht: owner of directory */ void scsi_proc_hostdir_rm(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; if (!sht->show_info) return; mutex_lock(&global_host_template_mutex); e = __scsi_lookup_proc_entry(sht); if (e && !--e->present) { remove_proc_entry(sht->proc_name, proc_scsi); list_del(&e->entry); kfree(e); } mutex_unlock(&global_host_template_mutex); } /** * scsi_proc_host_add - Add entry for this host to appropriate /proc dir * @shost: host to add */ void scsi_proc_host_add(struct Scsi_Host *shost) { const struct scsi_host_template *sht = shost->hostt; struct scsi_proc_entry *e; struct proc_dir_entry *p; char name[10]; if (!sht->show_info) return; e = scsi_lookup_proc_entry(sht); if (!e) goto err; sprintf(name,"%d", shost->host_no); p = proc_create_data(name, S_IRUGO | S_IWUSR, e->proc_dir, &proc_scsi_ops, shost); if (!p) goto err; return; err: shost_printk(KERN_ERR, shost, "%s: Failed to register host (%s failed)\n", __func__, e ? "proc_create_data()" : "scsi_proc_hostdir_add()"); } /** * scsi_proc_host_rm - remove this host's entry from /proc * @shost: which host */ void scsi_proc_host_rm(struct Scsi_Host *shost) { const struct scsi_host_template *sht = shost->hostt; struct scsi_proc_entry *e; char name[10]; if (!sht->show_info) return; e = scsi_lookup_proc_entry(sht); if (!e) return; sprintf(name,"%d", shost->host_no); remove_proc_entry(name, e->proc_dir); } /** * proc_print_scsidevice - return data about this host * @dev: A scsi device * @data: &struct seq_file to output to. * * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type, * and revision. 
*/ static int proc_print_scsidevice(struct device *dev, void *data) { struct scsi_device *sdev; struct seq_file *s = data; int i; if (!scsi_is_sdev_device(dev)) goto out; sdev = to_scsi_device(dev); seq_printf(s, "Host: scsi%d Channel: %02d Id: %02d Lun: %02llu\n Vendor: ", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); for (i = 0; i < 8; i++) { if (sdev->vendor[i] >= 0x20) seq_putc(s, sdev->vendor[i]); else seq_putc(s, ' '); } seq_puts(s, " Model: "); for (i = 0; i < 16; i++) { if (sdev->model[i] >= 0x20) seq_putc(s, sdev->model[i]); else seq_putc(s, ' '); } seq_puts(s, " Rev: "); for (i = 0; i < 4; i++) { if (sdev->rev[i] >= 0x20) seq_putc(s, sdev->rev[i]); else seq_putc(s, ' '); } seq_putc(s, '\n'); seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); seq_printf(s, " ANSI SCSI revision: %02x", sdev->scsi_level - (sdev->scsi_level > 1)); if (sdev->scsi_level == 2) seq_puts(s, " CCS\n"); else seq_putc(s, '\n'); out: return 0; } /** * scsi_add_single_device - Respond to user request to probe for/add device * @host: user-supplied decimal integer * @channel: user-supplied decimal integer * @id: user-supplied decimal integer * @lun: user-supplied decimal integer * * Description: called by writing "scsi add-single-device" to /proc/scsi/scsi. * * does scsi_host_lookup() and either user_scan() if that transport * type supports it, or else scsi_scan_host_selected() * * Note: this seems to be aimed exclusively at SCSI parallel busses. */ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) { struct Scsi_Host *shost; int error = -ENXIO; shost = scsi_host_lookup(host); if (!shost) return error; if (shost->transportt->user_scan) error = shost->transportt->user_scan(shost, channel, id, lun); else error = scsi_scan_host_selected(shost, channel, id, lun, SCSI_SCAN_MANUAL); scsi_host_put(shost); return error; } /** * scsi_remove_single_device - Respond to user request to remove a device * @host: user-supplied decimal integer * @channel: user-supplied decimal integer * @id: user-supplied decimal integer * @lun: user-supplied decimal integer * * Description: called by writing "scsi remove-single-device" to * /proc/scsi/scsi. Does a scsi_device_lookup() and scsi_remove_device() */ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun) { struct scsi_device *sdev; struct Scsi_Host *shost; int error = -ENXIO; shost = scsi_host_lookup(host); if (!shost) return error; sdev = scsi_device_lookup(shost, channel, id, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); error = 0; } scsi_host_put(shost); return error; } /** * proc_scsi_write - handle writes to /proc/scsi/scsi * @file: not used * @buf: buffer to write * @length: length of buf, at most PAGE_SIZE * @ppos: not used * * Description: this provides a legacy mechanism to add or remove devices by * Host, Channel, ID, and Lun. To use, * "echo 'scsi add-single-device 0 1 2 3' > /proc/scsi/scsi" or * "echo 'scsi remove-single-device 0 1 2 3' > /proc/scsi/scsi" with * "0 1 2 3" replaced by the Host, Channel, Id, and Lun. * * Note: this seems to be aimed at parallel SCSI. Most modern busses (USB, * SATA, Firewire, Fibre Channel, etc) dynamically assign these values to * provide a unique identifier and nothing more. 
*/ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) return -EINVAL; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) return -ENOMEM; err = -EFAULT; if (copy_from_user(buffer, buf, length)) goto out; err = -EINVAL; if (length < PAGE_SIZE) { end = buffer + length; *end = '\0'; } else { end = buffer + PAGE_SIZE - 1; if (*end) goto out; } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi * with "0 1 2 3" replaced by your "Host Channel Id Lun". */ if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; host = (p < end) ? simple_strtoul(p, &p, 0) : 0; channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); /* * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi * with "0 1 2 3" replaced by your "Host Channel Id Lun". */ } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; host = (p < end) ? simple_strtoul(p, &p, 0) : 0; channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } /* * convert success returns so that we return the * number of bytes consumed. */ if (!err) err = length; out: free_page((unsigned long)buffer); return err; } static inline struct device *next_scsi_device(struct device *start) { struct device *next = bus_find_next_device(&scsi_bus_type, start); put_device(start); return next; } static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos) { struct device *dev = NULL; loff_t n = *pos; while ((dev = next_scsi_device(dev))) { if (!n--) break; sfile->private++; } return dev; } static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos) { (*pos)++; sfile->private++; return next_scsi_device(v); } static void scsi_seq_stop(struct seq_file *sfile, void *v) { put_device(v); } static int scsi_seq_show(struct seq_file *sfile, void *dev) { if (!sfile->private) seq_puts(sfile, "Attached devices:\n"); return proc_print_scsidevice(dev, sfile); } static const struct seq_operations scsi_seq_ops = { .start = scsi_seq_start, .next = scsi_seq_next, .stop = scsi_seq_stop, .show = scsi_seq_show }; /** * proc_scsi_open - glue function * @inode: not used * @file: passed to single_open() * * Associates proc_scsi_show with this file */ static int proc_scsi_open(struct inode *inode, struct file *file) { /* * We don't really need this for the write case but it doesn't * harm either. */ return seq_open(file, &scsi_seq_ops); } static const struct proc_ops scsi_scsi_proc_ops = { .proc_open = proc_scsi_open, .proc_read = seq_read, .proc_write = proc_scsi_write, .proc_lseek = seq_lseek, .proc_release = seq_release, }; /** * scsi_init_procfs - create scsi and scsi/scsi in procfs */ int __init scsi_init_procfs(void) { struct proc_dir_entry *pde; proc_scsi = proc_mkdir("scsi", NULL); if (!proc_scsi) goto err1; pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops); if (!pde) goto err2; return 0; err2: remove_proc_entry("scsi", NULL); err1: return -ENOMEM; } /** * scsi_exit_procfs - Remove scsi/scsi and scsi from procfs */ void scsi_exit_procfs(void) { remove_proc_entry("scsi/scsi", NULL); remove_proc_entry("scsi", NULL); }
linux-master
drivers/scsi/scsi_proc.c
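The scsi_proc.c entry above only creates /proc/scsi/ plumbing for a host whose template supplies show_info (and, for writable entries, write_info). As a rough illustration of that contract -- a sketch, not code from the tree, with every example_* name invented -- a low-level driver would wire the hooks into its scsi_host_template roughly like this:

/*
 * Hypothetical sketch only: .proc_name, .show_info and .write_info are the
 * real scsi_host_template hooks consumed by scsi_proc_hostdir_add() and
 * scsi_proc_host_add() above; everything named example_* is made up here.
 */
#include <linux/seq_file.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-host state kept in the hostdata area (shost_priv()). */
struct example_host {
	unsigned int debug;		/* toggled via writes to the proc file */
	unsigned long io_count;		/* reported via reads of the proc file */
};

/* Backs reads of /proc/scsi/example/<host_no>, called via proc_scsi_show(). */
static int example_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
	struct example_host *eh = shost_priv(shost);

	seq_printf(m, "debug=%u io_count=%lu\n", eh->debug, eh->io_count);
	return 0;
}

/* Backs writes to the same file, called via proc_scsi_host_write(). */
static int example_write_info(struct Scsi_Host *shost, char *buffer, int length)
{
	struct example_host *eh = shost_priv(shost);

	/* buffer is not NUL-terminated; stay within the length we were given */
	if (length >= 7 && !strncmp(buffer, "debug=", 6))
		eh->debug = buffer[6] - '0';
	return length;	/* report the whole write as consumed */
}

static struct scsi_host_template example_template = {
	.name		= "example",
	.proc_name	= "example",	/* directory name under /proc/scsi/ */
	.show_info	= example_show_info,
	.write_info	= example_write_info,
	/* queuecommand, this_id, sg_tablesize, etc. omitted from this sketch */
};

With such a template registered, scsi_proc_hostdir_add() creates /proc/scsi/example/ and scsi_proc_host_add() adds one file per host number; reads then flow through proc_scsi_show() and writes through proc_scsi_host_write() shown above.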
// SPDX-License-Identifier: GPL-2.0-only /* * Zalon 53c7xx device driver. * By Richard Hirst ([email protected]) */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/types.h> #include <asm/hardware.h> #include <asm/io.h> #include "../parisc/gsc.h" #include "ncr53c8xx.h" MODULE_AUTHOR("Richard Hirst"); MODULE_DESCRIPTION("Bluefish/Zalon 720 SCSI Driver"); MODULE_LICENSE("GPL"); #define GSC_SCSI_ZALON_OFFSET 0x800 #define IO_MODULE_EIM (1*4) #define IO_MODULE_DC_ADATA (2*4) #define IO_MODULE_II_CDATA (3*4) #define IO_MODULE_IO_COMMAND (12*4) #define IO_MODULE_IO_STATUS (13*4) #define IOSTATUS_RY 0x40 #define IOSTATUS_FE 0x80 #define IOIIDATA_SMINT5L 0x40000000 #define IOIIDATA_MINT5EN 0x20000000 #define IOIIDATA_PACKEN 0x10000000 #define IOIIDATA_PREFETCHEN 0x08000000 #define IOIIDATA_IOII 0x00000020 #define CMD_RESET 5 static struct ncr_chip zalon720_chip __initdata = { .revision_id = 0x0f, .burst_max = 3, .offset_max = 8, .nr_divisor = 4, .features = FE_WIDE | FE_DIFF | FE_EHP| FE_MUX | FE_EA, }; #if 0 /* FIXME: * Is this function dead code? or is someone planning on using it in the * future. The clock = (int) pdc_result[16] does not look correct to * me ... I think it should be iodc_data[16]. Since this cause a compile * error with the new encapsulated PDC, I'm not compiling in this function. * - RB */ /* poke SCSI clock out of iodc data */ static u8 iodc_data[32] __attribute__ ((aligned (64))); static unsigned long pdc_result[32] __attribute__ ((aligned (16))) ={0,0,0,0}; static int lasi_scsi_clock(void * hpa, int defaultclock) { int clock, status; status = pdc_iodc_read(&pdc_result, hpa, 0, &iodc_data, 32 ); if (status == PDC_RET_OK) { clock = (int) pdc_result[16]; } else { printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status); clock = defaultclock; } printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock); return clock; } #endif static struct scsi_host_template zalon7xx_template = { .module = THIS_MODULE, .proc_name = "zalon7xx", .cmd_size = sizeof(struct ncr_cmd_priv), }; static int __init zalon_probe(struct parisc_device *dev) { struct gsc_irq gsc_irq; u32 zalon_vers; int error = -ENODEV; void __iomem *zalon = ioremap(dev->hpa.start, 4096); void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET; static int unit = 0; struct Scsi_Host *host; struct ncr_device device; __raw_writel(CMD_RESET, zalon + IO_MODULE_IO_COMMAND); while (!(__raw_readl(zalon + IO_MODULE_IO_STATUS) & IOSTATUS_RY)) cpu_relax(); __raw_writel(IOIIDATA_MINT5EN | IOIIDATA_PACKEN | IOIIDATA_PREFETCHEN, zalon + IO_MODULE_II_CDATA); /* XXX: Save the Zalon version for bug workarounds? */ zalon_vers = (__raw_readl(zalon + IO_MODULE_II_CDATA) >> 24) & 0x07; /* Setup the interrupts first. ** Later on request_irq() will register the handler. */ dev->irq = gsc_alloc_irq(&gsc_irq); printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__, zalon_vers, dev->irq); __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM); if (zalon_vers == 0) printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__); memset(&device, 0, sizeof(struct ncr_device)); /* The following three are needed before any other access. */ __raw_writeb(0x20, io_port + 0x38); /* DCNTL_REG, EA */ __raw_writeb(0x04, io_port + 0x1b); /* CTEST0_REG, EHP */ __raw_writeb(0x80, io_port + 0x22); /* CTEST4_REG, MUX */ /* Initialise ncr_device structure with items required by ncr_attach. 
*/ device.chip = zalon720_chip; device.host_id = 7; device.dev = &dev->dev; device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET; device.slot.base_v = io_port; device.slot.irq = dev->irq; device.differential = 2; host = ncr_attach(&zalon7xx_template, unit, &device); if (!host) return -ENODEV; if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", dev->irq); goto fail; } unit++; dev_set_drvdata(&dev->dev, host); error = scsi_add_host(host, &dev->dev); if (error) goto fail_free_irq; scsi_scan_host(host); return 0; fail_free_irq: free_irq(dev->irq, host); fail: ncr53c8xx_release(host); return error; } static const struct parisc_device_id zalon_tbl[] __initconst = { { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00089 }, { 0, } }; MODULE_DEVICE_TABLE(parisc, zalon_tbl); static void __exit zalon_remove(struct parisc_device *dev) { struct Scsi_Host *host = dev_get_drvdata(&dev->dev); scsi_remove_host(host); ncr53c8xx_release(host); free_irq(dev->irq, host); } static struct parisc_driver zalon_driver __refdata = { .name = "zalon", .id_table = zalon_tbl, .probe = zalon_probe, .remove = __exit_p(zalon_remove), }; static int __init zalon7xx_init(void) { int ret = ncr53c8xx_init(); if (!ret) ret = register_parisc_driver(&zalon_driver); if (ret) ncr53c8xx_exit(); return ret; } static void __exit zalon7xx_exit(void) { unregister_parisc_driver(&zalon_driver); ncr53c8xx_exit(); } module_init(zalon7xx_init); module_exit(zalon7xx_exit);
linux-master
drivers/scsi/zalon.c
/* 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux. Written By: Adam Radford <[email protected]> Modifications By: Tom Couch Copyright (C) 2004-2009 Applied Micro Circuits Corporation. Copyright (C) 2010 LSI Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NO WARRANTY THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. DISCLAIMER OF LIABILITY NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: [email protected] Note: This version of the driver does not contain a bundled firmware image. History ------- 2.26.02.000 - Driver cleanup for kernel submission. 2.26.02.001 - Replace schedule_timeout() calls with msleep(). 2.26.02.002 - Add support for PAE mode. Add lun support. Fix twa_remove() to free irq handler/unregister_chrdev() before shutting down card. Change to new 'change_queue_depth' api. Fix 'handled=1' ISR usage, remove bogus IRQ check. Remove un-needed eh_abort handler. Add support for embedded firmware error strings. 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 2.26.02.004 - Add support for 9550SX controllers. 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher. 2.26.02.006 - Fix 9550SX pchip reset timeout. Add big endian support. 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic(). 2.26.02.008 - Free irq handler in __twa_shutdown(). Serialize reset code. Add support for 9650SE controllers. 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails. 2.26.02.010 - Add support for 9690SA controllers. 2.26.02.011 - Increase max AENs drained to 256. Add MSI support and "use_msi" module parameter. Fix bug in twa_get_param() on 4GB+. Use pci_resource_len() for ioremap(). 2.26.02.012 - Add power management support. 2.26.02.013 - Fix bug in twa_load_sgl(). 2.26.02.014 - Force 60 second timeout default. 
*/ #include <linux/module.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <linux/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_cmnd.h> #include "3w-9xxx.h" /* Globals */ #define TW_DRIVER_VERSION "2.26.02.014" static DEFINE_MUTEX(twa_chrdev_mutex); static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; static unsigned int twa_device_extension_count; static int twa_major = -1; extern struct timezone sys_tz; /* Module parameters */ MODULE_AUTHOR ("LSI"); MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(TW_DRIVER_VERSION); static int use_msi = 0; module_param(use_msi, int, S_IRUGO); MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0"); /* Function prototypes */ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header); static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); static char *twa_aen_severity_lookup(unsigned char severity_code); static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int twa_chrdev_open(struct inode *inode, struct file *file); static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id); static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, u32 set_features, unsigned short current_fw_srl, unsigned short current_fw_arch_id, unsigned short current_fw_branch, unsigned short current_fw_build, unsigned short *fw_on_ctlr_srl, unsigned short *fw_on_ctlr_arch_id, unsigned short *fw_on_ctlr_branch, unsigned short *fw_on_ctlr_build, u32 *init_connect_result); static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length); static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds); static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds); static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); static int twa_reset_device_extension(TW_Device_Extension *tw_dev); static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, unsigned char *cdb, int use_sg, TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); /* Functions */ /* Show some statistics about the card */ static ssize_t twa_show_stats(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; unsigned long flags = 0; ssize_t len; spin_lock_irqsave(tw_dev->host->host_lock, flags); len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n" "Current commands posted: %4d\n" "Max 
commands posted: %4d\n" "Current pending commands: %4d\n" "Max pending commands: %4d\n" "Last sgl length: %4d\n" "Max sgl length: %4d\n" "Last sector count: %4d\n" "Max sector count: %4d\n" "SCSI Host Resets: %4d\n" "AEN's: %4d\n", TW_DRIVER_VERSION, tw_dev->posted_request_count, tw_dev->max_posted_request_count, tw_dev->pending_request_count, tw_dev->max_pending_request_count, tw_dev->sgl_entries, tw_dev->max_sgl_entries, tw_dev->sector_count, tw_dev->max_sector_count, tw_dev->num_resets, tw_dev->aen_count); spin_unlock_irqrestore(tw_dev->host->host_lock, flags); return len; } /* End twa_show_stats() */ /* Create sysfs 'stats' entry */ static struct device_attribute twa_host_stats_attr = { .attr = { .name = "stats", .mode = S_IRUGO, }, .show = twa_show_stats }; /* Host attributes initializer */ static struct attribute *twa_host_attrs[] = { &twa_host_stats_attr.attr, NULL, }; ATTRIBUTE_GROUPS(twa_host); /* File operations struct for character device */ static const struct file_operations twa_fops = { .owner = THIS_MODULE, .unlocked_ioctl = twa_chrdev_ioctl, .open = twa_chrdev_open, .release = NULL, .llseek = noop_llseek, }; /* * The controllers use an inline buffer instead of a mapped SGL for small, * single entry buffers. Note that we treat a zero-length transfer like * a mapped SGL. */ static bool twa_command_mapped(struct scsi_cmnd *cmd) { return scsi_sg_count(cmd) != 1 || scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; } /* This function will complete an aen request from the isr */ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) { TW_Command_Full *full_command_packet; TW_Command *command_packet; TW_Command_Apache_Header *header; unsigned short aen; int retval = 1; header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; tw_dev->posted_request_count--; aen = le16_to_cpu(header->status_block.error); full_command_packet = tw_dev->command_packet_virt[request_id]; command_packet = &full_command_packet->command.oldcommand; /* First check for internal completion of set param for time sync */ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { /* Keep reading the queue in case there are more aen's */ if (twa_aen_read_queue(tw_dev, request_id)) goto out2; else { retval = 0; goto out; } } switch (aen) { case TW_AEN_QUEUE_EMPTY: /* Quit reading the queue if this is the last one */ break; case TW_AEN_SYNC_TIME_WITH_HOST: twa_aen_sync_time(tw_dev, request_id); retval = 0; goto out; default: twa_aen_queue_event(tw_dev, header); /* If there are more aen's, keep reading the queue */ if (twa_aen_read_queue(tw_dev, request_id)) goto out2; else { retval = 0; goto out; } } retval = 0; out2: tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); out: return retval; } /* End twa_aen_complete() */ /* This function will drain aen queue */ static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) { int request_id = 0; unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry sglist[1]; int finished = 0, count = 0; TW_Command_Full *full_command_packet; TW_Command_Apache_Header *header; unsigned short aen; int first_reset = 0, queue = 0, retval = 1; if (no_check_reset) first_reset = 0; else first_reset = 1; full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); /* Initialize cdb */ memset(&cdb, 0, TW_MAX_CDB_LEN); cdb[0] = REQUEST_SENSE; /* opcode */ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ 
/* Initialize sglist */ memset(&sglist, 0, sizeof(TW_SG_Entry)); sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE); sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain"); goto out; } /* Mark internal command */ tw_dev->srb[request_id] = NULL; do { /* Send command to the board */ if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense"); goto out; } /* Now poll for completion */ if (twa_poll_response(tw_dev, request_id, 30)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue"); tw_dev->posted_request_count--; goto out; } tw_dev->posted_request_count--; header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; aen = le16_to_cpu(header->status_block.error); queue = 0; count++; switch (aen) { case TW_AEN_QUEUE_EMPTY: if (first_reset != 1) goto out; else finished = 1; break; case TW_AEN_SOFT_RESET: if (first_reset == 0) first_reset = 1; else queue = 1; break; case TW_AEN_SYNC_TIME_WITH_HOST: break; default: queue = 1; } /* Now queue an event info */ if (queue) twa_aen_queue_event(tw_dev, header); } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); if (count == TW_MAX_AEN_DRAIN) goto out; retval = 0; out: tw_dev->state[request_id] = TW_S_INITIAL; return retval; } /* End twa_aen_drain_queue() */ /* This function will queue an event */ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) { u32 local_time; TW_Event *event; unsigned short aen; char host[16]; char *error_str; tw_dev->aen_count++; /* Fill out event info */ event = tw_dev->event_queue[tw_dev->error_index]; /* Check for clobber */ host[0] = '\0'; if (tw_dev->host) { sprintf(host, " scsi%d:", tw_dev->host->host_no); if (event->retrieved == TW_AEN_NOT_RETRIEVED) tw_dev->aen_clobber = 1; } aen = le16_to_cpu(header->status_block.error); memset(event, 0, sizeof(TW_Event)); event->severity = TW_SEV_OUT(header->status_block.severity__reserved); /* event->time_stamp_sec overflows in y2106 */ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60)); event->time_stamp_sec = local_time; event->aen_code = aen; event->retrieved = TW_AEN_NOT_RETRIEVED; event->sequence_id = tw_dev->error_sequence_id; tw_dev->error_sequence_id++; /* Check for embedded error string */ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]); header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; event->parameter_len = strlen(header->err_specific_desc); memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str)))); if (event->severity != TW_AEN_SEVERITY_DEBUG) printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", host, twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str[0] == '\0' ? 
twa_string_lookup(twa_aen_table, aen) : error_str, header->err_specific_desc); else tw_dev->aen_count--; if ((tw_dev->error_index + 1) == TW_Q_LENGTH) tw_dev->event_queue_wrapped = 1; tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; } /* End twa_aen_queue_event() */ /* This function will read the aen queue from the isr */ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) { unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry sglist[1]; TW_Command_Full *full_command_packet; int retval = 1; full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); /* Initialize cdb */ memset(&cdb, 0, TW_MAX_CDB_LEN); cdb[0] = REQUEST_SENSE; /* opcode */ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ /* Initialize sglist */ memset(&sglist, 0, sizeof(TW_SG_Entry)); sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE); sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); /* Mark internal command */ tw_dev->srb[request_id] = NULL; /* Now post the command packet */ if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue"); goto out; } retval = 0; out: return retval; } /* End twa_aen_read_queue() */ /* This function will look up an AEN severity string */ static char *twa_aen_severity_lookup(unsigned char severity_code) { char *retval = NULL; if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) goto out; retval = twa_aen_severity_table[severity_code]; out: return retval; } /* End twa_aen_severity_lookup() */ /* This function will sync firmware time with the host time */ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) { u32 schedulertime; TW_Command_Full *full_command_packet; TW_Command *command_packet; TW_Param_Apache *param; time64_t local_time; /* Fill out the command packet */ full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); command_packet = &full_command_packet->command.oldcommand; command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); command_packet->request_id = request_id; command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE); command_packet->size = TW_COMMAND_SIZE; command_packet->byte6_offset.parameter_count = cpu_to_le16(1); /* Setup the param */ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; memset(param, 0, TW_SECTOR_SIZE); param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */ param->parameter_size_bytes = cpu_to_le16(4); /* Convert system time in UTC to local time seconds since last Sunday 12:00AM */ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60)); div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime); memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32)); /* Mark internal command */ tw_dev->srb[request_id] = NULL; /* Now post the command */ twa_post_command_packet(tw_dev, request_id, 1); } /* End twa_aen_sync_time() */ /* This function will allocate memory and check if it is correctly aligned */ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) { int i; dma_addr_t dma_handle; unsigned long 
*cpu_addr; int retval = 1; cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); if (!cpu_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); goto out; } if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory"); dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH, cpu_addr, dma_handle); goto out; } memset(cpu_addr, 0, size*TW_Q_LENGTH); for (i = 0; i < TW_Q_LENGTH; i++) { switch(which) { case 0: tw_dev->command_packet_phys[i] = dma_handle+(i*size); tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); break; case 1: tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); break; } } retval = 0; out: return retval; } /* End twa_allocate_memory() */ /* This function will check the status register for unexpected bits */ static int twa_check_bits(u32 status_reg_value) { int retval = 1; if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) goto out; if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0) goto out; retval = 0; out: return retval; } /* End twa_check_bits() */ /* This function will check the srl and decide if we are compatible */ static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed) { int retval = 1; unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; u32 init_connect_result = 0; if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL, TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH, TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl, &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, &fw_on_ctlr_build, &init_connect_result)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL"); goto out; } tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl; tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch; tw_dev->tw_compat_info.working_build = fw_on_ctlr_build; /* Try base mode compatibility */ if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, TW_EXTENDED_INIT_CONNECT, TW_BASE_FW_SRL, TW_9000_ARCH_ID, TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD, &fw_on_ctlr_srl, &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, &fw_on_ctlr_build, &init_connect_result)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL"); goto out; } if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware"); } else { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver"); } goto out; } tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL; tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH; tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD; } /* Load rest of compatibility struct */ strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, sizeof(tw_dev->tw_compat_info.driver_version)); tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD; tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL; tw_dev->tw_compat_info.driver_branch_low 
= TW_BASE_FW_BRANCH; tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD; tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl; tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch; tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build; retval = 0; out: return retval; } /* End twa_check_srl() */ /* This function handles ioctl for the character device */ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); long timeout; unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; dma_addr_t dma_handle; int request_id = 0; unsigned int sequence_id = 0; unsigned char event_index, start_index; TW_Ioctl_Driver_Command driver_command; TW_Ioctl_Buf_Apache *tw_ioctl; TW_Lock *tw_lock; TW_Command_Full *full_command_packet; TW_Compatibility_Info *tw_compat_info; TW_Event *event; ktime_t current_time; TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)]; int retval = TW_IOCTL_ERROR_OS_EFAULT; void __user *argp = (void __user *)arg; mutex_lock(&twa_chrdev_mutex); /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { retval = TW_IOCTL_ERROR_OS_EINTR; goto out; } /* First copy down the driver command */ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command))) goto out2; /* Check data buffer size */ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) { retval = TW_IOCTL_ERROR_OS_EINVAL; goto out2; } /* Hardware can only do multiple of 512 byte transfers */ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; /* Now allocate ioctl buf memory */ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted, &dma_handle, GFP_KERNEL); if (!cpu_addr) { retval = TW_IOCTL_ERROR_OS_ENOMEM; goto out2; } tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; /* Now copy down the entire ioctl */ if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length)) goto out3; /* See which ioctl we are doing */ switch (cmd) { case TW_IOCTL_FIRMWARE_PASS_THROUGH: spin_lock_irqsave(tw_dev->host->host_lock, flags); twa_get_request_id(tw_dev, &request_id); /* Flag internal command */ tw_dev->srb[request_id] = NULL; /* Flag chrdev ioctl */ tw_dev->chrdev_request_id = request_id; full_command_packet = &tw_ioctl->firmware_command; /* Load request id and sglist for both command types */ twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); /* Now post the command packet to the controller */ twa_post_command_packet(tw_dev, request_id, 1); spin_unlock_irqrestore(tw_dev->host->host_lock, flags); timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; /* Now wait for command to complete */ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); /* We timed out, and didn't get an interrupt */ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { /* Now we need to reset the board */ printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", tw_dev->host->host_no, TW_DRIVER, 0x37, cmd); retval = TW_IOCTL_ERROR_OS_EIO; twa_reset_device_extension(tw_dev); goto out3; } /* Now copy in the command packet response */ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); 
/* Now complete the io */ spin_lock_irqsave(tw_dev->host->host_lock, flags); tw_dev->posted_request_count--; tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); spin_unlock_irqrestore(tw_dev->host->host_lock, flags); break; case TW_IOCTL_GET_COMPATIBILITY_INFO: tw_ioctl->driver_command.status = 0; /* Copy compatibility struct into ioctl data buffer */ tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); break; case TW_IOCTL_GET_LAST_EVENT: if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } else tw_ioctl->driver_command.status = 0; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } tw_ioctl->driver_command.status = 0; } event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH; memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_FIRST_EVENT: if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } else tw_ioctl->driver_command.status = 0; event_index = tw_dev->error_index; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } tw_ioctl->driver_command.status = 0; event_index = 0; } memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_NEXT_EVENT: event = (TW_Event *)tw_ioctl->data_buffer; sequence_id = event->sequence_id; tw_ioctl->driver_command.status = 0; if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } start_index = tw_dev->error_index; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } start_index = 0; } event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH; if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) { if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) tw_dev->aen_clobber = 1; tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_PREVIOUS_EVENT: event = (TW_Event *)tw_ioctl->data_buffer; sequence_id = event->sequence_id; tw_ioctl->driver_command.status = 0; if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } start_index = tw_dev->error_index; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } start_index = 0; } event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH; if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) { if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) tw_dev->aen_clobber = 1; tw_ioctl->driver_command.status = 
TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_LOCK: tw_lock = (TW_Lock *)tw_ioctl->data_buffer; current_time = ktime_get(); if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || ktime_after(current_time, tw_dev->ioctl_time)) { tw_dev->ioctl_sem_lock = 1; tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec); tw_ioctl->driver_command.status = 0; tw_lock->time_remaining_msec = tw_lock->timeout_msec; } else { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED; tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time); } break; case TW_IOCTL_RELEASE_LOCK: if (tw_dev->ioctl_sem_lock == 1) { tw_dev->ioctl_sem_lock = 0; tw_ioctl->driver_command.status = 0; } else { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED; } break; default: retval = TW_IOCTL_ERROR_OS_ENOTTY; goto out3; } /* Now copy the entire response to userspace */ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0) retval = 0; out3: /* Now free ioctl buf memory */ dma_free_coherent(&tw_dev->tw_pci_dev->dev, sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted, cpu_addr, dma_handle); out2: mutex_unlock(&tw_dev->ioctl_lock); out: mutex_unlock(&twa_chrdev_mutex); return retval; } /* End twa_chrdev_ioctl() */ /* This function handles open for the character device */ /* NOTE that this function will race with remove. */ static int twa_chrdev_open(struct inode *inode, struct file *file) { unsigned int minor_number; int retval = TW_IOCTL_ERROR_OS_ENODEV; if (!capable(CAP_SYS_ADMIN)) { retval = -EACCES; goto out; } minor_number = iminor(inode); if (minor_number >= twa_device_extension_count) goto out; retval = 0; out: return retval; } /* End twa_chrdev_open() */ /* This function will print readable messages from status register errors */ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value) { int retval = 1; /* Check for various error conditions and handle them appropriately */ if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing"); writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); } if (status_reg_value & TW_STATUS_PCI_ABORT) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing"); writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev)); pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT); } if (status_reg_value & TW_STATUS_QUEUE_ERROR) { if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) && (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) || (!test_bit(TW_IN_RESET, &tw_dev->flags))) TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing"); writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); } if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { if (tw_dev->reset_print == 0) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); tw_dev->reset_print = 1; } goto out; } retval = 0; out: return retval; } /* End twa_decode_bits() */ /* This function will empty the response queue */ static int twa_empty_response_queue(TW_Device_Extension *tw_dev) { u32 status_reg_value; int count = 0, retval = 1; status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 
0) && (count < TW_MAX_RESPONSE_DRAIN)) { readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); count++; } if (count == TW_MAX_RESPONSE_DRAIN) goto out; retval = 0; out: return retval; } /* End twa_empty_response_queue() */ /* This function will clear the pchip/response queue on 9550SX */ static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev) { u32 response_que_value = 0; unsigned long before; int retval = 1; if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) { before = jiffies; while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) { response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev)); msleep(1); if (time_after(jiffies, before + HZ * 30)) goto out; } /* P-chip settle time */ msleep(500); retval = 0; } else retval = 0; out: return retval; } /* End twa_empty_response_queue_large() */ /* This function passes sense keys from firmware to scsi layer */ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) { TW_Command_Full *full_command_packet; unsigned short error; int retval = 1; char *error_str; full_command_packet = tw_dev->command_packet_virt[request_id]; /* Check for embedded error string */ error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]); /* Don't print error for Logical unit not supported during rollcall */ error = le16_to_cpu(full_command_packet->header.status_block.error); if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) { if (print_host) printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", tw_dev->host->host_no, TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error, error_str[0] ? error_str : twa_string_lookup(twa_error_table, error), full_command_packet->header.err_specific_desc); else printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n", TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error, error_str[0] ? 
error_str : twa_string_lookup(twa_error_table, error), full_command_packet->header.err_specific_desc); } if (copy_sense) { memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH); tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); retval = TW_ISR_DONT_RESULT; goto out; } retval = 0; out: return retval; } /* End twa_fill_sense() */ /* This function will free up device extension resources */ static void twa_free_device_extension(TW_Device_Extension *tw_dev) { if (tw_dev->command_packet_virt[0]) dma_free_coherent(&tw_dev->tw_pci_dev->dev, sizeof(TW_Command_Full) * TW_Q_LENGTH, tw_dev->command_packet_virt[0], tw_dev->command_packet_phys[0]); if (tw_dev->generic_buffer_virt[0]) dma_free_coherent(&tw_dev->tw_pci_dev->dev, TW_SECTOR_SIZE * TW_Q_LENGTH, tw_dev->generic_buffer_virt[0], tw_dev->generic_buffer_phys[0]); kfree(tw_dev->event_queue[0]); } /* End twa_free_device_extension() */ /* This function will free a request id */ static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id) { tw_dev->free_queue[tw_dev->free_tail] = request_id; tw_dev->state[request_id] = TW_S_FINISHED; tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; } /* End twa_free_request_id() */ /* This function will get parameter table entries from the firmware */ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) { TW_Command_Full *full_command_packet; TW_Command *command_packet; TW_Param_Apache *param; void *retval = NULL; /* Setup the command packet */ full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); command_packet = &full_command_packet->command.oldcommand; command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); command_packet->size = TW_COMMAND_SIZE; command_packet->request_id = request_id; command_packet->byte6_offset.block_count = cpu_to_le16(1); /* Now setup the param */ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; memset(param, 0, TW_SECTOR_SIZE); param->table_id = cpu_to_le16(table_id | 0x8000); param->parameter_id = cpu_to_le16(parameter_id); param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE); /* Post the command packet to the board */ twa_post_command_packet(tw_dev, request_id, 1); /* Poll for completion */ if (twa_poll_response(tw_dev, request_id, 30)) TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param") else retval = (void *)&(param->data[0]); tw_dev->posted_request_count--; tw_dev->state[request_id] = TW_S_INITIAL; return retval; } /* End twa_get_param() */ /* This function will assign an available request id */ static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id) { *request_id = tw_dev->free_queue[tw_dev->free_head]; tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; tw_dev->state[*request_id] = TW_S_STARTED; } /* End twa_get_request_id() */ /* This function will send an initconnection command to controller */ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, u32 set_features, unsigned short current_fw_srl, unsigned short current_fw_arch_id, unsigned short current_fw_branch, unsigned short current_fw_build, unsigned short 
*fw_on_ctlr_srl, unsigned short *fw_on_ctlr_arch_id, unsigned short *fw_on_ctlr_branch, unsigned short *fw_on_ctlr_build, u32 *init_connect_result) { TW_Command_Full *full_command_packet; TW_Initconnect *tw_initconnect; int request_id = 0, retval = 1; /* Initialize InitConnection command packet */ full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); full_command_packet->header.header_desc.size_header = 128; tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); tw_initconnect->request_id = request_id; tw_initconnect->message_credits = cpu_to_le16(message_credits); /* Turn on 64-bit sgl support if we need to */ set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0; tw_initconnect->features = cpu_to_le32(set_features); if (set_features & TW_EXTENDED_INIT_CONNECT) { tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl); tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id); tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch); tw_initconnect->fw_build = cpu_to_le16(current_fw_build); } else tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; /* Send command packet to the board */ twa_post_command_packet(tw_dev, request_id, 1); /* Poll for completion */ if (twa_poll_response(tw_dev, request_id, 30)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection"); } else { if (set_features & TW_EXTENDED_INIT_CONNECT) { *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl); *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id); *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch); *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build); *init_connect_result = le32_to_cpu(tw_initconnect->result); } retval = 0; } tw_dev->posted_request_count--; tw_dev->state[request_id] = TW_S_INITIAL; return retval; } /* End twa_initconnection() */ /* This function will initialize the fields of a device extension */ static int twa_initialize_device_extension(TW_Device_Extension *tw_dev) { int i, retval = 1; /* Initialize command packet buffers */ if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed"); goto out; } /* Initialize generic buffer */ if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed"); goto out; } /* Allocate event info space */ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); if (!tw_dev->event_queue[0]) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed"); goto out; } for (i = 0; i < TW_Q_LENGTH; i++) { tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); tw_dev->free_queue[i] = i; tw_dev->state[i] = TW_S_INITIAL; } tw_dev->pending_head = TW_Q_START; tw_dev->pending_tail = TW_Q_START; tw_dev->free_head = TW_Q_START; tw_dev->free_tail = TW_Q_START; tw_dev->error_sequence_id = 1; tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; mutex_init(&tw_dev->ioctl_lock); init_waitqueue_head(&tw_dev->ioctl_wqueue); retval = 0; out: return retval; } /* End twa_initialize_device_extension() */ /* This function is the interrupt service routine */ static irqreturn_t twa_interrupt(int irq, void *dev_instance) { int request_id, error = 0; u32 status_reg_value; TW_Response_Queue 
response_que; TW_Command_Full *full_command_packet; TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; int handled = 0; /* Get the per adapter lock */ spin_lock(tw_dev->host->host_lock); /* Read the registers */ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); /* Check if this is our interrupt, otherwise bail */ if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT)) goto twa_interrupt_bail; handled = 1; /* If we are resetting, bail */ if (test_bit(TW_IN_RESET, &tw_dev->flags)) goto twa_interrupt_bail; /* Check controller for errors */ if (twa_check_bits(status_reg_value)) { if (twa_decode_bits(tw_dev, status_reg_value)) { TW_CLEAR_ALL_INTERRUPTS(tw_dev); goto twa_interrupt_bail; } } /* Handle host interrupt */ if (status_reg_value & TW_STATUS_HOST_INTERRUPT) TW_CLEAR_HOST_INTERRUPT(tw_dev); /* Handle attention interrupt */ if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) { TW_CLEAR_ATTENTION_INTERRUPT(tw_dev); if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { twa_get_request_id(tw_dev, &request_id); error = twa_aen_read_queue(tw_dev, request_id); if (error) { tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); } } } /* Handle command interrupt */ if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) { TW_MASK_COMMAND_INTERRUPT(tw_dev); /* Drain as many pending commands as we can */ while (tw_dev->pending_request_count > 0) { request_id = tw_dev->pending_queue[tw_dev->pending_head]; if (tw_dev->state[request_id] != TW_S_PENDING) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending"); TW_CLEAR_ALL_INTERRUPTS(tw_dev); goto twa_interrupt_bail; } if (twa_post_command_packet(tw_dev, request_id, 1)==0) { tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH; tw_dev->pending_request_count--; } else { /* If we get here, we will continue re-posting on the next command interrupt */ break; } } } /* Handle response interrupt */ if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) { /* Drain the response queue from the board */ while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) { /* Complete the response */ response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); request_id = TW_RESID_OUT(response_que.response_id); full_command_packet = tw_dev->command_packet_virt[request_id]; error = 0; /* Check for command packet errors */ if (full_command_packet->command.newcommand.status != 0) { if (tw_dev->srb[request_id] != NULL) { error = twa_fill_sense(tw_dev, request_id, 1, 1); } else { /* Skip ioctl error prints */ if (request_id != tw_dev->chrdev_request_id) { error = twa_fill_sense(tw_dev, request_id, 0, 1); } } } /* Check for correct state */ if (tw_dev->state[request_id] != TW_S_POSTED) { if (tw_dev->srb[request_id] != NULL) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted"); TW_CLEAR_ALL_INTERRUPTS(tw_dev); goto twa_interrupt_bail; } } /* Check for internal command completion */ if (tw_dev->srb[request_id] == NULL) { if (request_id != tw_dev->chrdev_request_id) { if (twa_aen_complete(tw_dev, request_id)) TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt"); } else { tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; wake_up(&tw_dev->ioctl_wqueue); } } else { struct scsi_cmnd *cmd; cmd = tw_dev->srb[request_id]; twa_scsiop_execute_scsi_complete(tw_dev, request_id); /* If no error command was a success */ if (error == 0) { cmd->result = (DID_OK << 16); } /* If 
error, command failed */ if (error == 1) { /* Ask for a host reset */ cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; } /* Report residual bytes for single sgl */ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) { u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length); if (length < scsi_bufflen(cmd)) scsi_set_resid(cmd, scsi_bufflen(cmd) - length); } /* Now complete the io */ if (twa_command_mapped(cmd)) scsi_dma_unmap(cmd); scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; } /* Check for valid status after each drain */ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) { if (twa_decode_bits(tw_dev, status_reg_value)) { TW_CLEAR_ALL_INTERRUPTS(tw_dev); goto twa_interrupt_bail; } } } } twa_interrupt_bail: spin_unlock(tw_dev->host->host_lock); return IRQ_RETVAL(handled); } /* End twa_interrupt() */ /* This function will load the request id and various sgls for ioctls */ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) { TW_Command *oldcommand; TW_Command_Apache *newcommand; TW_SG_Entry *sgl; unsigned int pae = 0; if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4)) pae = 1; if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { newcommand = &full_command_packet->command.newcommand; newcommand->request_id__lunl = TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id); if (length) { newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); newcommand->sg_list[0].length = cpu_to_le32(length); } newcommand->sgl_entries__lunh = TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 
1 : 0); } else { oldcommand = &full_command_packet->command.oldcommand; oldcommand->request_id = request_id; if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { /* Load the sg list */ if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA) sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae); else sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset)); sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); sgl->length = cpu_to_le32(length); oldcommand->size += pae; } } } /* End twa_load_sgl() */ /* This function will poll for a response interrupt of a request */ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) { int retval = 1, found = 0, response_request_id; TW_Response_Queue response_queue; TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id]; if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) { response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); response_request_id = TW_RESID_OUT(response_queue.response_id); if (request_id != response_request_id) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response"); goto out; } if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { if (full_command_packet->command.newcommand.status != 0) { /* bad response */ twa_fill_sense(tw_dev, request_id, 0, 0); goto out; } found = 1; } else { if (full_command_packet->command.oldcommand.status != 0) { /* bad response */ twa_fill_sense(tw_dev, request_id, 0, 0); goto out; } found = 1; } } if (found) retval = 0; out: return retval; } /* End twa_poll_response() */ /* This function will poll the status register for a flag */ static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds) { u32 status_reg_value; unsigned long before; int retval = 1; status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); before = jiffies; if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); while ((status_reg_value & flag) != flag) { status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); if (time_after(jiffies, before + HZ * seconds)) goto out; msleep(50); } retval = 0; out: return retval; } /* End twa_poll_status() */ /* This function will poll the status register for disappearance of a flag */ static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds) { u32 status_reg_value; unsigned long before; int retval = 1; status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); before = jiffies; if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); while ((status_reg_value & flag) != 0) { status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); if (time_after(jiffies, before + HZ * seconds)) goto out; msleep(50); } retval = 0; out: return retval; } /* End twa_poll_status_gone() */ /* This function will attempt to post a command packet to the board */ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal) { u32 status_reg_value; dma_addr_t command_que_value; int retval = 1; command_que_value = tw_dev->command_packet_phys[request_id]; /* For 9650SE write low 4 bytes first */ if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) || (tw_dev->tw_pci_dev->device == 
PCI_DEVICE_ID_3WARE_9690SA)) { command_que_value += TW_COMMAND_OFFSET; writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev)); } status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) { /* Only pend internal driver commands */ if (!internal) { retval = SCSI_MLQUEUE_HOST_BUSY; goto out; } /* Couldn't post the command packet, so we do it later */ if (tw_dev->state[request_id] != TW_S_PENDING) { tw_dev->state[request_id] = TW_S_PENDING; tw_dev->pending_request_count++; if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) { tw_dev->max_pending_request_count = tw_dev->pending_request_count; } tw_dev->pending_queue[tw_dev->pending_tail] = request_id; tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH; } TW_UNMASK_COMMAND_INTERRUPT(tw_dev); goto out; } else { if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) || (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) { /* Now write upper 4 bytes */ writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4); } else { if (sizeof(dma_addr_t) > 4) { command_que_value += TW_COMMAND_OFFSET; writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4); } else { writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); } } tw_dev->state[request_id] = TW_S_POSTED; tw_dev->posted_request_count++; if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) { tw_dev->max_posted_request_count = tw_dev->posted_request_count; } } retval = 0; out: return retval; } /* End twa_post_command_packet() */ /* This function will reset a device extension */ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) { int i = 0; int retval = 1; unsigned long flags = 0; set_bit(TW_IN_RESET, &tw_dev->flags); TW_DISABLE_INTERRUPTS(tw_dev); TW_MASK_COMMAND_INTERRUPT(tw_dev); spin_lock_irqsave(tw_dev->host->host_lock, flags); /* Abort all requests that are in progress */ for (i = 0; i < TW_Q_LENGTH; i++) { if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { if (tw_dev->srb[i]) { struct scsi_cmnd *cmd = tw_dev->srb[i]; cmd->result = (DID_RESET << 16); if (twa_command_mapped(cmd)) scsi_dma_unmap(cmd); scsi_done(cmd); } } } /* Reset queues and counts */ for (i = 0; i < TW_Q_LENGTH; i++) { tw_dev->free_queue[i] = i; tw_dev->state[i] = TW_S_INITIAL; } tw_dev->free_head = TW_Q_START; tw_dev->free_tail = TW_Q_START; tw_dev->posted_request_count = 0; tw_dev->pending_request_count = 0; tw_dev->pending_head = TW_Q_START; tw_dev->pending_tail = TW_Q_START; tw_dev->reset_print = 0; spin_unlock_irqrestore(tw_dev->host->host_lock, flags); if (twa_reset_sequence(tw_dev, 1)) goto out; TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); clear_bit(TW_IN_RESET, &tw_dev->flags); tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; retval = 0; out: return retval; } /* End twa_reset_device_extension() */ /* This function will reset a controller */ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) { int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; while (tries < TW_MAX_RESET_TRIES) { if (do_soft_reset) { TW_SOFT_RESET(tw_dev); /* Clear pchip/response queue on 
9550SX */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		/* Make sure controller is in a good state */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY |
				    (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twa_reset_sequence() */

/* This function returns unit geometry in cylinders/heads/sectors */
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
			      sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
} /* End twa_scsi_biosparam() */

/* This is the new scsi eh reset function */
static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	/* Make sure we are not issuing an ioctl or resetting from ioctl */
	mutex_lock(&tw_dev->ioctl_lock);

	/* Now reset the card and some of the device extension data */
	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
} /* End twa_scsi_eh_reset() */

/* This is the main scsi queue function to handle scsi opcodes */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) &&
	    (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		if
(twa_command_mapped(SCpnt)) scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); break; case 1: SCpnt->result = (DID_ERROR << 16); if (twa_command_mapped(SCpnt)) scsi_dma_unmap(SCpnt); done(SCpnt); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); retval = 0; } out: return retval; } /* End twa_scsi_queue() */ static DEF_SCSI_QCMD(twa_scsi_queue) /* This function hands scsi cdb's to the firmware */ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, unsigned char *cdb, int use_sg, TW_SG_Entry *sglistarg) { TW_Command_Full *full_command_packet; TW_Command_Apache *command_packet; u32 num_sectors = 0x0; int i, sg_count; struct scsi_cmnd *srb = NULL; struct scatterlist *sg; int retval = 1; if (tw_dev->srb[request_id]) srb = tw_dev->srb[request_id]; /* Initialize command packet */ full_command_packet = tw_dev->command_packet_virt[request_id]; full_command_packet->header.header_desc.size_header = 128; full_command_packet->header.status_block.error = 0; full_command_packet->header.status_block.severity__reserved = 0; command_packet = &full_command_packet->command.newcommand; command_packet->status = 0; command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); /* We forced 16 byte cdb use earlier */ if (!cdb) memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); else memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); if (srb) { command_packet->unit = srb->device->id; command_packet->request_id__lunl = TW_REQ_LUN_IN(srb->device->lun, request_id); } else { command_packet->request_id__lunl = TW_REQ_LUN_IN(0, request_id); command_packet->unit = 0; } command_packet->sgl_offset = 16; if (!sglistarg) { /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { if (!twa_command_mapped(srb)) { if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) scsi_sg_copy_to_buffer(srb, tw_dev->generic_buffer_virt[request_id], TW_SECTOR_SIZE); command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); } else { sg_count = scsi_dma_map(srb); if (sg_count < 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi"); goto out; } } } command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])); } } else { /* Internal cdb post */ for (i = 0; i < use_sg; i++) { command_packet->sg_list[i].address = sglistarg[i].address; command_packet->sg_list[i].length = sglistarg[i].length; if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post"); goto out; } } command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg); } if (srb) { if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) num_sectors = (u32)srb->cmnd[4]; if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8); } /* Update sector statistic */ tw_dev->sector_count = num_sectors; if (tw_dev->sector_count > tw_dev->max_sector_count) tw_dev->max_sector_count = tw_dev->sector_count; /* Update SG statistics */ if (srb) 
{ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]); if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) tw_dev->max_sgl_entries = tw_dev->sgl_entries; } /* Now post the command to the board */ if (srb) { retval = twa_post_command_packet(tw_dev, request_id, 0); } else { twa_post_command_packet(tw_dev, request_id, 1); retval = 0; } out: return retval; } /* End twa_scsiop_execute_scsi() */ /* This function completes an execute scsi operation */ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; if (!twa_command_mapped(cmd) && (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { if (scsi_sg_count(cmd) == 1) { void *buf = tw_dev->generic_buffer_virt[request_id]; scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE); } } } /* End twa_scsiop_execute_scsi_complete() */ /* This function tells the controller to shut down */ static void __twa_shutdown(TW_Device_Extension *tw_dev) { /* Disable interrupts */ TW_DISABLE_INTERRUPTS(tw_dev); /* Free up the IRQ */ free_irq(tw_dev->tw_pci_dev->irq, tw_dev); printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no); /* Tell the card we are shutting down */ if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed"); } else { printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n"); } /* Clear all interrupts just before exit */ TW_CLEAR_ALL_INTERRUPTS(tw_dev); } /* End __twa_shutdown() */ /* Wrapper for __twa_shutdown */ static void twa_shutdown(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; __twa_shutdown(tw_dev); } /* End twa_shutdown() */ /* This function will look up a string */ static char *twa_string_lookup(twa_message_type *table, unsigned int code) { int index; for (index = 0; ((code != table[index].code) && (table[index].text != (char *)0)); index++); return(table[index].text); } /* End twa_string_lookup() */ /* This function gets called when a disk is coming on-line */ static int twa_slave_configure(struct scsi_device *sdev) { /* Force 60 second timeout */ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); return 0; } /* End twa_slave_configure() */ static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "3ware 9000 Storage Controller", .queuecommand = twa_scsi_queue, .eh_host_reset_handler = twa_scsi_eh_reset, .bios_param = twa_scsi_biosparam, .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, .slave_configure = twa_slave_configure, .this_id = -1, .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, .max_sectors = TW_MAX_SECTORS, .cmd_per_lun = TW_MAX_CMDS_PER_LUN, .shost_groups = twa_host_groups, .emulated = 1, .no_write_same = 1, }; /* This function will probe and initialize a card */ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct Scsi_Host *host = NULL; TW_Device_Extension *tw_dev; unsigned long mem_addr, mem_len; int retval; retval = pci_enable_device(pdev); if (retval) { TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); return -ENODEV; } pci_set_master(pdev); pci_try_set_mwi(pdev); retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (retval) retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (retval) { TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); retval = 
-ENODEV; goto out_disable_device; } host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); if (!host) { TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension"); retval = -ENOMEM; goto out_disable_device; } tw_dev = (TW_Device_Extension *)host->hostdata; /* Save values to device extension */ tw_dev->host = host; tw_dev->tw_pci_dev = pdev; if (twa_initialize_device_extension(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); retval = -ENOMEM; goto out_free_device_extension; } /* Request IO regions */ retval = pci_request_regions(pdev, "3w-9xxx"); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region"); goto out_free_device_extension; } if (pdev->device == PCI_DEVICE_ID_3WARE_9000) { mem_addr = pci_resource_start(pdev, 1); mem_len = pci_resource_len(pdev, 1); } else { mem_addr = pci_resource_start(pdev, 2); mem_len = pci_resource_len(pdev, 2); } /* Save base address */ tw_dev->base_addr = ioremap(mem_addr, mem_len); if (!tw_dev->base_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); retval = -ENOMEM; goto out_release_mem_region; } /* Disable interrupts on the card */ TW_DISABLE_INTERRUPTS(tw_dev); /* Initialize the card */ if (twa_reset_sequence(tw_dev, 0)) { retval = -ENOMEM; goto out_iounmap; } /* Set host specific parameters */ if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || (pdev->device == PCI_DEVICE_ID_3WARE_9690SA)) host->max_id = TW_MAX_UNITS_9650SE; else host->max_id = TW_MAX_UNITS; host->max_cmd_len = TW_MAX_CDB_LEN; /* Channels aren't supported by adapter */ host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl); host->max_channel = 0; /* Register the card with the kernel SCSI layer */ retval = scsi_add_host(host, &pdev->dev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed"); goto out_iounmap; } pci_set_drvdata(pdev, host); printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n", host->host_no, mem_addr, pdev->irq); printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n", host->host_no, (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE, TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE, TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE, TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH))); /* Try to enable MSI */ if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) && !pci_enable_msi(pdev)) set_bit(TW_USING_MSI, &tw_dev->flags); /* Now setup the interrupt handler */ retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ"); goto out_remove_host; } twa_device_extension_list[twa_device_extension_count] = tw_dev; twa_device_extension_count++; /* Re-enable interrupts on the card */ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); /* Finally, scan the host */ scsi_scan_host(host); if (twa_major == -1) { if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0) TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device"); } return 0; out_remove_host: if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_disable_msi(pdev); scsi_remove_host(host); out_iounmap: iounmap(tw_dev->base_addr); out_release_mem_region: pci_release_regions(pdev); out_free_device_extension: twa_free_device_extension(tw_dev); scsi_host_put(host); out_disable_device: 
pci_disable_device(pdev); return retval; } /* End twa_probe() */ /* This function is called to remove a device */ static void twa_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; scsi_remove_host(tw_dev->host); /* Unregister character device */ if (twa_major >= 0) { unregister_chrdev(twa_major, "twa"); twa_major = -1; } /* Shutdown the card */ __twa_shutdown(tw_dev); /* Disable MSI if enabled */ if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_disable_msi(pdev); /* Free IO remapping */ iounmap(tw_dev->base_addr); /* Free up the mem region */ pci_release_regions(pdev); /* Free up device extension resources */ twa_free_device_extension(tw_dev); scsi_host_put(tw_dev->host); pci_disable_device(pdev); twa_device_extension_count--; } /* End twa_remove() */ /* This function is called on PCI suspend */ static int __maybe_unused twa_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no); TW_DISABLE_INTERRUPTS(tw_dev); free_irq(tw_dev->tw_pci_dev->irq, tw_dev); if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_disable_msi(pdev); /* Tell the card we are shutting down */ if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend"); } else { printk(KERN_WARNING "3w-9xxx: Suspend complete.\n"); } TW_CLEAR_ALL_INTERRUPTS(tw_dev); return 0; } /* End twa_suspend() */ /* This function is called on PCI resume */ static int __maybe_unused twa_resume(struct device *dev) { int retval = 0; struct pci_dev *pdev = to_pci_dev(dev); struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no); pci_try_set_mwi(pdev); retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (retval) retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (retval) { TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; } /* Initialize the card */ if (twa_reset_sequence(tw_dev, 0)) { retval = -ENODEV; goto out_disable_device; } /* Now setup the interrupt handler */ retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume"); retval = -ENODEV; goto out_disable_device; } /* Now enable MSI if enabled */ if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_enable_msi(pdev); /* Re-enable interrupts on the card */ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); printk(KERN_WARNING "3w-9xxx: Resume complete.\n"); return 0; out_disable_device: scsi_remove_host(host); return retval; } /* End twa_resume() */ /* PCI Devices supported by this driver */ static struct pci_device_id twa_pci_tbl[] = { { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } }; MODULE_DEVICE_TABLE(pci, twa_pci_tbl); static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, 
twa_resume);

/* pci_driver initializer */
static struct pci_driver twa_driver = {
	.name		= "3w-9xxx",
	.id_table	= twa_pci_tbl,
	.probe		= twa_probe,
	.remove		= twa_remove,
	.driver.pm	= &twa_pm_ops,
	.shutdown	= twa_shutdown
};

/* This function is called on driver initialization */
static int __init twa_init(void)
{
	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n",
	       TW_DRIVER_VERSION);

	return pci_register_driver(&twa_driver);
} /* End twa_init() */

/* This function is called on driver exit */
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
} /* End twa_exit() */

module_init(twa_init);
module_exit(twa_exit);
linux-master
drivers/scsi/3w-9xxx.c
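For reference, the CHS geometry heuristic used by twa_scsi_biosparam() in the driver above can be exercised outside the kernel. This is an illustrative sketch, not part of the driver: the function name twa_geometry_sketch and the plain integer division (standing in for the kernel's sector_div()) are assumptions made for the example.

/*
 * Illustrative sketch (user space, not driver code): mirrors the
 * 0x200000-sector threshold that twa_scsi_biosparam() uses to pick
 * a 255/63 versus 64/32 head/sector geometry.
 */
#include <stdint.h>
#include <stdio.h>

static void twa_geometry_sketch(uint64_t capacity, int geom[3])
{
	int heads, sectors;

	if (capacity >= 0x200000) {	/* >= 1 GiB worth of 512-byte sectors */
		heads = 255;
		sectors = 63;
	} else {
		heads = 64;
		sectors = 32;
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = (int)(capacity / (uint64_t)(heads * sectors));	/* cylinders */
}

int main(void)
{
	int geom[3];

	twa_geometry_sketch(4194304ULL, geom);	/* a 2 GiB unit */
	printf("C/H/S = %d/%d/%d\n", geom[2], geom[0], geom[1]);
	return 0;
}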
// SPDX-License-Identifier: GPL-2.0 /* * ASCII values for a number of symbolic constants, printing functions, * etc. * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422) * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) * by D. Gilbert and aeb (20020609) * Updated to SPC-4 T10/1713-D Rev 36g, D. Gilbert 20130701 */ #include <linux/blkdev.h> #include <linux/module.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> /* Commands with service actions that change the command name */ #define THIRD_PARTY_COPY_OUT 0x83 #define THIRD_PARTY_COPY_IN 0x84 struct sa_name_list { int opcode; const struct value_name_pair *arr; int arr_sz; }; struct value_name_pair { int value; const char * name; }; static const char * cdb_byte0_names[] = { /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, "Reassign Blocks", /* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL, /* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", /* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)", "Reserve(6)", /* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)", /* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", NULL, /* 20-22 */ NULL, NULL, NULL, /* 23-28 */ "Read Format Capacities", "Set Window", "Read Capacity(10)", NULL, NULL, "Read(10)", /* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)", "Read updated block", /* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal", /* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", /* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)", "Read Defect Data(10)", /* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", "Read Buffer", /* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", /* 40-41 */ "Change Definition", "Write Same(10)", /* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", "Play audio(10)", "Get configuration", "Play audio msf", "Sanitize/Play audio track/index", /* 49-4f */ "Play track relative(10)", "Get event status notification", "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", NULL, /* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", "Reserve track", "Send OPC info", "Mode Select(10)", /* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue", "Mode Sense(10)", "Close track/session", /* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", "Persistent reserve out", /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB", "Variable length", /* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Third party copy out", "Third party copy in", /* 85-89 */ "ATA command pass through(16)", "Access control in", "Access control out", "Read(16)", "Compare and Write", /* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes", "Write and verify(16)", "Verify(16)", /* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", "Lock/unlock cache(16)", "Write same(16)", NULL, /* 95-99 */ NULL, NULL, NULL, NULL, NULL, /* 9a-9f */ NULL, NULL, NULL, "Service 
action bidirectional", "Service action in(16)", "Service action out(16)", /* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank", "Security protocol in", "Maintenance in", "Maintenance out", "Move medium/play audio(12)", /* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", "Play track relative(12)", /* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", "Read DVD structure", "Write and verify(12)", /* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", /* b2-b4 */ "Search data low(12)", "Set limits(12)", "Read element status attached", /* b5-b6 */ "Security protocol out", "Send volume tag, set streaming", /* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", /* ba-bc */ "Redundancy group (in), Scan", "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd", /* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd", "Volume set (out), Send DVD structure", }; static const struct value_name_pair maint_in_arr[] = { {0x5, "Report identifying information"}, {0xa, "Report target port groups"}, {0xb, "Report aliases"}, {0xc, "Report supported operation codes"}, {0xd, "Report supported task management functions"}, {0xe, "Report priority"}, {0xf, "Report timestamp"}, {0x10, "Management protocol in"}, }; #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) static const struct value_name_pair maint_out_arr[] = { {0x6, "Set identifying information"}, {0xa, "Set target port groups"}, {0xb, "Change aliases"}, {0xc, "Remove I_T nexus"}, {0xe, "Set priority"}, {0xf, "Set timestamp"}, {0x10, "Management protocol out"}, }; #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) static const struct value_name_pair serv_in12_arr[] = { {0x1, "Read media serial number"}, }; #define SERV_IN12_SZ ARRAY_SIZE(serv_in12_arr) static const struct value_name_pair serv_out12_arr[] = { {-1, "dummy entry"}, }; #define SERV_OUT12_SZ ARRAY_SIZE(serv_out12_arr) static const struct value_name_pair serv_bidi_arr[] = { {-1, "dummy entry"}, }; #define SERV_BIDI_SZ ARRAY_SIZE(serv_bidi_arr) static const struct value_name_pair serv_in16_arr[] = { {0x10, "Read capacity(16)"}, {0x11, "Read long(16)"}, {0x12, "Get LBA status"}, {0x13, "Report referrals"}, }; #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) static const struct value_name_pair serv_out16_arr[] = { {0x11, "Write long(16)"}, {0x1f, "Notify data transfer device(16)"}, }; #define SERV_OUT16_SZ ARRAY_SIZE(serv_out16_arr) static const struct value_name_pair pr_in_arr[] = { {0x0, "Persistent reserve in, read keys"}, {0x1, "Persistent reserve in, read reservation"}, {0x2, "Persistent reserve in, report capabilities"}, {0x3, "Persistent reserve in, read full status"}, }; #define PR_IN_SZ ARRAY_SIZE(pr_in_arr) static const struct value_name_pair pr_out_arr[] = { {0x0, "Persistent reserve out, register"}, {0x1, "Persistent reserve out, reserve"}, {0x2, "Persistent reserve out, release"}, {0x3, "Persistent reserve out, clear"}, {0x4, "Persistent reserve out, preempt"}, {0x5, "Persistent reserve out, preempt and abort"}, {0x6, "Persistent reserve out, register and ignore existing key"}, {0x7, "Persistent reserve out, register and move"}, }; #define PR_OUT_SZ ARRAY_SIZE(pr_out_arr) /* SPC-4 rev 34 renamed the Extended Copy opcode to Third Party Copy Out. 
LID1 (List Identifier length: 1 byte) is the Extended Copy found in SPC-2 and SPC-3 */
static const struct value_name_pair tpc_out_arr[] = {
	{0x0, "Extended copy(LID1)"},
	{0x1, "Extended copy(LID4)"},
	{0x10, "Populate token"},
	{0x11, "Write using token"},
	{0x1c, "Copy operation abort"},
};
#define TPC_OUT_SZ ARRAY_SIZE(tpc_out_arr)

static const struct value_name_pair tpc_in_arr[] = {
	{0x0, "Receive copy status(LID1)"},
	{0x1, "Receive copy data(LID1)"},
	{0x3, "Receive copy operating parameters"},
	{0x4, "Receive copy failure details(LID1)"},
	{0x5, "Receive copy status(LID4)"},
	{0x6, "Receive copy data(LID4)"},
	{0x7, "Receive ROD token information"},
	{0x8, "Report all ROD tokens"},
};
#define TPC_IN_SZ ARRAY_SIZE(tpc_in_arr)

static const struct value_name_pair variable_length_arr[] = {
	{0x1, "Rebuild(32)"},
	{0x2, "Regenerate(32)"},
	{0x3, "Xdread(32)"},
	{0x4, "Xdwrite(32)"},
	{0x5, "Xdwrite extended(32)"},
	{0x6, "Xpwrite(32)"},
	{0x7, "Xdwriteread(32)"},
	{0x8, "Xdwrite extended(64)"},
	{0x9, "Read(32)"},
	{0xa, "Verify(32)"},
	{0xb, "Write(32)"},
	{0xc, "Write and verify(32)"},
	{0xd, "Write same(32)"},
	{0x8801, "Format OSD"},
	{0x8802, "Create (osd)"},
	{0x8803, "List (osd)"},
	{0x8805, "Read (osd)"},
	{0x8806, "Write (osd)"},
	{0x8807, "Append (osd)"},
	{0x8808, "Flush (osd)"},
	{0x880a, "Remove (osd)"},
	{0x880b, "Create partition (osd)"},
	{0x880c, "Remove partition (osd)"},
	{0x880e, "Get attributes (osd)"},
	{0x880f, "Set attributes (osd)"},
	{0x8812, "Create and write (osd)"},
	{0x8815, "Create collection (osd)"},
	{0x8816, "Remove collection (osd)"},
	{0x8817, "List collection (osd)"},
	{0x8818, "Set key (osd)"},
	{0x8819, "Set master key (osd)"},
	{0x881a, "Flush collection (osd)"},
	{0x881b, "Flush partition (osd)"},
	{0x881c, "Flush OSD"},
	{0x8f7e, "Perform SCSI command (osd)"},
	{0x8f7f, "Perform task management function (osd)"},
};
#define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr)

static struct sa_name_list sa_names_arr[] = {
	{VARIABLE_LENGTH_CMD, variable_length_arr, VARIABLE_LENGTH_SZ},
	{MAINTENANCE_IN, maint_in_arr, MAINT_IN_SZ},
	{MAINTENANCE_OUT, maint_out_arr, MAINT_OUT_SZ},
	{PERSISTENT_RESERVE_IN, pr_in_arr, PR_IN_SZ},
	{PERSISTENT_RESERVE_OUT, pr_out_arr, PR_OUT_SZ},
	{SERVICE_ACTION_IN_12, serv_in12_arr, SERV_IN12_SZ},
	{SERVICE_ACTION_OUT_12, serv_out12_arr, SERV_OUT12_SZ},
	{SERVICE_ACTION_BIDIRECTIONAL, serv_bidi_arr, SERV_BIDI_SZ},
	{SERVICE_ACTION_IN_16, serv_in16_arr, SERV_IN16_SZ},
	{SERVICE_ACTION_OUT_16, serv_out16_arr, SERV_OUT16_SZ},
	{THIRD_PARTY_COPY_IN, tpc_in_arr, TPC_IN_SZ},
	{THIRD_PARTY_COPY_OUT, tpc_out_arr, TPC_OUT_SZ},
	{0, NULL, 0},
};

bool scsi_opcode_sa_name(int opcode, int service_action,
			 const char **cdb_name, const char **sa_name)
{
	struct sa_name_list *sa_name_ptr;
	const struct value_name_pair *arr = NULL;
	int arr_sz, k;

	*cdb_name = NULL;
	if (opcode >= VENDOR_SPECIFIC_CDB)
		return false;

	if (opcode < ARRAY_SIZE(cdb_byte0_names))
		*cdb_name = cdb_byte0_names[opcode];

	for (sa_name_ptr = sa_names_arr; sa_name_ptr->arr; ++sa_name_ptr) {
		if (sa_name_ptr->opcode == opcode) {
			arr = sa_name_ptr->arr;
			arr_sz = sa_name_ptr->arr_sz;
			break;
		}
	}
	if (!arr)
		return false;

	for (k = 0; k < arr_sz; ++k, ++arr) {
		if (service_action == arr->value)
			break;
	}
	if (k < arr_sz)
		*sa_name = arr->name;

	return true;
}

struct error_info {
	unsigned short code12;	/* 0x0302 looks better than 0x03,0x02 */
	unsigned short size;
};

/*
 * There are 700+ entries in this table. To save space, we don't store
 * (code, pointer) pairs, which would make sizeof(struct
 * error_info)==16 on 64 bits.
Rather, the second element just stores * the size (including \0) of the corresponding string, and we use the * sum of these to get the appropriate offset into additional_text * defined below. This approach saves 12 bytes per entry. */ static const struct error_info additional[] = { #define SENSE_CODE(c, s) {c, sizeof(s)}, #include "sense_codes.h" #undef SENSE_CODE }; static const char *additional_text = #define SENSE_CODE(c, s) s "\0" #include "sense_codes.h" #undef SENSE_CODE ; struct error_info2 { unsigned char code1, code2_min, code2_max; const char * str; const char * fmt; }; static const struct error_info2 additional2[] = { {0x40, 0x00, 0x7f, "Ram failure", ""}, {0x40, 0x80, 0xff, "Diagnostic failure on component", ""}, {0x41, 0x00, 0xff, "Data path failure", ""}, {0x42, 0x00, 0xff, "Power-on or self-test failure", ""}, {0x4D, 0x00, 0xff, "Tagged overlapped commands", "task tag "}, {0x70, 0x00, 0xff, "Decompression exception", "short algorithm id of "}, {0, 0, 0, NULL, NULL} }; /* description of the sense key values */ static const char * const snstext[] = { "No Sense", /* 0: There is no sense information */ "Recovered Error", /* 1: The last command completed successfully but used error correction */ "Not Ready", /* 2: The addressed target is not ready */ "Medium Error", /* 3: Data error detected on the medium */ "Hardware Error", /* 4: Controller or device failure */ "Illegal Request", /* 5: Error in request */ "Unit Attention", /* 6: Removable medium was changed, or the target has been reset, or ... */ "Data Protect", /* 7: Access to the data is blocked */ "Blank Check", /* 8: Reached unexpected written or unwritten region of the medium */ "Vendor Specific(9)", "Copy Aborted", /* A: COPY or COMPARE was aborted */ "Aborted Command", /* B: The target aborted the command */ "Equal", /* C: A SEARCH DATA command found data equal, reserved in SPC-4 rev 36 */ "Volume Overflow", /* D: Medium full with still data to be written */ "Miscompare", /* E: Source data and data on the medium do not agree */ "Completed", /* F: command completed sense data reported, may occur for successful command */ }; /* Get sense key string or NULL if not available */ const char * scsi_sense_key_string(unsigned char key) { if (key < ARRAY_SIZE(snstext)) return snstext[key]; return NULL; } EXPORT_SYMBOL(scsi_sense_key_string); /* * Get additional sense code string or NULL if not available. * This string may contain a "%x" and should be printed with ascq as arg. 
 */
const char *
scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
{
	int i;
	unsigned short code = ((asc << 8) | ascq);
	unsigned offset = 0;

	*fmt = NULL;
	for (i = 0; i < ARRAY_SIZE(additional); i++) {
		if (additional[i].code12 == code)
			return additional_text + offset;
		offset += additional[i].size;
	}
	for (i = 0; additional2[i].fmt; i++) {
		if (additional2[i].code1 == asc &&
		    ascq >= additional2[i].code2_min &&
		    ascq <= additional2[i].code2_max) {
			*fmt = additional2[i].fmt;
			return additional2[i].str;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_extd_sense_format);

static const char * const hostbyte_table[] = {
	"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
	"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
	"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
	"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
	"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR"
};

const char *scsi_hostbyte_string(int result)
{
	enum scsi_host_status hb = host_byte(result);
	const char *hb_string = NULL;

	if (hb < ARRAY_SIZE(hostbyte_table))
		hb_string = hostbyte_table[hb];
	return hb_string;
}
EXPORT_SYMBOL(scsi_hostbyte_string);

#define scsi_mlreturn_name(result)	{ result, #result }
static const struct value_name_pair scsi_mlreturn_arr[] = {
	scsi_mlreturn_name(NEEDS_RETRY),
	scsi_mlreturn_name(SUCCESS),
	scsi_mlreturn_name(FAILED),
	scsi_mlreturn_name(QUEUED),
	scsi_mlreturn_name(SOFT_ERROR),
	scsi_mlreturn_name(ADD_TO_MLQUEUE),
	scsi_mlreturn_name(TIMEOUT_ERROR),
	scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED),
	scsi_mlreturn_name(FAST_IO_FAIL)
};

const char *scsi_mlreturn_string(int result)
{
	const struct value_name_pair *arr = scsi_mlreturn_arr;
	int k;

	for (k = 0; k < ARRAY_SIZE(scsi_mlreturn_arr); ++k, ++arr) {
		if (result == arr->value)
			return arr->name;
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_mlreturn_string);
linux-master
drivers/scsi/constants.c
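The additional[] / additional_text pair in constants.c above stores only (code, string size) entries and recovers each message by summing sizes into one concatenated blob. The following standalone sketch illustrates that lookup scheme; the names err_entry, table, text and lookup, and the three sample strings, are illustrative assumptions and are not generated from sense_codes.h.

/*
 * Illustrative sketch of the size-offset string table technique used by
 * scsi_extd_sense_format(): hypothetical data, user-space code.
 */
#include <stdio.h>

struct err_entry { unsigned short code; unsigned short size; };

static const struct err_entry table[] = {
	{0x0000, sizeof("No additional sense information")},
	{0x0401, sizeof("Logical unit is in process of becoming ready")},
	{0x3a00, sizeof("Medium not present")},
};

static const char text[] =
	"No additional sense information\0"
	"Logical unit is in process of becoming ready\0"
	"Medium not present\0";

static const char *lookup(unsigned short code)
{
	unsigned int i, offset = 0;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].code == code)
			return text + offset;
		offset += table[i].size;	/* size includes the trailing '\0' */
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", lookup(0x3a00));	/* prints "Medium not present" */
	return 0;
}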
// SPDX-License-Identifier: GPL-2.0-only /* * sr.c Copyright (C) 1992 David Giller * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * * adapted from: * sd.c Copyright (C) 1992 Drew Eckhardt * Linux scsi disk driver by * Drew Eckhardt <[email protected]> * * Modified by Eric Youngdale [email protected] to * add scatter-gather, multiple outstanding request, and other * enhancements. * * Modified by Eric Youngdale [email protected] to support loadable * low-level scsi drivers. * * Modified by Thomas Quinot [email protected] to * provide auto-eject. * * Modified by Gerd Knorr <[email protected]> to support the * generic cdrom interface * * Modified by Jens Axboe <[email protected]> - Uniform sr_packet() * interface, capabilities probe additions, ioctl cleanups, etc. * * Modified by Richard Gooch <[email protected]> to support devfs * * Modified by Jens Axboe <[email protected]> - support DVD-RAM * transparently and lose the GHOST hack * * Modified by Arnaldo Carvalho de Melo <[email protected]> * check resource allocation in sr_init and some cleanups */ #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bio.h> #include <linux/compat.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/cdrom.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/blk-pm.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */ #include "scsi_logging.h" #include "sr.h" MODULE_DESCRIPTION("SCSI cdrom (sr) driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR); MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM); MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM); #define SR_DISKS 256 #define SR_CAPABILITIES \ (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \ CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \ CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \ CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ CDC_MRW|CDC_MRW_W|CDC_RAM) static int sr_probe(struct device *); static int sr_remove(struct device *); static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt); static int sr_done(struct scsi_cmnd *); static int sr_runtime_suspend(struct device *dev); static const struct dev_pm_ops sr_pm_ops = { .runtime_suspend = sr_runtime_suspend, }; static struct scsi_driver sr_template = { .gendrv = { .name = "sr", .owner = THIS_MODULE, .probe = sr_probe, .remove = sr_remove, .pm = &sr_pm_ops, }, .init_command = sr_init_command, .done = sr_done, }; static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG]; static DEFINE_SPINLOCK(sr_index_lock); static struct lock_class_key sr_bio_compl_lkclass; static int sr_open(struct cdrom_device_info *, int); static void sr_release(struct cdrom_device_info *); static void get_sectorsize(struct scsi_cd *); static int get_capabilities(struct scsi_cd *); static unsigned int sr_check_events(struct cdrom_device_info *cdi, unsigned int clearing, int slot); static int sr_packet(struct cdrom_device_info *, struct packet_command *); static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf, u32 lba, u32 nr, u8 *last_sense); static const struct cdrom_device_ops 
sr_dops = { .open = sr_open, .release = sr_release, .drive_status = sr_drive_status, .check_events = sr_check_events, .tray_move = sr_tray_move, .lock_door = sr_lock_door, .select_speed = sr_select_speed, .get_last_session = sr_get_last_session, .get_mcn = sr_get_mcn, .reset = sr_reset, .audio_ioctl = sr_audio_ioctl, .generic_packet = sr_packet, .read_cdda_bpc = sr_read_cdda_bpc, .capability = SR_CAPABILITIES, }; static inline struct scsi_cd *scsi_cd(struct gendisk *disk) { return disk->private_data; } static int sr_runtime_suspend(struct device *dev) { struct scsi_cd *cd = dev_get_drvdata(dev); if (!cd) /* E.g.: runtime suspend following sr_remove() */ return 0; if (cd->media_present) return -EBUSY; else return 0; } static unsigned int sr_get_events(struct scsi_device *sdev) { u8 buf[8]; u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION, 1, /* polled */ 0, 0, /* reserved */ 1 << 4, /* notification class: media */ 0, 0, /* reserved */ 0, sizeof(buf), /* allocation length */ 0, /* control */ }; struct event_header *eh = (void *)buf; struct media_event_desc *med = (void *)(buf + 4); struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; int result; result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf), SR_TIMEOUT, MAX_RETRIES, &exec_args); if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION) return DISK_EVENT_MEDIA_CHANGE; if (result || be16_to_cpu(eh->data_len) < sizeof(*med)) return 0; if (eh->nea || eh->notification_class != 0x4) return 0; if (med->media_event_code == 1) return DISK_EVENT_EJECT_REQUEST; else if (med->media_event_code == 2) return DISK_EVENT_MEDIA_CHANGE; else if (med->media_event_code == 3) return DISK_EVENT_MEDIA_CHANGE; return 0; } /* * This function checks to see if the media has been changed or eject * button has been pressed. It is possible that we have already * sensed a change, or the drive may have sensed one and not yet * reported it. The past events are accumulated in sdev->changed and * returned together with the current state. */ static unsigned int sr_check_events(struct cdrom_device_info *cdi, unsigned int clearing, int slot) { struct scsi_cd *cd = cdi->handle; bool last_present; struct scsi_sense_hdr sshdr; unsigned int events; int ret; /* no changer support */ if (CDSL_CURRENT != slot) return 0; events = sr_get_events(cd->device); cd->get_event_changed |= events & DISK_EVENT_MEDIA_CHANGE; /* * If earlier GET_EVENT_STATUS_NOTIFICATION and TUR did not agree * for several times in a row. We rely on TUR only for this likely * broken device, to prevent generating incorrect media changed * events for every open(). */ if (cd->ignore_get_event) { events &= ~DISK_EVENT_MEDIA_CHANGE; goto do_tur; } /* * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE * is being cleared. Note that there are devices which hang * if asked to execute TUR repeatedly. */ if (cd->device->changed) { events |= DISK_EVENT_MEDIA_CHANGE; cd->device->changed = 0; cd->tur_changed = true; } if (!(clearing & DISK_EVENT_MEDIA_CHANGE)) return events; do_tur: /* let's see whether the media is there with TUR */ last_present = cd->media_present; ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* * Media is considered to be present if TUR succeeds or fails with * sense data indicating something other than media-not-present * (ASC 0x3a). 
*/ cd->media_present = scsi_status_is_good(ret) || (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a); if (last_present != cd->media_present) cd->device->changed = 1; if (cd->device->changed) { events |= DISK_EVENT_MEDIA_CHANGE; cd->device->changed = 0; cd->tur_changed = true; } if (cd->ignore_get_event) return events; /* check whether GET_EVENT is reporting spurious MEDIA_CHANGE */ if (!cd->tur_changed) { if (cd->get_event_changed) { if (cd->tur_mismatch++ > 8) { sr_printk(KERN_WARNING, cd, "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n"); cd->ignore_get_event = true; } } else { cd->tur_mismatch = 0; } } cd->tur_changed = false; cd->get_event_changed = false; return events; } /* * sr_done is the interrupt routine for the device driver. * * It will be notified on the end of a SCSI read / write, and will take one * of several actions based on success or failure. */ static int sr_done(struct scsi_cmnd *SCpnt) { int result = SCpnt->result; int this_count = scsi_bufflen(SCpnt); int good_bytes = (result == 0 ? this_count : 0); int block_sectors = 0; long error_sector; struct request *rq = scsi_cmd_to_rq(SCpnt); struct scsi_cd *cd = scsi_cd(rq->q->disk); #ifdef DEBUG scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result); #endif /* * Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial * success. Since this is a relatively rare error condition, no * care is taken to avoid unnecessary additional work such as * memcpy's that could be avoided. */ if (scsi_status_is_check_condition(result) && (SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */ switch (SCpnt->sense_buffer[2]) { case MEDIUM_ERROR: case VOLUME_OVERFLOW: case ILLEGAL_REQUEST: if (!(SCpnt->sense_buffer[0] & 0x90)) break; error_sector = get_unaligned_be32(&SCpnt->sense_buffer[3]); if (rq->bio != NULL) block_sectors = bio_sectors(rq->bio); if (block_sectors < 4) block_sectors = 4; if (cd->device->sector_size == 2048) error_sector <<= 2; error_sector &= ~(block_sectors - 1); good_bytes = (error_sector - blk_rq_pos(rq)) << 9; if (good_bytes < 0 || good_bytes >= this_count) good_bytes = 0; /* * The SCSI specification allows for the value * returned by READ CAPACITY to be up to 75 2K * sectors past the last readable block. * Therefore, if we hit a medium error within the * last 75 2K sectors, we decrease the saved size * value. 
*/ if (error_sector < get_capacity(cd->disk) && cd->capacity - error_sector < 4 * 75) set_capacity(cd->disk, error_sector); break; case RECOVERED_ERROR: good_bytes = this_count; break; default: break; } } return good_bytes; } static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) { int block = 0, this_count, s_size; struct scsi_cd *cd; struct request *rq = scsi_cmd_to_rq(SCpnt); blk_status_t ret; ret = scsi_alloc_sgtables(SCpnt); if (ret != BLK_STS_OK) return ret; cd = scsi_cd(rq->q->disk); SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, "Doing sr request, block = %d\n", block)); if (!cd->device || !scsi_device_online(cd->device)) { SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "Finishing %u sectors\n", blk_rq_sectors(rq))); SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "Retry with 0x%p\n", SCpnt)); goto out; } if (cd->device->changed) { /* * quietly refuse to do anything to a changed disc until the * changed bit has been reset */ goto out; } s_size = cd->device->sector_size; if (s_size != 512 && s_size != 1024 && s_size != 2048) { scmd_printk(KERN_ERR, SCpnt, "bad sector size %d\n", s_size); goto out; } switch (req_op(rq)) { case REQ_OP_WRITE: if (!cd->writeable) goto out; SCpnt->cmnd[0] = WRITE_10; cd->cdi.media_written = 1; break; case REQ_OP_READ: SCpnt->cmnd[0] = READ_10; break; default: blk_dump_rq_flags(rq, "Unknown sr command"); goto out; } { struct scatterlist *sg; int i, size = 0, sg_count = scsi_sg_count(SCpnt); scsi_for_each_sg(SCpnt, sg, sg_count, i) size += sg->length; if (size != scsi_bufflen(SCpnt)) { scmd_printk(KERN_ERR, SCpnt, "mismatch count %d, bytes %d\n", size, scsi_bufflen(SCpnt)); if (scsi_bufflen(SCpnt) > size) SCpnt->sdb.length = size; } } /* * request doesn't start on hw block boundary, add scatter pads */ if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) || (scsi_bufflen(SCpnt) % s_size)) { scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); goto out; } this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "%s %d/%u 512 byte blocks.\n", (rq_data_dir(rq) == WRITE) ? "writing" : "reading", this_count, blk_rq_sectors(rq))); SCpnt->cmnd[1] = 0; block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9); if (this_count > 0xffff) { this_count = 0xffff; SCpnt->sdb.length = this_count * s_size; } put_unaligned_be32(block, &SCpnt->cmnd[2]); SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; put_unaligned_be16(this_count, &SCpnt->cmnd[7]); /* * We shouldn't disconnect in the middle of a sector, so with a dumb * host adapter, it's safe to assume that we can at least transfer * this many bytes between each connect / disconnect. */ SCpnt->transfersize = cd->device->sector_size; SCpnt->underflow = this_count << 9; SCpnt->allowed = MAX_RETRIES; SCpnt->cmd_len = 10; /* * This indicates that the command is ready from our end to be queued. 
*/ return BLK_STS_OK; out: scsi_free_sgtables(SCpnt); return BLK_STS_IOERR; } static void sr_revalidate_disk(struct scsi_cd *cd) { struct scsi_sense_hdr sshdr; /* if the unit is not ready, nothing more to do */ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) return; sr_cd_check(&cd->cdi); get_sectorsize(cd); } static int sr_block_open(struct gendisk *disk, blk_mode_t mode) { struct scsi_cd *cd = scsi_cd(disk); struct scsi_device *sdev = cd->device; int ret; if (scsi_device_get(cd->device)) return -ENXIO; scsi_autopm_get_device(sdev); if (disk_check_media_change(disk)) sr_revalidate_disk(cd); mutex_lock(&cd->lock); ret = cdrom_open(&cd->cdi, mode); mutex_unlock(&cd->lock); scsi_autopm_put_device(sdev); if (ret) scsi_device_put(cd->device); return ret; } static void sr_block_release(struct gendisk *disk) { struct scsi_cd *cd = scsi_cd(disk); mutex_lock(&cd->lock); cdrom_release(&cd->cdi); mutex_unlock(&cd->lock); scsi_device_put(cd->device); } static int sr_block_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg) { struct scsi_cd *cd = scsi_cd(bdev->bd_disk); struct scsi_device *sdev = cd->device; void __user *argp = (void __user *)arg; int ret; if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) return -ENOIOCTLCMD; mutex_lock(&cd->lock); ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, (mode & BLK_OPEN_NDELAY)); if (ret) goto out; scsi_autopm_get_device(sdev); if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) { ret = cdrom_ioctl(&cd->cdi, bdev, cmd, arg); if (ret != -ENOSYS) goto put; } ret = scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd, argp); put: scsi_autopm_put_device(sdev); out: mutex_unlock(&cd->lock); return ret; } static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { struct scsi_cd *cd = disk->private_data; if (atomic_read(&cd->device->disk_events_disable_depth)) return 0; return cdrom_check_events(&cd->cdi, clearing); } static void sr_free_disk(struct gendisk *disk) { struct scsi_cd *cd = disk->private_data; spin_lock(&sr_index_lock); clear_bit(MINOR(disk_devt(disk)), sr_index_bits); spin_unlock(&sr_index_lock); unregister_cdrom(&cd->cdi); mutex_destroy(&cd->lock); kfree(cd); } static const struct block_device_operations sr_bdops = { .owner = THIS_MODULE, .open = sr_block_open, .release = sr_block_release, .ioctl = sr_block_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = sr_block_check_events, .free_disk = sr_free_disk, }; static int sr_open(struct cdrom_device_info *cdi, int purpose) { struct scsi_cd *cd = cdi->handle; struct scsi_device *sdev = cd->device; /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. 
*/ if (!scsi_block_when_processing_errors(sdev)) return -ENXIO; return 0; } static void sr_release(struct cdrom_device_info *cdi) { } static int sr_probe(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct gendisk *disk; struct scsi_cd *cd; int minor, error; scsi_autopm_get_device(sdev); error = -ENODEV; if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM) goto fail; error = -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); if (!cd) goto fail; disk = blk_mq_alloc_disk_for_queue(sdev->request_queue, &sr_bio_compl_lkclass); if (!disk) goto fail_free; mutex_init(&cd->lock); spin_lock(&sr_index_lock); minor = find_first_zero_bit(sr_index_bits, SR_DISKS); if (minor == SR_DISKS) { spin_unlock(&sr_index_lock); error = -EBUSY; goto fail_put; } __set_bit(minor, sr_index_bits); spin_unlock(&sr_index_lock); disk->major = SCSI_CDROM_MAJOR; disk->first_minor = minor; disk->minors = 1; sprintf(disk->disk_name, "sr%d", minor); disk->fops = &sr_bdops; disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT | DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE; blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); cd->device = sdev; cd->disk = disk; cd->capacity = 0x1fffff; cd->device->changed = 1; /* force recheck CD type */ cd->media_present = 1; cd->use = 1; cd->readcd_known = 0; cd->readcd_cdda = 0; cd->cdi.ops = &sr_dops; cd->cdi.handle = cd; cd->cdi.mask = 0; cd->cdi.capacity = 1; sprintf(cd->cdi.name, "sr%d", minor); sdev->sector_size = 2048; /* A guess, just in case */ error = -ENOMEM; if (get_capabilities(cd)) goto fail_minor; sr_vendor_init(cd); set_capacity(disk, cd->capacity); disk->private_data = cd; if (register_cdrom(disk, &cd->cdi)) goto fail_minor; /* * Initialize block layer runtime PM stuffs before the * periodic event checking request gets started in add_disk. */ blk_pm_runtime_init(sdev->request_queue, dev); dev_set_drvdata(dev, cd); sr_revalidate_disk(cd); error = device_add_disk(&sdev->sdev_gendev, disk, NULL); if (error) goto unregister_cdrom; sdev_printk(KERN_DEBUG, sdev, "Attached scsi CD-ROM %s\n", cd->cdi.name); scsi_autopm_put_device(cd->device); return 0; unregister_cdrom: unregister_cdrom(&cd->cdi); fail_minor: spin_lock(&sr_index_lock); clear_bit(minor, sr_index_bits); spin_unlock(&sr_index_lock); fail_put: put_disk(disk); mutex_destroy(&cd->lock); fail_free: kfree(cd); fail: scsi_autopm_put_device(sdev); return error; } static void get_sectorsize(struct scsi_cd *cd) { unsigned char cmd[10]; unsigned char buffer[8]; int the_result, retries = 3; int sector_size; struct request_queue *queue; do { cmd[0] = READ_CAPACITY; memset((void *) &cmd[1], 0, 9); memset(buffer, 0, sizeof(buffer)); /* Do the command and wait.. */ the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer, sizeof(buffer), SR_TIMEOUT, MAX_RETRIES, NULL); retries--; } while (the_result && retries); if (the_result) { cd->capacity = 0x1fffff; sector_size = 2048; /* A guess, just in case */ } else { long last_written; cd->capacity = 1 + get_unaligned_be32(&buffer[0]); /* * READ_CAPACITY doesn't return the correct size on * certain UDF media. If last_written is larger, use * it instead. 
* * http://bugzilla.kernel.org/show_bug.cgi?id=9668 */ if (!cdrom_get_last_written(&cd->cdi, &last_written)) cd->capacity = max_t(long, cd->capacity, last_written); sector_size = get_unaligned_be32(&buffer[4]); switch (sector_size) { /* * HP 4020i CD-Recorder reports 2340 byte sectors * Philips CD-Writers report 2352 byte sectors * * Use 2k sectors for them.. */ case 0: case 2340: case 2352: sector_size = 2048; fallthrough; case 2048: cd->capacity *= 4; fallthrough; case 512: break; default: sr_printk(KERN_INFO, cd, "unsupported sector size %d.", sector_size); cd->capacity = 0; } cd->device->sector_size = sector_size; /* * Add this so that we have the ability to correctly gauge * what the device is capable of. */ set_capacity(cd->disk, cd->capacity); } queue = cd->device->request_queue; blk_queue_logical_block_size(queue, sector_size); return; } static int get_capabilities(struct scsi_cd *cd) { unsigned char *buffer; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; unsigned int ms_len = 128; int rc, n; static const char *loadmech[] = { "caddy", "tray", "pop-up", "", "changer", "cartridge changer", "", "" }; /* allocate transfer buffer */ buffer = kmalloc(512, GFP_KERNEL); if (!buffer) { sr_printk(KERN_ERR, cd, "out of memory.\n"); return -ENOMEM; } /* eat unit attentions */ scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* ask for mode page 0x2a */ rc = scsi_mode_sense(cd->device, 0, 0x2a, 0, buffer, ms_len, SR_TIMEOUT, 3, &data, NULL); if (rc < 0 || data.length > ms_len || data.header_length + data.block_descriptor_length > data.length) { /* failed, drive doesn't have capabilities mode page */ cd->cdi.speed = 1; cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | CDC_DVD | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); kfree(buffer); sr_printk(KERN_INFO, cd, "scsi-1 drive"); return 0; } n = data.header_length + data.block_descriptor_length; cd->cdi.speed = get_unaligned_be16(&buffer[n + 8]) / 176; cd->readcd_known = 1; cd->readcd_cdda = buffer[n + 5] & 0x01; /* print some capability bits */ sr_printk(KERN_INFO, cd, "scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n", get_unaligned_be16(&buffer[n + 14]) / 176, cd->cdi.speed, buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */ buffer[n + 3] & 0x20 ? "dvd-ram " : "", buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */ buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */ buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */ loadmech[buffer[n + 6] >> 5]); if ((buffer[n + 6] >> 5) == 0) /* caddy drives can't close tray... 
*/ cd->cdi.mask |= CDC_CLOSE_TRAY; if ((buffer[n + 2] & 0x8) == 0) /* not a DVD drive */ cd->cdi.mask |= CDC_DVD; if ((buffer[n + 3] & 0x20) == 0) /* can't write DVD-RAM media */ cd->cdi.mask |= CDC_DVD_RAM; if ((buffer[n + 3] & 0x10) == 0) /* can't write DVD-R media */ cd->cdi.mask |= CDC_DVD_R; if ((buffer[n + 3] & 0x2) == 0) /* can't write CD-RW media */ cd->cdi.mask |= CDC_CD_RW; if ((buffer[n + 3] & 0x1) == 0) /* can't write CD-R media */ cd->cdi.mask |= CDC_CD_R; if ((buffer[n + 6] & 0x8) == 0) /* can't eject */ cd->cdi.mask |= CDC_OPEN_TRAY; if ((buffer[n + 6] >> 5) == mechtype_individual_changer || (buffer[n + 6] >> 5) == mechtype_cartridge_changer) cd->cdi.capacity = cdrom_number_of_slots(&cd->cdi); if (cd->cdi.capacity <= 1) /* not a changer */ cd->cdi.mask |= CDC_SELECT_DISC; /*else I don't think it can close its tray cd->cdi.mask |= CDC_CLOSE_TRAY; */ /* * if DVD-RAM, MRW-W or CD-RW, we are randomly writable */ if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) != (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) { cd->writeable = 1; } kfree(buffer); return 0; } /* * sr_packet() is the entry point for the generic commands generated * by the Uniform CD-ROM layer. */ static int sr_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { struct scsi_cd *cd = cdi->handle; struct scsi_device *sdev = cd->device; if (cgc->cmd[0] == GPCMD_READ_DISC_INFO && sdev->no_read_disc_info) return -EDRIVE_CANT_DO_THIS; if (cgc->timeout <= 0) cgc->timeout = IOCTL_TIMEOUT; sr_do_ioctl(cd, cgc); return cgc->stat; } static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf, u32 lba, u32 nr, u8 *last_sense) { struct gendisk *disk = cdi->disk; u32 len = nr * CD_FRAMESIZE_RAW; struct scsi_cmnd *scmd; struct request *rq; struct bio *bio; int ret; rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0); if (IS_ERR(rq)) return PTR_ERR(rq); scmd = blk_mq_rq_to_pdu(rq); ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL); if (ret) goto out_put_request; scmd->cmnd[0] = GPCMD_READ_CD; scmd->cmnd[1] = 1 << 2; scmd->cmnd[2] = (lba >> 24) & 0xff; scmd->cmnd[3] = (lba >> 16) & 0xff; scmd->cmnd[4] = (lba >> 8) & 0xff; scmd->cmnd[5] = lba & 0xff; scmd->cmnd[6] = (nr >> 16) & 0xff; scmd->cmnd[7] = (nr >> 8) & 0xff; scmd->cmnd[8] = nr & 0xff; scmd->cmnd[9] = 0xf8; scmd->cmd_len = 12; rq->timeout = 60 * HZ; bio = rq->bio; blk_execute_rq(rq, false); if (scmd->result) { struct scsi_sense_hdr sshdr; scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len, &sshdr); *last_sense = sshdr.sense_key; ret = -EIO; } if (blk_rq_unmap_user(bio)) ret = -EFAULT; out_put_request: blk_mq_free_request(rq); return ret; } static int sr_remove(struct device *dev) { struct scsi_cd *cd = dev_get_drvdata(dev); scsi_autopm_get_device(cd->device); del_gendisk(cd->disk); put_disk(cd->disk); return 0; } static int __init init_sr(void) { int rc; rc = register_blkdev(SCSI_CDROM_MAJOR, "sr"); if (rc) return rc; rc = scsi_register_driver(&sr_template.gendrv); if (rc) unregister_blkdev(SCSI_CDROM_MAJOR, "sr"); return rc; } static void __exit exit_sr(void) { scsi_unregister_driver(&sr_template.gendrv); unregister_blkdev(SCSI_CDROM_MAJOR, "sr"); } module_init(init_sr); module_exit(exit_sr); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/sr.c
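The get_sectorsize() routine in the sr.c entry above normalizes the odd sector sizes some CD drives report (0, 2340 or 2352 bytes) to 2048 and rescales the capacity into 512-byte units. Below is a minimal user-space sketch of that normalization step only; the struct and function names are hypothetical and this is not kernel code.

/* Standalone sketch (not kernel code) of the sector-size normalization
 * done by get_sectorsize(): 0/2340/2352-byte sectors are treated as 2048,
 * and the block count is rescaled to 512-byte units. */
#include <stdio.h>

struct fake_cd {
	long capacity;		/* in 512-byte units after normalization */
	int sector_size;
};

static void normalize_sector_size(struct fake_cd *cd, long blocks, int reported)
{
	cd->capacity = blocks;
	cd->sector_size = reported;

	switch (reported) {
	case 0:
	case 2340:
	case 2352:
		/* some writers report 2340/2352-byte sectors; use 2k */
		cd->sector_size = 2048;
		/* fall through */
	case 2048:
		cd->capacity *= 4;	/* 2048 bytes == four 512-byte units */
		/* fall through */
	case 512:
		break;
	default:
		fprintf(stderr, "unsupported sector size %d\n", reported);
		cd->capacity = 0;
	}
}

int main(void)
{
	struct fake_cd cd;

	normalize_sector_size(&cd, 300000, 2352);
	printf("capacity=%ld (512-byte units), sector_size=%d\n",
	       cd.capacity, cd.sector_size);
	return 0;
}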
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SCSI device handler infrastructure.
 *
 * Copyright IBM Corporation, 2007
 *      Authors:
 *               Chandra Seetharaman <[email protected]>
 *               Mike Anderson <[email protected]>
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <scsi/scsi_dh.h>
#include "scsi_priv.h"

static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);

struct scsi_dh_blist {
	const char *vendor;
	const char *model;
	const char *driver;
};

static const struct scsi_dh_blist scsi_dh_blist[] = {
	{"DGC", "RAID", "emc" },
	{"DGC", "DISK", "emc" },
	{"DGC", "VRAID", "emc" },

	{"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
	{"COMPAQ", "HSV110", "hp_sw" },
	{"HP", "HSV100", "hp_sw"},
	{"DEC", "HSG80", "hp_sw"},

	{"IBM", "1722", "rdac", },
	{"IBM", "1724", "rdac", },
	{"IBM", "1726", "rdac", },
	{"IBM", "1742", "rdac", },
	{"IBM", "1745", "rdac", },
	{"IBM", "1746", "rdac", },
	{"IBM", "1813", "rdac", },
	{"IBM", "1814", "rdac", },
	{"IBM", "1815", "rdac", },
	{"IBM", "1818", "rdac", },
	{"IBM", "3526", "rdac", },
	{"IBM", "3542", "rdac", },
	{"IBM", "3552", "rdac", },
	{"SGI", "TP9300", "rdac", },
	{"SGI", "TP9400", "rdac", },
	{"SGI", "TP9500", "rdac", },
	{"SGI", "TP9700", "rdac", },
	{"SGI", "IS", "rdac", },
	{"STK", "OPENstorage", "rdac", },
	{"STK", "FLEXLINE 380", "rdac", },
	{"STK", "BladeCtlr", "rdac", },
	{"SUN", "CSM", "rdac", },
	{"SUN", "LCSM100", "rdac", },
	{"SUN", "STK6580_6780", "rdac", },
	{"SUN", "SUN_6180", "rdac", },
	{"SUN", "ArrayStorage", "rdac", },
	{"DELL", "MD3", "rdac", },
	{"NETAPP", "INF-01-00", "rdac", },
	{"LSI", "INF-01-00", "rdac", },
	{"ENGENIO", "INF-01-00", "rdac", },
	{"LENOVO", "DE_Series", "rdac", },
	{"FUJITSU", "ETERNUS_AHB", "rdac", },
	{NULL, NULL, NULL },
};

static const char *
scsi_dh_find_driver(struct scsi_device *sdev)
{
	const struct scsi_dh_blist *b;

	if (scsi_device_tpgs(sdev))
		return "alua";

	for (b = scsi_dh_blist; b->vendor; b++) {
		if (!strncmp(sdev->vendor, b->vendor, strlen(b->vendor)) &&
		    !strncmp(sdev->model, b->model, strlen(b->model))) {
			return b->driver;
		}
	}
	return NULL;
}


static struct scsi_device_handler *__scsi_dh_lookup(const char *name)
{
	struct scsi_device_handler *tmp, *found = NULL;

	spin_lock(&list_lock);
	list_for_each_entry(tmp, &scsi_dh_list, list) {
		if (!strncmp(tmp->name, name, strlen(tmp->name))) {
			found = tmp;
			break;
		}
	}
	spin_unlock(&list_lock);
	return found;
}

static struct scsi_device_handler *scsi_dh_lookup(const char *name)
{
	struct scsi_device_handler *dh;

	if (!name || strlen(name) == 0)
		return NULL;

	dh = __scsi_dh_lookup(name);
	if (!dh) {
		request_module("scsi_dh_%s", name);
		dh = __scsi_dh_lookup(name);
	}

	return dh;
}

/*
 * scsi_dh_handler_attach - Attach a device handler to a device
 * @sdev - SCSI device the device handler should attach to
 * @scsi_dh - The device handler to attach
 */
static int scsi_dh_handler_attach(struct scsi_device *sdev,
				  struct scsi_device_handler *scsi_dh)
{
	int error, ret = 0;

	if (!try_module_get(scsi_dh->module))
		return -EINVAL;

	error = scsi_dh->attach(sdev);
	if (error != SCSI_DH_OK) {
		switch (error) {
		case SCSI_DH_NOMEM:
			ret = -ENOMEM;
			break;
		case SCSI_DH_RES_TEMP_UNAVAIL:
			ret = -EAGAIN;
			break;
		case SCSI_DH_DEV_UNSUPP:
		case SCSI_DH_NOSYS:
			ret = -ENODEV;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret != -ENODEV)
			sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
				    scsi_dh->name, error);
		module_put(scsi_dh->module);
	} else
		sdev->handler = scsi_dh;

	return ret;
}

/*
 * scsi_dh_handler_detach - Detach a device handler from a device
 * @sdev - SCSI device the device handler should be detached from
 */
static void scsi_dh_handler_detach(struct scsi_device *sdev)
{
	sdev->handler->detach(sdev);
	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", sdev->handler->name);
	module_put(sdev->handler->module);
}

void scsi_dh_add_device(struct scsi_device *sdev)
{
	struct scsi_device_handler *devinfo = NULL;
	const char *drv;

	drv = scsi_dh_find_driver(sdev);
	if (drv)
		devinfo = __scsi_dh_lookup(drv);
	/*
	 * device_handler is optional, so ignore errors
	 * from scsi_dh_handler_attach()
	 */
	if (devinfo)
		(void)scsi_dh_handler_attach(sdev, devinfo);
}

void scsi_dh_release_device(struct scsi_device *sdev)
{
	if (sdev->handler)
		scsi_dh_handler_detach(sdev);
}

/*
 * scsi_register_device_handler - register a device handler personality
 *      module.
 * @scsi_dh - device handler to be registered.
 *
 * Returns 0 on success, -EBUSY if handler already registered.
 */
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
	if (__scsi_dh_lookup(scsi_dh->name))
		return -EBUSY;

	if (!scsi_dh->attach || !scsi_dh->detach)
		return -EINVAL;

	spin_lock(&list_lock);
	list_add(&scsi_dh->list, &scsi_dh_list);
	spin_unlock(&list_lock);

	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);

	return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_register_device_handler);

/*
 * scsi_unregister_device_handler - register a device handler personality
 *      module.
 * @scsi_dh - device handler to be unregistered.
 *
 * Returns 0 on success, -ENODEV if handler not registered.
 */
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
	if (!__scsi_dh_lookup(scsi_dh->name))
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&scsi_dh->list);
	spin_unlock(&list_lock);
	printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);

	return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);

/*
 * scsi_dh_activate - activate the path associated with the scsi_device
 *      corresponding to the given request queue.
 *     Returns immediately without waiting for activation to be completed.
 * @q    - Request queue that is associated with the scsi_device to be
 *         activated.
 * @fn   - Function to be called upon completion of the activation.
 *         Function fn is called with data (below) and the error code.
 *         Function fn may be called from the same calling context. So,
 *         do not hold the lock in the caller which may be needed in fn.
 * @data - data passed to the function fn upon completion.
 */
int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
{
	struct scsi_device *sdev;
	int err = SCSI_DH_NOSYS;

	sdev = scsi_device_from_queue(q);
	if (!sdev) {
		if (fn)
			fn(data, err);
		return err;
	}

	if (!sdev->handler)
		goto out_fn;
	err = SCSI_DH_NOTCONN;
	if (sdev->sdev_state == SDEV_CANCEL ||
	    sdev->sdev_state == SDEV_DEL)
		goto out_fn;

	err = SCSI_DH_DEV_OFFLINED;
	if (sdev->sdev_state == SDEV_OFFLINE)
		goto out_fn;

	if (sdev->handler->activate)
		err = sdev->handler->activate(sdev, fn, data);

out_put_device:
	put_device(&sdev->sdev_gendev);
	return err;

out_fn:
	if (fn)
		fn(data, err);
	goto out_put_device;
}
EXPORT_SYMBOL_GPL(scsi_dh_activate);

/*
 * scsi_dh_set_params - set the parameters for the device as per the
 *      string specified in params.
 * @q - Request queue that is associated with the scsi_device for
 *      which the parameters to be set.
 * @params - parameters in the following format
 *      "no_of_params\0param1\0param2\0param3\0...\0"
 *      for example, string for 2 parameters with value 10 and 21
 *      is specified as "2\010\021\0".
 */
int scsi_dh_set_params(struct request_queue *q, const char *params)
{
	struct scsi_device *sdev;
	int err = -SCSI_DH_NOSYS;

	sdev = scsi_device_from_queue(q);
	if (!sdev)
		return err;

	if (sdev->handler &&
	    sdev->handler->set_params)
		err = sdev->handler->set_params(sdev, params);
	put_device(&sdev->sdev_gendev);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_dh_set_params);

/*
 * scsi_dh_attach - Attach device handler
 * @q - Request queue that is associated with the scsi_device
 *      the handler should be attached to
 * @name - name of the handler to attach
 */
int scsi_dh_attach(struct request_queue *q, const char *name)
{
	struct scsi_device *sdev;
	struct scsi_device_handler *scsi_dh;
	int err = 0;

	sdev = scsi_device_from_queue(q);
	if (!sdev)
		return -ENODEV;

	scsi_dh = scsi_dh_lookup(name);
	if (!scsi_dh) {
		err = -EINVAL;
		goto out_put_device;
	}

	if (sdev->handler) {
		if (sdev->handler != scsi_dh)
			err = -EBUSY;
		goto out_put_device;
	}

	err = scsi_dh_handler_attach(sdev, scsi_dh);

out_put_device:
	put_device(&sdev->sdev_gendev);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_dh_attach);

/*
 * scsi_dh_attached_handler_name - Get attached device handler's name
 * @q - Request queue that is associated with the scsi_device
 *      that may have a device handler attached
 * @gfp - the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns name of attached handler, NULL if no handler is attached.
 * Caller must take care to free the returned string.
 */
const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
{
	struct scsi_device *sdev;
	const char *handler_name = NULL;

	sdev = scsi_device_from_queue(q);
	if (!sdev)
		return NULL;

	if (sdev->handler)
		handler_name = kstrdup(sdev->handler->name, gfp);
	put_device(&sdev->sdev_gendev);
	return handler_name;
}
EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
linux-master
drivers/scsi/scsi_dh.c
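scsi_dh_find_driver() in the scsi_dh.c entry above selects a handler by prefix-matching the device's vendor and model strings against a built-in table (devices that report TPGS get "alua" unconditionally). The following is a self-contained user-space sketch of just that prefix-matching step, with a hypothetical two-entry table for illustration; it is not the kernel implementation.

/* Sketch of the blacklist prefix match used by scsi_dh_find_driver():
 * an entry matches when both the vendor and model of the device start
 * with the strings stored in the table. */
#include <stdio.h>
#include <string.h>

struct dh_blist {
	const char *vendor;
	const char *model;
	const char *driver;
};

static const struct dh_blist blist[] = {
	{ "DGC",    "RAID",      "emc"  },
	{ "NETAPP", "INF-01-00", "rdac" },
	{ NULL, NULL, NULL },
};

static const char *find_driver(const char *vendor, const char *model)
{
	const struct dh_blist *b;

	for (b = blist; b->vendor; b++) {
		if (!strncmp(vendor, b->vendor, strlen(b->vendor)) &&
		    !strncmp(model, b->model, strlen(b->model)))
			return b->driver;
	}
	return NULL;
}

int main(void)
{
	const char *drv = find_driver("NETAPP", "INF-01-00 Array");

	printf("handler: %s\n", drv ? drv : "(none)");
	return 0;
}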
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Christoph Hellwig.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysctl.h>

#include "scsi_logging.h"
#include "scsi_priv.h"

static struct ctl_table scsi_table[] = {
	{ .procname	= "logging_level",
	  .data		= &scsi_logging_level,
	  .maxlen	= sizeof(scsi_logging_level),
	  .mode		= 0644,
	  .proc_handler	= proc_dointvec },
	{ }
};

static struct ctl_table_header *scsi_table_header;

int __init scsi_init_sysctl(void)
{
	scsi_table_header = register_sysctl("dev/scsi", scsi_table);
	if (!scsi_table_header)
		return -ENOMEM;
	return 0;
}

void scsi_exit_sysctl(void)
{
	unregister_sysctl_table(scsi_table_header);
}
linux-master
drivers/scsi/scsi_sysctl.c
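scsi_sysctl.c above registers a single integer sysctl under "dev/scsi", which the kernel exposes as /proc/sys/dev/scsi/logging_level. Below is a small user-space reader for that tunable, assuming the procfs path is present on the running system (it requires SCSI logging support); this is an illustrative sketch, not part of the driver.

/* Read the SCSI logging level exported by the sysctl table above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/dev/scsi/logging_level", "r");
	int level;

	if (!f) {
		perror("open /proc/sys/dev/scsi/logging_level");
		return 1;
	}
	if (fscanf(f, "%d", &level) == 1)
		printf("scsi logging_level = %d\n", level);
	fclose(f);
	return 0;
}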
/* * Disk Array driver for HP Smart Array SAS controllers * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries * Copyright 2016 Microsemi Corporation * Copyright 2014-2015 PMC-Sierra, Inc. * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more details. * * Questions/Comments/Bugfixes to [email protected] * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/blktrace_api.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/completion.h> #include <linux/moduleparam.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_transport_sas.h> #include <scsi/scsi_dbg.h> #include <linux/cciss_ioctl.h> #include <linux/string.h> #include <linux/bitmap.h> #include <linux/atomic.h> #include <linux/jiffies.h> #include <linux/percpu-defs.h> #include <linux/percpu.h> #include <asm/unaligned.h> #include <asm/div64.h> #include "hpsa_cmd.h" #include "hpsa.h" /* * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' * with an optional trailing '-' followed by a byte value (0-255). 
*/ #define HPSA_DRIVER_VERSION "3.4.20-200" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" /* How long to wait for CISS doorbell communication */ #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ #define MAX_IOCTL_CONFIG_WAIT 1000 /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 /* How long to wait before giving up on a command */ #define HPSA_EH_PTRAID_TIMEOUT (240 * HZ) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ HPSA_DRIVER_VERSION); MODULE_VERSION(HPSA_DRIVER_VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("cciss"); static int hpsa_simple_mode; module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_simple_mode, "Use 'simple mode' rather than 'performant mode'"); /* define the PCI info for the cards we can control */ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 
0x103C, 0x21CB}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581}, {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582}, {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583}, {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584}, {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); /* board_id = Subsystem Device ID & Vendor ID * product = Marketing Name for the board * access = Address of the struct of function pointers */ static struct board_type products[] = { {0x40700E11, "Smart Array 5300", &SA5A_access}, {0x40800E11, "Smart Array 5i", &SA5B_access}, {0x40820E11, "Smart Array 532", &SA5B_access}, {0x40830E11, "Smart Array 5312", &SA5B_access}, {0x409A0E11, "Smart Array 641", &SA5A_access}, {0x409B0E11, "Smart Array 642", &SA5A_access}, {0x409C0E11, "Smart Array 6400", &SA5A_access}, {0x409D0E11, "Smart Array 6400 EM", &SA5A_access}, {0x40910E11, "Smart Array 6i", &SA5A_access}, {0x3225103C, "Smart Array P600", &SA5A_access}, {0x3223103C, "Smart Array P800", &SA5A_access}, {0x3234103C, "Smart Array P400", &SA5A_access}, {0x3235103C, "Smart Array P400i", &SA5A_access}, {0x3211103C, "Smart Array E200i", &SA5A_access}, {0x3212103C, "Smart Array E200", &SA5A_access}, {0x3213103C, "Smart Array E200i", &SA5A_access}, {0x3214103C, "Smart Array E200i", &SA5A_access}, {0x3215103C, "Smart Array E200i", &SA5A_access}, {0x3237103C, "Smart Array E500", &SA5A_access}, {0x323D103C, "Smart Array P700m", &SA5A_access}, {0x3241103C, "Smart Array P212", &SA5_access}, {0x3243103C, "Smart Array P410", &SA5_access}, {0x3245103C, "Smart Array P410i", &SA5_access}, {0x3247103C, "Smart Array P411", &SA5_access}, {0x3249103C, "Smart Array P812", &SA5_access}, {0x324A103C, "Smart Array P712m", &SA5_access}, {0x324B103C, "Smart Array P711m", &SA5_access}, {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */ {0x3350103C, "Smart Array P222", &SA5_access}, {0x3351103C, "Smart Array P420", &SA5_access}, {0x3352103C, "Smart Array P421", &SA5_access}, {0x3353103C, "Smart Array P822", &SA5_access}, {0x3354103C, "Smart Array P420i", &SA5_access}, {0x3355103C, "Smart Array P220i", &SA5_access}, {0x3356103C, "Smart Array P721m", &SA5_access}, {0x1920103C, "Smart Array P430i", &SA5_access}, {0x1921103C, "Smart Array P830i", &SA5_access}, {0x1922103C, "Smart Array P430", &SA5_access}, {0x1923103C, "Smart Array P431", &SA5_access}, {0x1924103C, "Smart Array P830", &SA5_access}, {0x1925103C, "Smart Array P831", &SA5_access}, {0x1926103C, "Smart Array P731m", &SA5_access}, {0x1928103C, "Smart Array P230i", &SA5_access}, {0x1929103C, "Smart Array P530", &SA5_access}, {0x21BD103C, "Smart Array P244br", &SA5_access}, {0x21BE103C, "Smart Array P741m", &SA5_access}, {0x21BF103C, "Smart HBA H240ar", &SA5_access}, {0x21C0103C, "Smart Array P440ar", &SA5_access}, {0x21C1103C, "Smart Array P840ar", &SA5_access}, 
{0x21C2103C, "Smart Array P440", &SA5_access}, {0x21C3103C, "Smart Array P441", &SA5_access}, {0x21C4103C, "Smart Array", &SA5_access}, {0x21C5103C, "Smart Array P841", &SA5_access}, {0x21C6103C, "Smart HBA H244br", &SA5_access}, {0x21C7103C, "Smart HBA H240", &SA5_access}, {0x21C8103C, "Smart HBA H241", &SA5_access}, {0x21C9103C, "Smart Array", &SA5_access}, {0x21CA103C, "Smart Array P246br", &SA5_access}, {0x21CB103C, "Smart Array P840", &SA5_access}, {0x21CC103C, "Smart Array", &SA5_access}, {0x21CD103C, "Smart Array", &SA5_access}, {0x21CE103C, "Smart HBA", &SA5_access}, {0x05809005, "SmartHBA-SA", &SA5_access}, {0x05819005, "SmartHBA-SA 8i", &SA5_access}, {0x05829005, "SmartHBA-SA 8i8e", &SA5_access}, {0x05839005, "SmartHBA-SA 8e", &SA5_access}, {0x05849005, "SmartHBA-SA 16i", &SA5_access}, {0x05859005, "SmartHBA-SA 4i4e", &SA5_access}, {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, {0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; static struct scsi_transport_template *hpsa_sas_transport_template; static int hpsa_add_sas_host(struct ctlr_info *h); static void hpsa_delete_sas_host(struct ctlr_info *h); static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, struct hpsa_scsi_dev_t *device); static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device); static struct hpsa_scsi_dev_t *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, struct sas_rphy *rphy); #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) static const struct scsi_cmnd hpsa_cmd_busy; #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) static const struct scsi_cmnd hpsa_cmd_idle; static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); static int hpsa_passthru_ioctl(struct ctlr_info *h, IOCTL_Command_struct *iocommand); static int hpsa_big_passthru_ioctl(struct ctlr_info *h, BIG_IOCTL_Command_struct *ioc); #ifdef CONFIG_COMPAT static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); #endif static void cmd_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_alloc(struct ctlr_info *h); static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, struct scsi_cmnd *scmd); static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type); static void hpsa_free_cmd_pool(struct ctlr_info *h); #define VPD_PAGE (1 << 8) #define HPSA_SIMPLE_ERROR_BITS 0x03 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static void hpsa_scan_start(struct Scsi_Host *); static int hpsa_scan_finished(struct Scsi_Host *sh, unsigned long elapsed_time); static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); static int hpsa_slave_configure(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); static void hpsa_update_scsi_devices(struct 
ctlr_info *h); static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c); static void check_ioctl_unit_attention(struct ctlr_info *h, struct CommandList *c); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, int min_blocks, u32 *bucket_map); static void hpsa_free_performant_mode(struct ctlr_info *h); static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); static inline u32 next_command(struct ctlr_info *h, u8 q); static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset); static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, bool *legacy_board); static int wait_for_device_to_become_ready(struct ctlr_info *h, unsigned char lunaddr[], int reply_queue); static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready); static inline void finish_cmd(struct CommandList *c); static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); #define BOARD_NOT_READY 0 #define BOARD_READY 1 static void hpsa_drain_accel_commands(struct ctlr_info *h); static void hpsa_flush_cache(struct ctlr_info *h); static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); static void hpsa_command_resubmit_worker(struct work_struct *work); static u32 lockup_detected(struct ctlr_info *h); static int detect_controller_lockup(struct ctlr_info *h); static void hpsa_disable_rld_caching(struct ctlr_info *h); static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, struct ReportExtendedLUNdata *buf, int bufsize); static bool hpsa_vpd_page_supported(struct ctlr_info *h, unsigned char scsi3addr[], u8 page); static int hpsa_luns_changed(struct ctlr_info *h); static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, struct hpsa_scsi_dev_t *dev, unsigned char *scsi3addr); static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { unsigned long *priv = shost_priv(sdev->host); return (struct ctlr_info *) *priv; } static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) { unsigned long *priv = shost_priv(sh); return (struct ctlr_info *) *priv; } static inline bool hpsa_is_cmd_idle(struct CommandList *c) { return c->scsi_cmd == SCSI_CMD_IDLE; } /* extract sense key, asc, and ascq from sense data. -1 means invalid. 
*/ static void decode_sense_data(const u8 *sense_data, int sense_data_len, u8 *sense_key, u8 *asc, u8 *ascq) { struct scsi_sense_hdr sshdr; bool rc; *sense_key = -1; *asc = -1; *ascq = -1; if (sense_data_len < 1) return; rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr); if (rc) { *sense_key = sshdr.sense_key; *asc = sshdr.asc; *ascq = sshdr.ascq; } } static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c) { u8 sense_key, asc, ascq; int sense_len; if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) sense_len = sizeof(c->err_info->SenseInfo); else sense_len = c->err_info->SenseLen; decode_sense_data(c->err_info->SenseInfo, sense_len, &sense_key, &asc, &ascq); if (sense_key != UNIT_ATTENTION || asc == 0xff) return 0; switch (asc) { case STATE_CHANGED: dev_warn(&h->pdev->dev, "%s: a state change detected, command retried\n", h->devname); break; case LUN_FAILED: dev_warn(&h->pdev->dev, "%s: LUN failure detected\n", h->devname); break; case REPORT_LUNS_CHANGED: dev_warn(&h->pdev->dev, "%s: report LUN data changed\n", h->devname); /* * Note: this REPORT_LUNS_CHANGED condition only occurs on the external * target (array) devices. */ break; case POWER_OR_RESET: dev_warn(&h->pdev->dev, "%s: a power on or device reset detected\n", h->devname); break; case UNIT_ATTENTION_CLEARED: dev_warn(&h->pdev->dev, "%s: unit attention cleared by another initiator\n", h->devname); break; default: dev_warn(&h->pdev->dev, "%s: unknown unit attention detected\n", h->devname); break; } return 1; } static int check_for_busy(struct ctlr_info *h, struct CommandList *c) { if (c->err_info->CommandStatus != CMD_TARGET_STATUS || (c->err_info->ScsiStatus != SAM_STAT_BUSY && c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) return 0; dev_warn(&h->pdev->dev, HPSA "device busy"); return 1; } static u32 lockup_detected(struct ctlr_info *h); static ssize_t host_show_lockup_detected(struct device *dev, struct device_attribute *attr, char *buf) { int ld; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); ld = lockup_detected(h); return sprintf(buf, "ld=%d\n", ld); } static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int status, len; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); char tmpbuf[10]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; strncpy(tmpbuf, buf, len); tmpbuf[len] = '\0'; if (sscanf(tmpbuf, "%d", &status) != 1) return -EINVAL; h = shost_to_hba(shost); h->acciopath_status = !!status; dev_warn(&h->pdev->dev, "hpsa: HP SSD Smart Path %s via sysfs update.\n", h->acciopath_status ? "enabled" : "disabled"); return count; } static ssize_t host_store_raid_offload_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int debug_level, len; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); char tmpbuf[10]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; len = count > sizeof(tmpbuf) - 1 ? 
sizeof(tmpbuf) - 1 : count; strncpy(tmpbuf, buf, len); tmpbuf[len] = '\0'; if (sscanf(tmpbuf, "%d", &debug_level) != 1) return -EINVAL; if (debug_level < 0) debug_level = 0; h = shost_to_hba(shost); h->raid_offload_debug = debug_level; dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", h->raid_offload_debug); return count; } static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); hpsa_scan_start(h->scsi_host); return count; } static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) { device->offload_enabled = 0; device->offload_to_be_enabled = 0; } static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); unsigned char *fwrev; h = shost_to_hba(shost); if (!h->hba_inquiry_data) return 0; fwrev = &h->hba_inquiry_data[32]; return snprintf(buf, 20, "%c%c%c%c\n", fwrev[0], fwrev[1], fwrev[2], fwrev[3]); } static ssize_t host_show_commands_outstanding(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ctlr_info *h = shost_to_hba(shost); return snprintf(buf, 20, "%d\n", atomic_read(&h->commands_outstanding)); } static ssize_t host_show_transport_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); return snprintf(buf, 20, "%s\n", h->transMethod & CFGTBL_Trans_Performant ? "performant" : "simple"); } static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); return snprintf(buf, 30, "HP SSD Smart Path %s\n", (h->acciopath_status == 1) ? "enabled" : "disabled"); } /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ 0x324b103C, /* Smart Array P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ 0x3211103C, /* Smart Array E200i */ 0x3212103C, /* Smart Array E200 */ 0x3213103C, /* Smart Array E200i */ 0x3214103C, /* Smart Array E200i */ 0x3215103C, /* Smart Array E200i */ 0x3237103C, /* Smart Array E500 */ 0x323D103C, /* Smart Array P700m */ 0x40800E11, /* Smart Array 5i */ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ 0x40700E11, /* Smart Array 5300 */ 0x40820E11, /* Smart Array 532 */ 0x40830E11, /* Smart Array 5312 */ 0x409A0E11, /* Smart Array 641 */ 0x409B0E11, /* Smart Array 642 */ 0x40910E11, /* Smart Array 6i */ }; /* List of controllers which cannot even be soft reset */ static u32 soft_unresettable_controller[] = { 0x40800E11, /* Smart Array 5i */ 0x40700E11, /* Smart Array 5300 */ 0x40820E11, /* Smart Array 532 */ 0x40830E11, /* Smart Array 5312 */ 0x409A0E11, /* Smart Array 641 */ 0x409B0E11, /* Smart Array 642 */ 0x40910E11, /* Smart Array 6i */ /* Exclude 640x boards. These are two pci devices in one slot * which share a battery backed cache module. One controls the * cache, the other accesses the cache through the one that controls * it. If we reset the one controlling the cache, the other will * likely not be happy. Just forbid resetting this conjoined mess. 
* The 640x isn't really supported by hpsa anyway. */ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ }; static int board_id_in_array(u32 a[], int nelems, u32 board_id) { int i; for (i = 0; i < nelems; i++) if (a[i] == board_id) return 1; return 0; } static int ctlr_is_hard_resettable(u32 board_id) { return !board_id_in_array(unresettable_controller, ARRAY_SIZE(unresettable_controller), board_id); } static int ctlr_is_soft_resettable(u32 board_id) { return !board_id_in_array(soft_unresettable_controller, ARRAY_SIZE(soft_unresettable_controller), board_id); } static int ctlr_is_resettable(u32 board_id) { return ctlr_is_hard_resettable(board_id) || ctlr_is_soft_resettable(board_id); } static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); } static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) { return (scsi3addr[3] & 0xC0) == 0x40; } static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6", "1(+0)ADM", "UNKNOWN", "PHYS DRV" }; #define HPSA_RAID_0 0 #define HPSA_RAID_4 1 #define HPSA_RAID_1 2 /* also used for RAID 10 */ #define HPSA_RAID_5 3 /* also used for RAID 50 */ #define HPSA_RAID_51 4 #define HPSA_RAID_6 5 /* also used for RAID 60 */ #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2) #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1) static inline bool is_logical_device(struct hpsa_scsi_dev_t *device) { return !device->physical_device; } static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t l = 0; unsigned char rlevel; struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->lock, flags); hdev = sdev->hostdata; if (!hdev) { spin_unlock_irqrestore(&h->lock, flags); return -ENODEV; } /* Is this even a logical drive? 
*/ if (!is_logical_device(hdev)) { spin_unlock_irqrestore(&h->lock, flags); l = snprintf(buf, PAGE_SIZE, "N/A\n"); return l; } rlevel = hdev->raid_level; spin_unlock_irqrestore(&h->lock, flags); if (rlevel > RAID_UNKNOWN) rlevel = RAID_UNKNOWN; l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); return l; } static ssize_t lunid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; unsigned char lunid[8]; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->lock, flags); hdev = sdev->hostdata; if (!hdev) { spin_unlock_irqrestore(&h->lock, flags); return -ENODEV; } memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); spin_unlock_irqrestore(&h->lock, flags); return snprintf(buf, 20, "0x%8phN\n", lunid); } static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; unsigned char sn[16]; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->lock, flags); hdev = sdev->hostdata; if (!hdev) { spin_unlock_irqrestore(&h->lock, flags); return -ENODEV; } memcpy(sn, hdev->device_id, sizeof(sn)); spin_unlock_irqrestore(&h->lock, flags); return snprintf(buf, 16 * 2 + 2, "%02X%02X%02X%02X%02X%02X%02X%02X" "%02X%02X%02X%02X%02X%02X%02X%02X\n", sn[0], sn[1], sn[2], sn[3], sn[4], sn[5], sn[6], sn[7], sn[8], sn[9], sn[10], sn[11], sn[12], sn[13], sn[14], sn[15]); } static ssize_t sas_address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; u64 sas_address; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->lock, flags); hdev = sdev->hostdata; if (!hdev || is_logical_device(hdev) || !hdev->expose_device) { spin_unlock_irqrestore(&h->lock, flags); return -ENODEV; } sas_address = hdev->sas_address; spin_unlock_irqrestore(&h->lock, flags); return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address); } static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; int offload_enabled; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->lock, flags); hdev = sdev->hostdata; if (!hdev) { spin_unlock_irqrestore(&h->lock, flags); return -ENODEV; } offload_enabled = hdev->offload_enabled; spin_unlock_irqrestore(&h->lock, flags); if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) return snprintf(buf, 20, "%d\n", offload_enabled); else return snprintf(buf, 40, "%s\n", "Not applicable for a controller"); } #define MAX_PATHS 8 static ssize_t path_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; int i; int output_len = 0; u8 box; u8 bay; u8 path_map_index = 0; char *active; unsigned char phys_connector[2]; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->devlock, flags); hdev = sdev->hostdata; if (!hdev) { spin_unlock_irqrestore(&h->devlock, flags); return -ENODEV; } bay = hdev->bay; for (i = 0; i < MAX_PATHS; i++) { path_map_index = 1<<i; if (i == hdev->active_path_index) active = "Active"; else if (hdev->path_map & path_map_index) active = "Inactive"; else continue; output_len += 
scnprintf(buf + output_len, PAGE_SIZE - output_len, "[%d:%d:%d:%d] %20.20s ", h->scsi_host->host_no, hdev->bus, hdev->target, hdev->lun, scsi_device_type(hdev->devtype)); if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) { output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "%s\n", active); continue; } box = hdev->box[i]; memcpy(&phys_connector, &hdev->phys_connector[i], sizeof(phys_connector)); if (phys_connector[0] < '0') phys_connector[0] = '0'; if (phys_connector[1] < '0') phys_connector[1] = '0'; output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "PORT: %.2s ", phys_connector); if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) && hdev->expose_device) { if (box == 0 || box == 0xFF) { output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "BAY: %hhu %s\n", bay, active); } else { output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "BOX: %hhu BAY: %hhu %s\n", box, bay, active); } } else if (box != 0 && box != 0xFF) { output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "BOX: %hhu %s\n", box, active); } else output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "%s\n", active); } spin_unlock_irqrestore(&h->devlock, flags); return output_len; } static ssize_t host_show_ctlr_num(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); return snprintf(buf, 20, "%d\n", h->ctlr); } static ssize_t host_show_legacy_board(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0); } static DEVICE_ATTR_RO(raid_level); static DEVICE_ATTR_RO(lunid); static DEVICE_ATTR_RO(unique_id); static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); static DEVICE_ATTR_RO(sas_address); static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, host_show_hp_ssd_smart_path_enabled, NULL); static DEVICE_ATTR_RO(path_info); static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, host_show_hp_ssd_smart_path_status, host_store_hp_ssd_smart_path_status); static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, host_store_raid_offload_debug); static DEVICE_ATTR(firmware_revision, S_IRUGO, host_show_firmware_revision, NULL); static DEVICE_ATTR(commands_outstanding, S_IRUGO, host_show_commands_outstanding, NULL); static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL); static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); static DEVICE_ATTR(lockup_detected, S_IRUGO, host_show_lockup_detected, NULL); static DEVICE_ATTR(ctlr_num, S_IRUGO, host_show_ctlr_num, NULL); static DEVICE_ATTR(legacy_board, S_IRUGO, host_show_legacy_board, NULL); static struct attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level.attr, &dev_attr_lunid.attr, &dev_attr_unique_id.attr, &dev_attr_hp_ssd_smart_path_enabled.attr, &dev_attr_path_info.attr, &dev_attr_sas_address.attr, NULL, }; ATTRIBUTE_GROUPS(hpsa_sdev); static struct attribute *hpsa_shost_attrs[] = { &dev_attr_rescan.attr, &dev_attr_firmware_revision.attr, &dev_attr_commands_outstanding.attr, &dev_attr_transport_mode.attr, &dev_attr_resettable.attr, &dev_attr_hp_ssd_smart_path_status.attr, &dev_attr_raid_offload_debug.attr, &dev_attr_lockup_detected.attr, &dev_attr_ctlr_num.attr, &dev_attr_legacy_board.attr, NULL, }; ATTRIBUTE_GROUPS(hpsa_shost); #define HPSA_NRESERVED_CMDS 
(HPSA_CMDS_RESERVED_FOR_DRIVER +\ HPSA_MAX_CONCURRENT_PASSTHRUS) static const struct scsi_host_template hpsa_driver_template = { .module = THIS_MODULE, .name = HPSA, .proc_name = HPSA, .queuecommand = hpsa_scsi_queue_command, .scan_start = hpsa_scan_start, .scan_finished = hpsa_scan_finished, .change_queue_depth = hpsa_change_queue_depth, .this_id = -1, .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, .slave_alloc = hpsa_slave_alloc, .slave_configure = hpsa_slave_configure, .slave_destroy = hpsa_slave_destroy, #ifdef CONFIG_COMPAT .compat_ioctl = hpsa_compat_ioctl, #endif .sdev_groups = hpsa_sdev_groups, .shost_groups = hpsa_shost_groups, .max_sectors = 2048, .no_write_same = 1, }; static inline u32 next_command(struct ctlr_info *h, u8 q) { u32 a; struct reply_queue_buffer *rq = &h->reply_queue[q]; if (h->transMethod & CFGTBL_Trans_io_accel1) return h->access.command_completed(h, q); if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h, q); if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { a = rq->head[rq->current_entry]; rq->current_entry++; atomic_dec(&h->commands_outstanding); } else { a = FIFO_EMPTY; } /* Check for wraparound */ if (rq->current_entry == h->max_commands) { rq->current_entry = 0; rq->wraparound ^= 1; } return a; } /* * There are some special bits in the bus address of the * command that we have to set for the controller to know * how to process the command: * * Normal performant mode: * bit 0: 1 means performant mode, 0 means simple mode. * bits 1-3 = block fetch table entry * bits 4-6 = command type (== 0) * * ioaccel1 mode: * bit 0 = "performant mode" bit. * bits 1-3 = block fetch table entry * bits 4-6 = command type (== 110) * (command type is needed because ioaccel1 mode * commands are submitted through the same register as normal * mode commands, so this is how the controller knows whether * the command is normal mode or ioaccel1 mode.) * * ioaccel2 mode: * bit 0 = "performant mode" bit. * bits 1-4 = block fetch table entry (note extra bit) * bits 4-6 = not needed, because ioaccel2 mode has * a separate special register for submitting commands. */ /* * set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ #define DEFAULT_REPLY_QUEUE (-1) static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, int reply_queue) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) { c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); if (unlikely(!h->msix_vectors)) return; c->Header.ReplyQueue = reply_queue; } } static void set_ioaccel1_performant_mode(struct ctlr_info *h, struct CommandList *c, int reply_queue) { struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; /* * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ cp->ReplyQueue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit (bit 0) * - pull count (bits 1-3) * - command type (bits 4-6) */ c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | IOACCEL1_BUSADDR_CMDTYPE; } static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, struct CommandList *c, int reply_queue) { struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) &h->ioaccel2_cmd_pool[c->cmdindex]; /* Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. 
*/ cp->reply_queue = reply_queue; /* Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) * - command type isn't needed for ioaccel2 */ c->busaddr |= h->ioaccel2_blockFetchTable[0]; } static void set_ioaccel2_performant_mode(struct ctlr_info *h, struct CommandList *c, int reply_queue) { struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; /* * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ cp->reply_queue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) * - command type isn't needed for ioaccel2 */ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); } static int is_firmware_flash_cmd(u8 *cdb) { return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; } /* * During firmware flash, the heartbeat register may not update as frequently * as it should. So we dial down lockup detection during firmware flash. and * dial it back up when firmware flash completes. */ #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ) static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, struct CommandList *c) { if (!is_firmware_flash_cmd(c->Request.CDB)) return; atomic_inc(&h->firmware_flash_in_progress); h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; } static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, struct CommandList *c) { if (is_firmware_flash_cmd(c->Request.CDB) && atomic_dec_and_test(&h->firmware_flash_in_progress)) h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; } static void __enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c, int reply_queue) { dial_down_lockup_detection_during_fw_flash(h, c); atomic_inc(&h->commands_outstanding); /* * Check to see if the command is being retried. 
*/ if (c->device && !c->retry_pending) atomic_inc(&c->device->commands_outstanding); reply_queue = h->reply_map[raw_smp_processor_id()]; switch (c->cmd_type) { case CMD_IOACCEL1: set_ioaccel1_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); break; case CMD_IOACCEL2: set_ioaccel2_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); break; case IOACCEL2_TMF: set_ioaccel2_tmf_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); break; default: set_performant_mode(h, c, reply_queue); h->access.submit_command(h, c); } } static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) { __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); } static inline int is_hba_lunid(unsigned char scsi3addr[]) { return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; } static inline int is_scsi_rev_5(struct ctlr_info *h) { if (!h->hba_inquiry_data) return 0; if ((h->hba_inquiry_data[2] & 0x07) == 5) return 1; return 0; } static int hpsa_find_target_lun(struct ctlr_info *h, unsigned char scsi3addr[], int bus, int *target, int *lun) { /* finds an unused bus, target, lun for a new physical device * assumes h->devlock is held */ int i, found = 0; DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); bitmap_zero(lun_taken, HPSA_MAX_DEVICES); for (i = 0; i < h->ndevices; i++) { if (h->dev[i]->bus == bus && h->dev[i]->target != -1) __set_bit(h->dev[i]->target, lun_taken); } i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES); if (i < HPSA_MAX_DEVICES) { /* *bus = 1; */ *target = i; *lun = 0; found = 1; } return !found; } static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, char *description) { #define LABEL_SIZE 25 char label[LABEL_SIZE]; if (h == NULL || h->pdev == NULL || h->scsi_host == NULL) return; switch (dev->devtype) { case TYPE_RAID: snprintf(label, LABEL_SIZE, "controller"); break; case TYPE_ENCLOSURE: snprintf(label, LABEL_SIZE, "enclosure"); break; case TYPE_DISK: case TYPE_ZBC: if (dev->external) snprintf(label, LABEL_SIZE, "external"); else if (!is_logical_dev_addr_mode(dev->scsi3addr)) snprintf(label, LABEL_SIZE, "%s", raid_label[PHYSICAL_DRIVE]); else snprintf(label, LABEL_SIZE, "RAID-%s", dev->raid_level > RAID_UNKNOWN ? "?" : raid_label[dev->raid_level]); break; case TYPE_ROM: snprintf(label, LABEL_SIZE, "rom"); break; case TYPE_TAPE: snprintf(label, LABEL_SIZE, "tape"); break; case TYPE_MEDIUM_CHANGER: snprintf(label, LABEL_SIZE, "changer"); break; default: snprintf(label, LABEL_SIZE, "UNKNOWN"); break; } dev_printk(level, &h->pdev->dev, "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n", h->scsi_host->host_no, dev->bus, dev->target, dev->lun, description, scsi_device_type(dev->devtype), dev->vendor, dev->model, label, dev->offload_config ? '+' : '-', dev->offload_to_be_enabled ? '+' : '-', dev->expose_device); } /* Add an entry into h->dev[] array. */ static int hpsa_scsi_add_entry(struct ctlr_info *h, struct hpsa_scsi_dev_t *device, struct hpsa_scsi_dev_t *added[], int *nadded) { /* assumes h->devlock is held */ int n = h->ndevices; int i; unsigned char addr1[8], addr2[8]; struct hpsa_scsi_dev_t *sd; if (n >= HPSA_MAX_DEVICES) { dev_err(&h->pdev->dev, "too many devices, some will be " "inaccessible.\n"); return -1; } /* physical devices do not have lun or target assigned until now. */ if (device->lun != -1) /* Logical device, lun is already assigned. 
*/ goto lun_assigned; /* If this device a non-zero lun of a multi-lun device * byte 4 of the 8-byte LUN addr will contain the logical * unit no, zero otherwise. */ if (device->scsi3addr[4] == 0) { /* This is not a non-zero lun of a multi-lun device */ if (hpsa_find_target_lun(h, device->scsi3addr, device->bus, &device->target, &device->lun) != 0) return -1; goto lun_assigned; } /* This is a non-zero lun of a multi-lun device. * Search through our list and find the device which * has the same 8 byte LUN address, excepting byte 4 and 5. * Assign the same bus and target for this new LUN. * Use the logical unit number from the firmware. */ memcpy(addr1, device->scsi3addr, 8); addr1[4] = 0; addr1[5] = 0; for (i = 0; i < n; i++) { sd = h->dev[i]; memcpy(addr2, sd->scsi3addr, 8); addr2[4] = 0; addr2[5] = 0; /* differ only in byte 4 and 5? */ if (memcmp(addr1, addr2, 8) == 0) { device->bus = sd->bus; device->target = sd->target; device->lun = device->scsi3addr[4]; break; } } if (device->lun == -1) { dev_warn(&h->pdev->dev, "physical device with no LUN=0," " suspect firmware bug or unsupported hardware " "configuration.\n"); return -1; } lun_assigned: h->dev[n] = device; h->ndevices++; added[*nadded] = device; (*nadded)++; hpsa_show_dev_msg(KERN_INFO, h, device, device->expose_device ? "added" : "masked"); return 0; } /* * Called during a scan operation. * * Update an entry in h->dev[] array. */ static void hpsa_scsi_update_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *new_entry) { /* assumes h->devlock is held */ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); /* Raid level changed. */ h->dev[entry]->raid_level = new_entry->raid_level; /* * ioacccel_handle may have changed for a dual domain disk */ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; /* Raid offload parameters changed. Careful about the ordering. */ if (new_entry->offload_config && new_entry->offload_to_be_enabled) { /* * if drive is newly offload_enabled, we want to copy the * raid map data first. If previously offload_enabled and * offload_config were set, raid map data had better be * the same as it was before. If raid map data has changed * then it had better be the case that * h->dev[entry]->offload_enabled is currently 0. */ h->dev[entry]->raid_map = new_entry->raid_map; h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; } if (new_entry->offload_to_be_enabled) { h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ } h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; h->dev[entry]->offload_config = new_entry->offload_config; h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; h->dev[entry]->queue_depth = new_entry->queue_depth; /* * We can turn off ioaccel offload now, but need to delay turning * ioaccel on until we can update h->dev[entry]->phys_disk[], but we * can't do that until all the devices are updated. */ h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled; /* * turn ioaccel off immediately if told to do so. */ if (!new_entry->offload_to_be_enabled) h->dev[entry]->offload_enabled = 0; hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); } /* Replace an entry from h->dev[] array. 
*/ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *new_entry, struct hpsa_scsi_dev_t *added[], int *nadded, struct hpsa_scsi_dev_t *removed[], int *nremoved) { /* assumes h->devlock is held */ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); removed[*nremoved] = h->dev[entry]; (*nremoved)++; /* * New physical devices won't have target/lun assigned yet * so we need to preserve the values in the slot we are replacing. */ if (new_entry->target == -1) { new_entry->target = h->dev[entry]->target; new_entry->lun = h->dev[entry]->lun; } h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); } /* Remove an entry from h->dev[] array. */ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *removed[], int *nremoved) { /* assumes h->devlock is held */ int i; struct hpsa_scsi_dev_t *sd; BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); sd = h->dev[entry]; removed[*nremoved] = h->dev[entry]; (*nremoved)++; for (i = entry; i < h->ndevices-1; i++) h->dev[i] = h->dev[i+1]; h->ndevices--; hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); } #define SCSI3ADDR_EQ(a, b) ( \ (a)[7] == (b)[7] && \ (a)[6] == (b)[6] && \ (a)[5] == (b)[5] && \ (a)[4] == (b)[4] && \ (a)[3] == (b)[3] && \ (a)[2] == (b)[2] && \ (a)[1] == (b)[1] && \ (a)[0] == (b)[0]) static void fixup_botched_add(struct ctlr_info *h, struct hpsa_scsi_dev_t *added) { /* called when scsi_add_device fails in order to re-adjust * h->dev[] to match the mid layer's view. */ unsigned long flags; int i, j; spin_lock_irqsave(&h->lock, flags); for (i = 0; i < h->ndevices; i++) { if (h->dev[i] == added) { for (j = i; j < h->ndevices-1; j++) h->dev[j] = h->dev[j+1]; h->ndevices--; break; } } spin_unlock_irqrestore(&h->lock, flags); kfree(added); } static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, struct hpsa_scsi_dev_t *dev2) { /* we compare everything except lun and target as these * are not yet assigned. Compare parts likely * to differ first */ if (memcmp(dev1->scsi3addr, dev2->scsi3addr, sizeof(dev1->scsi3addr)) != 0) return 0; if (memcmp(dev1->device_id, dev2->device_id, sizeof(dev1->device_id)) != 0) return 0; if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) return 0; if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) return 0; if (dev1->devtype != dev2->devtype) return 0; if (dev1->bus != dev2->bus) return 0; return 1; } static inline int device_updated(struct hpsa_scsi_dev_t *dev1, struct hpsa_scsi_dev_t *dev2) { /* Device attributes that can change, but don't mean * that the device is a different device, nor that the OS * needs to be told anything about the change. */ if (dev1->raid_level != dev2->raid_level) return 1; if (dev1->offload_config != dev2->offload_config) return 1; if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled) return 1; if (!is_logical_dev_addr_mode(dev1->scsi3addr)) if (dev1->queue_depth != dev2->queue_depth) return 1; /* * This can happen for dual domain devices. An active * path change causes the ioaccel handle to change * * for example note the handle differences between p0 and p1 * Device WWN ,WWN hash,Handle * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003 * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004 */ if (dev1->ioaccel_handle != dev2->ioaccel_handle) return 1; return 0; } /* Find needle in haystack. If exact match found, return DEVICE_SAME, * and return needle location in *index. 
If scsi3addr matches, but not * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle * location in *index. * In the case of a minor device attribute change, such as RAID level, just * return DEVICE_UPDATED, along with the updated device's location in index. * If needle not found, return DEVICE_NOT_FOUND. */ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, struct hpsa_scsi_dev_t *haystack[], int haystack_size, int *index) { int i; #define DEVICE_NOT_FOUND 0 #define DEVICE_CHANGED 1 #define DEVICE_SAME 2 #define DEVICE_UPDATED 3 if (needle == NULL) return DEVICE_NOT_FOUND; for (i = 0; i < haystack_size; i++) { if (haystack[i] == NULL) /* previously removed. */ continue; if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { *index = i; if (device_is_the_same(needle, haystack[i])) { if (device_updated(needle, haystack[i])) return DEVICE_UPDATED; return DEVICE_SAME; } else { /* Keep offline devices offline */ if (needle->volume_offline) return DEVICE_NOT_FOUND; return DEVICE_CHANGED; } } } *index = -1; return DEVICE_NOT_FOUND; } static void hpsa_monitor_offline_device(struct ctlr_info *h, unsigned char scsi3addr[]) { struct offline_device_entry *device; unsigned long flags; /* Check to see if device is already on the list */ spin_lock_irqsave(&h->offline_device_lock, flags); list_for_each_entry(device, &h->offline_device_list, offline_list) { if (memcmp(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)) == 0) { spin_unlock_irqrestore(&h->offline_device_lock, flags); return; } } spin_unlock_irqrestore(&h->offline_device_lock, flags); /* Device is not on the list, add it. */ device = kmalloc(sizeof(*device), GFP_KERNEL); if (!device) return; memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); spin_lock_irqsave(&h->offline_device_lock, flags); list_add_tail(&device->offline_list, &h->offline_device_list); spin_unlock_irqrestore(&h->offline_device_lock, flags); } /* Print a message explaining various offline volume states */ static void hpsa_show_volume_status(struct ctlr_info *h, struct hpsa_scsi_dev_t *sd) { if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); switch (sd->volume_offline) { case HPSA_LV_OK: break; case HPSA_LV_UNDERGOING_ERASE: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_NOT_AVAILABLE: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_UNDERGOING_RPI: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_PENDING_RPI: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_ENCRYPTED_NO_KEY: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); 
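		/*
		 * Like the other not-ready states reported in this switch, a
		 * volume in this state stays off the exposed device list:
		 * adjust_hpsa_scsi_table() logs the status and skips it, and
		 * hpsa_monitor_offline_device() remembers it so a later
		 * rescan can bring it online once the condition clears.
		 */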
break; case HPSA_LV_UNDERGOING_ENCRYPTION: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_PENDING_ENCRYPTION: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; case HPSA_LV_PENDING_ENCRYPTION_REKEYING: dev_info(&h->pdev->dev, "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", h->scsi_host->host_no, sd->bus, sd->target, sd->lun); break; } } /* * Figure the list of physical drive pointers for a logical drive with * raid offload configured. */ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev[], int ndevices, struct hpsa_scsi_dev_t *logical_drive) { struct raid_map_data *map = &logical_drive->raid_map; struct raid_map_disk_data *dd = &map->data[0]; int i, j; int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + le16_to_cpu(map->metadata_disks_per_row); int nraid_map_entries = le16_to_cpu(map->row_cnt) * le16_to_cpu(map->layout_map_count) * total_disks_per_row; int nphys_disk = le16_to_cpu(map->layout_map_count) * total_disks_per_row; int qdepth; if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) nraid_map_entries = RAID_MAP_MAX_ENTRIES; logical_drive->nphysical_disks = nraid_map_entries; qdepth = 0; for (i = 0; i < nraid_map_entries; i++) { logical_drive->phys_disk[i] = NULL; if (!logical_drive->offload_config) continue; for (j = 0; j < ndevices; j++) { if (dev[j] == NULL) continue; if (dev[j]->devtype != TYPE_DISK && dev[j]->devtype != TYPE_ZBC) continue; if (is_logical_device(dev[j])) continue; if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) continue; logical_drive->phys_disk[i] = dev[j]; if (i < nphys_disk) qdepth = min(h->nr_cmds, qdepth + logical_drive->phys_disk[i]->queue_depth); break; } /* * This can happen if a physical drive is removed and * the logical drive is degraded. In that case, the RAID * map data will refer to a physical disk which isn't actually * present. 
And in that case offload_enabled should already * be 0, but we'll turn it off here just in case */ if (!logical_drive->phys_disk[i]) { dev_warn(&h->pdev->dev, "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n", __func__, h->scsi_host->host_no, logical_drive->bus, logical_drive->target, logical_drive->lun); hpsa_turn_off_ioaccel_for_device(logical_drive); logical_drive->queue_depth = 8; } } if (nraid_map_entries) /* * This is correct for reads, too high for full stripe writes, * way too high for partial stripe writes */ logical_drive->queue_depth = qdepth; else { if (logical_drive->external) logical_drive->queue_depth = EXTERNAL_QD; else logical_drive->queue_depth = h->nr_cmds; } } static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev[], int ndevices) { int i; for (i = 0; i < ndevices; i++) { if (dev[i] == NULL) continue; if (dev[i]->devtype != TYPE_DISK && dev[i]->devtype != TYPE_ZBC) continue; if (!is_logical_device(dev[i])) continue; /* * If offload is currently enabled, the RAID map and * phys_disk[] assignment *better* not be changing * because we would be changing ioaccel phsy_disk[] pointers * on a ioaccel volume processing I/O requests. * * If an ioaccel volume status changed, initially because it was * re-configured and thus underwent a transformation, or * a drive failed, we would have received a state change * request and ioaccel should have been turned off. When the * transformation completes, we get another state change * request to turn ioaccel back on. In this case, we need * to update the ioaccel information. * * Thus: If it is not currently enabled, but will be after * the scan completes, make sure the ioaccel pointers * are up to date. */ if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled) hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); } } static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) { int rc = 0; if (!h->scsi_host) return 1; if (is_logical_device(device)) /* RAID */ rc = scsi_add_device(h->scsi_host, device->bus, device->target, device->lun); else /* HBA */ rc = hpsa_add_sas_device(h->sas_host, device); return rc; } static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev) { int i; int count = 0; for (i = 0; i < h->nr_cmds; i++) { struct CommandList *c = h->cmd_pool + i; int refcount = atomic_inc_return(&c->refcount); if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, dev->scsi3addr)) { unsigned long flags; spin_lock_irqsave(&h->lock, flags); /* Implied MB */ if (!hpsa_is_cmd_idle(c)) ++count; spin_unlock_irqrestore(&h->lock, flags); } cmd_free(h, c); } return count; } #define NUM_WAIT 20 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) { int cmds = 0; int waits = 0; int num_wait = NUM_WAIT; if (device->external) num_wait = HPSA_EH_PTRAID_TIMEOUT; while (1) { cmds = hpsa_find_outstanding_commands_for_dev(h, device); if (cmds == 0) break; if (++waits > num_wait) break; msleep(1000); } if (waits > num_wait) { dev_warn(&h->pdev->dev, "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n", __func__, h->scsi_host->host_no, device->bus, device->target, device->lun, cmds); } } static void hpsa_remove_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) { struct scsi_device *sdev = NULL; if (!h->scsi_host) return; /* * Allow for commands to drain */ device->removed = 1; hpsa_wait_for_outstanding_commands_for_dev(h, 
device); if (is_logical_device(device)) { /* RAID */ sdev = scsi_device_lookup(h->scsi_host, device->bus, device->target, device->lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } else { /* * We don't expect to get here. Future commands * to this device will get a selection timeout as * if the device were gone. */ hpsa_show_dev_msg(KERN_WARNING, h, device, "didn't find device for removal."); } } else { /* HBA */ hpsa_remove_sas_device(device); } } static void adjust_hpsa_scsi_table(struct ctlr_info *h, struct hpsa_scsi_dev_t *sd[], int nsds) { /* sd contains scsi3 addresses and devtypes, and inquiry * data. This function takes what's in sd to be the current * reality and updates h->dev[] to reflect that reality. */ int i, entry, device_change, changes = 0; struct hpsa_scsi_dev_t *csd; unsigned long flags; struct hpsa_scsi_dev_t **added, **removed; int nadded, nremoved; /* * A reset can cause a device status to change * re-schedule the scan to see what happened. */ spin_lock_irqsave(&h->reset_lock, flags); if (h->reset_in_progress) { h->drv_req_rescan = 1; spin_unlock_irqrestore(&h->reset_lock, flags); return; } spin_unlock_irqrestore(&h->reset_lock, flags); added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL); removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL); if (!added || !removed) { dev_warn(&h->pdev->dev, "out of memory in " "adjust_hpsa_scsi_table\n"); goto free_and_out; } spin_lock_irqsave(&h->devlock, flags); /* find any devices in h->dev[] that are not in * sd[] and remove them from h->dev[], and for any * devices which have changed, remove the old device * info and add the new device info. * If minor device attributes change, just update * the existing device structure. */ i = 0; nremoved = 0; nadded = 0; while (i < h->ndevices) { csd = h->dev[i]; device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); if (device_change == DEVICE_NOT_FOUND) { changes++; hpsa_scsi_remove_entry(h, i, removed, &nremoved); continue; /* remove ^^^, hence i not incremented */ } else if (device_change == DEVICE_CHANGED) { changes++; hpsa_scsi_replace_entry(h, i, sd[entry], added, &nadded, removed, &nremoved); /* Set it to NULL to prevent it from being freed * at the bottom of hpsa_update_scsi_devices() */ sd[entry] = NULL; } else if (device_change == DEVICE_UPDATED) { hpsa_scsi_update_entry(h, i, sd[entry]); } i++; } /* Now, make sure every device listed in sd[] is also * listed in h->dev[], adding them if they aren't found */ for (i = 0; i < nsds; i++) { if (!sd[i]) /* if already added above. */ continue; /* Don't add devices which are NOT READY, FORMAT IN PROGRESS * as the SCSI mid-layer does not handle such devices well. * It relentlessly loops sending TUR at 3Hz, then READ(10) * at 160Hz, and prevents the system from coming up. */ if (sd[i]->volume_offline) { hpsa_show_volume_status(h, sd[i]); hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); continue; } device_change = hpsa_scsi_find_entry(sd[i], h->dev, h->ndevices, &entry); if (device_change == DEVICE_NOT_FOUND) { changes++; if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) break; sd[i] = NULL; /* prevent from being freed later. */ } else if (device_change == DEVICE_CHANGED) { /* should never happen... 
*/ changes++; dev_warn(&h->pdev->dev, "device unexpectedly changed.\n"); /* but if it does happen, we just ignore that device */ } } hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); /* * Now that h->dev[]->phys_disk[] is coherent, we can enable * any logical drives that need it enabled. * * The raid map should be current by now. * * We are updating the device list used for I/O requests. */ for (i = 0; i < h->ndevices; i++) { if (h->dev[i] == NULL) continue; h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; } spin_unlock_irqrestore(&h->devlock, flags); /* Monitor devices which are in one of several NOT READY states to be * brought online later. This must be done without holding h->devlock, * so don't touch h->dev[] */ for (i = 0; i < nsds; i++) { if (!sd[i]) /* if already added above. */ continue; if (sd[i]->volume_offline) hpsa_monitor_offline_device(h, sd[i]->scsi3addr); } /* Don't notify scsi mid layer of any changes the first time through * (or if there are no changes) scsi_scan_host will do it later the * first time through. */ if (!changes) goto free_and_out; /* Notify scsi mid layer of any removed devices */ for (i = 0; i < nremoved; i++) { if (removed[i] == NULL) continue; if (removed[i]->expose_device) hpsa_remove_device(h, removed[i]); kfree(removed[i]); removed[i] = NULL; } /* Notify scsi mid layer of any added devices */ for (i = 0; i < nadded; i++) { int rc = 0; if (added[i] == NULL) continue; if (!(added[i]->expose_device)) continue; rc = hpsa_add_device(h, added[i]); if (!rc) continue; dev_warn(&h->pdev->dev, "addition failed %d, device not added.", rc); /* now we have to remove it from h->dev, * since it didn't get added to scsi mid layer */ fixup_botched_add(h, added[i]); h->drv_req_rescan = 1; } free_and_out: kfree(added); kfree(removed); } /* * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * * Assume's h->devlock is held. */ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, int bus, int target, int lun) { int i; struct hpsa_scsi_dev_t *sd; for (i = 0; i < h->ndevices; i++) { sd = h->dev[i]; if (sd->bus == bus && sd->target == target && sd->lun == lun) return sd; } return NULL; } static int hpsa_slave_alloc(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd = NULL; unsigned long flags; struct ctlr_info *h; h = sdev_to_hba(sdev); spin_lock_irqsave(&h->devlock, flags); if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { struct scsi_target *starget; struct sas_rphy *rphy; starget = scsi_target(sdev); rphy = target_to_rphy(starget); sd = hpsa_find_device_by_sas_rphy(h, rphy); if (sd) { sd->target = sdev_id(sdev); sd->lun = sdev->lun; } } if (!sd) sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), sdev_id(sdev), sdev->lun); if (sd && sd->expose_device) { atomic_set(&sd->ioaccel_cmds_out, 0); sdev->hostdata = sd; } else sdev->hostdata = NULL; spin_unlock_irqrestore(&h->devlock, flags); return 0; } /* configure scsi device based on internal per-device structure */ #define CTLR_TIMEOUT (120 * HZ) static int hpsa_slave_configure(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd; int queue_depth; sd = sdev->hostdata; sdev->no_uld_attach = !sd || !sd->expose_device; if (sd) { sd->was_removed = 0; queue_depth = sd->queue_depth != 0 ? 
sd->queue_depth : sdev->host->can_queue; if (sd->external) { queue_depth = EXTERNAL_QD; sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT; blk_queue_rq_timeout(sdev->request_queue, HPSA_EH_PTRAID_TIMEOUT); } if (is_hba_lunid(sd->scsi3addr)) { sdev->eh_timeout = CTLR_TIMEOUT; blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT); } } else { queue_depth = sdev->host->can_queue; } scsi_change_queue_depth(sdev, queue_depth); return 0; } static void hpsa_slave_destroy(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *hdev = NULL; hdev = sdev->hostdata; if (hdev) hdev->was_removed = 1; } static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) { int i; if (!h->ioaccel2_cmd_sg_list) return; for (i = 0; i < h->nr_cmds; i++) { kfree(h->ioaccel2_cmd_sg_list[i]); h->ioaccel2_cmd_sg_list[i] = NULL; } kfree(h->ioaccel2_cmd_sg_list); h->ioaccel2_cmd_sg_list = NULL; } static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) { int i; if (h->chainsize <= 0) return 0; h->ioaccel2_cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list), GFP_KERNEL); if (!h->ioaccel2_cmd_sg_list) return -ENOMEM; for (i = 0; i < h->nr_cmds; i++) { h->ioaccel2_cmd_sg_list[i] = kmalloc_array(h->maxsgentries, sizeof(*h->ioaccel2_cmd_sg_list[i]), GFP_KERNEL); if (!h->ioaccel2_cmd_sg_list[i]) goto clean; } return 0; clean: hpsa_free_ioaccel2_sg_chain_blocks(h); return -ENOMEM; } static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) { int i; if (!h->cmd_sg_list) return; for (i = 0; i < h->nr_cmds; i++) { kfree(h->cmd_sg_list[i]); h->cmd_sg_list[i] = NULL; } kfree(h->cmd_sg_list); h->cmd_sg_list = NULL; } static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) { int i; if (h->chainsize <= 0) return 0; h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list), GFP_KERNEL); if (!h->cmd_sg_list) return -ENOMEM; for (i = 0; i < h->nr_cmds; i++) { h->cmd_sg_list[i] = kmalloc_array(h->chainsize, sizeof(*h->cmd_sg_list[i]), GFP_KERNEL); if (!h->cmd_sg_list[i]) goto clean; } return 0; clean: hpsa_free_sg_chain_blocks(h); return -ENOMEM; } static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, struct io_accel2_cmd *cp, struct CommandList *c) { struct ioaccel2_sg_element *chain_block; u64 temp64; u32 chain_size; chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; chain_size = le32_to_cpu(cp->sg[0].length); temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size, DMA_TO_DEVICE); if (dma_mapping_error(&h->pdev->dev, temp64)) { /* prevent subsequent unmapping */ cp->sg->address = 0; return -1; } cp->sg->address = cpu_to_le64(temp64); return 0; } static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, struct io_accel2_cmd *cp) { struct ioaccel2_sg_element *chain_sg; u64 temp64; u32 chain_size; chain_sg = cp->sg; temp64 = le64_to_cpu(chain_sg->address); chain_size = le32_to_cpu(cp->sg[0].length); dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE); } static int hpsa_map_sg_chain_block(struct ctlr_info *h, struct CommandList *c) { struct SGDescriptor *chain_sg, *chain_block; u64 temp64; u32 chain_len; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; chain_block = h->cmd_sg_list[c->cmdindex]; chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); chain_len = sizeof(*chain_sg) * (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); chain_sg->Len = cpu_to_le32(chain_len); temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len, DMA_TO_DEVICE); if (dma_mapping_error(&h->pdev->dev, temp64)) { /* prevent subsequent unmapping */ chain_sg->Addr = cpu_to_le64(0); return -1; } 
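	/*
	 * At this point the last embedded SG descriptor (chain_sg) no longer
	 * describes data; it has become a chain pointer: Ext is HPSA_SG_CHAIN
	 * and Len covers the descriptors that spilled over into the
	 * per-command chain block (h->cmd_sg_list[c->cmdindex]), which was
	 * just DMA-mapped above.  Addr is filled in below with that mapping,
	 * and hpsa_unmap_sg_chain_block() reverses it at completion time.
	 */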
chain_sg->Addr = cpu_to_le64(temp64); return 0; } static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, struct CommandList *c) { struct SGDescriptor *chain_sg; if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) return; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr), le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE); } /* Decode the various types of errors on ioaccel2 path. * Return 1 for any error that should generate a RAID path retry. * Return 0 for errors that don't require a RAID path retry. */ static int handle_ioaccel_mode2_error(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, struct io_accel2_cmd *c2, struct hpsa_scsi_dev_t *dev) { int data_len; int retry = 0; u32 ioaccel2_resid = 0; switch (c2->error_data.serv_response) { case IOACCEL2_SERV_RESPONSE_COMPLETE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: if (cmd) cmd->result = 0; break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: cmd->result |= SAM_STAT_CHECK_CONDITION; if (c2->error_data.data_present != IOACCEL2_SENSE_DATA_PRESENT) { memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); break; } /* copy the sense data */ data_len = c2->error_data.sense_data_len; if (data_len > SCSI_SENSE_BUFFERSIZE) data_len = SCSI_SENSE_BUFFERSIZE; if (data_len > sizeof(c2->error_data.sense_data_buff)) data_len = sizeof(c2->error_data.sense_data_buff); memcpy(cmd->sense_buffer, c2->error_data.sense_data_buff, data_len); retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: retry = 1; break; default: retry = 1; break; } break; case IOACCEL2_SERV_RESPONSE_FAILURE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_IO_ERROR: case IOACCEL2_STATUS_SR_IO_ABORTED: case IOACCEL2_STATUS_SR_OVERRUN: retry = 1; break; case IOACCEL2_STATUS_SR_UNDERRUN: cmd->result = (DID_OK << 16); /* host byte */ ioaccel2_resid = get_unaligned_le32( &c2->error_data.resid_cnt[0]); scsi_set_resid(cmd, ioaccel2_resid); break; case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: case IOACCEL2_STATUS_SR_INVALID_DEVICE: case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: /* * Did an HBA disk disappear? We will eventually * get a state change event from the controller but * in the meantime, we need to tell the OS that the * HBA disk is no longer there and stop I/O * from going down. This allows the potential re-insert * of the disk to get the same device node. */ if (dev->physical_device && dev->expose_device) { cmd->result = DID_NO_CONNECT << 16; dev->removed = 1; h->drv_req_rescan = 1; dev_warn(&h->pdev->dev, "%s: device is gone!\n", __func__); } else /* * Retry by sending down the RAID path. * We will get an event from ctlr to * trigger rescan regardless. */ retry = 1; break; default: retry = 1; } break; case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: break; case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: break; case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: retry = 1; break; case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: break; default: retry = 1; break; } if (dev->in_reset) retry = 0; return retry; /* retry on raid path? */ } static void hpsa_cmd_resolve_events(struct ctlr_info *h, struct CommandList *c) { struct hpsa_scsi_dev_t *dev = c->device; /* * Reset c->scsi_cmd here so that the reset handler will know * this command has completed. 
Then, check to see if the handler is * waiting for this command, and, if so, wake it. */ c->scsi_cmd = SCSI_CMD_IDLE; mb(); /* Declare command idle before checking for pending events. */ if (dev) { atomic_dec(&dev->commands_outstanding); if (dev->in_reset && atomic_read(&dev->commands_outstanding) <= 0) wake_up_all(&h->event_sync_wait_queue); } } static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, struct CommandList *c) { hpsa_cmd_resolve_events(h, c); cmd_tagged_free(h, c); } static void hpsa_cmd_free_and_done(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd) { hpsa_cmd_resolve_and_free(h, c); if (cmd) scsi_done(cmd); } static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) { INIT_WORK(&c->work, hpsa_command_resubmit_worker); queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); } static void process_ioaccel2_completion(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev) { struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; /* check for good status */ if (likely(c2->error_data.serv_response == 0 && c2->error_data.status == 0)) { cmd->result = 0; return hpsa_cmd_free_and_done(h, c, cmd); } /* * Any RAID offload error results in retry which will use * the normal I/O path so the controller can handle whatever is * wrong. */ if (is_logical_device(dev) && c2->error_data.serv_response == IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { hpsa_turn_off_ioaccel_for_device(dev); } if (dev->in_reset) { cmd->result = DID_RESET << 16; return hpsa_cmd_free_and_done(h, c, cmd); } return hpsa_retry_cmd(h, c); } if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) return hpsa_retry_cmd(h, c); return hpsa_cmd_free_and_done(h, c, cmd); } /* Returns 0 on success, < 0 otherwise. */ static int hpsa_evaluate_tmf_status(struct ctlr_info *h, struct CommandList *cp) { u8 tmf_status = cp->err_info->ScsiStatus; switch (tmf_status) { case CISS_TMF_COMPLETE: /* * CISS_TMF_COMPLETE never happens, instead, * ei->CommandStatus == 0 for this case. 
*/ case CISS_TMF_SUCCESS: return 0; case CISS_TMF_INVALID_FRAME: case CISS_TMF_NOT_SUPPORTED: case CISS_TMF_FAILED: case CISS_TMF_WRONG_LUN: case CISS_TMF_OVERLAPPED_TAG: break; default: dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", tmf_status); break; } return -tmf_status; } static void complete_scsi_command(struct CommandList *cp) { struct scsi_cmnd *cmd; struct ctlr_info *h; struct ErrorInfo *ei; struct hpsa_scsi_dev_t *dev; struct io_accel2_cmd *c2; u8 sense_key; u8 asc; /* additional sense code */ u8 ascq; /* additional sense code qualifier */ unsigned long sense_data_size; ei = cp->err_info; cmd = cp->scsi_cmd; h = cp->h; if (!cmd->device) { cmd->result = DID_NO_CONNECT << 16; return hpsa_cmd_free_and_done(h, cp, cmd); } dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; return hpsa_cmd_free_and_done(h, cp, cmd); } c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; scsi_dma_unmap(cmd); /* undo the DMA mappings */ if ((cp->cmd_type == CMD_SCSI) && (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) hpsa_unmap_sg_chain_block(h, cp); if ((cp->cmd_type == CMD_IOACCEL2) && (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) hpsa_unmap_ioaccel2_sg_chain_block(h, c2); cmd->result = (DID_OK << 16); /* host byte */ /* SCSI command has already been cleaned up in SML */ if (dev->was_removed) { hpsa_cmd_resolve_and_free(h, cp); return; } if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { if (dev->physical_device && dev->expose_device && dev->removed) { cmd->result = DID_NO_CONNECT << 16; return hpsa_cmd_free_and_done(h, cp, cmd); } if (likely(cp->phys_disk != NULL)) atomic_dec(&cp->phys_disk->ioaccel_cmds_out); } /* * We check for lockup status here as it may be set for * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by * fail_all_oustanding_cmds() */ if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { /* DID_NO_CONNECT will prevent a retry */ cmd->result = DID_NO_CONNECT << 16; return hpsa_cmd_free_and_done(h, cp, cmd); } if (cp->cmd_type == CMD_IOACCEL2) return process_ioaccel2_completion(h, cp, cmd, dev); scsi_set_resid(cmd, ei->ResidualCnt); if (ei->CommandStatus == 0) return hpsa_cmd_free_and_done(h, cp, cmd); /* For I/O accelerator commands, copy over some fields to the normal * CISS header used below for error handling. */ if (cp->cmd_type == CMD_IOACCEL1) { struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; cp->Header.SGList = scsi_sg_count(cmd); cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); cp->Request.CDBLen = le16_to_cpu(c->io_flags) & IOACCEL1_IOFLAGS_CDBLEN_MASK; cp->Header.tag = c->tag; memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); /* Any RAID offload error results in retry which will use * the normal I/O path so the controller can handle whatever's * wrong. 
*/ if (is_logical_device(dev)) { if (ei->CommandStatus == CMD_IOACCEL_DISABLED) dev->offload_enabled = 0; return hpsa_retry_cmd(h, cp); } } /* an error has occurred */ switch (ei->CommandStatus) { case CMD_TARGET_STATUS: cmd->result |= ei->ScsiStatus; /* copy the sense data */ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) sense_data_size = SCSI_SENSE_BUFFERSIZE; else sense_data_size = sizeof(ei->SenseInfo); if (ei->SenseLen < sense_data_size) sense_data_size = ei->SenseLen; memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); if (ei->ScsiStatus) decode_sense_data(ei->SenseInfo, sense_data_size, &sense_key, &asc, &ascq); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { switch (sense_key) { case ABORTED_COMMAND: cmd->result |= DID_SOFT_ERROR << 16; break; case UNIT_ATTENTION: if (asc == 0x3F && ascq == 0x0E) h->drv_req_rescan = 1; break; case ILLEGAL_REQUEST: if (asc == 0x25 && ascq == 0x00) { dev->removed = 1; cmd->result = DID_NO_CONNECT << 16; } break; } break; } /* Problem was not a check condition * Pass it up to the upper layers... */ if (ei->ScsiStatus) { dev_warn(&h->pdev->dev, "cp %p has status 0x%x " "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " "Returning result: 0x%x\n", cp, ei->ScsiStatus, sense_key, asc, ascq, cmd->result); } else { /* scsi status is zero??? How??? */ dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " "Returning no connection.\n", cp), /* Ordinarily, this case should never happen, * but there is a bug in some released firmware * revisions that allows it to happen if, for * example, a 4100 backplane loses power and * the tape drive is in it. We assume that * it's a fatal error of some kind because we * can't show that it wasn't. We will make it * look like selection timeout since that is * the most common reason for this to occur, * and it's severe enough. */ cmd->result = DID_NO_CONNECT << 16; } break; case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ break; case CMD_DATA_OVERRUN: dev_warn(&h->pdev->dev, "CDB %16phN data overrun\n", cp->Request.CDB); break; case CMD_INVALID: { /* print_bytes(cp, sizeof(*cp), 1, 0); print_cmd(cp); */ /* We get CMD_INVALID if you address a non-existent device * instead of a selection timeout (no response). You will * see this if you yank out a drive, then try to access it. * This is kind of a shame because it means that any other * CMD_INVALID (e.g. driver bug) will get interpreted as a * missing target. */ cmd->result = DID_NO_CONNECT << 16; } break; case CMD_PROTOCOL_ERR: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", cp->Request.CDB); break; case CMD_HARDWARE_ERR: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", cp->Request.CDB); break; case CMD_CONNECTION_LOST: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", cp->Request.CDB); break; case CMD_ABORTED: cmd->result = DID_ABORT << 16; break; case CMD_ABORT_FAILED: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", cp->Request.CDB); break; case CMD_UNSOLICITED_ABORT: cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", cp->Request.CDB); break; case CMD_TIMEOUT: cmd->result = DID_TIME_OUT << 16; dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", cp->Request.CDB); break; case CMD_UNABORTABLE: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "Command unabortable\n"); break; case CMD_TMF_STATUS: if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? 
*/ cmd->result = DID_ERROR << 16; break; case CMD_IOACCEL_DISABLED: /* This only handles the direct pass-through case since RAID * offload is handled above. Just attempt a retry. */ cmd->result = DID_SOFT_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p had HP SSD Smart Path error\n", cp); break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } return hpsa_cmd_free_and_done(h, cp, cmd); } static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, int sg_used, enum dma_data_direction data_direction) { int i; for (i = 0; i < sg_used; i++) dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), le32_to_cpu(c->SG[i].Len), data_direction); } static int hpsa_map_one(struct pci_dev *pdev, struct CommandList *cp, unsigned char *buf, size_t buflen, enum dma_data_direction data_direction) { u64 addr64; if (buflen == 0 || data_direction == DMA_NONE) { cp->Header.SGList = 0; cp->Header.SGTotal = cpu_to_le16(0); return 0; } addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction); if (dma_mapping_error(&pdev->dev, addr64)) { /* Prevent subsequent unmap of something never mapped */ cp->Header.SGList = 0; cp->Header.SGTotal = cpu_to_le16(0); return -1; } cp->SG[0].Addr = cpu_to_le64(addr64); cp->SG[0].Len = cpu_to_le32(buflen); cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ cp->Header.SGList = 1; /* no. SGs contig in this cmd */ cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ return 0; } #define NO_TIMEOUT ((unsigned long) -1) #define DEFAULT_TIMEOUT 30000 /* milliseconds */ static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, struct CommandList *c, int reply_queue, unsigned long timeout_msecs) { DECLARE_COMPLETION_ONSTACK(wait); c->waiting = &wait; __enqueue_cmd_and_start_io(h, c, reply_queue); if (timeout_msecs == NO_TIMEOUT) { /* TODO: get rid of this no-timeout thing */ wait_for_completion_io(&wait); return IO_OK; } if (!wait_for_completion_io_timeout(&wait, msecs_to_jiffies(timeout_msecs))) { dev_warn(&h->pdev->dev, "Command timed out.\n"); return -ETIMEDOUT; } return IO_OK; } static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, int reply_queue, unsigned long timeout_msecs) { if (unlikely(lockup_detected(h))) { c->err_info->CommandStatus = CMD_CTLR_LOCKUP; return IO_OK; } return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); } static u32 lockup_detected(struct ctlr_info *h) { int cpu; u32 rc, *lockup_detected; cpu = get_cpu(); lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); rc = *lockup_detected; put_cpu(); return rc; } #define MAX_DRIVER_CMD_RETRIES 25 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, struct CommandList *c, enum dma_data_direction data_direction, unsigned long timeout_msecs) { int backoff_time = 10, retry_count = 0; int rc; do { memset(c->err_info, 0, sizeof(*c->err_info)); rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, timeout_msecs); if (rc) break; retry_count++; if (retry_count > 3) { msleep(backoff_time); if (backoff_time < 1000) backoff_time *= 2; } } while ((check_for_unit_attention(h, c) || check_for_busy(h, c)) && retry_count <= MAX_DRIVER_CMD_RETRIES); hpsa_pci_unmap(h->pdev, c, 1, data_direction); if (retry_count > MAX_DRIVER_CMD_RETRIES) rc = -EIO; return rc; } static void hpsa_print_cmd(struct ctlr_info *h, char *txt, struct CommandList *c) { const u8 *cdb = c->Request.CDB; const u8 *lun = c->Header.LUN.LunAddrBytes; dev_warn(&h->pdev->dev, "%s: 
LUN:%8phN CDB:%16phN\n", txt, lun, cdb); } static void hpsa_scsi_interpret_error(struct ctlr_info *h, struct CommandList *cp) { const struct ErrorInfo *ei = cp->err_info; struct device *d = &cp->h->pdev->dev; u8 sense_key, asc, ascq; int sense_len; switch (ei->CommandStatus) { case CMD_TARGET_STATUS: if (ei->SenseLen > sizeof(ei->SenseInfo)) sense_len = sizeof(ei->SenseInfo); else sense_len = ei->SenseLen; decode_sense_data(ei->SenseInfo, sense_len, &sense_key, &asc, &ascq); hpsa_print_cmd(h, "SCSI status", cp); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", sense_key, asc, ascq); else dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); if (ei->ScsiStatus == 0) dev_warn(d, "SCSI status is abnormally zero. " "(probably indicates selection timeout " "reported incorrectly due to a known " "firmware bug, circa July, 2001.)\n"); break; case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ break; case CMD_DATA_OVERRUN: hpsa_print_cmd(h, "overrun condition", cp); break; case CMD_INVALID: { /* controller unfortunately reports SCSI passthru's * to non-existent targets as invalid commands. */ hpsa_print_cmd(h, "invalid command", cp); dev_warn(d, "probably means device no longer present\n"); } break; case CMD_PROTOCOL_ERR: hpsa_print_cmd(h, "protocol error", cp); break; case CMD_HARDWARE_ERR: hpsa_print_cmd(h, "hardware error", cp); break; case CMD_CONNECTION_LOST: hpsa_print_cmd(h, "connection lost", cp); break; case CMD_ABORTED: hpsa_print_cmd(h, "aborted", cp); break; case CMD_ABORT_FAILED: hpsa_print_cmd(h, "abort failed", cp); break; case CMD_UNSOLICITED_ABORT: hpsa_print_cmd(h, "unsolicited abort", cp); break; case CMD_TIMEOUT: hpsa_print_cmd(h, "timed out", cp); break; case CMD_UNABORTABLE: hpsa_print_cmd(h, "unabortable", cp); break; case CMD_CTLR_LOCKUP: hpsa_print_cmd(h, "controller lockup detected", cp); break; default: hpsa_print_cmd(h, "unknown status", cp); dev_warn(d, "Unknown command status %x\n", ei->CommandStatus); } } static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr, u8 page, u8 *buf, size_t bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -1; } out: cmd_free(h, c); return rc; } static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h, u8 *scsi3addr) { u8 *buf; u64 sa = 0; int rc = 0; buf = kzalloc(1024, GFP_KERNEL); if (!buf) return 0; rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC, buf, 1024); if (rc) goto out; sa = get_unaligned_be64(buf+12); out: kfree(buf); return sa; } static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, u16 page, unsigned char *buf, unsigned char bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -1; } out: cmd_free(h, c); return rc; } static int hpsa_send_reset(struct ctlr_info 
*h, struct hpsa_scsi_dev_t *dev, u8 reset_type, int reply_queue) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); c->device = dev; /* fill_cmd can't fail here, no data buffer to map. */ (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); if (rc) { dev_warn(&h->pdev->dev, "Failed to send reset command\n"); goto out; } /* no unmap needed here because no data xfer. */ ei = c->err_info; if (ei->CommandStatus != 0) { hpsa_scsi_interpret_error(h, c); rc = -1; } out: cmd_free(h, c); return rc; } static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, struct hpsa_scsi_dev_t *dev, unsigned char *scsi3addr) { int i; bool match = false; struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; if (hpsa_is_cmd_idle(c)) return false; switch (c->cmd_type) { case CMD_SCSI: case CMD_IOCTL_PEND: match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, sizeof(c->Header.LUN.LunAddrBytes)); break; case CMD_IOACCEL1: case CMD_IOACCEL2: if (c->phys_disk == dev) { /* HBA mode match */ match = true; } else { /* Possible RAID mode -- check each phys dev. */ /* FIXME: Do we need to take out a lock here? If * so, we could just call hpsa_get_pdisk_of_ioaccel2() * instead. */ for (i = 0; i < dev->nphysical_disks && !match; i++) { /* FIXME: an alternate test might be * * match = dev->phys_disk[i]->ioaccel_handle * == c2->scsi_nexus; */ match = dev->phys_disk[i] == c->phys_disk; } } break; case IOACCEL2_TMF: for (i = 0; i < dev->nphysical_disks && !match; i++) { match = dev->phys_disk[i]->ioaccel_handle == le32_to_cpu(ac->it_nexus); } break; case 0: /* The command is in the middle of being initialized. 
*/ match = false; break; default: dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", c->cmd_type); BUG(); } return match; } static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, u8 reset_type, int reply_queue) { int rc = 0; /* We can really only handle one reset at a time */ if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); return -EINTR; } rc = hpsa_send_reset(h, dev, reset_type, reply_queue); if (!rc) { /* incremented by sending the reset request */ atomic_dec(&dev->commands_outstanding); wait_event(h->event_sync_wait_queue, atomic_read(&dev->commands_outstanding) <= 0 || lockup_detected(h)); } if (unlikely(lockup_detected(h))) { dev_warn(&h->pdev->dev, "Controller lockup detected during reset wait\n"); rc = -ENODEV; } if (!rc) rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0); mutex_unlock(&h->reset_mutex); return rc; } static void hpsa_get_raid_level(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *raid_level) { int rc; unsigned char *buf; *raid_level = RAID_UNKNOWN; buf = kzalloc(64, GFP_KERNEL); if (!buf) return; if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_GEOMETRY)) goto exit; rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64); if (rc == 0) *raid_level = buf[8]; if (*raid_level > RAID_UNKNOWN) *raid_level = RAID_UNKNOWN; exit: kfree(buf); return; } #define HPSA_MAP_DEBUG #ifdef HPSA_MAP_DEBUG static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, struct raid_map_data *map_buff) { struct raid_map_disk_data *dd = &map_buff->data[0]; int map, row, col; u16 map_cnt, row_cnt, disks_per_row; if (rc != 0) return; /* Show details only if debugging has been activated. */ if (h->raid_offload_debug < 2) return; dev_info(&h->pdev->dev, "structure_size = %u\n", le32_to_cpu(map_buff->structure_size)); dev_info(&h->pdev->dev, "volume_blk_size = %u\n", le32_to_cpu(map_buff->volume_blk_size)); dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", le64_to_cpu(map_buff->volume_blk_cnt)); dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", map_buff->phys_blk_shift); dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", map_buff->parity_rotation_shift); dev_info(&h->pdev->dev, "strip_size = %u\n", le16_to_cpu(map_buff->strip_size)); dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", le64_to_cpu(map_buff->disk_starting_blk)); dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", le64_to_cpu(map_buff->disk_blk_cnt)); dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", le16_to_cpu(map_buff->data_disks_per_row)); dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", le16_to_cpu(map_buff->metadata_disks_per_row)); dev_info(&h->pdev->dev, "row_cnt = %u\n", le16_to_cpu(map_buff->row_cnt)); dev_info(&h->pdev->dev, "layout_map_count = %u\n", le16_to_cpu(map_buff->layout_map_count)); dev_info(&h->pdev->dev, "flags = 0x%x\n", le16_to_cpu(map_buff->flags)); dev_info(&h->pdev->dev, "encryption = %s\n", le16_to_cpu(map_buff->flags) & RAID_MAP_FLAG_ENCRYPT_ON ? 
"ON" : "OFF"); dev_info(&h->pdev->dev, "dekindex = %u\n", le16_to_cpu(map_buff->dekindex)); map_cnt = le16_to_cpu(map_buff->layout_map_count); for (map = 0; map < map_cnt; map++) { dev_info(&h->pdev->dev, "Map%u:\n", map); row_cnt = le16_to_cpu(map_buff->row_cnt); for (row = 0; row < row_cnt; row++) { dev_info(&h->pdev->dev, " Row%u:\n", row); disks_per_row = le16_to_cpu(map_buff->data_disks_per_row); for (col = 0; col < disks_per_row; col++, dd++) dev_info(&h->pdev->dev, " D%02u: h=0x%04x xor=%u,%u\n", col, dd->ioaccel_handle, dd->xor_mult[0], dd->xor_mult[1]); disks_per_row = le16_to_cpu(map_buff->metadata_disks_per_row); for (col = 0; col < disks_per_row; col++, dd++) dev_info(&h->pdev->dev, " M%02u: h=0x%04x xor=%u,%u\n", col, dd->ioaccel_handle, dd->xor_mult[0], dd->xor_mult[1]); } } } #else static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, __attribute__((unused)) int rc, __attribute__((unused)) struct raid_map_data *map_buff) { } #endif static int hpsa_get_raid_map(struct ctlr_info *h, unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) { int rc = 0; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, sizeof(this_device->raid_map), 0, scsi3addr, TYPE_CMD)) { dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); cmd_free(h, c); return -1; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -1; goto out; } cmd_free(h, c); /* @todo in the future, dynamically allocate RAID map memory */ if (le32_to_cpu(this_device->raid_map.structure_size) > sizeof(this_device->raid_map)) { dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); rc = -1; } hpsa_debug_map_buff(h, rc, &this_device->raid_map); return rc; out: cmd_free(h, c); return rc; } static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, unsigned char scsi3addr[], u16 bmic_device_index, struct bmic_sense_subsystem_info *buf, size_t bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, 0, RAID_CTLR_LUNID, TYPE_CMD); if (rc) goto out; c->Request.CDB[2] = bmic_device_index & 0xff; c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -1; } out: cmd_free(h, c); return rc; } static int hpsa_bmic_id_controller(struct ctlr_info *h, struct bmic_identify_controller *buf, size_t bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, 0, RAID_CTLR_LUNID, TYPE_CMD); if (rc) goto out; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -1; } out: cmd_free(h, c); return rc; } static int hpsa_bmic_id_physical_device(struct ctlr_info *h, unsigned char scsi3addr[], u16 bmic_device_index, struct bmic_identify_physical_device *buf, size_t bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_alloc(h); rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, 
h, buf, bufsize, 0, RAID_CTLR_LUNID, TYPE_CMD); if (rc) goto out; c->Request.CDB[2] = bmic_device_index & 0xff; c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -1; } out: cmd_free(h, c); return rc; } /* * get enclosure information * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure * Uses id_physical_device to determine the box_index. */ static void hpsa_get_enclosure_info(struct ctlr_info *h, unsigned char *scsi3addr, struct ReportExtendedLUNdata *rlep, int rle_index, struct hpsa_scsi_dev_t *encl_dev) { int rc = -1; struct CommandList *c = NULL; struct ErrorInfo *ei = NULL; struct bmic_sense_storage_box_params *bssbp = NULL; struct bmic_identify_physical_device *id_phys = NULL; struct ext_report_lun_entry *rle; u16 bmic_device_index = 0; if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) return; rle = &rlep->LUN[rle_index]; encl_dev->eli = hpsa_get_enclosure_logical_identifier(h, scsi3addr); bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); if (encl_dev->target == -1 || encl_dev->lun == -1) { rc = IO_OK; goto out; } if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { rc = IO_OK; goto out; } bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); if (!bssbp) goto out; id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); if (!id_phys) goto out; rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index, id_phys, sizeof(*id_phys)); if (rc) { dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n", __func__, encl_dev->external, bmic_device_index); goto out; } c = cmd_alloc(h); rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp, sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD); if (rc) goto out; if (id_phys->phys_connector[1] == 'E') c->Request.CDB[5] = id_phys->box_index; else c->Request.CDB[5] = 0; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { rc = -1; goto out; } encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port; memcpy(&encl_dev->phys_connector[id_phys->active_path_number], bssbp->phys_connector, sizeof(bssbp->phys_connector)); rc = IO_OK; out: kfree(bssbp); kfree(id_phys); if (c) cmd_free(h, c); if (rc != IO_OK) hpsa_show_dev_msg(KERN_INFO, h, encl_dev, "Error, could not get enclosure information"); } static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, unsigned char *scsi3addr) { struct ReportExtendedLUNdata *physdev; u32 nphysicals; u64 sa = 0; int i; physdev = kzalloc(sizeof(*physdev), GFP_KERNEL); if (!physdev) return 0; if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); kfree(physdev); return 0; } nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24; for (i = 0; i < nphysicals; i++) if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) { sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]); break; } kfree(physdev); return sa; } static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, struct hpsa_scsi_dev_t *dev) { int rc; u64 sa = 0; if (is_hba_lunid(scsi3addr)) { struct bmic_sense_subsystem_info *ssi; ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); if (!ssi) return; rc = hpsa_bmic_sense_subsystem_information(h, 
scsi3addr, 0, ssi, sizeof(*ssi)); if (rc == 0) { sa = get_unaligned_be64(ssi->primary_world_wide_id); h->sas_address = sa; } kfree(ssi); } else sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr); dev->sas_address = sa; } static void hpsa_ext_ctrl_present(struct ctlr_info *h, struct ReportExtendedLUNdata *physdev) { u32 nphysicals; int i; if (h->discovery_polling) return; nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1; for (i = 0; i < nphysicals; i++) { if (physdev->LUN[i].device_type == BMIC_DEVICE_TYPE_CONTROLLER && !is_hba_lunid(physdev->LUN[i].lunid)) { dev_info(&h->pdev->dev, "External controller present, activate discovery polling and disable rld caching\n"); hpsa_disable_rld_caching(h); h->discovery_polling = 1; break; } } } /* Get a device id from inquiry page 0x83 */ static bool hpsa_vpd_page_supported(struct ctlr_info *h, unsigned char scsi3addr[], u8 page) { int rc; int i; int pages; unsigned char *buf, bufsize; buf = kzalloc(256, GFP_KERNEL); if (!buf) return false; /* Get the size of the page list first */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, buf, HPSA_VPD_HEADER_SZ); if (rc != 0) goto exit_unsupported; pages = buf[3]; if ((pages + HPSA_VPD_HEADER_SZ) <= 255) bufsize = pages + HPSA_VPD_HEADER_SZ; else bufsize = 255; /* Get the whole VPD page list */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, buf, bufsize); if (rc != 0) goto exit_unsupported; pages = buf[3]; for (i = 1; i <= pages; i++) if (buf[3 + i] == page) goto exit_supported; exit_unsupported: kfree(buf); return false; exit_supported: kfree(buf); return true; } /* * Called during a scan operation. * Sets ioaccel status on the new device list, not the existing device list * * The device list used during I/O will be updated later in * adjust_hpsa_scsi_table. */ static void hpsa_get_ioaccel_status(struct ctlr_info *h, unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) { int rc; unsigned char *buf; u8 ioaccel_status; this_device->offload_config = 0; this_device->offload_enabled = 0; this_device->offload_to_be_enabled = 0; buf = kzalloc(64, GFP_KERNEL); if (!buf) return; if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) goto out; rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); if (rc != 0) goto out; #define IOACCEL_STATUS_BYTE 4 #define OFFLOAD_CONFIGURED_BIT 0x01 #define OFFLOAD_ENABLED_BIT 0x02 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; this_device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (this_device->offload_config) { bool offload_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); /* * Check to see if offload can be enabled. */ if (offload_enabled) { rc = hpsa_get_raid_map(h, scsi3addr, this_device); if (rc) /* could not load raid_map */ goto out; this_device->offload_to_be_enabled = 1; } } out: kfree(buf); return; } /* Get the device id from inquiry page 0x83 */ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *device_id, int index, int buflen) { int rc; unsigned char *buf; /* Does controller have VPD for device id? 
*/ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID)) return 1; /* not supported */ buf = kzalloc(64, GFP_KERNEL); if (!buf) return -ENOMEM; rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_DEVICE_ID, buf, 64); if (rc == 0) { if (buflen > 16) buflen = 16; memcpy(device_id, &buf[8], buflen); } kfree(buf); return rc; /*0 - got id, otherwise, didn't */ } static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, void *buf, int bufsize, int extended_response) { int rc = IO_OK; struct CommandList *c; unsigned char scsi3addr[8]; struct ErrorInfo *ei; c = cmd_alloc(h); /* address the controller */ memset(scsi3addr, 0, sizeof(scsi3addr)); if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, buf, bufsize, 0, scsi3addr, TYPE_CMD)) { rc = -EAGAIN; goto out; } if (extended_response) c->Request.CDB[1] = extended_response; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); rc = -EIO; } else { struct ReportLUNdata *rld = buf; if (rld->extended_response_flag != extended_response) { if (!h->legacy_board) { dev_err(&h->pdev->dev, "report luns requested format %u, got %u\n", extended_response, rld->extended_response_flag); rc = -EINVAL; } else rc = -EOPNOTSUPP; } } out: cmd_free(h, c); return rc; } static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, struct ReportExtendedLUNdata *buf, int bufsize) { int rc; struct ReportLUNdata *lbuf; rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize, HPSA_REPORT_PHYS_EXTENDED); if (!rc || rc != -EOPNOTSUPP) return rc; /* REPORT PHYS EXTENDED is not supported */ lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL); if (!lbuf) return -ENOMEM; rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0); if (!rc) { int i; u32 nphys; /* Copy ReportLUNdata header */ memcpy(buf, lbuf, 8); nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8; for (i = 0; i < nphys; i++) memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8); } kfree(lbuf); return rc; } static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, struct ReportLUNdata *buf, int bufsize) { return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); } static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, int bus, int target, int lun) { device->bus = bus; device->target = target; device->lun = lun; } /* Use VPD inquiry to get details of volume status */ static int hpsa_get_volume_status(struct ctlr_info *h, unsigned char scsi3addr[]) { int rc; int status; int size; unsigned char *buf; buf = kzalloc(64, GFP_KERNEL); if (!buf) return HPSA_VPD_LV_STATUS_UNSUPPORTED; /* Does controller have VPD for logical volume status? */ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) goto exit_failed; /* Get the size of the VPD return buffer */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, buf, HPSA_VPD_HEADER_SZ); if (rc != 0) goto exit_failed; size = buf[3]; /* Now get the whole VPD buffer */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, buf, size + HPSA_VPD_HEADER_SZ); if (rc != 0) goto exit_failed; status = buf[4]; /* status byte */ kfree(buf); return status; exit_failed: kfree(buf); return HPSA_VPD_LV_STATUS_UNSUPPORTED; } /* Determine offline status of a volume. 
* Return either: * 0 (not offline) * 0xff (offline for unknown reasons) * # (integer code indicating one of several NOT READY states * describing why a volume is to be kept offline) */ static unsigned char hpsa_volume_offline(struct ctlr_info *h, unsigned char scsi3addr[]) { struct CommandList *c; unsigned char *sense; u8 sense_key, asc, ascq; int sense_len; int rc, ldstat = 0; #define ASC_LUN_NOT_READY 0x04 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 c = cmd_alloc(h); (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (rc) { cmd_free(h, c); return HPSA_VPD_LV_STATUS_UNSUPPORTED; } sense = c->err_info->SenseInfo; if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) sense_len = sizeof(c->err_info->SenseInfo); else sense_len = c->err_info->SenseLen; decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); cmd_free(h, c); /* Determine the reason for not ready state */ ldstat = hpsa_get_volume_status(h, scsi3addr); /* Keep volume offline in certain cases: */ switch (ldstat) { case HPSA_LV_FAILED: case HPSA_LV_UNDERGOING_ERASE: case HPSA_LV_NOT_AVAILABLE: case HPSA_LV_UNDERGOING_RPI: case HPSA_LV_PENDING_RPI: case HPSA_LV_ENCRYPTED_NO_KEY: case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: case HPSA_LV_UNDERGOING_ENCRYPTION: case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: return ldstat; case HPSA_VPD_LV_STATUS_UNSUPPORTED: /* If VPD status page isn't available, * use ASC/ASCQ to determine state */ if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) return ldstat; break; default: break; } return HPSA_LV_OK; } static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) { #define OBDR_SIG_OFFSET 43 #define OBDR_TAPE_SIG "$DR-10" #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) unsigned char *inq_buff; unsigned char *obdr_sig; int rc = 0; inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); if (!inq_buff) { rc = -ENOMEM; goto bail_out; } /* Do an inquiry to the device to see what it is. 
*/ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { dev_err(&h->pdev->dev, "%s: inquiry failed, device will be skipped.\n", __func__); rc = HPSA_INQUIRY_FAILED; goto bail_out; } scsi_sanitize_inquiry_string(&inq_buff[8], 8); scsi_sanitize_inquiry_string(&inq_buff[16], 16); this_device->devtype = (inq_buff[0] & 0x1f); memcpy(this_device->scsi3addr, scsi3addr, 8); memcpy(this_device->vendor, &inq_buff[8], sizeof(this_device->vendor)); memcpy(this_device->model, &inq_buff[16], sizeof(this_device->model)); this_device->rev = inq_buff[2]; memset(this_device->device_id, 0, sizeof(this_device->device_id)); if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, sizeof(this_device->device_id)) < 0) { dev_err(&h->pdev->dev, "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n", h->ctlr, __func__, h->scsi_host->host_no, this_device->bus, this_device->target, this_device->lun, scsi_device_type(this_device->devtype), this_device->model); rc = HPSA_LV_FAILED; goto bail_out; } if ((this_device->devtype == TYPE_DISK || this_device->devtype == TYPE_ZBC) && is_logical_dev_addr_mode(scsi3addr)) { unsigned char volume_offline; hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) hpsa_get_ioaccel_status(h, scsi3addr, this_device); volume_offline = hpsa_volume_offline(h, scsi3addr); if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED && h->legacy_board) { /* * Legacy boards might not support volume status */ dev_info(&h->pdev->dev, "C0:T%d:L%d Volume status not available, assuming online.\n", this_device->target, this_device->lun); volume_offline = 0; } this_device->volume_offline = volume_offline; if (volume_offline == HPSA_LV_FAILED) { rc = HPSA_LV_FAILED; dev_err(&h->pdev->dev, "%s: LV failed, device will be skipped.\n", __func__); goto bail_out; } } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; hpsa_turn_off_ioaccel_for_device(this_device); this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; } if (this_device->external) this_device->queue_depth = EXTERNAL_QD; if (is_OBDR_device) { /* See if this is a One-Button-Disaster-Recovery device * by looking for "$DR-10" at offset 43 in inquiry data. */ obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; *is_OBDR_device = (this_device->devtype == TYPE_ROM && strncmp(obdr_sig, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0); } kfree(inq_buff); return 0; bail_out: kfree(inq_buff); return rc; } /* * Helper function to assign bus, target, lun mapping of devices. * Logical drive target and lun are assigned at this time, but * physical device lun and target assignment are deferred (assigned * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 
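 * In short (summarizing figure_bus_target_lun() below): the controller itself
 * is placed on HPSA_HBA_BUS (HPSA_LEGACY_HBA_BUS when no revision is
 * reported) at target 0 with the low 14 bits of the LUN ID as the lun; other
 * physical devices go on HPSA_PHYSICAL_DEVICE_BUS with target/lun left at
 * -1/-1 for now; external RAID volumes use LUN ID bits 16-29 as the target
 * and the low byte as the lun; local logical volumes sit on
 * HPSA_RAID_VOLUME_BUS at target 0 with the low 14 bits as the lun.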
*/ static void figure_bus_target_lun(struct ctlr_info *h, u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) { u32 lunid = get_unaligned_le32(lunaddrbytes); if (!is_logical_dev_addr_mode(lunaddrbytes)) { /* physical device, target and lun filled in later */ if (is_hba_lunid(lunaddrbytes)) { int bus = HPSA_HBA_BUS; if (!device->rev) bus = HPSA_LEGACY_HBA_BUS; hpsa_set_bus_target_lun(device, bus, 0, lunid & 0x3fff); } else /* defer target, lun assignment for physical devices */ hpsa_set_bus_target_lun(device, HPSA_PHYSICAL_DEVICE_BUS, -1, -1); return; } /* It's a logical device */ if (device->external) { hpsa_set_bus_target_lun(device, HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, lunid & 0x00ff); return; } hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, 0, lunid & 0x3fff); } static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, int i, int nphysicals, int nlocal_logicals) { /* In report logicals, local logicals are listed first, * then any externals. */ int logicals_start = nphysicals + (raid_ctlr_position == 0); if (i == raid_ctlr_position) return 0; if (i < logicals_start) return 0; /* i is in logicals range, but still within local logicals */ if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals) return 0; return 1; /* it's an external lun */ } /* * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, * logdev. The number of luns in physdev and logdev are returned in * *nphysicals and *nlogicals, respectively. * Returns 0 on success, -1 otherwise. */ static int hpsa_gather_lun_info(struct ctlr_info *h, struct ReportExtendedLUNdata *physdev, u32 *nphysicals, struct ReportLUNdata *logdev, u32 *nlogicals) { if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); return -1; } *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; if (*nphysicals > HPSA_MAX_PHYS_LUN) { dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); *nphysicals = HPSA_MAX_PHYS_LUN; } if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); return -1; } *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; /* Reject Logicals in excess of our max capability. */ if (*nlogicals > HPSA_MAX_LUN) { dev_warn(&h->pdev->dev, "maximum logical LUNs (%d) exceeded. " "%d LUNs ignored.\n", HPSA_MAX_LUN, *nlogicals - HPSA_MAX_LUN); *nlogicals = HPSA_MAX_LUN; } if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { dev_warn(&h->pdev->dev, "maximum logical + physical LUNs (%d) exceeded. 
" "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; } return 0; } static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, int nphysicals, int nlogicals, struct ReportExtendedLUNdata *physdev_list, struct ReportLUNdata *logdev_list) { /* Helper function, figure out where the LUN ID info is coming from * given index i, lists of physical and logical devices, where in * the list the raid controller is supposed to appear (first or last) */ int logicals_start = nphysicals + (raid_ctlr_position == 0); int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); if (i == raid_ctlr_position) return RAID_CTLR_LUNID; if (i < logicals_start) return &physdev_list->LUN[i - (raid_ctlr_position == 0)].lunid[0]; if (i < last_device) return &logdev_list->LUN[i - nphysicals - (raid_ctlr_position == 0)][0]; BUG(); return NULL; } /* get physical drive ioaccel handle and queue depth */ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, struct ReportExtendedLUNdata *rlep, int rle_index, struct bmic_identify_physical_device *id_phys) { int rc; struct ext_report_lun_entry *rle; if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) return; rle = &rlep->LUN[rle_index]; dev->ioaccel_handle = rle->ioaccel_handle; if ((rle->device_flags & 0x08) && dev->ioaccel_handle) dev->hba_ioaccel_enabled = 1; memset(id_phys, 0, sizeof(*id_phys)); rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, sizeof(*id_phys)); if (!rc) /* Reserve space for FW operations */ #define DRIVE_CMDS_RESERVED_FOR_FW 2 #define DRIVE_QUEUE_DEPTH 7 dev->queue_depth = le16_to_cpu(id_phys->current_queue_depth_limit) - DRIVE_CMDS_RESERVED_FOR_FW; else dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ } static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, struct ReportExtendedLUNdata *rlep, int rle_index, struct bmic_identify_physical_device *id_phys) { struct ext_report_lun_entry *rle; if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) return; rle = &rlep->LUN[rle_index]; if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) this_device->hba_ioaccel_enabled = 1; memcpy(&this_device->active_path_index, &id_phys->active_path_number, sizeof(this_device->active_path_index)); memcpy(&this_device->path_map, &id_phys->redundant_path_present_map, sizeof(this_device->path_map)); memcpy(&this_device->box, &id_phys->alternate_paths_phys_box_on_port, sizeof(this_device->box)); memcpy(&this_device->phys_connector, &id_phys->alternate_paths_phys_connector, sizeof(this_device->phys_connector)); memcpy(&this_device->bay, &id_phys->phys_bay_in_box, sizeof(this_device->bay)); } /* get number of local logical disks. 
*/ static int hpsa_set_local_logical_count(struct ctlr_info *h, struct bmic_identify_controller *id_ctlr, u32 *nlocals) { int rc; if (!id_ctlr) { dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", __func__); return -ENOMEM; } memset(id_ctlr, 0, sizeof(*id_ctlr)); rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); if (!rc) if (id_ctlr->configured_logical_drive_count < 255) *nlocals = id_ctlr->configured_logical_drive_count; else *nlocals = le16_to_cpu( id_ctlr->extended_logical_unit_count); else *nlocals = -1; return rc; } static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) { struct bmic_identify_physical_device *id_phys; bool is_spare = false; int rc; id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); if (!id_phys) return false; rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, sizeof(*id_phys)); if (rc == 0) is_spare = (id_phys->more_flags >> 6) & 0x01; kfree(id_phys); return is_spare; } #define RPL_DEV_FLAG_NON_DISK 0x1 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 #define BMIC_DEVICE_TYPE_ENCLOSURE 6 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, struct ext_report_lun_entry *rle) { u8 device_flags; u8 device_type; if (!MASKED_DEVICE(lunaddrbytes)) return false; device_flags = rle->device_flags; device_type = rle->device_type; if (device_flags & RPL_DEV_FLAG_NON_DISK) { if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) return false; return true; } if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) return false; if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) return false; /* * Spares may be spun down, we do not want to * do an Inquiry to a RAID set spare drive as * that would have them spun up, that is a * performance hit because I/O to the RAID device * stops while the spin up occurs which can take * over 50 seconds. */ if (hpsa_is_disk_spare(h, lunaddrbytes)) return true; return false; } static void hpsa_update_scsi_devices(struct ctlr_info *h) { /* the idea here is we could get notified * that some devices have changed, so we do a report * physical luns and report logical luns cmd, and adjust * our list of devices accordingly. * * The scsi3addr's of devices won't change so long as the * adapter is not reset. That means we can rescan and * tell which devices we already know about, vs. new * devices, vs. disappearing devices. */ struct ReportExtendedLUNdata *physdev_list = NULL; struct ReportLUNdata *logdev_list = NULL; struct bmic_identify_physical_device *id_phys = NULL; struct bmic_identify_controller *id_ctlr = NULL; u32 nphysicals = 0; u32 nlogicals = 0; u32 nlocal_logicals = 0; u32 ndev_allocated = 0; struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; int ncurrent = 0; int i, ndevs_to_allocate; int raid_ctlr_position; bool physical_device; currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL); physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL); if (!currentsd || !physdev_list || !logdev_list || !tmpdevice || !id_phys || !id_ctlr) { dev_err(&h->pdev->dev, "out of memory\n"); goto out; } h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. 
*/ if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, logdev_list, &nlogicals)) { h->drv_req_rescan = 1; goto out; } /* Set number of local logicals (non PTRAID) */ if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { dev_warn(&h->pdev->dev, "%s: Can't determine number of local logical devices.\n", __func__); } /* We might see up to the maximum number of logical and physical disks * plus external target devices, and a device for the local RAID * controller. */ ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; hpsa_ext_ctrl_present(h, physdev_list); /* Allocate the per device structures */ for (i = 0; i < ndevs_to_allocate; i++) { if (i >= HPSA_MAX_DEVICES) { dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." " %d devices ignored.\n", HPSA_MAX_DEVICES, ndevs_to_allocate - HPSA_MAX_DEVICES); break; } currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); if (!currentsd[i]) { h->drv_req_rescan = 1; goto out; } ndev_allocated++; } if (is_scsi_rev_5(h)) raid_ctlr_position = 0; else raid_ctlr_position = nphysicals + nlogicals; /* adjust our table of devices */ for (i = 0; i < nphysicals + nlogicals + 1; i++) { u8 *lunaddrbytes, is_OBDR = 0; int rc = 0; int phys_dev_index = i - (raid_ctlr_position == 0); bool skip_device = false; memset(tmpdevice, 0, sizeof(*tmpdevice)); physical_device = i < nphysicals + (raid_ctlr_position == 0); /* Figure out where the LUN ID info is coming from */ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); /* Determine if this is a lun from an external target array */ tmpdevice->external = figure_external_status(h, raid_ctlr_position, i, nphysicals, nlocal_logicals); /* * Skip over some devices such as a spare. */ if (phys_dev_index >= 0 && !tmpdevice->external && physical_device) { skip_device = hpsa_skip_device(h, lunaddrbytes, &physdev_list->LUN[phys_dev_index]); if (skip_device) continue; } /* Get device type, vendor, model, device id, raid_map */ rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, &is_OBDR); if (rc == -ENOMEM) { dev_warn(&h->pdev->dev, "Out of memory, rescan deferred.\n"); h->drv_req_rescan = 1; goto out; } if (rc) { h->drv_req_rescan = 1; continue; } figure_bus_target_lun(h, lunaddrbytes, tmpdevice); this_device = currentsd[ncurrent]; *this_device = *tmpdevice; this_device->physical_device = physical_device; /* * Expose all devices except for physical devices that * are masked. */ if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device) this_device->expose_device = 0; else this_device->expose_device = 1; /* * Get the SAS address for physical devices that are exposed. */ if (this_device->physical_device && this_device->expose_device) hpsa_get_sas_address(h, lunaddrbytes, this_device); switch (this_device->devtype) { case TYPE_ROM: /* We don't *really* support actual CD-ROM devices, * just "One Button Disaster Recovery" tape drive * which temporarily pretends to be a CD-ROM drive. * So we check that the device is really an OBDR tape * device by checking for "$DR-10" in bytes 43-48 of * the inquiry data. */ if (is_OBDR) ncurrent++; break; case TYPE_DISK: case TYPE_ZBC: if (this_device->physical_device) { /* The disk is in HBA mode. */ /* Never use RAID mapper in HBA mode. 
*/ this_device->offload_enabled = 0; hpsa_get_ioaccel_drive_info(h, this_device, physdev_list, phys_dev_index, id_phys); hpsa_get_path_info(this_device, physdev_list, phys_dev_index, id_phys); } ncurrent++; break; case TYPE_TAPE: case TYPE_MEDIUM_CHANGER: ncurrent++; break; case TYPE_ENCLOSURE: if (!this_device->external) hpsa_get_enclosure_info(h, lunaddrbytes, physdev_list, phys_dev_index, this_device); ncurrent++; break; case TYPE_RAID: /* Only present the Smartarray HBA as a RAID controller. * If it's a RAID controller other than the HBA itself * (an external RAID controller, MSA500 or similar) * don't present it. */ if (!is_hba_lunid(lunaddrbytes)) break; ncurrent++; break; default: break; } if (ncurrent >= HPSA_MAX_DEVICES) break; } if (h->sas_host == NULL) { int rc = 0; rc = hpsa_add_sas_host(h); if (rc) { dev_warn(&h->pdev->dev, "Could not add sas host %d\n", rc); goto out; } } adjust_hpsa_scsi_table(h, currentsd, ncurrent); out: kfree(tmpdevice); for (i = 0; i < ndev_allocated; i++) kfree(currentsd[i]); kfree(currentsd); kfree(physdev_list); kfree(logdev_list); kfree(id_ctlr); kfree(id_phys); } static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, struct scatterlist *sg) { u64 addr64 = (u64) sg_dma_address(sg); unsigned int len = sg_dma_len(sg); desc->Addr = cpu_to_le64(addr64); desc->Len = cpu_to_le32(len); desc->Ext = 0; } /* * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci * dma mapping and fills in the scatter gather entries of the * hpsa command, cp. */ static int hpsa_scatter_gather(struct ctlr_info *h, struct CommandList *cp, struct scsi_cmnd *cmd) { struct scatterlist *sg; int use_sg, i, sg_limit, chained; struct SGDescriptor *curr_sg; BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); use_sg = scsi_dma_map(cmd); if (use_sg < 0) return use_sg; if (!use_sg) goto sglist_finished; /* * If the number of entries is greater than the max for a single list, * then we have a chained list; we will set up all but one entry in the * first list (the last entry is saved for link information); * otherwise, we don't have a chained list and we'll set up at each of * the entries in the one list. */ curr_sg = cp->SG; chained = use_sg > h->max_cmd_sg_entries; sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; scsi_for_each_sg(cmd, sg, sg_limit, i) { hpsa_set_sg_descriptor(curr_sg, sg); curr_sg++; } if (chained) { /* * Continue with the chained list. Set curr_sg to the chained * list. Modify the limit to the total count less the entries * we've already set up. Resume the scan at the list entry * where the previous loop left off. */ curr_sg = h->cmd_sg_list[cp->cmdindex]; sg_limit = use_sg - sg_limit; for_each_sg(sg, sg, sg_limit, i) { hpsa_set_sg_descriptor(curr_sg, sg); curr_sg++; } } /* Back the pointer up to the last entry and mark it as "last". */ (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); if (use_sg + chained > h->maxSG) h->maxSG = use_sg + chained; if (chained) { cp->Header.SGList = h->max_cmd_sg_entries; cp->Header.SGTotal = cpu_to_le16(use_sg + 1); if (hpsa_map_sg_chain_block(h, cp)) { scsi_dma_unmap(cmd); return -1; } return 0; } sglist_finished: cp->Header.SGList = (u8) use_sg; /* no. 
SGs contig in this cmd */ cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ return 0; } static inline void warn_zero_length_transfer(struct ctlr_info *h, u8 *cdb, int cdb_len, const char *func) { dev_warn(&h->pdev->dev, "%s: Blocking zero-length request: CDB:%*phN\n", func, cdb_len, cdb); } #define IO_ACCEL_INELIGIBLE 1 /* zero-length transfers trigger hardware errors. */ static bool is_zero_length_transfer(u8 *cdb) { u32 block_cnt; /* Block zero-length transfer sizes on certain commands. */ switch (cdb[0]) { case READ_10: case WRITE_10: case VERIFY: /* 0x2F */ case WRITE_VERIFY: /* 0x2E */ block_cnt = get_unaligned_be16(&cdb[7]); break; case READ_12: case WRITE_12: case VERIFY_12: /* 0xAF */ case WRITE_VERIFY_12: /* 0xAE */ block_cnt = get_unaligned_be32(&cdb[6]); break; case READ_16: case WRITE_16: case VERIFY_16: /* 0x8F */ block_cnt = get_unaligned_be32(&cdb[10]); break; default: return false; } return block_cnt == 0; } static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) { int is_write = 0; u32 block; u32 block_cnt; /* Perform some CDB fixups if needed using 10 byte reads/writes only */ switch (cdb[0]) { case WRITE_6: case WRITE_12: is_write = 1; fallthrough; case READ_6: case READ_12: if (*cdb_len == 6) { block = (((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3]); block_cnt = cdb[4]; if (block_cnt == 0) block_cnt = 256; } else { BUG_ON(*cdb_len != 12); block = get_unaligned_be32(&cdb[2]); block_cnt = get_unaligned_be32(&cdb[6]); } if (block_cnt > 0xffff) return IO_ACCEL_INELIGIBLE; cdb[0] = is_write ? WRITE_10 : READ_10; cdb[1] = 0; cdb[2] = (u8) (block >> 24); cdb[3] = (u8) (block >> 16); cdb[4] = (u8) (block >> 8); cdb[5] = (u8) (block); cdb[6] = 0; cdb[7] = (u8) (block_cnt >> 8); cdb[8] = (u8) (block_cnt); cdb[9] = 0; *cdb_len = 10; break; } return 0; } static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) { struct scsi_cmnd *cmd = c->scsi_cmd; struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; unsigned int len; unsigned int total_len = 0; struct scatterlist *sg; u64 addr64; int use_sg, i; struct SGDescriptor *curr_sg; u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; /* TODO: implement chaining support */ if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; } BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); if (is_zero_length_transfer(cdb)) { warn_zero_length_transfer(h, cdb, cdb_len, __func__); atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; } if (fixup_ioaccel_cdb(cdb, &cdb_len)) { atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; } c->cmd_type = CMD_IOACCEL1; /* Adjust the DMA address to point to the accelerated command buffer */ c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + (c->cmdindex * sizeof(*cp)); BUG_ON(c->busaddr & 0x0000007F); use_sg = scsi_dma_map(cmd); if (use_sg < 0) { atomic_dec(&phys_disk->ioaccel_cmds_out); return use_sg; } if (use_sg) { curr_sg = cp->SG; scsi_for_each_sg(cmd, sg, use_sg, i) { addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); total_len += len; curr_sg->Addr = cpu_to_le64(addr64); curr_sg->Len = cpu_to_le32(len); curr_sg->Ext = cpu_to_le32(0); curr_sg++; } (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: control |= IOACCEL1_CONTROL_DATA_OUT; break; case DMA_FROM_DEVICE: control |= IOACCEL1_CONTROL_DATA_IN; break; case DMA_NONE: 
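		/* No data phase for this command; mark the control word accordingly. */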
control |= IOACCEL1_CONTROL_NODATAXFER; break; default: dev_err(&h->pdev->dev, "unknown data direction: %d\n", cmd->sc_data_direction); BUG(); break; } } else { control |= IOACCEL1_CONTROL_NODATAXFER; } c->Header.SGList = use_sg; /* Fill out the command structure to submit */ cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); cp->transfer_len = cpu_to_le32(total_len); cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); cp->control = cpu_to_le32(control); memcpy(cp->CDB, cdb, cdb_len); memcpy(cp->CISS_LUN, scsi3addr, 8); /* Tag was already set at init time. */ enqueue_cmd_and_start_io(h, c); return 0; } /* * Queue a command directly to a device behind the controller using the * I/O accelerator path. */ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, struct CommandList *c) { struct scsi_cmnd *cmd = c->scsi_cmd; struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; if (!dev) return -1; c->phys_disk = dev; if (dev->in_reset) return -1; return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); } /* * Set encryption parameters for the ioaccel2 request */ static void set_encrypt_ioaccel2(struct ctlr_info *h, struct CommandList *c, struct io_accel2_cmd *cp) { struct scsi_cmnd *cmd = c->scsi_cmd; struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; struct raid_map_data *map = &dev->raid_map; u64 first_block; /* Are we doing encryption on this device */ if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) return; /* Set the data encryption key index. */ cp->dekindex = map->dekindex; /* Set the encryption enable flag, encoded into direction field. */ cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; /* Set encryption tweak values based on logical block address * If block size is 512, tweak value is LBA. * For other block sizes, tweak is (LBA * block size)/ 512) */ switch (cmd->cmnd[0]) { /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ case READ_6: case WRITE_6: first_block = (((cmd->cmnd[1] & 0x1F) << 16) | (cmd->cmnd[2] << 8) | cmd->cmnd[3]); break; case WRITE_10: case READ_10: /* Required? 
12-byte cdbs eliminated by fixup_ioaccel_cdb */ case WRITE_12: case READ_12: first_block = get_unaligned_be32(&cmd->cmnd[2]); break; case WRITE_16: case READ_16: first_block = get_unaligned_be64(&cmd->cmnd[2]); break; default: dev_err(&h->pdev->dev, "ERROR: %s: size (0x%x) not supported for encryption\n", __func__, cmd->cmnd[0]); BUG(); break; } if (le32_to_cpu(map->volume_blk_size) != 512) first_block = first_block * le32_to_cpu(map->volume_blk_size)/512; cp->tweak_lower = cpu_to_le32(first_block); cp->tweak_upper = cpu_to_le32(first_block >> 32); } static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) { struct scsi_cmnd *cmd = c->scsi_cmd; struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; struct ioaccel2_sg_element *curr_sg; int use_sg, i; struct scatterlist *sg; u64 addr64; u32 len; u32 total_len = 0; if (!cmd->device) return -1; if (!cmd->device->hostdata) return -1; BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); if (is_zero_length_transfer(cdb)) { warn_zero_length_transfer(h, cdb, cdb_len, __func__); atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; } if (fixup_ioaccel_cdb(cdb, &cdb_len)) { atomic_dec(&phys_disk->ioaccel_cmds_out); return IO_ACCEL_INELIGIBLE; } c->cmd_type = CMD_IOACCEL2; /* Adjust the DMA address to point to the accelerated command buffer */ c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + (c->cmdindex * sizeof(*cp)); BUG_ON(c->busaddr & 0x0000007F); memset(cp, 0, sizeof(*cp)); cp->IU_type = IOACCEL2_IU_TYPE; use_sg = scsi_dma_map(cmd); if (use_sg < 0) { atomic_dec(&phys_disk->ioaccel_cmds_out); return use_sg; } if (use_sg) { curr_sg = cp->sg; if (use_sg > h->ioaccel_maxsg) { addr64 = le64_to_cpu( h->ioaccel2_cmd_sg_list[c->cmdindex]->address); curr_sg->address = cpu_to_le64(addr64); curr_sg->length = 0; curr_sg->reserved[0] = 0; curr_sg->reserved[1] = 0; curr_sg->reserved[2] = 0; curr_sg->chain_indicator = IOACCEL2_CHAIN; curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; } scsi_for_each_sg(cmd, sg, use_sg, i) { addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); total_len += len; curr_sg->address = cpu_to_le64(addr64); curr_sg->length = cpu_to_le32(len); curr_sg->reserved[0] = 0; curr_sg->reserved[1] = 0; curr_sg->reserved[2] = 0; curr_sg->chain_indicator = 0; curr_sg++; } /* * Set the last s/g element bit */ (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG; switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: cp->direction &= ~IOACCEL2_DIRECTION_MASK; cp->direction |= IOACCEL2_DIR_DATA_OUT; break; case DMA_FROM_DEVICE: cp->direction &= ~IOACCEL2_DIRECTION_MASK; cp->direction |= IOACCEL2_DIR_DATA_IN; break; case DMA_NONE: cp->direction &= ~IOACCEL2_DIRECTION_MASK; cp->direction |= IOACCEL2_DIR_NO_DATA; break; default: dev_err(&h->pdev->dev, "unknown data direction: %d\n", cmd->sc_data_direction); BUG(); break; } } else { cp->direction &= ~IOACCEL2_DIRECTION_MASK; cp->direction |= IOACCEL2_DIR_NO_DATA; } /* Set encryption parameters, if necessary */ set_encrypt_ioaccel2(h, c, cp); cp->scsi_nexus = cpu_to_le32(ioaccel_handle); cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); memcpy(cp->cdb, cdb, sizeof(cp->cdb)); cp->data_len = cpu_to_le32(total_len); cp->err_ptr = cpu_to_le64(c->busaddr + offsetof(struct io_accel2_cmd, error_data)); cp->err_len = cpu_to_le32(sizeof(cp->error_data)); /* fill in sg elements */ if (use_sg > h->ioaccel_maxsg) { cp->sg_count = 1; cp->sg[0].length = cpu_to_le32(use_sg 
					* sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	if (phys_disk->in_reset) {
		cmd->result = DID_RESET << 16;
		return -1;
	}

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	if (!c->scsi_cmd->device)
		return -1;

	if (!c->scsi_cmd->device->hostdata)
		return -1;

	if (phys_disk->in_reset)
		return -1;

	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr, phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr, phys_disk);
}

static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
*/ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, struct CommandList *c) { struct scsi_cmnd *cmd = c->scsi_cmd; struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; struct raid_map_data *map = &dev->raid_map; struct raid_map_disk_data *dd = &map->data[0]; int is_write = 0; u32 map_index; u64 first_block, last_block; u32 block_cnt; u32 blocks_per_row; u64 first_row, last_row; u32 first_row_offset, last_row_offset; u32 first_column, last_column; u64 r0_first_row, r0_last_row; u32 r5or6_blocks_per_row; u64 r5or6_first_row, r5or6_last_row; u32 r5or6_first_row_offset, r5or6_last_row_offset; u32 r5or6_first_column, r5or6_last_column; u32 total_disks_per_row; u32 stripesize; u32 first_group, last_group, current_group; u32 map_row; u32 disk_handle; u64 disk_block; u32 disk_block_cnt; u8 cdb[16]; u8 cdb_len; u16 strip_size; #if BITS_PER_LONG == 32 u64 tmpdiv; #endif int offload_to_mirror; if (!dev) return -1; if (dev->in_reset) return -1; /* check for valid opcode, get LBA and block count */ switch (cmd->cmnd[0]) { case WRITE_6: is_write = 1; fallthrough; case READ_6: first_block = (((cmd->cmnd[1] & 0x1F) << 16) | (cmd->cmnd[2] << 8) | cmd->cmnd[3]); block_cnt = cmd->cmnd[4]; if (block_cnt == 0) block_cnt = 256; break; case WRITE_10: is_write = 1; fallthrough; case READ_10: first_block = (((u64) cmd->cmnd[2]) << 24) | (((u64) cmd->cmnd[3]) << 16) | (((u64) cmd->cmnd[4]) << 8) | cmd->cmnd[5]; block_cnt = (((u32) cmd->cmnd[7]) << 8) | cmd->cmnd[8]; break; case WRITE_12: is_write = 1; fallthrough; case READ_12: first_block = (((u64) cmd->cmnd[2]) << 24) | (((u64) cmd->cmnd[3]) << 16) | (((u64) cmd->cmnd[4]) << 8) | cmd->cmnd[5]; block_cnt = (((u32) cmd->cmnd[6]) << 24) | (((u32) cmd->cmnd[7]) << 16) | (((u32) cmd->cmnd[8]) << 8) | cmd->cmnd[9]; break; case WRITE_16: is_write = 1; fallthrough; case READ_16: first_block = (((u64) cmd->cmnd[2]) << 56) | (((u64) cmd->cmnd[3]) << 48) | (((u64) cmd->cmnd[4]) << 40) | (((u64) cmd->cmnd[5]) << 32) | (((u64) cmd->cmnd[6]) << 24) | (((u64) cmd->cmnd[7]) << 16) | (((u64) cmd->cmnd[8]) << 8) | cmd->cmnd[9]; block_cnt = (((u32) cmd->cmnd[10]) << 24) | (((u32) cmd->cmnd[11]) << 16) | (((u32) cmd->cmnd[12]) << 8) | cmd->cmnd[13]; break; default: return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ } last_block = first_block + block_cnt - 1; /* check for write to non-RAID-0 */ if (is_write && dev->raid_level != 0) return IO_ACCEL_INELIGIBLE; /* check for invalid block or wraparound */ if (last_block >= le64_to_cpu(map->volume_blk_cnt) || last_block < first_block) return IO_ACCEL_INELIGIBLE; /* calculate stripe information for the request */ blocks_per_row = le16_to_cpu(map->data_disks_per_row) * le16_to_cpu(map->strip_size); strip_size = le16_to_cpu(map->strip_size); #if BITS_PER_LONG == 32 tmpdiv = first_block; (void) do_div(tmpdiv, blocks_per_row); first_row = tmpdiv; tmpdiv = last_block; (void) do_div(tmpdiv, blocks_per_row); last_row = tmpdiv; first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); tmpdiv = first_row_offset; (void) do_div(tmpdiv, strip_size); first_column = tmpdiv; tmpdiv = last_row_offset; (void) do_div(tmpdiv, strip_size); last_column = tmpdiv; #else first_row = first_block / blocks_per_row; last_row = last_block / blocks_per_row; first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); first_column = first_row_offset / strip_size; last_column = last_row_offset 
/ strip_size; #endif /* if this isn't a single row/column then give to the controller */ if ((first_row != last_row) || (first_column != last_column)) return IO_ACCEL_INELIGIBLE; /* proceeding with driver mapping */ total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + le16_to_cpu(map->metadata_disks_per_row); map_row = ((u32)(first_row >> map->parity_rotation_shift)) % le16_to_cpu(map->row_cnt); map_index = (map_row * total_disks_per_row) + first_column; switch (dev->raid_level) { case HPSA_RAID_0: break; /* nothing special to do */ case HPSA_RAID_1: /* Handles load balance across RAID 1 members. * (2-drive R1 and R10 with even # of drives.) * Appropriate for SSDs, not optimal for HDDs * Ensure we have the correct raid_map. */ if (le16_to_cpu(map->layout_map_count) != 2) { hpsa_turn_off_ioaccel_for_device(dev); return IO_ACCEL_INELIGIBLE; } if (dev->offload_to_mirror) map_index += le16_to_cpu(map->data_disks_per_row); dev->offload_to_mirror = !dev->offload_to_mirror; break; case HPSA_RAID_ADM: /* Handles N-way mirrors (R1-ADM) * and R10 with # of drives divisible by 3.) * Ensure we have the correct raid_map. */ if (le16_to_cpu(map->layout_map_count) != 3) { hpsa_turn_off_ioaccel_for_device(dev); return IO_ACCEL_INELIGIBLE; } offload_to_mirror = dev->offload_to_mirror; raid_map_helper(map, offload_to_mirror, &map_index, &current_group); /* set mirror group to use next time */ offload_to_mirror = (offload_to_mirror >= le16_to_cpu(map->layout_map_count) - 1) ? 0 : offload_to_mirror + 1; dev->offload_to_mirror = offload_to_mirror; /* Avoid direct use of dev->offload_to_mirror within this * function since multiple threads might simultaneously * increment it beyond the range of dev->layout_map_count -1. */ break; case HPSA_RAID_5: case HPSA_RAID_6: if (le16_to_cpu(map->layout_map_count) <= 1) break; /* Verify first and last block are in same RAID group */ r5or6_blocks_per_row = le16_to_cpu(map->strip_size) * le16_to_cpu(map->data_disks_per_row); if (r5or6_blocks_per_row == 0) { hpsa_turn_off_ioaccel_for_device(dev); return IO_ACCEL_INELIGIBLE; } stripesize = r5or6_blocks_per_row * le16_to_cpu(map->layout_map_count); #if BITS_PER_LONG == 32 tmpdiv = first_block; first_group = do_div(tmpdiv, stripesize); tmpdiv = first_group; (void) do_div(tmpdiv, r5or6_blocks_per_row); first_group = tmpdiv; tmpdiv = last_block; last_group = do_div(tmpdiv, stripesize); tmpdiv = last_group; (void) do_div(tmpdiv, r5or6_blocks_per_row); last_group = tmpdiv; #else first_group = (first_block % stripesize) / r5or6_blocks_per_row; last_group = (last_block % stripesize) / r5or6_blocks_per_row; #endif if (first_group != last_group) return IO_ACCEL_INELIGIBLE; /* Verify request is in a single row of RAID 5/6 */ #if BITS_PER_LONG == 32 tmpdiv = first_block; (void) do_div(tmpdiv, stripesize); first_row = r5or6_first_row = r0_first_row = tmpdiv; tmpdiv = last_block; (void) do_div(tmpdiv, stripesize); r5or6_last_row = r0_last_row = tmpdiv; #else first_row = r5or6_first_row = r0_first_row = first_block / stripesize; r5or6_last_row = r0_last_row = last_block / stripesize; #endif if (r5or6_first_row != r5or6_last_row) return IO_ACCEL_INELIGIBLE; /* Verify request is in a single column */ #if BITS_PER_LONG == 32 tmpdiv = first_block; first_row_offset = do_div(tmpdiv, stripesize); tmpdiv = first_row_offset; first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); r5or6_first_row_offset = first_row_offset; tmpdiv = last_block; r5or6_last_row_offset = do_div(tmpdiv, stripesize); tmpdiv = r5or6_last_row_offset; 
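		/*
		 * Note: do_div() divides its 64-bit first argument in place
		 * (leaving the quotient there) and returns the remainder,
		 * which is why each division in this 32-bit branch is done
		 * as a separate remainder/quotient step.
		 */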
r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); tmpdiv = r5or6_first_row_offset; (void) do_div(tmpdiv, map->strip_size); first_column = r5or6_first_column = tmpdiv; tmpdiv = r5or6_last_row_offset; (void) do_div(tmpdiv, map->strip_size); r5or6_last_column = tmpdiv; #else first_row_offset = r5or6_first_row_offset = (u32)((first_block % stripesize) % r5or6_blocks_per_row); r5or6_last_row_offset = (u32)((last_block % stripesize) % r5or6_blocks_per_row); first_column = r5or6_first_column = r5or6_first_row_offset / le16_to_cpu(map->strip_size); r5or6_last_column = r5or6_last_row_offset / le16_to_cpu(map->strip_size); #endif if (r5or6_first_column != r5or6_last_column) return IO_ACCEL_INELIGIBLE; /* Request is eligible */ map_row = ((u32)(first_row >> map->parity_rotation_shift)) % le16_to_cpu(map->row_cnt); map_index = (first_group * (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + (map_row * total_disks_per_row) + first_column; break; default: return IO_ACCEL_INELIGIBLE; } if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) return IO_ACCEL_INELIGIBLE; c->phys_disk = dev->phys_disk[map_index]; if (!c->phys_disk) return IO_ACCEL_INELIGIBLE; disk_handle = dd[map_index].ioaccel_handle; disk_block = le64_to_cpu(map->disk_starting_blk) + first_row * le16_to_cpu(map->strip_size) + (first_row_offset - first_column * le16_to_cpu(map->strip_size)); disk_block_cnt = block_cnt; /* handle differing logical/physical block sizes */ if (map->phys_blk_shift) { disk_block <<= map->phys_blk_shift; disk_block_cnt <<= map->phys_blk_shift; } BUG_ON(disk_block_cnt > 0xffff); /* build the new CDB for the physical disk I/O */ if (disk_block > 0xffffffff) { cdb[0] = is_write ? WRITE_16 : READ_16; cdb[1] = 0; cdb[2] = (u8) (disk_block >> 56); cdb[3] = (u8) (disk_block >> 48); cdb[4] = (u8) (disk_block >> 40); cdb[5] = (u8) (disk_block >> 32); cdb[6] = (u8) (disk_block >> 24); cdb[7] = (u8) (disk_block >> 16); cdb[8] = (u8) (disk_block >> 8); cdb[9] = (u8) (disk_block); cdb[10] = (u8) (disk_block_cnt >> 24); cdb[11] = (u8) (disk_block_cnt >> 16); cdb[12] = (u8) (disk_block_cnt >> 8); cdb[13] = (u8) (disk_block_cnt); cdb[14] = 0; cdb[15] = 0; cdb_len = 16; } else { cdb[0] = is_write ? WRITE_10 : READ_10; cdb[1] = 0; cdb[2] = (u8) (disk_block >> 24); cdb[3] = (u8) (disk_block >> 16); cdb[4] = (u8) (disk_block >> 8); cdb[5] = (u8) (disk_block); cdb[6] = 0; cdb[7] = (u8) (disk_block_cnt >> 8); cdb[8] = (u8) (disk_block_cnt); cdb[9] = 0; cdb_len = 10; } return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, dev->scsi3addr, dev->phys_disk[map_index]); } /* * Submit commands down the "normal" RAID stack path * All callers to hpsa_ciss_submit must check lockup_detected * beforehand, before (opt.) and after calling cmd_alloc */ static int hpsa_ciss_submit(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev) { cmd->host_scribble = (unsigned char *) c; c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; c->Header.ReplyQueue = 0; /* unused in simple mode */ memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); /* Fill in the request block... 
*/ c->Request.Timeout = 0; BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); c->Request.CDBLen = cmd->cmd_len; memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: c->Request.type_attr_dir = TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); break; case DMA_FROM_DEVICE: c->Request.type_attr_dir = TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); break; case DMA_NONE: c->Request.type_attr_dir = TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); break; case DMA_BIDIRECTIONAL: /* This can happen if a buggy application does a scsi passthru * and sets both inlen and outlen to non-zero. ( see * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) */ c->Request.type_attr_dir = TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); /* This is technically wrong, and hpsa controllers should * reject it with CMD_INVALID, which is the most correct * response, but non-fibre backends appear to let it * slide by, and give the same results as if this field * were set correctly. Either way is acceptable for * our purposes here. */ break; default: dev_err(&h->pdev->dev, "unknown data direction: %d\n", cmd->sc_data_direction); BUG(); break; } if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } if (dev->in_reset) { hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } c->device = dev; enqueue_cmd_and_start_io(h, c); /* the cmd'll come back via intr handler in complete_scsi_command() */ return 0; } static void hpsa_cmd_init(struct ctlr_info *h, int index, struct CommandList *c) { dma_addr_t cmd_dma_handle, err_dma_handle; /* Zero out all of commandlist except the last field, refcount */ memset(c, 0, offsetof(struct CommandList, refcount)); c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); c->err_info = h->errinfo_pool + index; memset(c->err_info, 0, sizeof(*c->err_info)); err_dma_handle = h->errinfo_pool_dhandle + index * sizeof(*c->err_info); c->cmdindex = index; c->busaddr = (u32) cmd_dma_handle; c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); c->h = h; c->scsi_cmd = SCSI_CMD_IDLE; } static void hpsa_preinitialize_commands(struct ctlr_info *h) { int i; for (i = 0; i < h->nr_cmds; i++) { struct CommandList *c = h->cmd_pool + i; hpsa_cmd_init(h, i, c); atomic_set(&c->refcount, 0); } } static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, struct CommandList *c) { dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); BUG_ON(c->cmdindex != index); memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); memset(c->err_info, 0, sizeof(*c->err_info)); c->busaddr = (u32) cmd_dma_handle; } static int hpsa_ioaccel_submit(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, bool retry) { struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; int rc = IO_ACCEL_INELIGIBLE; if (!dev) return SCSI_MLQUEUE_HOST_BUSY; if (dev->in_reset) return SCSI_MLQUEUE_HOST_BUSY; if (hpsa_simple_mode) return IO_ACCEL_INELIGIBLE; cmd->host_scribble = (unsigned char *) c; if (dev->offload_enabled) { hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; c->device = dev; if (retry) /* Resubmit but do not increment device->commands_outstanding. */ c->retry_pending = true; rc = hpsa_scsi_ioaccel_raid_map(h, c); if (rc < 0) /* scsi_dma_map failed. 
*/ rc = SCSI_MLQUEUE_HOST_BUSY; } else if (dev->hba_ioaccel_enabled) { hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; c->device = dev; if (retry) /* Resubmit but do not increment device->commands_outstanding. */ c->retry_pending = true; rc = hpsa_scsi_ioaccel_direct_map(h, c); if (rc < 0) /* scsi_dma_map failed. */ rc = SCSI_MLQUEUE_HOST_BUSY; } return rc; } static void hpsa_command_resubmit_worker(struct work_struct *work) { struct scsi_cmnd *cmd; struct hpsa_scsi_dev_t *dev; struct CommandList *c = container_of(work, struct CommandList, work); cmd = c->scsi_cmd; dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; return hpsa_cmd_free_and_done(c->h, c, cmd); } if (dev->in_reset) { cmd->result = DID_RESET << 16; return hpsa_cmd_free_and_done(c->h, c, cmd); } if (c->cmd_type == CMD_IOACCEL2) { struct ctlr_info *h = c->h; struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; int rc; if (c2->error_data.serv_response == IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { /* Resubmit with the retry_pending flag set. */ rc = hpsa_ioaccel_submit(h, c, cmd, true); if (rc == 0) return; if (rc == SCSI_MLQUEUE_HOST_BUSY) { /* * If we get here, it means dma mapping failed. * Try again via scsi mid layer, which will * then get SCSI_MLQUEUE_HOST_BUSY. */ cmd->result = DID_IMM_RETRY << 16; return hpsa_cmd_free_and_done(h, c, cmd); } /* else, fall thru and resubmit down CISS path */ } } hpsa_cmd_partial_init(c->h, c->cmdindex, c); /* * Here we have not come in though queue_command, so we * can set the retry_pending flag to true for a driver initiated * retry attempt (I.E. not a SML retry). * I.E. We are submitting a driver initiated retry. * Note: hpsa_ciss_submit does not zero out the command fields like * ioaccel submit does. */ c->retry_pending = true; if (hpsa_ciss_submit(c->h, c, cmd, dev)) { /* * If we get here, it means dma mapping failed. Try * again via scsi mid layer, which will then get * SCSI_MLQUEUE_HOST_BUSY. * * hpsa_ciss_submit will have already freed c * if it encountered a dma mapping failure. */ cmd->result = DID_IMM_RETRY << 16; scsi_done(cmd); } } /* Running in struct Scsi_Host->host_lock less mode */ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; struct CommandList *c; int rc = 0; /* Get the ptr to our adapter structure out of cmd->host. */ h = sdev_to_hba(cmd->device); BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0); dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; scsi_done(cmd); return 0; } if (dev->removed) { cmd->result = DID_NO_CONNECT << 16; scsi_done(cmd); return 0; } if (unlikely(lockup_detected(h))) { cmd->result = DID_NO_CONNECT << 16; scsi_done(cmd); return 0; } if (dev->in_reset) return SCSI_MLQUEUE_DEVICE_BUSY; c = cmd_tagged_alloc(h, cmd); if (c == NULL) return SCSI_MLQUEUE_DEVICE_BUSY; /* * This is necessary because the SML doesn't zero out this field during * error recovery. */ cmd->result = 0; /* * Call alternate submit routine for I/O accelerated commands. * Retries always go down the normal I/O path. * Note: If cmd->retries is non-zero, then this is a SML * initiated retry and not a driver initiated retry. * This command has been obtained from cmd_tagged_alloc * and is therefore a brand-new command. */ if (likely(cmd->retries == 0 && !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) && h->acciopath_status)) { /* Submit with the retry_pending flag unset. 
*/ rc = hpsa_ioaccel_submit(h, c, cmd, false); if (rc == 0) return 0; if (rc == SCSI_MLQUEUE_HOST_BUSY) { hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } } return hpsa_ciss_submit(h, c, cmd, dev); } static void hpsa_scan_complete(struct ctlr_info *h) { unsigned long flags; spin_lock_irqsave(&h->scan_lock, flags); h->scan_finished = 1; wake_up(&h->scan_wait_queue); spin_unlock_irqrestore(&h->scan_lock, flags); } static void hpsa_scan_start(struct Scsi_Host *sh) { struct ctlr_info *h = shost_to_hba(sh); unsigned long flags; /* * Don't let rescans be initiated on a controller known to be locked * up. If the controller locks up *during* a rescan, that thread is * probably hosed, but at least we can prevent new rescan threads from * piling up on a locked up controller. */ if (unlikely(lockup_detected(h))) return hpsa_scan_complete(h); /* * If a scan is already waiting to run, no need to add another */ spin_lock_irqsave(&h->scan_lock, flags); if (h->scan_waiting) { spin_unlock_irqrestore(&h->scan_lock, flags); return; } spin_unlock_irqrestore(&h->scan_lock, flags); /* wait until any scan already in progress is finished. */ while (1) { spin_lock_irqsave(&h->scan_lock, flags); if (h->scan_finished) break; h->scan_waiting = 1; spin_unlock_irqrestore(&h->scan_lock, flags); wait_event(h->scan_wait_queue, h->scan_finished); /* Note: We don't need to worry about a race between this * thread and driver unload because the midlayer will * have incremented the reference count, so unload won't * happen if we're in here. */ } h->scan_finished = 0; /* mark scan as in progress */ h->scan_waiting = 0; spin_unlock_irqrestore(&h->scan_lock, flags); if (unlikely(lockup_detected(h))) return hpsa_scan_complete(h); /* * Do the scan after a reset completion */ spin_lock_irqsave(&h->reset_lock, flags); if (h->reset_in_progress) { h->drv_req_rescan = 1; spin_unlock_irqrestore(&h->reset_lock, flags); hpsa_scan_complete(h); return; } spin_unlock_irqrestore(&h->reset_lock, flags); hpsa_update_scsi_devices(h); hpsa_scan_complete(h); } static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; if (!logical_drive) return -ENODEV; if (qdepth < 1) qdepth = 1; else if (qdepth > logical_drive->queue_depth) qdepth = logical_drive->queue_depth; return scsi_change_queue_depth(sdev, qdepth); } static int hpsa_scan_finished(struct Scsi_Host *sh, unsigned long elapsed_time) { struct ctlr_info *h = shost_to_hba(sh); unsigned long flags; int finished; spin_lock_irqsave(&h->scan_lock, flags); finished = h->scan_finished; spin_unlock_irqrestore(&h->scan_lock, flags); return finished; } static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; } sh->io_port = 0; sh->n_io_port = 0; sh->this_id = -1; sh->max_channel = 3; sh->max_cmd_len = MAX_COMMAND_SIZE; sh->max_lun = HPSA_MAX_LUN; sh->max_id = HPSA_MAX_LUN; sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; sh->cmd_per_lun = sh->can_queue; sh->sg_tablesize = h->maxsgentries; sh->transportt = hpsa_sas_transport_template; sh->hostdata[0] = (unsigned long) h; sh->irq = pci_irq_vector(h->pdev, 0); sh->unique_id = sh->irq; h->scsi_host = sh; return 0; } static int hpsa_scsi_add_host(struct ctlr_info *h) { int rv; rv = scsi_add_host(h->scsi_host, &h->pdev->dev); if (rv) { dev_err(&h->pdev->dev, "scsi_add_host failed\n"); return rv; } 
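	/*
	 * scsi_scan_host() hands off to the SCSI midlayer, which drives the
	 * scan through the template's scan_start/scan_finished hooks, i.e.
	 * hpsa_scan_start() and ultimately hpsa_update_scsi_devices() above.
	 */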
scsi_scan_host(h->scsi_host); return 0; } /* * The block layer has already gone to the trouble of picking out a unique, * small-integer tag for this request. We use an offset from that value as * an index to select our command block. (The offset allows us to reserve the * low-numbered entries for our own uses.) */ static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) { int idx = scsi_cmd_to_rq(scmd)->tag; if (idx < 0) return idx; /* Offset to leave space for internal cmds. */ return idx += HPSA_NRESERVED_CMDS; } /* * Send a TEST_UNIT_READY command to the specified LUN using the specified * reply queue; returns zero if the unit is ready, and non-zero otherwise. */ static int hpsa_send_test_unit_ready(struct ctlr_info *h, struct CommandList *c, unsigned char lunaddr[], int reply_queue) { int rc; /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); if (rc) return rc; /* no unmap needed here because no data xfer. */ /* Check if the unit is already ready. */ if (c->err_info->CommandStatus == CMD_SUCCESS) return 0; /* * The first command sent after reset will receive "unit attention" to * indicate that the LUN has been reset...this is actually what we're * looking for (but, success is good too). */ if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && (c->err_info->SenseInfo[2] == NO_SENSE || c->err_info->SenseInfo[2] == UNIT_ATTENTION)) return 0; return 1; } /* * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; * returns zero when the unit is ready, and non-zero when giving up. */ static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, struct CommandList *c, unsigned char lunaddr[], int reply_queue) { int rc; int count = 0; int waittime = 1; /* seconds */ /* Send test unit ready until device ready, or give up. */ for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { /* * Wait for a bit. do this first, because if we send * the TUR right away, the reset will just abort it. */ msleep(1000 * waittime); rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); if (!rc) break; /* Increase wait time with each try, up to a point. */ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) waittime *= 2; dev_warn(&h->pdev->dev, "waiting %d secs for device to become ready.\n", waittime); } return rc; } static int wait_for_device_to_become_ready(struct ctlr_info *h, unsigned char lunaddr[], int reply_queue) { int first_queue; int last_queue; int rq; int rc = 0; struct CommandList *c; c = cmd_alloc(h); /* * If no specific reply queue was requested, then send the TUR * repeatedly, requesting a reply on each reply queue; otherwise execute * the loop exactly once using only the specified queue. */ if (reply_queue == DEFAULT_REPLY_QUEUE) { first_queue = 0; last_queue = h->nreply_queues - 1; } else { first_queue = reply_queue; last_queue = reply_queue; } for (rq = first_queue; rq <= last_queue; rq++) { rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); if (rc) break; } if (rc) dev_warn(&h->pdev->dev, "giving up on device.\n"); else dev_warn(&h->pdev->dev, "device is ready.\n"); cmd_free(h, c); return rc; } /* Need at least one of these error handlers to keep ../scsi/hosts.c from * complaining. Doing a host- or bus-reset can't do anything good here. 
*/ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) { int rc = SUCCESS; int i; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev = NULL; u8 reset_type; char msg[48]; unsigned long flags; /* find the controller to which the command to be aborted was sent */ h = sdev_to_hba(scsicmd->device); if (h == NULL) /* paranoia */ return FAILED; spin_lock_irqsave(&h->reset_lock, flags); h->reset_in_progress = 1; spin_unlock_irqrestore(&h->reset_lock, flags); if (lockup_detected(h)) { rc = FAILED; goto return_reset_status; } dev = scsicmd->device->hostdata; if (!dev) { dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); rc = FAILED; goto return_reset_status; } if (dev->devtype == TYPE_ENCLOSURE) { rc = SUCCESS; goto return_reset_status; } /* if controller locked up, we can guarantee command won't complete */ if (lockup_detected(h)) { snprintf(msg, sizeof(msg), "cmd %d RESET FAILED, lockup detected", hpsa_get_cmd_index(scsicmd)); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); rc = FAILED; goto return_reset_status; } /* this reset request might be the result of a lockup; check */ if (detect_controller_lockup(h)) { snprintf(msg, sizeof(msg), "cmd %d RESET FAILED, new lockup detected", hpsa_get_cmd_index(scsicmd)); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); rc = FAILED; goto return_reset_status; } /* Do not attempt on controller */ if (is_hba_lunid(dev->scsi3addr)) { rc = SUCCESS; goto return_reset_status; } if (is_logical_dev_addr_mode(dev->scsi3addr)) reset_type = HPSA_DEVICE_RESET_MSG; else reset_type = HPSA_PHYS_TARGET_RESET; sprintf(msg, "resetting %s", reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); /* * wait to see if any commands will complete before sending reset */ dev->in_reset = true; /* block any new cmds from OS for this device */ for (i = 0; i < 10; i++) { if (atomic_read(&dev->commands_outstanding) > 0) msleep(1000); else break; } /* send a reset to the SCSI LUN which the command was sent to */ rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); if (rc == 0) rc = SUCCESS; else rc = FAILED; sprintf(msg, "reset %s %s", reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", rc == SUCCESS ? "completed successfully" : "failed"); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); return_reset_status: spin_lock_irqsave(&h->reset_lock, flags); h->reset_in_progress = 0; if (dev) dev->in_reset = false; spin_unlock_irqrestore(&h->reset_lock, flags); return rc; } /* * For operations with an associated SCSI command, a command block is allocated * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the * block request tag as an index into a table of entries. cmd_tagged_free() is * the complement, although cmd_free() may be called instead. * This function is only called for new requests from queue_command. */ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, struct scsi_cmnd *scmd) { int idx = hpsa_get_cmd_index(scmd); struct CommandList *c = h->cmd_pool + idx; if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); /* The index value comes from the block layer, so if it's out of * bounds, it's probably not our bug. */ BUG(); } if (unlikely(!hpsa_is_cmd_idle(c))) { /* * We expect that the SCSI layer will hand us a unique tag * value. 
Thus, there should never be a collision here between * two requests...because if the selected command isn't idle * then someone is going to be very disappointed. */ if (idx != h->last_collision_tag) { /* Print once per tag */ dev_warn(&h->pdev->dev, "%s: tag collision (tag=%d)\n", __func__, idx); if (scmd) scsi_print_command(scmd); h->last_collision_tag = idx; } return NULL; } atomic_inc(&c->refcount); hpsa_cmd_partial_init(h, idx, c); /* * This is a new command obtained from queue_command so * there have not been any driver initiated retry attempts. */ c->retry_pending = false; return c; } static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) { /* * Release our reference to the block. We don't need to do anything * else to free it, because it is accessed by index. */ (void)atomic_dec(&c->refcount); } /* * For operations that cannot sleep, a command block is allocated at init, * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track * which ones are free or in use. Lock must be held when calling this. * cmd_free() is the complement. * This function never gives up and returns NULL. If it hangs, * another thread must call cmd_free() to free some tags. */ static struct CommandList *cmd_alloc(struct ctlr_info *h) { struct CommandList *c; int refcount, i; int offset = 0; /* * There is some *extremely* small but non-zero chance that that * multiple threads could get in here, and one thread could * be scanning through the list of bits looking for a free * one, but the free ones are always behind him, and other * threads sneak in behind him and eat them before he can * get to them, so that while there is always a free one, a * very unlucky thread might be starved anyway, never able to * beat the other threads. In reality, this happens so * infrequently as to be indistinguishable from never. * * Note that we start allocating commands before the SCSI host structure * is initialized. Since the search starts at bit zero, this * all works, since we have at least one command structure available; * however, it means that the structures with the low indexes have to be * reserved for driver-initiated requests, while requests from the block * layer will use the higher indexes. */ for (;;) { i = find_next_zero_bit(h->cmd_pool_bits, HPSA_NRESERVED_CMDS, offset); if (unlikely(i >= HPSA_NRESERVED_CMDS)) { offset = 0; continue; } c = h->cmd_pool + i; refcount = atomic_inc_return(&c->refcount); if (unlikely(refcount > 1)) { cmd_free(h, c); /* already in use */ offset = (i + 1) % HPSA_NRESERVED_CMDS; continue; } set_bit(i, h->cmd_pool_bits); break; /* it's ours now. */ } hpsa_cmd_partial_init(h, i, c); c->device = NULL; /* * cmd_alloc is for "internal" commands and they are never * retried. */ c->retry_pending = false; return c; } /* * This is the complementary operation to cmd_alloc(). Note, however, in some * corner cases it may also be used to free blocks allocated by * cmd_tagged_alloc() in which case the ref-count decrement does the trick and * the clear-bit is harmless. 
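* (For block-layer tags the pool bit was never set, so clearing it below is a no-op.)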
*/ static void cmd_free(struct ctlr_info *h, struct CommandList *c) { if (atomic_dec_and_test(&c->refcount)) { int i; i = c - h->cmd_pool; clear_bit(i, h->cmd_pool_bits); } } #ifdef CONFIG_COMPAT static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { struct ctlr_info *h = sdev_to_hba(dev); IOCTL32_Command_struct __user *arg32 = arg; IOCTL_Command_struct arg64; int err; u32 cp; if (!arg) return -EINVAL; memset(&arg64, 0, sizeof(arg64)); if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf))) return -EFAULT; if (get_user(cp, &arg32->buf)) return -EFAULT; arg64.buf = compat_ptr(cp); if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; err = hpsa_passthru_ioctl(h, &arg64); atomic_inc(&h->passthru_cmds_avail); if (err) return err; if (copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(arg32->error_info))) return -EFAULT; return 0; } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { struct ctlr_info *h = sdev_to_hba(dev); BIG_IOCTL32_Command_struct __user *arg32 = arg; BIG_IOCTL_Command_struct arg64; int err; u32 cp; if (!arg) return -EINVAL; memset(&arg64, 0, sizeof(arg64)); if (copy_from_user(&arg64, arg32, offsetof(BIG_IOCTL32_Command_struct, buf))) return -EFAULT; if (get_user(cp, &arg32->buf)) return -EFAULT; arg64.buf = compat_ptr(cp); if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; err = hpsa_big_passthru_ioctl(h, &arg64); atomic_inc(&h->passthru_cmds_avail); if (err) return err; if (copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(arg32->error_info))) return -EFAULT; return 0; } static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg) { switch (cmd) { case CCISS_GETPCIINFO: case CCISS_GETINTINFO: case CCISS_SETINTINFO: case CCISS_GETNODENAME: case CCISS_SETNODENAME: case CCISS_GETHEARTBEAT: case CCISS_GETBUSTYPES: case CCISS_GETFIRMVER: case CCISS_GETDRIVVER: case CCISS_REVALIDVOLS: case CCISS_DEREGDISK: case CCISS_REGNEWDISK: case CCISS_REGNEWD: case CCISS_RESCANDISK: case CCISS_GETLUNINFO: return hpsa_ioctl(dev, cmd, arg); case CCISS_PASSTHRU32: return hpsa_ioctl32_passthru(dev, cmd, arg); case CCISS_BIG_PASSTHRU32: return hpsa_ioctl32_big_passthru(dev, cmd, arg); default: return -ENOIOCTLCMD; } } #endif static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) { struct hpsa_pci_info pciinfo; if (!argp) return -EINVAL; pciinfo.domain = pci_domain_nr(h->pdev->bus); pciinfo.bus = h->pdev->bus->number; pciinfo.dev_fn = h->pdev->devfn; pciinfo.board_id = h->board_id; if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) return -EFAULT; return 0; } static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) { DriverVer_type DriverVer; unsigned char vmaj, vmin, vsubmin; int rc; rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", &vmaj, &vmin, &vsubmin); if (rc != 3) { dev_info(&h->pdev->dev, "driver version string '%s' " "unrecognized.", HPSA_DRIVER_VERSION); vmaj = 0; vmin = 0; vsubmin = 0; } DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; if (!argp) return -EINVAL; if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) return -EFAULT; return 0; } static int hpsa_passthru_ioctl(struct ctlr_info *h, IOCTL_Command_struct *iocommand) { struct CommandList *c; char *buff = NULL; u64 temp64; int rc = 0; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if ((iocommand->buf_size < 1) && (iocommand->Request.Type.Direction != XFER_NONE)) { return -EINVAL; } if 
(iocommand->buf_size > 0) { buff = kmalloc(iocommand->buf_size, GFP_KERNEL); if (buff == NULL) return -ENOMEM; if (iocommand->Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand->buf, iocommand->buf_size)) { rc = -EFAULT; goto out_kfree; } } else { memset(buff, 0, iocommand->buf_size); } } c = cmd_alloc(h); /* Fill in the command type */ c->cmd_type = CMD_IOCTL_PEND; c->scsi_cmd = SCSI_CMD_BUSY; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand->buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; c->Header.SGTotal = cpu_to_le16(1); } else { /* no buffers to fill */ c->Header.SGList = 0; c->Header.SGTotal = cpu_to_le16(0); } memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); /* Fill in Request block */ memcpy(&c->Request, &iocommand->Request, sizeof(c->Request)); /* Fill in the scatter gather information */ if (iocommand->buf_size > 0) { temp64 = dma_map_single(&h->pdev->dev, buff, iocommand->buf_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { c->SG[0].Addr = cpu_to_le64(0); c->SG[0].Len = cpu_to_le32(0); rc = -ENOMEM; goto out; } c->SG[0].Addr = cpu_to_le64(temp64); c->SG[0].Len = cpu_to_le32(iocommand->buf_size); c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (iocommand->buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); if (rc) { rc = -EIO; goto out; } /* Copy the error information out */ memcpy(&iocommand->error_info, c->err_info, sizeof(iocommand->error_info)); if ((iocommand->Request.Type.Direction & XFER_READ) && iocommand->buf_size > 0) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { rc = -EFAULT; goto out; } } out: cmd_free(h, c); out_kfree: kfree(buff); return rc; } static int hpsa_big_passthru_ioctl(struct ctlr_info *h, BIG_IOCTL_Command_struct *ioc) { struct CommandList *c; unsigned char **buff = NULL; int *buff_size = NULL; u64 temp64; BYTE sg_used = 0; int status = 0; u32 left; u32 sz; BYTE __user *data_ptr; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if ((ioc->buf_size < 1) && (ioc->Request.Type.Direction != XFER_NONE)) return -EINVAL; /* Check kmalloc limits using all SGs */ if (ioc->malloc_size > MAX_KMALLOC_SIZE) return -EINVAL; if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) return -EINVAL; buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL); if (!buff_size) { status = -ENOMEM; goto cleanup1; } left = ioc->buf_size; data_ptr = ioc->buf; while (left) { sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; buff_size[sg_used] = sz; buff[sg_used] = kmalloc(sz, GFP_KERNEL); if (buff[sg_used] == NULL) { status = -ENOMEM; goto cleanup1; } if (ioc->Request.Type.Direction & XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -EFAULT; goto cleanup1; } } else memset(buff[sg_used], 0, sz); left -= sz; data_ptr += sz; sg_used++; } c = cmd_alloc(h); c->cmd_type = CMD_IOCTL_PEND; c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; c->Header.SGList = (u8) sg_used; c->Header.SGTotal = cpu_to_le16(sg_used); memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); if (ioc->buf_size > 0) { int i; for (i = 0; i < sg_used; i++) { temp64 = dma_map_single(&h->pdev->dev, buff[i], buff_size[i], DMA_BIDIRECTIONAL); if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { c->SG[i].Addr = cpu_to_le64(0); c->SG[i].Len = cpu_to_le32(0); hpsa_pci_unmap(h->pdev, c, i, DMA_BIDIRECTIONAL); status = -ENOMEM; goto cleanup0; } c->SG[i].Addr = cpu_to_le64(temp64); c->SG[i].Len = cpu_to_le32(buff_size[i]); c->SG[i].Ext = cpu_to_le32(0); } c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); if (status) { status = -EIO; goto cleanup0; } /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { int i; /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { if (copy_to_user(ptr, buff[i], buff_size[i])) { status = -EFAULT; goto cleanup0; } ptr += buff_size[i]; } } status = 0; cleanup0: cmd_free(h, c); cleanup1: if (buff) { int i; for (i = 0; i < sg_used; i++) kfree(buff[i]); kfree(buff); } kfree(buff_size); return status; } static void check_ioctl_unit_attention(struct ctlr_info *h, struct CommandList *c) { if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void) check_for_unit_attention(h, c); } /* * ioctl */ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *argp) { struct ctlr_info *h = sdev_to_hba(dev); int rc; switch (cmd) { case CCISS_DEREGDISK: case CCISS_REGNEWDISK: case CCISS_REGNEWD: hpsa_scan_start(h->scsi_host); return 0; case CCISS_GETPCIINFO: return hpsa_getpciinfo_ioctl(h, argp); case CCISS_GETDRIVVER: return hpsa_getdrivver_ioctl(h, argp); case CCISS_PASSTHRU: { IOCTL_Command_struct iocommand; if (!argp) return -EINVAL; if (copy_from_user(&iocommand, argp, sizeof(iocommand))) return -EFAULT; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; rc = hpsa_passthru_ioctl(h, &iocommand); atomic_inc(&h->passthru_cmds_avail); if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) rc = -EFAULT; return rc; } case CCISS_BIG_PASSTHRU: { BIG_IOCTL_Command_struct ioc; if (!argp) return -EINVAL; if (copy_from_user(&ioc, argp, sizeof(ioc))) return -EFAULT; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; rc = hpsa_big_passthru_ioctl(h, &ioc); atomic_inc(&h->passthru_cmds_avail); if (!rc && copy_to_user(argp, &ioc, sizeof(ioc))) rc = -EFAULT; return rc; } default: return -ENOTTY; } } static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) { struct CommandList *c; c = cmd_alloc(h); /* fill_cmd can't fail here, no data buffer to map */ (void) 
fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, RAID_CTLR_LUNID, TYPE_MSG); c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ c->waiting = NULL; enqueue_cmd_and_start_io(h, c); /* Don't wait for completion, the reset won't complete. Don't free * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. */ return; } static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type) { enum dma_data_direction dir = DMA_NONE; c->cmd_type = CMD_IOCTL_PEND; c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; if (buff != NULL && size > 0) { c->Header.SGList = 1; c->Header.SGTotal = cpu_to_le16(1); } else { c->Header.SGList = 0; c->Header.SGTotal = cpu_to_le16(0); } memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); if (cmd_type == TYPE_CMD) { switch (cmd) { case HPSA_INQUIRY: /* are we trying to read a vital product page */ if (page_code & VPD_PAGE) { c->Request.CDB[1] = 0x01; c->Request.CDB[2] = (page_code & 0xff); } c->Request.CDBLen = 6; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = HPSA_INQUIRY; c->Request.CDB[4] = size & 0xFF; break; case RECEIVE_DIAGNOSTIC: c->Request.CDBLen = 6; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[1] = 1; c->Request.CDB[2] = 1; c->Request.CDB[3] = (size >> 8) & 0xFF; c->Request.CDB[4] = size & 0xFF; break; case HPSA_REPORT_LOG: case HPSA_REPORT_PHYS: /* Talking to controller so It's a physical command mode = 00 target = 0. Nothing to write. */ c->Request.CDBLen = 12; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0xFF; c->Request.CDB[9] = size & 0xFF; break; case BMIC_SENSE_DIAG_OPTIONS: c->Request.CDBLen = 16; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; /* Spec says this should be BMIC_WRITE */ c->Request.CDB[0] = BMIC_READ; c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; break; case BMIC_SET_DIAG_OPTIONS: c->Request.CDBLen = 16; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_WRITE); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_WRITE; c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; break; case HPSA_CACHE_FLUSH: c->Request.CDBLen = 12; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_WRITE); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_WRITE; c->Request.CDB[6] = BMIC_CACHE_FLUSH; c->Request.CDB[7] = (size >> 8) & 0xFF; c->Request.CDB[8] = size & 0xFF; break; case TEST_UNIT_READY: c->Request.CDBLen = 6; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); c->Request.Timeout = 0; break; case HPSA_GET_RAID_MAP: c->Request.CDBLen = 12; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = HPSA_CISS_READ; c->Request.CDB[1] = cmd; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0xFF; c->Request.CDB[9] = size & 0xFF; break; case BMIC_SENSE_CONTROLLER_PARAMETERS: c->Request.CDBLen = 10; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 
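/* BMIC read: BMIC_READ opcode in CDB[0], subcommand code in CDB[6] */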
c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_READ; c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0xFF; break; case BMIC_IDENTIFY_PHYSICAL_DEVICE: c->Request.CDBLen = 10; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_READ; c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0XFF; break; case BMIC_SENSE_SUBSYSTEM_INFORMATION: c->Request.CDBLen = 10; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_READ; c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0XFF; break; case BMIC_SENSE_STORAGE_BOX_PARAMS: c->Request.CDBLen = 10; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_READ; c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0XFF; break; case BMIC_IDENTIFY_CONTROLLER: c->Request.CDBLen = 10; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_READ; c->Request.CDB[1] = 0; c->Request.CDB[2] = 0; c->Request.CDB[3] = 0; c->Request.CDB[4] = 0; c->Request.CDB[5] = 0; c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0XFF; c->Request.CDB[9] = 0; break; default: dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); BUG(); } } else if (cmd_type == TYPE_MSG) { switch (cmd) { case HPSA_PHYS_TARGET_RESET: c->Request.CDBLen = 16; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); c->Request.Timeout = 0; /* Don't time out */ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = HPSA_RESET; c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; /* Physical target reset needs no control bytes 4-7*/ c->Request.CDB[4] = 0x00; c->Request.CDB[5] = 0x00; c->Request.CDB[6] = 0x00; c->Request.CDB[7] = 0x00; break; case HPSA_DEVICE_RESET_MSG: c->Request.CDBLen = 16; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); c->Request.Timeout = 0; /* Don't time out */ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; /* If bytes 4-7 are zero, it means reset the */ /* LunID device */ c->Request.CDB[4] = 0x00; c->Request.CDB[5] = 0x00; c->Request.CDB[6] = 0x00; c->Request.CDB[7] = 0x00; break; default: dev_warn(&h->pdev->dev, "unknown message type %d\n", cmd); BUG(); } } else { dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); BUG(); } switch (GET_DIR(c->Request.type_attr_dir)) { case XFER_READ: dir = DMA_FROM_DEVICE; break; case XFER_WRITE: dir = DMA_TO_DEVICE; break; case XFER_NONE: dir = DMA_NONE; break; default: dir = DMA_BIDIRECTIONAL; } if (hpsa_map_one(h->pdev, c, buff, size, dir)) return -1; return 0; } /* * Map (physical) PCI mem into (virtual) kernel space */ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? 
(page_remapped + page_offs) : NULL; } static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) { return h->access.command_completed(h, q); } static inline bool interrupt_pending(struct ctlr_info *h) { return h->access.intr_pending(h); } static inline long interrupt_not_for_us(struct ctlr_info *h) { return (h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0); } static inline int bad_tag(struct ctlr_info *h, u32 tag_index, u32 raw_tag) { if (unlikely(tag_index >= h->nr_cmds)) { dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); return 1; } return 0; } static inline void finish_cmd(struct CommandList *c) { dial_up_lockup_detection_on_fw_flash_complete(c->h, c); if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI || c->cmd_type == CMD_IOACCEL2)) complete_scsi_command(c); else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) complete(c->waiting); } /* process completion of an indexed ("direct lookup") command */ static inline void process_indexed_cmd(struct ctlr_info *h, u32 raw_tag) { u32 tag_index; struct CommandList *c; tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; if (!bad_tag(h, tag_index, raw_tag)) { c = h->cmd_pool + tag_index; finish_cmd(c); } } /* Some controllers, like p400, will give us one interrupt * after a soft reset, even if we turned interrupts off. * Only need to check for this in the hpsa_xxx_discard_completions * functions. */ static int ignore_bogus_interrupt(struct ctlr_info *h) { if (likely(!reset_devices)) return 0; if (likely(h->interrupts_enabled)) return 0; dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " "(known firmware bug.) Ignoring.\n"); return 1; } /* * Convert &h->q[x] (passed to interrupt handlers) back to h. * Relies on (h-q[x] == x) being true for x such that * 0 <= x < MAX_REPLY_QUEUES. 
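* The value stored at h->q[x] is x itself, so (queue - *queue) points at h->q[0] and container_of() recovers the enclosing ctlr_info.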
*/ static struct ctlr_info *queue_to_hba(u8 *queue) { return container_of((queue - *queue), struct ctlr_info, q[0]); } static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) { struct ctlr_info *h = queue_to_hba(queue); u8 q = *(u8 *) queue; u32 raw_tag; if (ignore_bogus_interrupt(h)) return IRQ_NONE; if (interrupt_not_for_us(h)) return IRQ_NONE; h->last_intr_timestamp = get_jiffies_64(); while (interrupt_pending(h)) { raw_tag = get_next_completion(h, q); while (raw_tag != FIFO_EMPTY) raw_tag = next_command(h, q); } return IRQ_HANDLED; } static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) { struct ctlr_info *h = queue_to_hba(queue); u32 raw_tag; u8 q = *(u8 *) queue; if (ignore_bogus_interrupt(h)) return IRQ_NONE; h->last_intr_timestamp = get_jiffies_64(); raw_tag = get_next_completion(h, q); while (raw_tag != FIFO_EMPTY) raw_tag = next_command(h, q); return IRQ_HANDLED; } static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) { struct ctlr_info *h = queue_to_hba((u8 *) queue); u32 raw_tag; u8 q = *(u8 *) queue; if (interrupt_not_for_us(h)) return IRQ_NONE; h->last_intr_timestamp = get_jiffies_64(); while (interrupt_pending(h)) { raw_tag = get_next_completion(h, q); while (raw_tag != FIFO_EMPTY) { process_indexed_cmd(h, raw_tag); raw_tag = next_command(h, q); } } return IRQ_HANDLED; } static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) { struct ctlr_info *h = queue_to_hba(queue); u32 raw_tag; u8 q = *(u8 *) queue; h->last_intr_timestamp = get_jiffies_64(); raw_tag = get_next_completion(h, q); while (raw_tag != FIFO_EMPTY) { process_indexed_cmd(h, raw_tag); raw_tag = next_command(h, q); } return IRQ_HANDLED; } /* Send a message CDB to the firmware. Careful, this only works * in simple mode, not performant mode due to the tag lookup. * We only ever use this immediately after a controller reset. */ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { struct Command { struct CommandListHeader CommandHeader; struct RequestBlock Request; struct ErrDescriptor ErrorDescriptor; }; struct Command *cmd; static const size_t cmd_sz = sizeof(*cmd) + sizeof(cmd->ErrorDescriptor); dma_addr_t paddr64; __le32 paddr32; u32 tag; void __iomem *vaddr; int i, err; vaddr = pci_ioremap_bar(pdev, 0); if (vaddr == NULL) return -ENOMEM; /* The Inbound Post Queue only accepts 32-bit physical addresses for the * CCISS commands, so they must be allocated from the lower 4GiB of * memory. */ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return err; } cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL); if (cmd == NULL) { iounmap(vaddr); return -ENOMEM; } /* This must fit, because of the 32-bit consistent DMA mask. Also, * although there's no guarantee, we assume that the address is at * least 4-byte aligned (most likely, it's page-aligned). 
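* The bus address also doubles as the command tag; completion is detected below by matching the tag read from the reply port against paddr64.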
*/ paddr32 = cpu_to_le32(paddr64); cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; cmd->CommandHeader.SGTotal = cpu_to_le16(0); cmd->CommandHeader.tag = cpu_to_le64(paddr64); memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; cmd->Request.type_attr_dir = TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ cmd->ErrorDescriptor.Addr = cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) break; msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); } iounmap(vaddr); /* we leak the DMA buffer here ... no choice since the controller could * still complete the command. */ if (i == HPSA_MSG_SEND_RETRY_LIMIT) { dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", opcode, type); return -ETIMEDOUT; } dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64); if (tag & HPSA_ERROR_BIT) { dev_err(&pdev->dev, "controller message %02x:%02x failed\n", opcode, type); return -EIO; } dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", opcode, type); return 0; } #define hpsa_noop(p) hpsa_message(p, 3, 0) static int hpsa_controller_hard_reset(struct pci_dev *pdev, void __iomem *vaddr, u32 use_doorbell) { if (use_doorbell) { /* For everything after the P600, the PCI power state method * of resetting the controller doesn't work, so we have this * other way using the doorbell register. */ dev_info(&pdev->dev, "using doorbell to reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); /* PMC hardware guys tell us we need a 10 second delay after * doorbell reset and before any attempt to talk to the board * at all to ensure that this actually works and doesn't fall * over in some weird corner cases. */ msleep(10000); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power * Management Control/Status Register (CSR) controls the power * state of the device. The normal operating state is D0, * CSR=00h. The software off state is D3, CSR=03h. To reset * the controller, place the interface device in D3 then to D0, * this causes a secondary PCI reset which will reset the * controller." */ int rc = 0; dev_info(&pdev->dev, "using PCI PM to reset controller\n"); /* enter the D3hot power management state */ rc = pci_set_power_state(pdev, PCI_D3hot); if (rc) return rc; msleep(500); /* enter the D0 power management state */ rc = pci_set_power_state(pdev, PCI_D0); if (rc) return rc; /* * The P600 requires a small delay when changing states. * Otherwise we may think the board did not reset and we bail. * This for kdump only and is particular to the P600. 
*/ msleep(500); } return 0; } static void init_driver_version(char *driver_version, int len) { memset(driver_version, 0, len); strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); } static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) { char *driver_version; int i, size = sizeof(cfgtable->driver_version); driver_version = kmalloc(size, GFP_KERNEL); if (!driver_version) return -ENOMEM; init_driver_version(driver_version, size); for (i = 0; i < size; i++) writeb(driver_version[i], &cfgtable->driver_version[i]); kfree(driver_version); return 0; } static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) { int i; for (i = 0; i < sizeof(cfgtable->driver_version); i++) driver_ver[i] = readb(&cfgtable->driver_version[i]); } static int controller_reset_failed(struct CfgTable __iomem *cfgtable) { char *driver_ver, *old_driver_ver; int rc, size = sizeof(cfgtable->driver_version); old_driver_ver = kmalloc_array(2, size, GFP_KERNEL); if (!old_driver_ver) return -ENOMEM; driver_ver = old_driver_ver + size; /* After a reset, the 32 bytes of "driver version" in the cfgtable * should have been changed, otherwise we know the reset failed. */ init_driver_version(old_driver_ver, size); read_driver_ver_from_cfgtable(cfgtable, driver_ver); rc = !memcmp(driver_ver, old_driver_ver, size); kfree(old_driver_ver); return rc; } /* This does a hard reset of the controller using PCI power management * states or the using the doorbell register. */ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) { u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; u32 misc_fw_support; int rc; struct CfgTable __iomem *cfgtable; u32 use_doorbell; u16 command_register; /* For controllers as old as the P600, this is very nearly * the same thing as * * pci_save_state(pci_dev); * pci_set_power_state(pci_dev, PCI_D3hot); * pci_set_power_state(pci_dev, PCI_D0); * pci_restore_state(pci_dev); * * For controllers newer than the P600, the pci power state * method of resetting doesn't work so we have another way * using the doorbell register. */ if (!ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Controller not resettable\n"); return -ENODEV; } /* if controller is soft- but not hard resettable... */ if (!ctlr_is_hard_resettable(board_id)) return -ENOTSUPP; /* try soft reset later. */ /* Save the PCI command register */ pci_read_config_word(pdev, 4, &command_register); pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ rc = hpsa_pci_find_memory_BAR(pdev, &paddr); if (rc) return rc; vaddr = remap_pci_mem(paddr, 0x250); if (!vaddr) return -ENOMEM; /* find cfgtable in order to check if reset via doorbell is supported */ rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); if (rc) goto unmap_vaddr; cfgtable = remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); if (!cfgtable) { rc = -ENOMEM; goto unmap_vaddr; } rc = write_driver_ver_to_cfgtable(cfgtable); if (rc) goto unmap_cfgtable; /* If reset via doorbell register is supported, use that. * There are two such methods. Favor the newest method. 
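* If only the original doorbell method is advertised, the hard reset is skipped and -ENOTSUPP is returned so a soft reset can be attempted instead.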
*/ misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; if (use_doorbell) { use_doorbell = DOORBELL_CTLR_RESET2; } else { use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; if (use_doorbell) { dev_warn(&pdev->dev, "Soft reset not supported. Firmware update is required.\n"); rc = -ENOTSUPP; /* try soft reset */ goto unmap_cfgtable; } } rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; pci_restore_state(pdev); pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) need a little pause here */ msleep(HPSA_POST_RESET_PAUSE_MSECS); rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, "Failed waiting for board to become ready after hard reset\n"); goto unmap_cfgtable; } rc = controller_reset_failed(vaddr); if (rc < 0) goto unmap_cfgtable; if (rc) { dev_warn(&pdev->dev, "Unable to successfully reset " "controller. Will try soft reset.\n"); rc = -ENOTSUPP; } else { dev_info(&pdev->dev, "board ready after hard reset.\n"); } unmap_cfgtable: iounmap(cfgtable); unmap_vaddr: iounmap(vaddr); return rc; } /* * We cannot read the structure directly, for portability we must use * the io functions. * This is for debug only. */ static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) { #ifdef HPSA_DEBUG int i; char temp_name[17]; dev_info(dev, "Controller Configuration information\n"); dev_info(dev, "------------------------------------\n"); for (i = 0; i < 4; i++) temp_name[i] = readb(&(tb->Signature[i])); temp_name[4] = '\0'; dev_info(dev, " Signature = %s\n", temp_name); dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); dev_info(dev, " Transport methods supported = 0x%x\n", readl(&(tb->TransportSupport))); dev_info(dev, " Transport methods active = 0x%x\n", readl(&(tb->TransportActive))); dev_info(dev, " Requested transport Method = 0x%x\n", readl(&(tb->HostWrite.TransportRequest))); dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", readl(&(tb->HostWrite.CoalIntDelay))); dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", readl(&(tb->HostWrite.CoalIntCount))); dev_info(dev, " Max outstanding commands = %d\n", readl(&(tb->CmdsOutMax))); dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); for (i = 0; i < 16; i++) temp_name[i] = readb(&(tb->ServerName[i])); temp_name[16] = '\0'; dev_info(dev, " Server Name = %s\n", temp_name); dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); #endif /* HPSA_DEBUG */ } static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) { int i, offset, mem_type, bar_type; if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? 
*/ return 0; offset = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) offset += 4; else { mem_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_32: case PCI_BASE_ADDRESS_MEM_TYPE_1M: offset += 4; /* 32 bit */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: offset += 8; break; default: /* reserved in PCI 2.2 */ dev_warn(&pdev->dev, "base address is invalid\n"); return -1; } } if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) return i + 1; } return -1; } static void hpsa_disable_interrupt_mode(struct ctlr_info *h) { pci_free_irq_vectors(h->pdev); h->msix_vectors = 0; } static void hpsa_setup_reply_map(struct ctlr_info *h) { const struct cpumask *mask; unsigned int queue, cpu; for (queue = 0; queue < h->msix_vectors; queue++) { mask = pci_irq_get_affinity(h->pdev, queue); if (!mask) goto fallback; for_each_cpu(cpu, mask) h->reply_map[cpu] = queue; } return; fallback: for_each_possible_cpu(cpu) h->reply_map[cpu] = 0; } /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. */ static int hpsa_interrupt_mode(struct ctlr_info *h) { unsigned int flags = PCI_IRQ_LEGACY; int ret; /* Some boards advertise MSI but don't really support it */ switch (h->board_id) { case 0x40700E11: case 0x40800E11: case 0x40820E11: case 0x40830E11: break; default: ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (ret > 0) { h->msix_vectors = ret; return 0; } flags |= PCI_IRQ_MSI; break; } ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags); if (ret < 0) return ret; return 0; } static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, bool *legacy_board) { int i; u32 subsystem_vendor_id, subsystem_device_id; subsystem_vendor_id = pdev->subsystem_vendor; subsystem_device_id = pdev->subsystem_device; *board_id = ((subsystem_device_id << 16) & 0xffff0000) | subsystem_vendor_id; if (legacy_board) *legacy_board = false; for (i = 0; i < ARRAY_SIZE(products); i++) if (*board_id == products[i].board_id) { if (products[i].access != &SA5A_access && products[i].access != &SA5B_access) return i; dev_warn(&pdev->dev, "legacy board ID: 0x%08x\n", *board_id); if (legacy_board) *legacy_board = true; return i; } dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id); if (legacy_board) *legacy_board = true; return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ } static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar) { int i; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { /* addressing mode bits already removed */ *memory_bar = pci_resource_start(pdev, i); dev_dbg(&pdev->dev, "memory BAR = %lx\n", *memory_bar); return 0; } dev_warn(&pdev->dev, "no memory BAR found\n"); return -ENODEV; } static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready) { int i, iterations; u32 scratchpad; if (wait_for_ready) iterations = HPSA_BOARD_READY_ITERATIONS; else iterations = HPSA_BOARD_NOT_READY_ITERATIONS; for (i = 0; i < iterations; i++) { scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); if (wait_for_ready) { if (scratchpad == HPSA_FIRMWARE_READY) return 0; } else { if (scratchpad != HPSA_FIRMWARE_READY) return 0; } msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); } dev_warn(&pdev->dev, "board not ready, timed 
out.\n"); return -ENODEV; } static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset) { *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); *cfg_base_addr &= (u32) 0x0000ffff; *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); if (*cfg_base_addr_index == -1) { dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); return -ENODEV; } return 0; } static void hpsa_free_cfgtables(struct ctlr_info *h) { if (h->transtable) { iounmap(h->transtable); h->transtable = NULL; } if (h->cfgtable) { iounmap(h->cfgtable); h->cfgtable = NULL; } } /* Find and map CISS config table and transfer table + * several items must be unmapped (freed) later + * */ static int hpsa_find_cfgtables(struct ctlr_info *h) { u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; u32 trans_offset; int rc; rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); if (rc) return rc; h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); if (!h->cfgtable) { dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); return -ENOMEM; } rc = write_driver_ver_to_cfgtable(h->cfgtable); if (rc) return rc; /* Find performant mode table. */ trans_offset = readl(&h->cfgtable->TransMethodOffset); h->transtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index)+cfg_offset+trans_offset, sizeof(*h->transtable)); if (!h->transtable) { dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); hpsa_free_cfgtables(h); return -ENOMEM; } return 0; } static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { #define MIN_MAX_COMMANDS 16 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); /* Limit commands in memory limited kdump scenario. */ if (reset_devices && h->max_commands > 32) h->max_commands = 32; if (h->max_commands < MIN_MAX_COMMANDS) { dev_warn(&h->pdev->dev, "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n", h->max_commands, MIN_MAX_COMMANDS); h->max_commands = MIN_MAX_COMMANDS; } } /* If the controller reports that the total max sg entries is greater than 512, * then we know that chained SG blocks work. (Original smart arrays did not * support chained SG blocks and would return zero for max sg entries.) */ static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) { return h->maxsgentries > 512; } /* Interrogate the hardware for some limits: * max commands, max SG elements without chaining, and with chaining, * SG chain block size, etc. */ static void hpsa_find_board_params(struct ctlr_info *h) { hpsa_get_max_perf_mode_cmds(h); h->nr_cmds = h->max_commands; h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); h->fw_support = readl(&(h->cfgtable->misc_fw_support)); if (hpsa_supports_chained_sg_blocks(h)) { /* Limit in-command s/g elements to 32 save dma'able memory. 
*/ h->max_cmd_sg_entries = 32; h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; h->maxsgentries--; /* save one for chain pointer */ } else { /* * Original smart arrays supported at most 31 s/g entries * embedded inline in the command (trying to use more * would lock up the controller) */ h->max_cmd_sg_entries = 31; h->maxsgentries = 31; /* default to traditional values */ h->chainsize = 0; } /* Find out what task management functions are supported and cache */ h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); } static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) { if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { dev_err(&h->pdev->dev, "not a valid CISS config table\n"); return false; } return true; } static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) { u32 driver_support; driver_support = readl(&(h->cfgtable->driver_support)); /* Need to enable prefetch in the SCSI core for 6400 in x86 */ #ifdef CONFIG_X86 driver_support |= ENABLE_SCSI_PREFETCH; #endif driver_support |= ENABLE_UNIT_ATTN; writel(driver_support, &(h->cfgtable->driver_support)); } /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result * in a prefetch beyond physical memory. */ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) { u32 dma_prefetch; if (h->board_id != 0x3225103C) return; dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); dma_prefetch |= 0x8000; writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); } static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) { int i; u32 doorbell_value; unsigned long flags; /* wait until the clear_event_notify bit 6 is cleared by controller. */ for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) goto done; /* delay and try again */ msleep(CLEAR_EVENT_WAIT_INTERVAL); } return -ENODEV; done: return 0; } static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) { int i; u32 doorbell_value; unsigned long flags; /* under certain very rare conditions, this can take awhile. * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) 
*/ for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { if (h->remove_in_progress) goto done; spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); if (!(doorbell_value & CFGTBL_ChangeReq)) goto done; /* delay and try again */ msleep(MODE_CHANGE_WAIT_INTERVAL); } return -ENODEV; done: return 0; } /* return -ENODEV or other reason on error, 0 on success */ static int hpsa_enter_simple_mode(struct ctlr_info *h) { u32 trans_support; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & SIMPLE_MODE)) return -ENOTSUPP; h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); /* Update the field, and then ring the doorbell */ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); if (hpsa_wait_for_mode_change_ack(h)) goto error; print_cfg_table(&h->pdev->dev, h->cfgtable); if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) goto error; h->transMethod = CFGTBL_Trans_Simple; return 0; error: dev_err(&h->pdev->dev, "failed to enter simple mode\n"); return -ENODEV; } /* free items allocated or mapped by hpsa_pci_init */ static void hpsa_free_pci_init(struct ctlr_info *h) { hpsa_free_cfgtables(h); /* pci_init 4 */ iounmap(h->vaddr); /* pci_init 3 */ h->vaddr = NULL; hpsa_disable_interrupt_mode(h); /* pci_init 2 */ /* * call pci_disable_device before pci_release_regions per * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); /* pci_init 1 */ pci_release_regions(h->pdev); /* pci_init 2 */ } /* several items must be freed later */ static int hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; bool legacy_board; prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); if (prod_index < 0) return prod_index; h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); h->legacy_board = legacy_board; pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); err = pci_enable_device(h->pdev); if (err) { dev_err(&h->pdev->dev, "failed to enable PCI device\n"); pci_disable_device(h->pdev); return err; } err = pci_request_regions(h->pdev, HPSA); if (err) { dev_err(&h->pdev->dev, "failed to obtain PCI resources\n"); pci_disable_device(h->pdev); return err; } pci_set_master(h->pdev); err = hpsa_interrupt_mode(h); if (err) goto clean1; /* setup mapping between CPU and reply queue */ hpsa_setup_reply_map(h); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto clean2; /* intmode+region, pci */ h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); err = -ENOMEM; goto clean2; /* intmode+region, pci */ } err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) goto clean3; /* vaddr, intmode+region, pci */ err = hpsa_find_cfgtables(h); if (err) goto clean3; /* vaddr, intmode+region, pci */ hpsa_find_board_params(h); if (!hpsa_CISS_signature_present(h)) { err = -ENODEV; goto clean4; /* cfgtables, vaddr, intmode+region, pci */ } hpsa_set_driver_support_bits(h); hpsa_p600_dma_prefetch_quirk(h); err = hpsa_enter_simple_mode(h); if (err) goto clean4; /* cfgtables, vaddr, intmode+region, pci */ return 0; clean4: /* cfgtables, vaddr, intmode+region, pci */ hpsa_free_cfgtables(h); clean3: /* vaddr, intmode+region, pci */ iounmap(h->vaddr); h->vaddr = NULL; clean2: /* intmode+region, pci */ 
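/* Error unwind: release resources in the reverse order they were set up. */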
hpsa_disable_interrupt_mode(h); clean1: /* * call pci_disable_device before pci_release_regions per * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); return err; } static void hpsa_hba_inquiry(struct ctlr_info *h) { int rc; #define HBA_INQUIRY_BYTE_COUNT 64 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); if (!h->hba_inquiry_data) return; rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); if (rc != 0) { kfree(h->hba_inquiry_data); h->hba_inquiry_data = NULL; } } static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) { int rc, i; void __iomem *vaddr; if (!reset_devices) return 0; /* kdump kernel is loading, we don't know in which state is * the pci interface. The dev->enable_cnt is equal zero * so we call enable+disable, wait a while and switch it on. */ rc = pci_enable_device(pdev); if (rc) { dev_warn(&pdev->dev, "Failed to enable PCI device\n"); return -ENODEV; } pci_disable_device(pdev); msleep(260); /* a randomly chosen number */ rc = pci_enable_device(pdev); if (rc) { dev_warn(&pdev->dev, "failed to enable device.\n"); return -ENODEV; } pci_set_master(pdev); vaddr = pci_ioremap_bar(pdev, 0); if (vaddr == NULL) { rc = -ENOMEM; goto out_disable; } writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); iounmap(vaddr); /* Reset the controller with a PCI power-cycle or via doorbell */ rc = hpsa_kdump_hard_reset_controller(pdev, board_id); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in * "performant mode". Or, it might be 640x, which can't reset * due to concerns about shared bbwc between 6402/6404 pair. */ if (rc) goto out_disable; /* Now try to get the controller to respond to a no-op */ dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { if (hpsa_noop(pdev) == 0) break; else dev_warn(&pdev->dev, "no-op failed%s\n", (i < 11 ? 
"; re-trying" : "")); } out_disable: pci_disable_device(pdev); return rc; } static void hpsa_free_cmd_pool(struct ctlr_info *h) { bitmap_free(h->cmd_pool_bits); h->cmd_pool_bits = NULL; if (h->cmd_pool) { dma_free_coherent(&h->pdev->dev, h->nr_cmds * sizeof(struct CommandList), h->cmd_pool, h->cmd_pool_dhandle); h->cmd_pool = NULL; h->cmd_pool_dhandle = 0; } if (h->errinfo_pool) { dma_free_coherent(&h->pdev->dev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); h->errinfo_pool = NULL; h->errinfo_pool_dhandle = 0; } } static int hpsa_alloc_cmd_pool(struct ctlr_info *h) { h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL); h->cmd_pool = dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->cmd_pool), &h->cmd_pool_dhandle, GFP_KERNEL); h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->errinfo_pool), &h->errinfo_pool_dhandle, GFP_KERNEL); if ((h->cmd_pool_bits == NULL) || (h->cmd_pool == NULL) || (h->errinfo_pool == NULL)) { dev_err(&h->pdev->dev, "out of memory in %s", __func__); goto clean_up; } hpsa_preinitialize_commands(h); return 0; clean_up: hpsa_free_cmd_pool(h); return -ENOMEM; } /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ static void hpsa_free_irqs(struct ctlr_info *h) { int i; int irq_vector = 0; if (hpsa_simple_mode) irq_vector = h->intr_mode; if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { /* Single reply queue, only one irq to free */ free_irq(pci_irq_vector(h->pdev, irq_vector), &h->q[h->intr_mode]); h->q[h->intr_mode] = 0; return; } for (i = 0; i < h->msix_vectors; i++) { free_irq(pci_irq_vector(h->pdev, i), &h->q[i]); h->q[i] = 0; } for (; i < MAX_REPLY_QUEUES; i++) h->q[i] = 0; } /* returns 0 on success; cleans up and returns -Enn on error */ static int hpsa_request_irqs(struct ctlr_info *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) { int rc, i; int irq_vector = 0; if (hpsa_simple_mode) irq_vector = h->intr_mode; /* * initialize h->q[x] = x so that interrupt handlers know which * queue to process. */ for (i = 0; i < MAX_REPLY_QUEUES; i++) h->q[i] = (u8) i; if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) { /* If performant mode and MSI-X, use multiple reply queues */ for (i = 0; i < h->msix_vectors; i++) { sprintf(h->intrname[i], "%s-msix%d", h->devname, i); rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler, 0, h->intrname[i], &h->q[i]); if (rc) { int j; dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", pci_irq_vector(h->pdev, i), h->devname); for (j = 0; j < i; j++) { free_irq(pci_irq_vector(h->pdev, j), &h->q[j]); h->q[j] = 0; } for (; j < MAX_REPLY_QUEUES; j++) h->q[j] = 0; return rc; } } } else { /* Use single reply pool */ if (h->msix_vectors > 0 || h->pdev->msi_enabled) { sprintf(h->intrname[0], "%s-msi%s", h->devname, h->msix_vectors ? 
"x" : ""); rc = request_irq(pci_irq_vector(h->pdev, irq_vector), msixhandler, 0, h->intrname[0], &h->q[h->intr_mode]); } else { sprintf(h->intrname[h->intr_mode], "%s-intx", h->devname); rc = request_irq(pci_irq_vector(h->pdev, irq_vector), intxhandler, IRQF_SHARED, h->intrname[0], &h->q[h->intr_mode]); } } if (rc) { dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", pci_irq_vector(h->pdev, irq_vector), h->devname); hpsa_free_irqs(h); return -ENODEV; } return 0; } static int hpsa_kdump_soft_reset(struct ctlr_info *h) { int rc; hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); if (rc) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); return rc; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (rc) { dev_warn(&h->pdev->dev, "Board failed to become ready " "after soft reset.\n"); return rc; } return 0; } static void hpsa_free_reply_queues(struct ctlr_info *h) { int i; for (i = 0; i < h->nreply_queues; i++) { if (!h->reply_queue[i].head) continue; dma_free_coherent(&h->pdev->dev, h->reply_queue_size, h->reply_queue[i].head, h->reply_queue[i].busaddr); h->reply_queue[i].head = NULL; h->reply_queue[i].busaddr = 0; } h->reply_queue_size = 0; } static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { hpsa_free_performant_mode(h); /* init_one 7 */ hpsa_free_sg_chain_blocks(h); /* init_one 6 */ hpsa_free_cmd_pool(h); /* init_one 5 */ hpsa_free_irqs(h); /* init_one 4 */ scsi_host_put(h->scsi_host); /* init_one 3 */ h->scsi_host = NULL; /* init_one 3 */ hpsa_free_pci_init(h); /* init_one 2_5 */ free_percpu(h->lockup_detected); /* init_one 2 */ h->lockup_detected = NULL; /* init_one 2 */ if (h->resubmit_wq) { destroy_workqueue(h->resubmit_wq); /* init_one 1 */ h->resubmit_wq = NULL; } if (h->rescan_ctlr_wq) { destroy_workqueue(h->rescan_ctlr_wq); h->rescan_ctlr_wq = NULL; } if (h->monitor_ctlr_wq) { destroy_workqueue(h->monitor_ctlr_wq); h->monitor_ctlr_wq = NULL; } kfree(h); /* init_one 1 */ } /* Called when controller lockup detected. */ static void fail_all_outstanding_cmds(struct ctlr_info *h) { int i, refcount; struct CommandList *c; int failcount = 0; flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ for (i = 0; i < h->nr_cmds; i++) { c = h->cmd_pool + i; refcount = atomic_inc_return(&c->refcount); if (refcount > 1) { c->err_info->CommandStatus = CMD_CTLR_LOCKUP; finish_cmd(c); atomic_dec(&h->commands_outstanding); failcount++; } cmd_free(h, c); } dev_warn(&h->pdev->dev, "failed %d commands in fail_all\n", failcount); } static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) { int cpu; for_each_online_cpu(cpu) { u32 *lockup_detected; lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); *lockup_detected = value; } wmb(); /* be sure the per-cpu variables are out to memory */ } static void controller_lockup_detected(struct ctlr_info *h) { unsigned long flags; u32 lockup_detected; h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_lock_irqsave(&h->lock, flags); lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); if (!lockup_detected) { /* no heartbeat, but controller gave us a zero. 
*/ dev_warn(&h->pdev->dev, "lockup detected after %d but scratchpad register is zero\n", h->heartbeat_sample_interval / HZ); lockup_detected = 0xffffffff; } set_lockup_detected_for_all_cpus(h, lockup_detected); spin_unlock_irqrestore(&h->lock, flags); dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", lockup_detected, h->heartbeat_sample_interval / HZ); if (lockup_detected == 0xffff0000) { dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n"); writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL); } pci_disable_device(h->pdev); fail_all_outstanding_cmds(h); } static int detect_controller_lockup(struct ctlr_info *h) { u64 now; u32 heartbeat; unsigned long flags; now = get_jiffies_64(); /* If we've received an interrupt recently, we're ok. */ if (time_after64(h->last_intr_timestamp + (h->heartbeat_sample_interval), now)) return false; /* * If we've already checked the heartbeat recently, we're ok. * This could happen if someone sends us a signal. We * otherwise don't care about signals in this thread. */ if (time_after64(h->last_heartbeat_timestamp + (h->heartbeat_sample_interval), now)) return false; /* If heartbeat has not changed since we last looked, we're not ok. */ spin_lock_irqsave(&h->lock, flags); heartbeat = readl(&h->cfgtable->HeartBeat); spin_unlock_irqrestore(&h->lock, flags); if (h->last_heartbeat == heartbeat) { controller_lockup_detected(h); return true; } /* We're ok. */ h->last_heartbeat = heartbeat; h->last_heartbeat_timestamp = now; return false; } /* * Set ioaccel status for all ioaccel volumes. * * Called from monitor controller worker (hpsa_event_monitor_worker) * * A Volume (or Volumes that comprise an Array set) may be undergoing a * transformation, so we will be turning off ioaccel for all volumes that * make up the Array. */ static void hpsa_set_ioaccel_status(struct ctlr_info *h) { int rc; int i; u8 ioaccel_status; unsigned char *buf; struct hpsa_scsi_dev_t *device; if (!h) return; buf = kmalloc(64, GFP_KERNEL); if (!buf) return; /* * Run through current device list used during I/O requests. */ for (i = 0; i < h->ndevices; i++) { int offload_to_be_enabled = 0; int offload_config = 0; device = h->dev[i]; if (!device) continue; if (!hpsa_vpd_page_supported(h, device->scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) continue; memset(buf, 0, 64); rc = hpsa_scsi_do_inquiry(h, device->scsi3addr, VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); if (rc != 0) continue; ioaccel_status = buf[IOACCEL_STATUS_BYTE]; /* * Check if offload is still configured on */ offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); /* * If offload is configured on, check to see if ioaccel * needs to be enabled. */ if (offload_config) offload_to_be_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); /* * If ioaccel is to be re-enabled, re-enable later during the * scan operation so the driver can get a fresh raidmap * before turning ioaccel back on. */ if (offload_to_be_enabled) continue; /* * Immediately turn off ioaccel for any volume the * controller tells us to. Some of the reasons could be: * transformation - change to the LVs of an Array. * degraded volume - component failure */ hpsa_turn_off_ioaccel_for_device(device); } kfree(buf); } static void hpsa_ack_ctlr_events(struct ctlr_info *h) { char *event_type; if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) return; /* Ask the controller to clear the events we're handling. 
*/ if ((h->transMethod & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) && (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) event_type = "state change"; if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) event_type = "configuration change"; /* Stop sending new RAID offload reqs via the IO accelerator */ scsi_block_requests(h->scsi_host); hpsa_set_ioaccel_status(h); hpsa_drain_accel_commands(h); /* Set 'accelerator path config change' bit */ dev_warn(&h->pdev->dev, "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", h->events, event_type); writel(h->events, &(h->cfgtable->clear_event_notify)); /* Set the "clear event notify field update" bit 6 */ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); /* Wait until ctlr clears 'clear event notify field', bit 6 */ hpsa_wait_for_clear_event_notify_ack(h); scsi_unblock_requests(h->scsi_host); } else { /* Acknowledge controller notification events. */ writel(h->events, &(h->cfgtable->clear_event_notify)); writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); hpsa_wait_for_clear_event_notify_ack(h); } return; } /* Check a register on the controller to see if there are configuration * changes (added/changed/removed logical drives, etc.) which mean that * we should rescan the controller for devices. * Also check flag for driver-initiated rescan. */ static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) { if (h->drv_req_rescan) { h->drv_req_rescan = 0; return 1; } if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) return 0; h->events = readl(&(h->cfgtable->event_notify)); return h->events & RESCAN_REQUIRED_EVENT_BITS; } /* * Check if any of the offline devices have become ready */ static int hpsa_offline_devices_ready(struct ctlr_info *h) { unsigned long flags; struct offline_device_entry *d; struct list_head *this, *tmp; spin_lock_irqsave(&h->offline_device_lock, flags); list_for_each_safe(this, tmp, &h->offline_device_list) { d = list_entry(this, struct offline_device_entry, offline_list); spin_unlock_irqrestore(&h->offline_device_lock, flags); if (!hpsa_volume_offline(h, d->scsi3addr)) { spin_lock_irqsave(&h->offline_device_lock, flags); list_del(&d->offline_list); spin_unlock_irqrestore(&h->offline_device_lock, flags); return 1; } spin_lock_irqsave(&h->offline_device_lock, flags); } spin_unlock_irqrestore(&h->offline_device_lock, flags); return 0; } static int hpsa_luns_changed(struct ctlr_info *h) { int rc = 1; /* assume there are changes */ struct ReportLUNdata *logdev = NULL; /* if we can't find out if lun data has changed, * assume that it has. */ if (!h->lastlogicals) return rc; logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); if (!logdev) return rc; if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { dev_warn(&h->pdev->dev, "report luns failed, can't track lun changes.\n"); goto out; } if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { dev_info(&h->pdev->dev, "Lun changes detected.\n"); memcpy(h->lastlogicals, logdev, sizeof(*logdev)); goto out; } else rc = 0; /* no changes detected. 
*/ out: kfree(logdev); return rc; } static void hpsa_perform_rescan(struct ctlr_info *h) { struct Scsi_Host *sh = NULL; unsigned long flags; /* * Do the scan after the reset */ spin_lock_irqsave(&h->reset_lock, flags); if (h->reset_in_progress) { h->drv_req_rescan = 1; spin_unlock_irqrestore(&h->reset_lock, flags); return; } spin_unlock_irqrestore(&h->reset_lock, flags); sh = scsi_host_get(h->scsi_host); if (sh != NULL) { hpsa_scan_start(sh); scsi_host_put(sh); h->drv_req_rescan = 0; } } /* * watch for controller events */ static void hpsa_event_monitor_worker(struct work_struct *work) { struct ctlr_info *h = container_of(to_delayed_work(work), struct ctlr_info, event_monitor_work); unsigned long flags; spin_lock_irqsave(&h->lock, flags); if (h->remove_in_progress) { spin_unlock_irqrestore(&h->lock, flags); return; } spin_unlock_irqrestore(&h->lock, flags); if (hpsa_ctlr_needs_rescan(h)) { hpsa_ack_ctlr_events(h); hpsa_perform_rescan(h); } spin_lock_irqsave(&h->lock, flags); if (!h->remove_in_progress) queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work, HPSA_EVENT_MONITOR_INTERVAL); spin_unlock_irqrestore(&h->lock, flags); } static void hpsa_rescan_ctlr_worker(struct work_struct *work) { unsigned long flags; struct ctlr_info *h = container_of(to_delayed_work(work), struct ctlr_info, rescan_ctlr_work); spin_lock_irqsave(&h->lock, flags); if (h->remove_in_progress) { spin_unlock_irqrestore(&h->lock, flags); return; } spin_unlock_irqrestore(&h->lock, flags); if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { hpsa_perform_rescan(h); } else if (h->discovery_polling) { if (hpsa_luns_changed(h)) { dev_info(&h->pdev->dev, "driver discovery polling rescan.\n"); hpsa_perform_rescan(h); } } spin_lock_irqsave(&h->lock, flags); if (!h->remove_in_progress) queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, h->heartbeat_sample_interval); spin_unlock_irqrestore(&h->lock, flags); } static void hpsa_monitor_ctlr_worker(struct work_struct *work) { unsigned long flags; struct ctlr_info *h = container_of(to_delayed_work(work), struct ctlr_info, monitor_ctlr_work); detect_controller_lockup(h); if (lockup_detected(h)) return; spin_lock_irqsave(&h->lock, flags); if (!h->remove_in_progress) queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work, h->heartbeat_sample_interval); spin_unlock_irqrestore(&h->lock, flags); } static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, char *name) { struct workqueue_struct *wq = NULL; wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); if (!wq) dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); return wq; } static void hpda_free_ctlr_info(struct ctlr_info *h) { kfree(h->reply_map); kfree(h); } static struct ctlr_info *hpda_alloc_ctlr_info(void) { struct ctlr_info *h; h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) return NULL; h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL); if (!h->reply_map) { kfree(h); return NULL; } return h; } static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; struct ctlr_info *h; int try_soft_reset = 0; unsigned long flags; u32 board_id; if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); rc = hpsa_lookup_board_id(pdev, &board_id, NULL); if (rc < 0) { dev_warn(&pdev->dev, "Board ID not found\n"); return rc; } rc = hpsa_init_reset_devices(pdev, board_id); if (rc) { if (rc != -ENOTSUPP) return rc; /* If the reset fails in a particular way (it has no way to do * a proper hard reset, so returns -ENOTSUPP) 
we can try to do * a soft reset once we get the controller configured up to the * point that it can accept a command. */ try_soft_reset = 1; rc = 0; } reinit_after_soft_reset: /* Command structures must be aligned on a 32-byte boundary because * the 5 lower bits of the address are used by the hardware. and by * the driver. See comments in hpsa.h for more info. */ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = hpda_alloc_ctlr_info(); if (!h) { dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; } h->pdev = pdev; h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; INIT_LIST_HEAD(&h->offline_device_list); spin_lock_init(&h->lock); spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); spin_lock_init(&h->reset_lock); atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); /* Allocate and clear per-cpu variable lockup_detected */ h->lockup_detected = alloc_percpu(u32); if (!h->lockup_detected) { dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); rc = -ENOMEM; goto clean1; /* aer/h */ } set_lockup_detected_for_all_cpus(h, 0); rc = hpsa_pci_init(h); if (rc) goto clean2; /* lu, aer/h */ /* relies on h-> settings made by hpsa_pci_init, including * interrupt_mode h->intr */ rc = hpsa_scsi_host_alloc(h); if (rc) goto clean2_5; /* pci, lu, aer/h */ sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); h->ctlr = number_of_controllers; number_of_controllers++; /* configure PCI DMA stuff */ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (rc != 0) { rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc != 0) { dev_err(&pdev->dev, "no suitable DMA available\n"); goto clean3; /* shost, pci, lu, aer/h */ } } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, HPSA_INTR_OFF); rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); if (rc) goto clean3; /* shost, pci, lu, aer/h */ rc = hpsa_alloc_cmd_pool(h); if (rc) goto clean4; /* irq, shost, pci, lu, aer/h */ rc = hpsa_alloc_sg_chain_blocks(h); if (rc) goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ init_waitqueue_head(&h->scan_wait_queue); init_waitqueue_head(&h->event_sync_wait_queue); mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ h->scan_waiting = 0; pci_set_drvdata(pdev, h); h->ndevices = 0; spin_lock_init(&h->devlock); rc = hpsa_put_ctlr_into_performant_mode(h); if (rc) goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ /* create the resubmit workqueue */ h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); if (!h->rescan_ctlr_wq) { rc = -ENOMEM; goto clean7; } h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); if (!h->resubmit_wq) { rc = -ENOMEM; goto clean7; /* aer/h */ } h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor"); if (!h->monitor_ctlr_wq) { rc = -ENOMEM; goto clean7; } /* * At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. */ if (try_soft_reset) { /* This is kind of gross. We may or may not get a completion * from the soft reset command, and if we do, then the value * from the fifo may or may not be valid. So, we wait 10 secs * after the reset throwing away any completions we get during * that time. Unregister the interrupt handler and register * fake ones to scoop up any residual completions. 
*/ spin_lock_irqsave(&h->lock, flags); h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); hpsa_free_irqs(h); rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, hpsa_intx_discard_completions); if (rc) { dev_warn(&h->pdev->dev, "Failed to request_irq after soft reset.\n"); /* * cannot goto clean7 or free_irqs will be called * again. Instead, do its work */ hpsa_free_performant_mode(h); /* clean7 */ hpsa_free_sg_chain_blocks(h); /* clean6 */ hpsa_free_cmd_pool(h); /* clean5 */ /* * skip hpsa_free_irqs(h) clean4 since that * was just called before request_irqs failed */ goto clean3; } rc = hpsa_kdump_soft_reset(h); if (rc) /* Neither hard nor soft reset worked, we're hosed. */ goto clean7; dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, "Waiting for stale completions to drain.\n"); h->access.set_intr_mask(h, HPSA_INTR_ON); msleep(10000); h->access.set_intr_mask(h, HPSA_INTR_OFF); rc = controller_reset_failed(h->cfgtable); if (rc) dev_info(&h->pdev->dev, "Soft reset appears to have failed.\n"); /* since the controller's reset, we have to go back and re-init * everything. Easiest to just forget what we've done and do it * all over again. */ hpsa_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) /* don't goto clean, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } /* Enable Accelerated IO path at driver layer */ h->acciopath_status = 1; /* Disable discovery polling.*/ h->discovery_polling = 0; /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); if (!h->lastlogicals) dev_info(&h->pdev->dev, "Can't track change to report lun data\n"); /* hook into SCSI subsystem */ rc = hpsa_scsi_add_host(h); if (rc) goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); schedule_delayed_work(&h->monitor_ctlr_work, h->heartbeat_sample_interval); INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, h->heartbeat_sample_interval); INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker); schedule_delayed_work(&h->event_monitor_work, HPSA_EVENT_MONITOR_INTERVAL); return 0; clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ kfree(h->lastlogicals); clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ hpsa_free_performant_mode(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ hpsa_free_sg_chain_blocks(h); clean5: /* cmd, irq, shost, pci, lu, aer/h */ hpsa_free_cmd_pool(h); clean4: /* irq, shost, pci, lu, aer/h */ hpsa_free_irqs(h); clean3: /* shost, pci, lu, aer/h */ scsi_host_put(h->scsi_host); h->scsi_host = NULL; clean2_5: /* pci, lu, aer/h */ hpsa_free_pci_init(h); clean2: /* lu, aer/h */ if (h->lockup_detected) { free_percpu(h->lockup_detected); h->lockup_detected = NULL; } clean1: /* wq/aer/h */ if (h->resubmit_wq) { destroy_workqueue(h->resubmit_wq); h->resubmit_wq = NULL; } if (h->rescan_ctlr_wq) { destroy_workqueue(h->rescan_ctlr_wq); h->rescan_ctlr_wq = NULL; } if (h->monitor_ctlr_wq) { destroy_workqueue(h->monitor_ctlr_wq); h->monitor_ctlr_wq = NULL; } hpda_free_ctlr_info(h); return rc; } static void hpsa_flush_cache(struct ctlr_info 
*h) { char *flush_buf; struct CommandList *c; int rc; if (unlikely(lockup_detected(h))) return; flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) return; c = cmd_alloc(h); if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, RAID_CTLR_LUNID, TYPE_CMD)) { goto out; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, DEFAULT_TIMEOUT); if (rc) goto out; if (c->err_info->CommandStatus != 0) out: dev_warn(&h->pdev->dev, "error flushing cache on controller\n"); cmd_free(h, c); kfree(flush_buf); } /* Make controller gather fresh report lun data each time we * send down a report luns request */ static void hpsa_disable_rld_caching(struct ctlr_info *h) { u32 *options; struct CommandList *c; int rc; /* Don't bother trying to set diag options if locked up */ if (unlikely(h->lockup_detected)) return; options = kzalloc(sizeof(*options), GFP_KERNEL); if (!options) return; c = cmd_alloc(h); /* first, get the current diag options settings */ if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, RAID_CTLR_LUNID, TYPE_CMD)) goto errout; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; /* Now, set the bit for disabling the RLD caching */ *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING; if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, RAID_CTLR_LUNID, TYPE_CMD)) goto errout; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, NO_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; /* Now verify that it got set: */ if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, RAID_CTLR_LUNID, TYPE_CMD)) goto errout; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, NO_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) goto out; errout: dev_err(&h->pdev->dev, "Error: failed to disable report lun data caching.\n"); out: cmd_free(h, c); kfree(options); } static void __hpsa_shutdown(struct pci_dev *pdev) { struct ctlr_info *h; h = pci_get_drvdata(pdev); /* Turn board interrupts off and send the flush cache command * sendcmd will turn off interrupt, and send the flush... * To write all data in the battery backed cache to disks */ hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); hpsa_free_irqs(h); /* init_one 4 */ hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } static void hpsa_shutdown(struct pci_dev *pdev) { __hpsa_shutdown(pdev); pci_disable_device(pdev); } static void hpsa_free_device_info(struct ctlr_info *h) { int i; for (i = 0; i < h->ndevices; i++) { kfree(h->dev[i]); h->dev[i] = NULL; } } static void hpsa_remove_one(struct pci_dev *pdev) { struct ctlr_info *h; unsigned long flags; if (pci_get_drvdata(pdev) == NULL) { dev_err(&pdev->dev, "unable to remove device\n"); return; } h = pci_get_drvdata(pdev); /* Get rid of any controller monitoring work items */ spin_lock_irqsave(&h->lock, flags); h->remove_in_progress = 1; spin_unlock_irqrestore(&h->lock, flags); cancel_delayed_work_sync(&h->monitor_ctlr_work); cancel_delayed_work_sync(&h->rescan_ctlr_work); cancel_delayed_work_sync(&h->event_monitor_work); destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); destroy_workqueue(h->monitor_ctlr_wq); hpsa_delete_sas_host(h); /* * Call before disabling interrupts. * scsi_remove_host can trigger I/O operations especially * when multipath is enabled. There can be SYNCHRONIZE CACHE * operations which cannot complete and will hang the system. 
*/ if (h->scsi_host) scsi_remove_host(h->scsi_host); /* init_one 8 */ /* includes hpsa_free_irqs - init_one 4 */ /* includes hpsa_disable_interrupt_mode - pci_init 2 */ __hpsa_shutdown(pdev); hpsa_free_device_info(h); /* scan */ kfree(h->hba_inquiry_data); /* init_one 10 */ h->hba_inquiry_data = NULL; /* init_one 10 */ hpsa_free_ioaccel2_sg_chain_blocks(h); hpsa_free_performant_mode(h); /* init_one 7 */ hpsa_free_sg_chain_blocks(h); /* init_one 6 */ hpsa_free_cmd_pool(h); /* init_one 5 */ kfree(h->lastlogicals); /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ scsi_host_put(h->scsi_host); /* init_one 3 */ h->scsi_host = NULL; /* init_one 3 */ /* includes hpsa_disable_interrupt_mode - pci_init 2 */ hpsa_free_pci_init(h); /* init_one 2.5 */ free_percpu(h->lockup_detected); /* init_one 2 */ h->lockup_detected = NULL; /* init_one 2 */ hpda_free_ctlr_info(h); /* init_one 1 */ } static int __maybe_unused hpsa_suspend( __attribute__((unused)) struct device *dev) { return -ENOSYS; } static int __maybe_unused hpsa_resume (__attribute__((unused)) struct device *dev) { return -ENOSYS; } static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume); static struct pci_driver hpsa_pci_driver = { .name = HPSA, .probe = hpsa_init_one, .remove = hpsa_remove_one, .id_table = hpsa_pci_device_id, /* id_table */ .shutdown = hpsa_shutdown, .driver.pm = &hpsa_pm_ops, }; /* Fill in bucket_map[], given nsgs (the max number of * scatter gather elements supported) and bucket[], * which is an array of 8 integers. The bucket[] array * contains 8 different DMA transfer sizes (in 16 * byte increments) which the controller uses to fetch * commands. This function fills in bucket_map[], which * maps a given number of scatter gather elements to one of * the 8 DMA transfer sizes. The point of it is to allow the * controller to only do as much DMA as needed to fetch the * command, with the DMA transfer size encoded in the lower * bits of the command address. */ static void calc_bucket_map(int bucket[], int num_buckets, int nsgs, int min_blocks, u32 *bucket_map) { int i, j, b, size; /* Note, bucket_map must have nsgs+1 entries. */ for (i = 0; i <= nsgs; i++) { /* Compute size of a command with i SG entries */ size = i + min_blocks; b = num_buckets; /* Assume the biggest bucket */ /* Find the bucket that is just big enough */ for (j = 0; j < num_buckets; j++) { if (bucket[j] >= size) { b = j; break; } } /* for a command with i SG entries, use bucket b. */ bucket_map[i] = b; } } /* * return -ENODEV on err, 0 on success (or no action) * allocates numerous items that must be freed later */ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) { int i; unsigned long register_value; unsigned long transMethod = CFGTBL_Trans_Performant | (trans_support & CFGTBL_Trans_use_short_tags) | CFGTBL_Trans_enable_directed_msix | (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)); struct access_method access = SA5_performant_access; /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different * sizes of commands which there may be. It's a way of * reducing the DMA done to fetch each command. Encoded into * each command's tag are 3 bits which communicate to the controller * which of the eight sizes that command fits within. The size of * each command depends on how many scatter gather entries there are. * Each SG entry requires 16 bytes. 
The eight registers are programmed * with the number of 16-byte blocks a command of that size requires. * The smallest command possible requires 5 such 16 byte blocks. * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte * blocks. Note, this only extends to the SG entries contained * within the command block, and does not extend to chained blocks * of SG elements. bft[] contains the eight values we write to * the registers. They are not evenly distributed, but have more * sizes for small commands, and fewer sizes for larger commands. */ int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; #define MIN_IOACCEL2_BFT_ENTRY 5 #define HPSA_IOACCEL2_HEADER_SZ 4 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > 16 * MIN_IOACCEL2_BFT_ENTRY); BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); /* 5 = 1 s/g entry or 4k * 6 = 2 s/g entry or 8k * 8 = 4 s/g entry or 16k * 10 = 6 s/g entry or 24k */ /* If the controller supports either ioaccel method then * we can also use the RAID stack submit path that does not * perform the superfluous readl() after each command submission. */ if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) access = SA5_performant_access_no_read; /* Controller spec: zero out this buffer. */ for (i = 0; i < h->nreply_queues; i++) memset(h->reply_queue[i].head, 0, h->reply_queue_size); bft[7] = SG_ENTRIES_IN_CMD + 4; calc_bucket_map(bft, ARRAY_SIZE(bft), SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); for (i = 0; i < 8; i++) writel(bft[i], &h->transtable->BlockFetch[i]); /* size of controller ring buffer */ writel(h->max_commands, &h->transtable->RepQSize); writel(h->nreply_queues, &h->transtable->RepQCount); writel(0, &h->transtable->RepQCtrAddrLow32); writel(0, &h->transtable->RepQCtrAddrHigh32); for (i = 0; i < h->nreply_queues; i++) { writel(0, &h->transtable->RepQAddr[i].upper); writel(h->reply_queue[i].busaddr, &h->transtable->RepQAddr[i].lower); } writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); /* * enable outbound interrupt coalescing in accelerator mode; */ if (trans_support & CFGTBL_Trans_io_accel1) { access = SA5_ioaccel_mode1_access; writel(10, &h->cfgtable->HostWrite.CoalIntDelay); writel(4, &h->cfgtable->HostWrite.CoalIntCount); } else if (trans_support & CFGTBL_Trans_io_accel2) access = SA5_ioaccel_mode2_access; writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); if (hpsa_wait_for_mode_change_ack(h)) { dev_err(&h->pdev->dev, "performant mode problem - doorbell timeout\n"); return -ENODEV; } register_value = readl(&(h->cfgtable->TransportActive)); if (!(register_value & CFGTBL_Trans_Performant)) { dev_err(&h->pdev->dev, "performant mode problem - transport not active\n"); return -ENODEV; } /* Change the access methods to the performant access methods */ h->access = access; h->transMethod = transMethod; if (!((trans_support & CFGTBL_Trans_io_accel1) || (trans_support & CFGTBL_Trans_io_accel2))) return 0; if (trans_support & CFGTBL_Trans_io_accel1) { /* Set up I/O accelerator mode */ for (i = 0; i < h->nreply_queues; i++) { writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); h->reply_queue[i].current_entry = readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); } bft[7] = h->ioaccel_maxsg + 8; 
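/* Redo the fetch-table mapping for ioaccel1 commands: min_blocks is 8 in the
 * calc_bucket_map() call below, i.e. a command with i embedded SG entries is
 * assumed to need i + 8 sixteen-byte blocks, and the largest bucket covers
 * ioaccel_maxsg entries. */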
calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, h->ioaccel1_blockFetchTable); /* initialize all reply queue entries to unused */ for (i = 0; i < h->nreply_queues; i++) memset(h->reply_queue[i].head, (u8) IOACCEL_MODE1_REPLY_UNUSED, h->reply_queue_size); /* set all the constant fields in the accelerator command * frames once at init time to save CPU cycles later. */ for (i = 0; i < h->nr_cmds; i++) { struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; cp->function = IOACCEL1_FUNCTION_SCSIIO; cp->err_info = (u32) (h->errinfo_pool_dhandle + (i * sizeof(struct ErrorInfo))); cp->err_info_len = sizeof(struct ErrorInfo); cp->sgl_offset = IOACCEL1_SGLOFFSET; cp->host_context_flags = cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); cp->timeout_sec = 0; cp->ReplyQueue = 0; cp->tag = cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); cp->host_addr = cpu_to_le64(h->ioaccel_cmd_pool_dhandle + (i * sizeof(struct io_accel1_cmd))); } } else if (trans_support & CFGTBL_Trans_io_accel2) { u64 cfg_offset, cfg_base_addr_index; u32 bft2_offset, cfg_base_addr; hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, 4, h->ioaccel2_blockFetchTable); bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); BUILD_BUG_ON(offsetof(struct CfgTable, io_accel_request_size_offset) != 0xb8); h->ioaccel2_bft2_regs = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index) + cfg_offset + bft2_offset, ARRAY_SIZE(bft2) * sizeof(*h->ioaccel2_bft2_regs)); for (i = 0; i < ARRAY_SIZE(bft2); i++) writel(bft2[i], &h->ioaccel2_bft2_regs[i]); } writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); if (hpsa_wait_for_mode_change_ack(h)) { dev_err(&h->pdev->dev, "performant mode problem - enabling ioaccel mode\n"); return -ENODEV; } return 0; } /* Free ioaccel1 mode command blocks and block fetch table */ static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) { if (h->ioaccel_cmd_pool) { dma_free_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); h->ioaccel_cmd_pool = NULL; h->ioaccel_cmd_pool_dhandle = 0; } kfree(h->ioaccel1_blockFetchTable); h->ioaccel1_blockFetchTable = NULL; } /* Allocate ioaccel1 mode command blocks and block fetch table */ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) { h->ioaccel_maxsg = readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; /* Command structures must be aligned on a 128-byte boundary * because the 7 lower bits of the address are used by the * hardware. 
*/ BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % IOACCEL1_COMMANDLIST_ALIGNMENT); h->ioaccel_cmd_pool = dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL); h->ioaccel1_blockFetchTable = kmalloc(((h->ioaccel_maxsg + 1) * sizeof(u32)), GFP_KERNEL); if ((h->ioaccel_cmd_pool == NULL) || (h->ioaccel1_blockFetchTable == NULL)) goto clean_up; memset(h->ioaccel_cmd_pool, 0, h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); return 0; clean_up: hpsa_free_ioaccel1_cmd_and_bft(h); return -ENOMEM; } /* Free ioaccel2 mode command blocks and block fetch table */ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) { hpsa_free_ioaccel2_sg_chain_blocks(h); if (h->ioaccel2_cmd_pool) { dma_free_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); h->ioaccel2_cmd_pool = NULL; h->ioaccel2_cmd_pool_dhandle = 0; } kfree(h->ioaccel2_blockFetchTable); h->ioaccel2_blockFetchTable = NULL; } /* Allocate ioaccel2 mode command blocks and block fetch table */ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) { int rc; /* Allocate ioaccel2 mode command blocks and block fetch table */ h->ioaccel_maxsg = readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % IOACCEL2_COMMANDLIST_ALIGNMENT); h->ioaccel2_cmd_pool = dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL); h->ioaccel2_blockFetchTable = kmalloc(((h->ioaccel_maxsg + 1) * sizeof(u32)), GFP_KERNEL); if ((h->ioaccel2_cmd_pool == NULL) || (h->ioaccel2_blockFetchTable == NULL)) { rc = -ENOMEM; goto clean_up; } rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); if (rc) goto clean_up; memset(h->ioaccel2_cmd_pool, 0, h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); return 0; clean_up: hpsa_free_ioaccel2_cmd_and_bft(h); return rc; } /* Free items allocated by hpsa_put_ctlr_into_performant_mode */ static void hpsa_free_performant_mode(struct ctlr_info *h) { kfree(h->blockFetchTable); h->blockFetchTable = NULL; hpsa_free_reply_queues(h); hpsa_free_ioaccel1_cmd_and_bft(h); hpsa_free_ioaccel2_cmd_and_bft(h); } /* return -ENODEV on error, 0 on success (or no action) * allocates numerous items that must be freed later */ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; int i, rc; if (hpsa_simple_mode) return 0; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) return 0; /* Check for I/O accelerator mode support */ if (trans_support & CFGTBL_Trans_io_accel1) { rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); if (rc) return rc; } else if (trans_support & CFGTBL_Trans_io_accel2) { rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); if (rc) return rc; } h->nreply_queues = h->msix_vectors > 0 ? 
h->msix_vectors : 1; hpsa_get_max_perf_mode_cmds(h); /* Performant mode ring buffer and supporting data structures */ h->reply_queue_size = h->max_commands * sizeof(u64); for (i = 0; i < h->nreply_queues; i++) { h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev, h->reply_queue_size, &h->reply_queue[i].busaddr, GFP_KERNEL); if (!h->reply_queue[i].head) { rc = -ENOMEM; goto clean1; /* rq, ioaccel */ } h->reply_queue[i].size = h->max_commands; h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h->reply_queue[i].current_entry = 0; } /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * sizeof(u32)), GFP_KERNEL); if (!h->blockFetchTable) { rc = -ENOMEM; goto clean1; /* rq, ioaccel */ } rc = hpsa_enter_performant_mode(h, trans_support); if (rc) goto clean2; /* bft, rq, ioaccel */ return 0; clean2: /* bft, rq, ioaccel */ kfree(h->blockFetchTable); h->blockFetchTable = NULL; clean1: /* rq, ioaccel */ hpsa_free_reply_queues(h); hpsa_free_ioaccel1_cmd_and_bft(h); hpsa_free_ioaccel2_cmd_and_bft(h); return rc; } static int is_accelerated_cmd(struct CommandList *c) { return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; } static void hpsa_drain_accel_commands(struct ctlr_info *h) { struct CommandList *c = NULL; int i, accel_cmds_out; int refcount; do { /* wait for all outstanding ioaccel commands to drain out */ accel_cmds_out = 0; for (i = 0; i < h->nr_cmds; i++) { c = h->cmd_pool + i; refcount = atomic_inc_return(&c->refcount); if (refcount > 1) /* Command is allocated */ accel_cmds_out += is_accelerated_cmd(c); cmd_free(h, c); } if (accel_cmds_out <= 0) break; msleep(100); } while (1); } static struct hpsa_sas_phy *hpsa_alloc_sas_phy( struct hpsa_sas_port *hpsa_sas_port) { struct hpsa_sas_phy *hpsa_sas_phy; struct sas_phy *phy; hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL); if (!hpsa_sas_phy) return NULL; phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev, hpsa_sas_port->next_phy_index); if (!phy) { kfree(hpsa_sas_phy); return NULL; } hpsa_sas_port->next_phy_index++; hpsa_sas_phy->phy = phy; hpsa_sas_phy->parent_port = hpsa_sas_port; return hpsa_sas_phy; } static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) { struct sas_phy *phy = hpsa_sas_phy->phy; sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); if (hpsa_sas_phy->added_to_port) list_del(&hpsa_sas_phy->phy_list_entry); sas_phy_delete(phy); kfree(hpsa_sas_phy); } static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy) { int rc; struct hpsa_sas_port *hpsa_sas_port; struct sas_phy *phy; struct sas_identify *identify; hpsa_sas_port = hpsa_sas_phy->parent_port; phy = hpsa_sas_phy->phy; identify = &phy->identify; memset(identify, 0, sizeof(*identify)); identify->sas_address = hpsa_sas_port->sas_address; identify->device_type = SAS_END_DEVICE; identify->initiator_port_protocols = SAS_PROTOCOL_STP; identify->target_port_protocols = SAS_PROTOCOL_STP; phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; rc = sas_phy_add(hpsa_sas_phy->phy); if (rc) return rc; sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy); list_add_tail(&hpsa_sas_phy->phy_list_entry, &hpsa_sas_port->phy_list_head); hpsa_sas_phy->added_to_port = true; return 0; } static int hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port, struct sas_rphy *rphy) { struct 
sas_identify *identify; identify = &rphy->identify; identify->sas_address = hpsa_sas_port->sas_address; identify->initiator_port_protocols = SAS_PROTOCOL_STP; identify->target_port_protocols = SAS_PROTOCOL_STP; return sas_rphy_add(rphy); } static struct hpsa_sas_port *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node, u64 sas_address) { int rc; struct hpsa_sas_port *hpsa_sas_port; struct sas_port *port; hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL); if (!hpsa_sas_port) return NULL; INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head); hpsa_sas_port->parent_node = hpsa_sas_node; port = sas_port_alloc_num(hpsa_sas_node->parent_dev); if (!port) goto free_hpsa_port; rc = sas_port_add(port); if (rc) goto free_sas_port; hpsa_sas_port->port = port; hpsa_sas_port->sas_address = sas_address; list_add_tail(&hpsa_sas_port->port_list_entry, &hpsa_sas_node->port_list_head); return hpsa_sas_port; free_sas_port: sas_port_free(port); free_hpsa_port: kfree(hpsa_sas_port); return NULL; } static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port) { struct hpsa_sas_phy *hpsa_sas_phy; struct hpsa_sas_phy *next; list_for_each_entry_safe(hpsa_sas_phy, next, &hpsa_sas_port->phy_list_head, phy_list_entry) hpsa_free_sas_phy(hpsa_sas_phy); sas_port_delete(hpsa_sas_port->port); list_del(&hpsa_sas_port->port_list_entry); kfree(hpsa_sas_port); } static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev) { struct hpsa_sas_node *hpsa_sas_node; hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL); if (hpsa_sas_node) { hpsa_sas_node->parent_dev = parent_dev; INIT_LIST_HEAD(&hpsa_sas_node->port_list_head); } return hpsa_sas_node; } static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node) { struct hpsa_sas_port *hpsa_sas_port; struct hpsa_sas_port *next; if (!hpsa_sas_node) return; list_for_each_entry_safe(hpsa_sas_port, next, &hpsa_sas_node->port_list_head, port_list_entry) hpsa_free_sas_port(hpsa_sas_port); kfree(hpsa_sas_node); } static struct hpsa_scsi_dev_t *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, struct sas_rphy *rphy) { int i; struct hpsa_scsi_dev_t *device; for (i = 0; i < h->ndevices; i++) { device = h->dev[i]; if (!device->sas_port) continue; if (device->sas_port->rphy == rphy) return device; } return NULL; } static int hpsa_add_sas_host(struct ctlr_info *h) { int rc; struct device *parent_dev; struct hpsa_sas_node *hpsa_sas_node; struct hpsa_sas_port *hpsa_sas_port; struct hpsa_sas_phy *hpsa_sas_phy; parent_dev = &h->scsi_host->shost_dev; hpsa_sas_node = hpsa_alloc_sas_node(parent_dev); if (!hpsa_sas_node) return -ENOMEM; hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address); if (!hpsa_sas_port) { rc = -ENODEV; goto free_sas_node; } hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port); if (!hpsa_sas_phy) { rc = -ENODEV; goto free_sas_port; } rc = hpsa_sas_port_add_phy(hpsa_sas_phy); if (rc) goto free_sas_phy; h->sas_host = hpsa_sas_node; return 0; free_sas_phy: sas_phy_free(hpsa_sas_phy->phy); kfree(hpsa_sas_phy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); free_sas_node: hpsa_free_sas_node(hpsa_sas_node); return rc; } static void hpsa_delete_sas_host(struct ctlr_info *h) { hpsa_free_sas_node(h->sas_host); } static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, struct hpsa_scsi_dev_t *device) { int rc; struct hpsa_sas_port *hpsa_sas_port; struct sas_rphy *rphy; hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address); if (!hpsa_sas_port) return -ENOMEM; rphy = sas_end_device_alloc(hpsa_sas_port->port); if 
(!rphy) { rc = -ENODEV; goto free_sas_port; } hpsa_sas_port->rphy = rphy; device->sas_port = hpsa_sas_port; rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); if (rc) goto free_sas_rphy; return 0; free_sas_rphy: sas_rphy_free(rphy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); device->sas_port = NULL; return rc; } static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device) { if (device->sas_port) { hpsa_free_sas_port(device->sas_port); device->sas_port = NULL; } } static int hpsa_sas_get_linkerrors(struct sas_phy *phy) { return 0; } static int hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { struct Scsi_Host *shost = phy_to_shost(rphy); struct ctlr_info *h; struct hpsa_scsi_dev_t *sd; if (!shost) return -ENXIO; h = shost_to_hba(shost); if (!h) return -ENXIO; sd = hpsa_find_device_by_sas_rphy(h, rphy); if (!sd) return -ENXIO; *identifier = sd->eli; return 0; } static int hpsa_sas_get_bay_identifier(struct sas_rphy *rphy) { return -ENXIO; } static int hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset) { return 0; } static int hpsa_sas_phy_enable(struct sas_phy *phy, int enable) { return 0; } static int hpsa_sas_phy_setup(struct sas_phy *phy) { return 0; } static void hpsa_sas_phy_release(struct sas_phy *phy) { } static int hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) { return -EINVAL; } static struct sas_function_template hpsa_sas_transport_functions = { .get_linkerrors = hpsa_sas_get_linkerrors, .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier, .get_bay_identifier = hpsa_sas_get_bay_identifier, .phy_reset = hpsa_sas_phy_reset, .phy_enable = hpsa_sas_phy_enable, .phy_setup = hpsa_sas_phy_setup, .phy_release = hpsa_sas_phy_release, .set_phy_speed = hpsa_sas_phy_speed, }; /* * This is it. Register the PCI driver information for the cards we control * the OS will call our registered routines when it finds one of our cards. 
*/ static int __init hpsa_init(void) { int rc; hpsa_sas_transport_template = sas_attach_transport(&hpsa_sas_transport_functions); if (!hpsa_sas_transport_template) return -ENODEV; rc = pci_register_driver(&hpsa_pci_driver); if (rc) sas_release_transport(hpsa_sas_transport_template); return rc; } static void __exit hpsa_cleanup(void) { pci_unregister_driver(&hpsa_pci_driver); sas_release_transport(hpsa_sas_transport_template); } static void __attribute__((unused)) verify_offsets(void) { #define VERIFY_OFFSET(member, offset) \ BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) VERIFY_OFFSET(structure_size, 0); VERIFY_OFFSET(volume_blk_size, 4); VERIFY_OFFSET(volume_blk_cnt, 8); VERIFY_OFFSET(phys_blk_shift, 16); VERIFY_OFFSET(parity_rotation_shift, 17); VERIFY_OFFSET(strip_size, 18); VERIFY_OFFSET(disk_starting_blk, 20); VERIFY_OFFSET(disk_blk_cnt, 28); VERIFY_OFFSET(data_disks_per_row, 36); VERIFY_OFFSET(metadata_disks_per_row, 38); VERIFY_OFFSET(row_cnt, 40); VERIFY_OFFSET(layout_map_count, 42); VERIFY_OFFSET(flags, 44); VERIFY_OFFSET(dekindex, 46); /* VERIFY_OFFSET(reserved, 48 */ VERIFY_OFFSET(data, 64); #undef VERIFY_OFFSET #define VERIFY_OFFSET(member, offset) \ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) VERIFY_OFFSET(IU_type, 0); VERIFY_OFFSET(direction, 1); VERIFY_OFFSET(reply_queue, 2); /* VERIFY_OFFSET(reserved1, 3); */ VERIFY_OFFSET(scsi_nexus, 4); VERIFY_OFFSET(Tag, 8); VERIFY_OFFSET(cdb, 16); VERIFY_OFFSET(cciss_lun, 32); VERIFY_OFFSET(data_len, 40); VERIFY_OFFSET(cmd_priority_task_attr, 44); VERIFY_OFFSET(sg_count, 45); /* VERIFY_OFFSET(reserved3 */ VERIFY_OFFSET(err_ptr, 48); VERIFY_OFFSET(err_len, 56); /* VERIFY_OFFSET(reserved4 */ VERIFY_OFFSET(sg, 64); #undef VERIFY_OFFSET #define VERIFY_OFFSET(member, offset) \ BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) VERIFY_OFFSET(dev_handle, 0x00); VERIFY_OFFSET(reserved1, 0x02); VERIFY_OFFSET(function, 0x03); VERIFY_OFFSET(reserved2, 0x04); VERIFY_OFFSET(err_info, 0x0C); VERIFY_OFFSET(reserved3, 0x10); VERIFY_OFFSET(err_info_len, 0x12); VERIFY_OFFSET(reserved4, 0x13); VERIFY_OFFSET(sgl_offset, 0x14); VERIFY_OFFSET(reserved5, 0x15); VERIFY_OFFSET(transfer_len, 0x1C); VERIFY_OFFSET(reserved6, 0x20); VERIFY_OFFSET(io_flags, 0x24); VERIFY_OFFSET(reserved7, 0x26); VERIFY_OFFSET(LUN, 0x34); VERIFY_OFFSET(control, 0x3C); VERIFY_OFFSET(CDB, 0x40); VERIFY_OFFSET(reserved8, 0x50); VERIFY_OFFSET(host_context_flags, 0x60); VERIFY_OFFSET(timeout_sec, 0x62); VERIFY_OFFSET(ReplyQueue, 0x64); VERIFY_OFFSET(reserved9, 0x65); VERIFY_OFFSET(tag, 0x68); VERIFY_OFFSET(host_addr, 0x70); VERIFY_OFFSET(CISS_LUN, 0x78); VERIFY_OFFSET(SG, 0x78 + 8); #undef VERIFY_OFFSET } module_init(hpsa_init); module_exit(hpsa_cleanup);
linux-master
drivers/scsi/hpsa.c
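The block-fetch table logic above (calc_bucket_map() together with the bft[] and bft2[] tables in hpsa_enter_performant_mode()) is easier to see in isolation. The following stand-alone sketch reproduces the same mapping in user space, using min_blocks = 4 as in performant mode and 31 as an illustrative stand-in for SG_ENTRIES_IN_CMD; it is purely illustrative and not part of the driver.

#include <stdio.h>

/*
 * Same idea as the driver's calc_bucket_map(): a command with i SG
 * entries occupies (i + min_blocks) sixteen-byte blocks, and we pick
 * the first bucket large enough to hold it.
 */
static void demo_bucket_map(const int *bucket, int num_buckets, int nsgs,
			    int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + min_blocks;	/* blocks needed for i SG entries */
		b = num_buckets;	/* assume the biggest bucket */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;	/* first bucket that fits */
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	/* Mirrors bft[] from hpsa_enter_performant_mode(); 31 is used
	 * here as a stand-in for SG_ENTRIES_IN_CMD. */
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, 31 + 4 };
	int map[32];
	int i;

	demo_bucket_map(bft, 8, 31, 4, map);
	for (i = 0; i <= 31; i++)
		printf("%2d SG entries -> bucket %d (%2d blocks)\n",
		       i, map[i], bft[map[i]]);
	return 0;
}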
// SPDX-License-Identifier: GPL-2.0-only /* * AMD am53c974 driver. * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <scsi/scsi_host.h> #include "esp_scsi.h" #define DRV_MODULE_NAME "am53c974" #define DRV_MODULE_VERSION "1.00" static bool am53c974_debug; static bool am53c974_fenab = true; #define esp_dma_log(f, a...) \ do { \ if (am53c974_debug) \ shost_printk(KERN_DEBUG, esp->host, f, ##a); \ } while (0) #define ESP_DMA_CMD 0x10 #define ESP_DMA_STC 0x11 #define ESP_DMA_SPA 0x12 #define ESP_DMA_WBC 0x13 #define ESP_DMA_WAC 0x14 #define ESP_DMA_STATUS 0x15 #define ESP_DMA_SMDLA 0x16 #define ESP_DMA_WMAC 0x17 #define ESP_DMA_CMD_IDLE 0x00 #define ESP_DMA_CMD_BLAST 0x01 #define ESP_DMA_CMD_ABORT 0x02 #define ESP_DMA_CMD_START 0x03 #define ESP_DMA_CMD_MASK 0x03 #define ESP_DMA_CMD_DIAG 0x04 #define ESP_DMA_CMD_MDL 0x10 #define ESP_DMA_CMD_INTE_P 0x20 #define ESP_DMA_CMD_INTE_D 0x40 #define ESP_DMA_CMD_DIR 0x80 #define ESP_DMA_STAT_PWDN 0x01 #define ESP_DMA_STAT_ERROR 0x02 #define ESP_DMA_STAT_ABORT 0x04 #define ESP_DMA_STAT_DONE 0x08 #define ESP_DMA_STAT_SCSIINT 0x10 #define ESP_DMA_STAT_BCMPLT 0x20 /* EEPROM is accessed with 16-bit values */ #define DC390_EEPROM_READ 0x80 #define DC390_EEPROM_LEN 0x40 /* * DC390 EEPROM * * 8 * 4 bytes of per-device options * followed by HBA specific options */ /* Per-device options */ #define DC390_EE_MODE1 0x00 #define DC390_EE_SPEED 0x01 /* HBA-specific options */ #define DC390_EE_ADAPT_SCSI_ID 0x40 #define DC390_EE_MODE2 0x41 #define DC390_EE_DELAY 0x42 #define DC390_EE_TAG_CMD_NUM 0x43 #define DC390_EE_MODE1_PARITY_CHK 0x01 #define DC390_EE_MODE1_SYNC_NEGO 0x02 #define DC390_EE_MODE1_EN_DISC 0x04 #define DC390_EE_MODE1_SEND_START 0x08 #define DC390_EE_MODE1_TCQ 0x10 #define DC390_EE_MODE2_MORE_2DRV 0x01 #define DC390_EE_MODE2_GREATER_1G 0x02 #define DC390_EE_MODE2_RST_SCSI_BUS 0x04 #define DC390_EE_MODE2_ACTIVE_NEGATION 0x08 #define DC390_EE_MODE2_NO_SEEK 0x10 #define DC390_EE_MODE2_LUN_CHECK 0x20 struct pci_esp_priv { struct esp *esp; u8 dma_status; }; static void pci_esp_dma_drain(struct esp *esp); static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp) { return dev_get_drvdata(esp->dev); } static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg) { iowrite8(val, esp->regs + (reg * 4UL)); } static u8 pci_esp_read8(struct esp *esp, unsigned long reg) { return ioread8(esp->regs + (reg * 4UL)); } static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg) { return iowrite32(val, esp->regs + (reg * 4UL)); } static int pci_esp_irq_pending(struct esp *esp) { struct pci_esp_priv *pep = pci_esp_get_priv(esp); pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS); esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status); if (pep->dma_status & (ESP_DMA_STAT_ERROR | ESP_DMA_STAT_ABORT | ESP_DMA_STAT_DONE | ESP_DMA_STAT_SCSIINT)) return 1; return 0; } static void pci_esp_reset_dma(struct esp *esp) { /* Nothing to do ? */ } static void pci_esp_dma_drain(struct esp *esp) { u8 resid; int lim = 1000; if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP || (esp->sreg & ESP_STAT_PMASK) == ESP_DIP) /* Data-In or Data-Out, nothing to be done */ return; while (--lim > 0) { resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES; if (resid <= 1) break; cpu_relax(); } /* * When there is a residual BCMPLT will never be set * (obviously). 
But we still have to issue the BLAST * command, otherwise the data will not being transferred. * But we'll never know when the BLAST operation is * finished. So check for some time and give up eventually. */ lim = 1000; pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD); while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) { if (--lim == 0) break; cpu_relax(); } pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD); esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid); /* BLAST residual handling is currently untested */ if (WARN_ON_ONCE(resid == 1)) { struct esp_cmd_entry *ent = esp->active_cmd; ent->flags |= ESP_CMD_FLAG_RESIDUAL; } } static void pci_esp_dma_invalidate(struct esp *esp) { struct pci_esp_priv *pep = pci_esp_get_priv(esp); esp_dma_log("invalidate DMA\n"); pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD); pep->dma_status = 0; } static int pci_esp_dma_error(struct esp *esp) { struct pci_esp_priv *pep = pci_esp_get_priv(esp); if (pep->dma_status & ESP_DMA_STAT_ERROR) { u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD); if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START) pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD); return 1; } if (pep->dma_status & ESP_DMA_STAT_ABORT) { pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD); pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD); return 1; } return 0; } static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct pci_esp_priv *pep = pci_esp_get_priv(esp); u32 val = 0; BUG_ON(!(cmd & ESP_CMD_DMA)); pep->dma_status = 0; /* Set DMA engine to IDLE */ if (write) /* DMA write direction logic is inverted */ val |= ESP_DMA_CMD_DIR; pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD); pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); if (esp->config2 & ESP_CONFIG2_FENAB) pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); pci_esp_write32(esp, esp_count, ESP_DMA_STC); pci_esp_write32(esp, addr, ESP_DMA_SPA); esp_dma_log("start dma addr[%x] count[%d:%d]\n", addr, esp_count, dma_count); scsi_esp_cmd(esp, cmd); /* Send DMA Start command */ pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD); } static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { int dma_limit = 16; u32 base, end; /* * If CONFIG2_FENAB is set we can * handle up to 24 bit addresses */ if (esp->config2 & ESP_CONFIG2_FENAB) dma_limit = 24; if (dma_len > (1U << dma_limit)) dma_len = (1U << dma_limit); /* * Prevent crossing a 24-bit address boundary. */ base = dma_addr & ((1U << 24) - 1U); end = base + dma_len; if (end > (1U << 24)) end = (1U <<24); dma_len = end - base; return dma_len; } static const struct esp_driver_ops pci_esp_ops = { .esp_write8 = pci_esp_write8, .esp_read8 = pci_esp_read8, .irq_pending = pci_esp_irq_pending, .reset_dma = pci_esp_reset_dma, .dma_drain = pci_esp_dma_drain, .dma_invalidate = pci_esp_dma_invalidate, .send_dma_cmd = pci_esp_send_dma_cmd, .dma_error = pci_esp_dma_error, .dma_length_limit = pci_esp_dma_length_limit, }; /* * Read DC-390 eeprom */ static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd) { u8 carry_flag = 1, j = 0x80, bval; int i; for (i = 0; i < 9; i++) { if (carry_flag) { pci_write_config_byte(pdev, 0x80, 0x40); bval = 0xc0; } else bval = 0x80; udelay(160); pci_write_config_byte(pdev, 0x80, bval); udelay(160); pci_write_config_byte(pdev, 0x80, 0); udelay(160); carry_flag = (cmd & j) ? 
1 : 0; j >>= 1; } } static u16 dc390_eeprom_get_data(struct pci_dev *pdev) { int i; u16 wval = 0; u8 bval; for (i = 0; i < 16; i++) { wval <<= 1; pci_write_config_byte(pdev, 0x80, 0x80); udelay(160); pci_write_config_byte(pdev, 0x80, 0x40); udelay(160); pci_read_config_byte(pdev, 0x00, &bval); if (bval == 0x22) wval |= 1; } return wval; } static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr) { u8 cmd = DC390_EEPROM_READ, i; for (i = 0; i < DC390_EEPROM_LEN; i++) { pci_write_config_byte(pdev, 0xc0, 0); udelay(160); dc390_eeprom_prepare_read(pdev, cmd++); *ptr++ = dc390_eeprom_get_data(pdev); pci_write_config_byte(pdev, 0x80, 0); pci_write_config_byte(pdev, 0x80, 0); udelay(160); } } static void dc390_check_eeprom(struct esp *esp) { struct pci_dev *pdev = to_pci_dev(esp->dev); u8 EEbuf[128]; u16 *ptr = (u16 *)EEbuf, wval = 0; int i; dc390_read_eeprom(pdev, ptr); for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++) wval += *ptr; /* no Tekram EEprom found */ if (wval != 0x1234) { dev_printk(KERN_INFO, &pdev->dev, "No valid Tekram EEprom found\n"); return; } esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID]; esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM]; if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION) esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE; } static int pci_esp_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { const struct scsi_host_template *hostt = &scsi_esp_template; int err = -ENODEV; struct Scsi_Host *shost; struct esp *esp; struct pci_esp_priv *pep; if (pci_enable_device(pdev)) { dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n"); return -ENODEV; } if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { dev_printk(KERN_INFO, &pdev->dev, "failed to set 32bit DMA mask\n"); goto fail_disable_device; } shost = scsi_host_alloc(hostt, sizeof(struct esp)); if (!shost) { dev_printk(KERN_INFO, &pdev->dev, "failed to allocate scsi host\n"); err = -ENOMEM; goto fail_disable_device; } pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL); if (!pep) { dev_printk(KERN_INFO, &pdev->dev, "failed to allocate esp_priv\n"); err = -ENOMEM; goto fail_host_alloc; } esp = shost_priv(shost); esp->host = shost; esp->dev = &pdev->dev; esp->ops = &pci_esp_ops; /* * The am53c974 HBA has a design flaw of generating * spurious DMA completion interrupts when using * DMA for command submission. 
*/ esp->flags |= ESP_FLAG_USE_FIFO; /* * Enable CONFIG2_FENAB to allow for large DMA transfers */ if (am53c974_fenab) esp->config2 |= ESP_CONFIG2_FENAB; pep->esp = esp; if (pci_request_regions(pdev, DRV_MODULE_NAME)) { dev_printk(KERN_ERR, &pdev->dev, "pci memory selection failed\n"); goto fail_priv_alloc; } esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!esp->regs) { dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n"); err = -EINVAL; goto fail_release_regions; } esp->dma_regs = esp->regs; pci_set_master(pdev); esp->command_block = dma_alloc_coherent(&pdev->dev, 16, &esp->command_block_dma, GFP_KERNEL); if (!esp->command_block) { dev_printk(KERN_ERR, &pdev->dev, "failed to allocate command block\n"); err = -ENOMEM; goto fail_unmap_regs; } pci_set_drvdata(pdev, pep); err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED, DRV_MODULE_NAME, esp); if (err < 0) { dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n"); goto fail_unmap_command_block; } esp->scsi_id = 7; dc390_check_eeprom(esp); shost->this_id = esp->scsi_id; shost->max_id = 8; shost->irq = pdev->irq; shost->io_port = pci_resource_start(pdev, 0); shost->n_io_port = pci_resource_len(pdev, 0); shost->unique_id = shost->io_port; esp->scsi_id_mask = (1 << esp->scsi_id); /* Assume 40MHz clock */ esp->cfreq = 40000000; err = scsi_esp_register(esp); if (err) goto fail_free_irq; return 0; fail_free_irq: free_irq(pdev->irq, esp); fail_unmap_command_block: pci_set_drvdata(pdev, NULL); dma_free_coherent(&pdev->dev, 16, esp->command_block, esp->command_block_dma); fail_unmap_regs: pci_iounmap(pdev, esp->regs); fail_release_regions: pci_release_regions(pdev); fail_priv_alloc: kfree(pep); fail_host_alloc: scsi_host_put(shost); fail_disable_device: pci_disable_device(pdev); return err; } static void pci_esp_remove_one(struct pci_dev *pdev) { struct pci_esp_priv *pep = pci_get_drvdata(pdev); struct esp *esp = pep->esp; scsi_esp_unregister(esp); free_irq(pdev->irq, esp); pci_set_drvdata(pdev, NULL); dma_free_coherent(&pdev->dev, 16, esp->command_block, esp->command_block_dma); pci_iounmap(pdev, esp->regs); pci_release_regions(pdev); pci_disable_device(pdev); kfree(pep); scsi_host_put(esp->host); } static struct pci_device_id am53c974_pci_tbl[] = { { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl); static struct pci_driver am53c974_driver = { .name = DRV_MODULE_NAME, .id_table = am53c974_pci_tbl, .probe = pci_esp_probe_one, .remove = pci_esp_remove_one, }; module_pci_driver(am53c974_driver); MODULE_DESCRIPTION("AM53C974 SCSI driver"); MODULE_AUTHOR("Hannes Reinecke <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_ALIAS("tmscsim"); module_param(am53c974_debug, bool, 0644); MODULE_PARM_DESC(am53c974_debug, "Enable debugging"); module_param(am53c974_fenab, bool, 0444); MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
linux-master
drivers/scsi/am53c974.c
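/*
 * Editorial sketch, not part of the kernel file above: a minimal user-space
 * model of the Tekram EEPROM validation that dc390_check_eeprom() performs,
 * i.e. the 16-bit word sum of the EEPROM image must equal the magic value
 * 0x1234 before the contents are trusted. The word count of 0x40 and the
 * helper name eeprom_words_valid() are assumptions made for this example
 * only; they are not taken from the excerpt above.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_EEPROM_WORDS 0x40	/* assumed stand-in for DC390_EEPROM_LEN */

/* Return 1 when the 16-bit word sum matches the Tekram magic 0x1234. */
static int eeprom_words_valid(const uint16_t *words, unsigned int count)
{
	uint16_t sum = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		sum += words[i];	/* wraps naturally at 16 bits, like u16 in the driver */

	return sum == 0x1234;
}

int main(void)
{
	uint16_t image[EXAMPLE_EEPROM_WORDS] = { 0 };

	/* Forge a trivially valid image: one word carries the whole sum. */
	image[0] = 0x1234;

	printf("image valid: %s\n",
	       eeprom_words_valid(image, EXAMPLE_EEPROM_WORDS) ? "yes" : "no");
	return 0;
}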
// SPDX-License-Identifier: GPL-2.0 /* * SCSI functions used by both the initiator and the target code. */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/module.h> #include <uapi/linux/pr.h> #include <asm/unaligned.h> #include <scsi/scsi_common.h> MODULE_LICENSE("GPL v2"); /* Command group 3 is reserved and should never be used. */ const unsigned char scsi_command_size_tbl[8] = { 6, 10, 10, 12, 16, 12, 10, 10 }; EXPORT_SYMBOL(scsi_command_size_tbl); /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. * You may not alter any existing entry (although adding new ones is * encouraged once assigned by ANSI/INCITS T10). */ static const char *const scsi_device_types[] = { "Direct-Access ", "Sequential-Access", "Printer ", "Processor ", "WORM ", "CD-ROM ", "Scanner ", "Optical Device ", "Medium Changer ", "Communications ", "ASC IT8 ", "ASC IT8 ", "RAID ", "Enclosure ", "Direct-Access-RBC", "Optical card ", "Bridge controller", "Object storage ", "Automation/Drive ", "Security Manager ", "Direct-Access-ZBC", }; /** * scsi_device_type - Return 17-char string indicating device type. * @type: type number to look up */ const char *scsi_device_type(unsigned type) { if (type == 0x1e) return "Well-known LUN "; if (type == 0x1f) return "No Device "; if (type >= ARRAY_SIZE(scsi_device_types)) return "Unknown "; return scsi_device_types[type]; } EXPORT_SYMBOL(scsi_device_type); enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type) { switch (type) { case SCSI_PR_WRITE_EXCLUSIVE: return PR_WRITE_EXCLUSIVE; case SCSI_PR_EXCLUSIVE_ACCESS: return PR_EXCLUSIVE_ACCESS; case SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY: return PR_WRITE_EXCLUSIVE_REG_ONLY; case SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY: return PR_EXCLUSIVE_ACCESS_REG_ONLY; case SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS: return PR_WRITE_EXCLUSIVE_ALL_REGS; case SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS: return PR_EXCLUSIVE_ACCESS_ALL_REGS; } return 0; } EXPORT_SYMBOL_GPL(scsi_pr_type_to_block); enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type) { switch (type) { case PR_WRITE_EXCLUSIVE: return SCSI_PR_WRITE_EXCLUSIVE; case PR_EXCLUSIVE_ACCESS: return SCSI_PR_EXCLUSIVE_ACCESS; case PR_WRITE_EXCLUSIVE_REG_ONLY: return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY; case PR_EXCLUSIVE_ACCESS_REG_ONLY: return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY; case PR_WRITE_EXCLUSIVE_ALL_REGS: return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS; case PR_EXCLUSIVE_ACCESS_ALL_REGS: return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS; } return 0; } EXPORT_SYMBOL_GPL(block_pr_type_to_scsi); /** * scsilun_to_int - convert a scsi_lun to an int * @scsilun: struct scsi_lun to be converted. * * Description: * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered * integer, and return the result. The caller must check for * truncation before using this function. * * Notes: * For a description of the LUN format, post SCSI-3 see the SCSI * Architecture Model, for SCSI-3 see the SCSI Controller Commands. * * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function * returns the integer: 0x0b03d204 * * This encoding will return a standard integer LUN for LUNs smaller * than 256, which typically use a single level LUN structure with * addressing method 0. 
*/ u64 scsilun_to_int(struct scsi_lun *scsilun) { int i; u64 lun; lun = 0; for (i = 0; i < sizeof(lun); i += 2) lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) | ((u64)scsilun->scsi_lun[i + 1] << (i * 8))); return lun; } EXPORT_SYMBOL(scsilun_to_int); /** * int_to_scsilun - reverts an int into a scsi_lun * @lun: integer to be reverted * @scsilun: struct scsi_lun to be set. * * Description: * Reverts the functionality of the scsilun_to_int, which packed * an 8-byte lun value into an int. This routine unpacks the int * back into the lun value. * * Notes: * Given an integer : 0x0b03d204, this function returns a * struct scsi_lun of: d2 04 0b 03 00 00 00 00 * */ void int_to_scsilun(u64 lun, struct scsi_lun *scsilun) { int i; memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun)); for (i = 0; i < sizeof(lun); i += 2) { scsilun->scsi_lun[i] = (lun >> 8) & 0xFF; scsilun->scsi_lun[i+1] = lun & 0xFF; lun = lun >> 16; } } EXPORT_SYMBOL(int_to_scsilun); /** * scsi_normalize_sense - normalize main elements from either fixed or * descriptor sense data format into a common format. * * @sense_buffer: byte array containing sense data returned by device * @sb_len: number of valid bytes in sense_buffer * @sshdr: pointer to instance of structure that common * elements are written to. * * Notes: * The "main elements" from sense data are: response_code, sense_key, * asc, ascq and additional_length (only for descriptor format). * * Typically this function can be called after a device has * responded to a SCSI command with the CHECK_CONDITION status. * * Return value: * true if valid sense data information found, else false; */ bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, struct scsi_sense_hdr *sshdr) { memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); if (!sense_buffer || !sb_len) return false; sshdr->response_code = (sense_buffer[0] & 0x7f); if (!scsi_sense_valid(sshdr)) return false; if (sshdr->response_code >= 0x72) { /* * descriptor format */ if (sb_len > 1) sshdr->sense_key = (sense_buffer[1] & 0xf); if (sb_len > 2) sshdr->asc = sense_buffer[2]; if (sb_len > 3) sshdr->ascq = sense_buffer[3]; if (sb_len > 7) sshdr->additional_length = sense_buffer[7]; } else { /* * fixed format */ if (sb_len > 2) sshdr->sense_key = (sense_buffer[2] & 0xf); if (sb_len > 7) { sb_len = min(sb_len, sense_buffer[7] + 8); if (sb_len > 12) sshdr->asc = sense_buffer[12]; if (sb_len > 13) sshdr->ascq = sense_buffer[13]; } } return true; } EXPORT_SYMBOL(scsi_normalize_sense); /** * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format. * @sense_buffer: byte array of descriptor format sense data * @sb_len: number of valid bytes in sense_buffer * @desc_type: value of descriptor type to find * (e.g. 0 -> information) * * Notes: * only valid when sense data is in descriptor format * * Return value: * pointer to start of (first) descriptor if found else NULL */ const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, int desc_type) { int add_sen_len, add_len, desc_len, k; const u8 * descp; if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7]))) return NULL; if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73)) return NULL; add_sen_len = (add_sen_len < (sb_len - 8)) ? add_sen_len : (sb_len - 8); descp = &sense_buffer[8]; for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) { descp += desc_len; add_len = (k < (add_sen_len - 1)) ? descp[1]: -1; desc_len = add_len + 2; if (descp[0] == desc_type) return descp; if (add_len < 0) // short descriptor ?? 
break; } return NULL; } EXPORT_SYMBOL(scsi_sense_desc_find); /** * scsi_build_sense_buffer - build sense data in a buffer * @desc: Sense format (non-zero == descriptor format, * 0 == fixed format) * @buf: Where to build sense data * @key: Sense key * @asc: Additional sense code * @ascq: Additional sense code qualifier * **/ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq) { if (desc) { buf[0] = 0x72; /* descriptor, current */ buf[1] = key; buf[2] = asc; buf[3] = ascq; buf[7] = 0; } else { buf[0] = 0x70; /* fixed, current */ buf[2] = key; buf[7] = 0xa; buf[12] = asc; buf[13] = ascq; } } EXPORT_SYMBOL(scsi_build_sense_buffer); /** * scsi_set_sense_information - set the information field in a * formatted sense data buffer * @buf: Where to build sense data * @buf_len: buffer length * @info: 64-bit information value to be set * * Return value: * 0 on success or -EINVAL for invalid sense buffer length **/ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info) { if ((buf[0] & 0x7f) == 0x72) { u8 *ucp, len; len = buf[7]; ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0); if (!ucp) { buf[7] = len + 0xc; ucp = buf + 8 + len; } if (buf_len < len + 0xc) /* Not enough room for info */ return -EINVAL; ucp[0] = 0; ucp[1] = 0xa; ucp[2] = 0x80; /* Valid bit */ ucp[3] = 0; put_unaligned_be64(info, &ucp[4]); } else if ((buf[0] & 0x7f) == 0x70) { /* * Only set the 'VALID' bit if we can represent the value * correctly; otherwise just fill out the lower bytes and * clear the 'VALID' flag. */ if (info <= 0xffffffffUL) buf[0] |= 0x80; else buf[0] &= 0x7f; put_unaligned_be32((u32)info, &buf[3]); } return 0; } EXPORT_SYMBOL(scsi_set_sense_information); /** * scsi_set_sense_field_pointer - set the field pointer sense key * specific information in a formatted sense data buffer * @buf: Where to build sense data * @buf_len: buffer length * @fp: field pointer to be set * @bp: bit pointer to be set * @cd: command/data bit * * Return value: * 0 on success or -EINVAL for invalid sense buffer length */ int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd) { u8 *ucp, len; if ((buf[0] & 0x7f) == 0x72) { len = buf[7]; ucp = (char *)scsi_sense_desc_find(buf, len + 8, 2); if (!ucp) { buf[7] = len + 8; ucp = buf + 8 + len; } if (buf_len < len + 8) /* Not enough room for info */ return -EINVAL; ucp[0] = 2; ucp[1] = 6; ucp[4] = 0x80; /* Valid bit */ if (cd) ucp[4] |= 0x40; if (bp < 0x8) ucp[4] |= 0x8 | bp; put_unaligned_be16(fp, &ucp[5]); } else if ((buf[0] & 0x7f) == 0x70) { len = buf[7]; if (len < 18) buf[7] = 18; buf[15] = 0x80; if (cd) buf[15] |= 0x40; if (bp < 0x8) buf[15] |= 0x8 | bp; put_unaligned_be16(fp, &buf[16]); } return 0; } EXPORT_SYMBOL(scsi_set_sense_field_pointer);
linux-master
drivers/scsi/scsi_common.c
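/*
 * Editorial sketch, not part of the kernel file above: a user-space
 * re-implementation of the byte shuffling done by scsilun_to_int() and
 * int_to_scsilun() in drivers/scsi/scsi_common.c, used only to check the
 * round trip against the example quoted in the kernel comments: the 8-byte
 * LUN d2 04 0b 03 00 00 00 00 maps to the integer 0x0b03d204 and back.
 * The example_* names are hypothetical and exist only for this sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_scsi_lun {
	uint8_t scsi_lun[8];
};

/* Pack the 8 LUN bytes into a host-order integer, two bytes per level. */
static uint64_t example_lun_to_int(const struct example_scsi_lun *lun)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i += 2)
		v |= ((uint64_t)lun->scsi_lun[i] << ((i + 1) * 8)) |
		     ((uint64_t)lun->scsi_lun[i + 1] << (i * 8));
	return v;
}

/* Unpack the integer back into the 8-byte LUN representation. */
static void example_int_to_lun(uint64_t v, struct example_scsi_lun *lun)
{
	int i;

	memset(lun->scsi_lun, 0, sizeof(lun->scsi_lun));
	for (i = 0; i < 8; i += 2) {
		lun->scsi_lun[i] = (v >> 8) & 0xFF;
		lun->scsi_lun[i + 1] = v & 0xFF;
		v >>= 16;
	}
}

int main(void)
{
	struct example_scsi_lun lun = {
		{ 0xd2, 0x04, 0x0b, 0x03, 0x00, 0x00, 0x00, 0x00 }
	};
	struct example_scsi_lun back;
	uint64_t v = example_lun_to_int(&lun);

	example_int_to_lun(v, &back);
	printf("lun as int: 0x%llx (expect 0xb03d204), round trip %s\n",
	       (unsigned long long)v,
	       memcmp(lun.scsi_lun, back.scsi_lun, 8) == 0 ? "ok" : "BAD");
	return 0;
}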
// SPDX-License-Identifier: GPL-2.0-only /* * sd.c Copyright (C) 1992 Drew Eckhardt * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * * Linux scsi disk driver * Initial versions: Drew Eckhardt * Subsequent revisions: Eric Youngdale * Modification history: * - Drew Eckhardt <[email protected]> original * - Eric Youngdale <[email protected]> add scatter-gather, multiple * outstanding request, and other enhancements. * Support loadable low-level scsi drivers. * - Jirka Hanika <[email protected]> support more scsi disks using * eight major numbers. * - Richard Gooch <[email protected]> support devfs. * - Torben Mathiasen <[email protected]> Resource allocation fixes in * sd_init and cleanups. * - Alex Davis <[email protected]> Fix problem where partition info * not being read in sd_open. Fix problem where removable media * could be ejected after sd_open. * - Douglas Gilbert <[email protected]> cleanup for lk 2.5.x * - Badari Pulavarty <[email protected]>, Matthew Wilcox * <[email protected]>, Kurt Garloff <[email protected]>: * Support 32k/1M disks. * * Logging policy (needs CONFIG_SCSI_LOGGING defined): * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1 * - entering other commands: SCSI_LOG_HLQUEUE level 3 * Note: when the logging level is set by the user, it must be greater * than the level indicated above to trigger output. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bio.h> #include <linux/hdreg.h> #include <linux/errno.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/blk-pm.h> #include <linux/delay.h> #include <linux/major.h> #include <linux/mutex.h> #include <linux/string_helpers.h> #include <linux/slab.h> #include <linux/sed-opal.h> #include <linux/pm_runtime.h> #include <linux/pr.h> #include <linux/t10-pi.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsicam.h> #include <scsi/scsi_common.h> #include "sd.h" #include "scsi_priv.h" #include "scsi_logging.h" MODULE_AUTHOR("Eric Youngdale"); MODULE_DESCRIPTION("SCSI disk (sd) driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); #define SD_MINORS 16 static void sd_config_discard(struct scsi_disk *, unsigned int); static void 
sd_config_write_same(struct scsi_disk *); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static void sd_shutdown(struct device *); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); static DEFINE_IDA(sd_index_ida); static mempool_t *sd_page_pool; static struct lock_class_key sd_bio_compl_lkclass; static const char *sd_cache_types[] = { "write through", "none", "write back", "write back, no read (daft)" }; static void sd_set_flush_flag(struct scsi_disk *sdkp) { bool wc = false, fua = false; if (sdkp->WCE) { wc = true; if (sdkp->DPOFUA) fua = true; } blk_queue_write_cache(sdkp->disk->queue, wc, fua); } static ssize_t cache_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ct, rcd, wce, sp; struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; char buffer[64]; char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; static const char temp[] = "temporary "; int len; if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) /* no cache control on RBC devices; theoretically they * can do it, but there's probably so many exceptions * it's not worth the risk */ return -EINVAL; if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { buf += sizeof(temp) - 1; sdkp->cache_override = 1; } else { sdkp->cache_override = 0; } ct = sysfs_match_string(sd_cache_types, buf); if (ct < 0) return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0; if (sdkp->cache_override) { sdkp->WCE = wce; sdkp->RCD = rcd; sd_set_flush_flag(sdkp); return count; } if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT, sdkp->max_retries, &data, NULL)) return -EINVAL; len = min_t(size_t, sizeof(buffer), data.length - data.header_length - data.block_descriptor_length); buffer_data = buffer + data.header_length + data.block_descriptor_length; buffer_data[2] &= ~0x05; buffer_data[2] |= wce << 2 | rcd; sp = buffer_data[0] & 0x80 ? 1 : 0; buffer_data[0] &= ~0x80; /* * Ensure WP, DPOFUA, and RESERVED fields are cleared in * received mode parameter buffer before doing MODE SELECT. 
*/ data.device_specific = 0; if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT, sdkp->max_retries, &data, &sshdr)) { if (scsi_sense_valid(&sshdr)) sd_print_sense_hdr(sdkp, &sshdr); return -EINVAL; } sd_revalidate_disk(sdkp->disk); return count; } static ssize_t manage_start_stop_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; return sprintf(buf, "%u\n", sdp->manage_start_stop); } static ssize_t manage_start_stop_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; bool v; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (kstrtobool(buf, &v)) return -EINVAL; sdp->manage_start_stop = v; return count; } static DEVICE_ATTR_RW(manage_start_stop); static ssize_t allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->device->allow_restart); } static ssize_t allow_restart_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { bool v; struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; if (kstrtobool(buf, &v)) return -EINVAL; sdp->allow_restart = v; return count; } static DEVICE_ATTR_RW(allow_restart); static ssize_t cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); int ct = sdkp->RCD + 2*sdkp->WCE; return sprintf(buf, "%s\n", sd_cache_types[ct]); } static DEVICE_ATTR_RW(cache_type); static ssize_t FUA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->DPOFUA); } static DEVICE_ATTR_RO(FUA); static ssize_t protection_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->protection_type); } static ssize_t protection_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); unsigned int val; int err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; err = kstrtouint(buf, 10, &val); if (err) return err; if (val <= T10_PI_TYPE3_PROTECTION) sdkp->protection_type = val; return count; } static DEVICE_ATTR_RW(protection_type); static ssize_t protection_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; unsigned int dif, dix; dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) { dif = 0; dix = 1; } if (!dif && !dix) return sprintf(buf, "none\n"); return sprintf(buf, "%s%u\n", dix ? 
"dix" : "dif", dif); } static DEVICE_ATTR_RO(protection_mode); static ssize_t app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->ATO); } static DEVICE_ATTR_RO(app_tag_own); static ssize_t thin_provisioning_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->lbpme); } static DEVICE_ATTR_RO(thin_provisioning); /* sysfs_match_string() requires dense arrays */ static const char *lbp_mode[] = { [SD_LBP_FULL] = "full", [SD_LBP_UNMAP] = "unmap", [SD_LBP_WS16] = "writesame_16", [SD_LBP_WS10] = "writesame_10", [SD_LBP_ZERO] = "writesame_zero", [SD_LBP_DISABLE] = "disabled", }; static ssize_t provisioning_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]); } static ssize_t provisioning_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; int mode; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (sd_is_zoned(sdkp)) { sd_config_discard(sdkp, SD_LBP_DISABLE); return count; } if (sdp->type != TYPE_DISK) return -EINVAL; mode = sysfs_match_string(lbp_mode, buf); if (mode < 0) return -EINVAL; sd_config_discard(sdkp, mode); return count; } static DEVICE_ATTR_RW(provisioning_mode); /* sysfs_match_string() requires dense arrays */ static const char *zeroing_mode[] = { [SD_ZERO_WRITE] = "write", [SD_ZERO_WS] = "writesame", [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap", [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap", }; static ssize_t zeroing_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]); } static ssize_t zeroing_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); int mode; if (!capable(CAP_SYS_ADMIN)) return -EACCES; mode = sysfs_match_string(zeroing_mode, buf); if (mode < 0) return -EINVAL; sdkp->zeroing_mode = mode; return count; } static DEVICE_ATTR_RW(zeroing_mode); static ssize_t max_medium_access_timeouts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts); } static ssize_t max_medium_access_timeouts_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); int err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts); return err ? 
err : count; } static DEVICE_ATTR_RW(max_medium_access_timeouts); static ssize_t max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%u\n", sdkp->max_ws_blocks); } static ssize_t max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; unsigned long max; int err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; err = kstrtoul(buf, 10, &max); if (err) return err; if (max == 0) sdp->no_write_same = 1; else if (max <= SD_MAX_WS16_BLOCKS) { sdp->no_write_same = 0; sdkp->max_ws_blocks = max; } sd_config_write_same(sdkp); return count; } static DEVICE_ATTR_RW(max_write_same_blocks); static ssize_t zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); if (sdkp->device->type == TYPE_ZBC) return sprintf(buf, "host-managed\n"); if (sdkp->zoned == 1) return sprintf(buf, "host-aware\n"); if (sdkp->zoned == 2) return sprintf(buf, "drive-managed\n"); return sprintf(buf, "none\n"); } static DEVICE_ATTR_RO(zoned_cap); static ssize_t max_retries_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdev = sdkp->device; int retries, err; err = kstrtoint(buf, 10, &retries); if (err) return err; if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) { sdkp->max_retries = retries; return count; } sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n", SD_MAX_RETRIES); return -EINVAL; } static ssize_t max_retries_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return sprintf(buf, "%d\n", sdkp->max_retries); } static DEVICE_ATTR_RW(max_retries); static struct attribute *sd_disk_attrs[] = { &dev_attr_cache_type.attr, &dev_attr_FUA.attr, &dev_attr_allow_restart.attr, &dev_attr_manage_start_stop.attr, &dev_attr_protection_type.attr, &dev_attr_protection_mode.attr, &dev_attr_app_tag_own.attr, &dev_attr_thin_provisioning.attr, &dev_attr_provisioning_mode.attr, &dev_attr_zeroing_mode.attr, &dev_attr_max_write_same_blocks.attr, &dev_attr_max_medium_access_timeouts.attr, &dev_attr_zoned_cap.attr, &dev_attr_max_retries.attr, NULL, }; ATTRIBUTE_GROUPS(sd_disk); static struct class sd_disk_class = { .name = "scsi_disk", .dev_release = scsi_disk_release, .dev_groups = sd_disk_groups, }; /* * Don't request a new module, as that could deadlock in multipath * environment. */ static void sd_default_probe(dev_t devt) { } /* * Device no to disk mapping: * * major disc2 disc p1 * |............|.............|....|....| <- dev_t * 31 20 19 8 7 4 3 0 * * Inside a major, we have 16k disks, however mapped non- * contiguously. The first 16 disks are for major0, the next * ones with major1, ... Disk 256 is for major0 again, disk 272 * for major1, ... * As we stay compatible with our numbering scheme, we can reuse * the well-know SCSI majors 8, 65--71, 136--143. */ static int sd_major(int major_idx) { switch (major_idx) { case 0: return SCSI_DISK0_MAJOR; case 1 ... 7: return SCSI_DISK1_MAJOR + major_idx - 1; case 8 ... 
15: return SCSI_DISK8_MAJOR + major_idx - 8; default: BUG(); return 0; /* shut up gcc */ } } #ifdef CONFIG_BLK_SED_OPAL static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) { struct scsi_disk *sdkp = data; struct scsi_device *sdev = sdkp->device; u8 cdb[12] = { 0, }; const struct scsi_exec_args exec_args = { .req_flags = BLK_MQ_REQ_PM, }; int ret; cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN; cdb[1] = secp; put_unaligned_be16(spsp, &cdb[2]); put_unaligned_be32(len, &cdb[6]); ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, buffer, len, SD_TIMEOUT, sdkp->max_retries, &exec_args); return ret <= 0 ? ret : -EIO; } #endif /* CONFIG_BLK_SED_OPAL */ /* * Look up the DIX operation based on whether the command is read or * write and whether dix and dif are enabled. */ static unsigned int sd_prot_op(bool write, bool dix, bool dif) { /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */ static const unsigned int ops[] = { /* wrt dix dif */ SCSI_PROT_NORMAL, /* 0 0 0 */ SCSI_PROT_READ_STRIP, /* 0 0 1 */ SCSI_PROT_READ_INSERT, /* 0 1 0 */ SCSI_PROT_READ_PASS, /* 0 1 1 */ SCSI_PROT_NORMAL, /* 1 0 0 */ SCSI_PROT_WRITE_INSERT, /* 1 0 1 */ SCSI_PROT_WRITE_STRIP, /* 1 1 0 */ SCSI_PROT_WRITE_PASS, /* 1 1 1 */ }; return ops[write << 2 | dix << 1 | dif]; } /* * Returns a mask of the protection flags that are valid for a given DIX * operation. */ static unsigned int sd_prot_flag_mask(unsigned int prot_op) { static const unsigned int flag_mask[] = { [SCSI_PROT_NORMAL] = 0, [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | SCSI_PROT_GUARD_CHECK | SCSI_PROT_REF_CHECK | SCSI_PROT_REF_INCREMENT, [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | SCSI_PROT_IP_CHECKSUM, [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | SCSI_PROT_GUARD_CHECK | SCSI_PROT_REF_CHECK | SCSI_PROT_REF_INCREMENT | SCSI_PROT_IP_CHECKSUM, [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | SCSI_PROT_REF_INCREMENT, [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | SCSI_PROT_REF_CHECK | SCSI_PROT_REF_INCREMENT | SCSI_PROT_IP_CHECKSUM, [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | SCSI_PROT_GUARD_CHECK | SCSI_PROT_REF_CHECK | SCSI_PROT_REF_INCREMENT | SCSI_PROT_IP_CHECKSUM, }; return flag_mask[prot_op]; } static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, unsigned int dix, unsigned int dif) { struct request *rq = scsi_cmd_to_rq(scmd); struct bio *bio = rq->bio; unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif); unsigned int protect = 0; if (dix) { /* DIX Type 0, 1, 2, 3 */ if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM; if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; } if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) scmd->prot_flags |= SCSI_PROT_REF_CHECK; } if (dif) { /* DIX/DIF Type 1, 2, 3 */ scmd->prot_flags |= SCSI_PROT_TRANSFER_PI; if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) protect = 3 << 5; /* Disable target PI checking */ else protect = 1 << 5; /* Enable target PI checking */ } scsi_set_prot_op(scmd, prot_op); scsi_set_prot_type(scmd, dif); scmd->prot_flags &= sd_prot_flag_mask(prot_op); return protect; } static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) { struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; unsigned int max_blocks 
= 0; q->limits.discard_alignment = sdkp->unmap_alignment * logical_block_size; q->limits.discard_granularity = max(sdkp->physical_block_size, sdkp->unmap_granularity * logical_block_size); sdkp->provisioning_mode = mode; switch (mode) { case SD_LBP_FULL: case SD_LBP_DISABLE: blk_queue_max_discard_sectors(q, 0); return; case SD_LBP_UNMAP: max_blocks = min_not_zero(sdkp->max_unmap_blocks, (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS16: if (sdkp->device->unmap_limit_for_ws) max_blocks = sdkp->max_unmap_blocks; else max_blocks = sdkp->max_ws_blocks; max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS10: if (sdkp->device->unmap_limit_for_ws) max_blocks = sdkp->max_unmap_blocks; else max_blocks = sdkp->max_ws_blocks; max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); break; case SD_LBP_ZERO: max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS10_BLOCKS); break; } blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); } static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) { struct page *page; page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!page) return NULL; clear_highpage(page); bvec_set_page(&rq->special_vec, page, data_len, 0); rq->rq_flags |= RQF_SPECIAL_PAYLOAD; return bvec_virt(&rq->special_vec); } static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) { struct scsi_device *sdp = cmd->device; struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); unsigned int data_len = 24; char *buf; buf = sd_set_special_bvec(rq, data_len); if (!buf) return BLK_STS_RESOURCE; cmd->cmd_len = 10; cmd->cmnd[0] = UNMAP; cmd->cmnd[8] = 24; put_unaligned_be16(6 + 16, &buf[0]); put_unaligned_be16(16, &buf[2]); put_unaligned_be64(lba, &buf[8]); put_unaligned_be32(nr_blocks, &buf[16]); cmd->allowed = sdkp->max_retries; cmd->transfersize = data_len; rq->timeout = SD_TIMEOUT; return scsi_alloc_sgtables(cmd); } static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) { struct scsi_device *sdp = cmd->device; struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); u32 data_len = sdp->sector_size; if (!sd_set_special_bvec(rq, data_len)) return BLK_STS_RESOURCE; cmd->cmd_len = 16; cmd->cmnd[0] = WRITE_SAME_16; if (unmap) cmd->cmnd[1] = 0x8; /* UNMAP */ put_unaligned_be64(lba, &cmd->cmnd[2]); put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); cmd->allowed = sdkp->max_retries; cmd->transfersize = data_len; rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; return scsi_alloc_sgtables(cmd); } static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap) { struct scsi_device *sdp = cmd->device; struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); u32 data_len = sdp->sector_size; if (!sd_set_special_bvec(rq, data_len)) return BLK_STS_RESOURCE; cmd->cmd_len = 10; cmd->cmnd[0] = WRITE_SAME; if (unmap) cmd->cmnd[1] = 0x8; /* UNMAP */ put_unaligned_be32(lba, &cmd->cmnd[2]); put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); cmd->allowed = sdkp->max_retries; cmd->transfersize = data_len; rq->timeout = unmap ? 
SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; return scsi_alloc_sgtables(cmd); } static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->q->disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); if (!(rq->cmd_flags & REQ_NOUNMAP)) { switch (sdkp->zeroing_mode) { case SD_ZERO_WS16_UNMAP: return sd_setup_write_same16_cmnd(cmd, true); case SD_ZERO_WS10_UNMAP: return sd_setup_write_same10_cmnd(cmd, true); } } if (sdp->no_write_same) { rq->rq_flags |= RQF_QUIET; return BLK_STS_TARGET; } if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) return sd_setup_write_same16_cmnd(cmd, false); return sd_setup_write_same10_cmnd(cmd, false); } static void sd_config_write_same(struct scsi_disk *sdkp) { struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; if (sdkp->device->no_write_same) { sdkp->max_ws_blocks = 0; goto out; } /* Some devices can not handle block counts above 0xffff despite * supporting WRITE SAME(16). Consequently we default to 64k * blocks per I/O unless the device explicitly advertises a * bigger limit. */ if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS16_BLOCKS); else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS10_BLOCKS); else { sdkp->device->no_write_same = 1; sdkp->max_ws_blocks = 0; } if (sdkp->lbprz && sdkp->lbpws) sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP; else if (sdkp->lbprz && sdkp->lbpws10) sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP; else if (sdkp->max_ws_blocks) sdkp->zeroing_mode = SD_ZERO_WS; else sdkp->zeroing_mode = SD_ZERO_WRITE; if (sdkp->max_ws_blocks && sdkp->physical_block_size > logical_block_size) { /* * Reporting a maximum number of blocks that is not aligned * on the device physical size would cause a large write same * request to be split into physically unaligned chunks by * __blkdev_issue_write_zeroes() even if the caller of this * functions took care to align the large request. So make sure * the maximum reported is aligned to the device physical block * size. This is only an optional optimization for regular * disks, but this is mandatory to avoid failure of large write * same requests directed at sequential write required zones of * host-managed ZBC disks. 
*/ sdkp->max_ws_blocks = round_down(sdkp->max_ws_blocks, bytes_to_logical(sdkp->device, sdkp->physical_block_size)); } out: blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * (logical_block_size >> 9)); } static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); /* flush requests don't perform I/O, zero the S/G table */ memset(&cmd->sdb, 0, sizeof(cmd->sdb)); if (cmd->device->use_16_for_sync) { cmd->cmnd[0] = SYNCHRONIZE_CACHE_16; cmd->cmd_len = 16; } else { cmd->cmnd[0] = SYNCHRONIZE_CACHE; cmd->cmd_len = 10; } cmd->transfersize = 0; cmd->allowed = sdkp->max_retries; rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; return BLK_STS_OK; } static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags, unsigned int dld) { cmd->cmd_len = SD_EXT_CDB_SIZE; cmd->cmnd[0] = VARIABLE_LENGTH_CMD; cmd->cmnd[7] = 0x18; /* Additional CDB len */ cmd->cmnd[9] = write ? WRITE_32 : READ_32; cmd->cmnd[10] = flags; cmd->cmnd[11] = dld & 0x07; put_unaligned_be64(lba, &cmd->cmnd[12]); put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); return BLK_STS_OK; } static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags, unsigned int dld) { cmd->cmd_len = 16; cmd->cmnd[0] = write ? WRITE_16 : READ_16; cmd->cmnd[1] = flags | ((dld >> 2) & 0x01); cmd->cmnd[14] = (dld & 0x03) << 6; cmd->cmnd[15] = 0; put_unaligned_be64(lba, &cmd->cmnd[2]); put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); return BLK_STS_OK; } static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags) { cmd->cmd_len = 10; cmd->cmnd[0] = write ? WRITE_10 : READ_10; cmd->cmnd[1] = flags; cmd->cmnd[6] = 0; cmd->cmnd[9] = 0; put_unaligned_be32(lba, &cmd->cmnd[2]); put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); return BLK_STS_OK; } static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags) { /* Avoid that 0 blocks gets translated into 256 blocks. */ if (WARN_ON_ONCE(nr_blocks == 0)) return BLK_STS_IOERR; if (unlikely(flags & 0x8)) { /* * This happens only if this drive failed 10byte rw * command with ILLEGAL_REQUEST during operation and * thus turned off use_10_for_rw. */ scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n"); return BLK_STS_IOERR; } cmd->cmd_len = 6; cmd->cmnd[0] = write ? WRITE_6 : READ_6; cmd->cmnd[1] = (lba >> 16) & 0x1f; cmd->cmnd[2] = (lba >> 8) & 0xff; cmd->cmnd[3] = lba & 0xff; cmd->cmnd[4] = nr_blocks; cmd->cmnd[5] = 0; return BLK_STS_OK; } /* * Check if a command has a duration limit set. If it does, and the target * device supports CDL and the feature is enabled, return the limit * descriptor index to use. Return 0 (no limit) otherwise. */ static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) { struct scsi_device *sdp = sdkp->device; int hint; if (!sdp->cdl_supported || !sdp->cdl_enable) return 0; /* * Use "no limit" if the request ioprio does not specify a duration * limit hint. 
*/ hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd))); if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 || hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7) return 0; return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; } static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->q->disk); sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); sector_t threshold; unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); unsigned int mask = logical_to_sectors(sdp, 1) - 1; bool write = rq_data_dir(rq) == WRITE; unsigned char protect, fua; unsigned int dld; blk_status_t ret; unsigned int dif; bool dix; ret = scsi_alloc_sgtables(cmd); if (ret != BLK_STS_OK) return ret; ret = BLK_STS_IOERR; if (!scsi_device_online(sdp) || sdp->changed) { scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); goto fail; } if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) { scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); goto fail; } if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); goto fail; } /* * Some SD card readers can't handle accesses which touch the * last one or two logical blocks. Split accesses as needed. */ threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS; if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) { if (lba < threshold) { /* Access up to the threshold but not beyond */ nr_blocks = threshold - lba; } else { /* Access only a single logical block */ nr_blocks = 1; } } if (req_op(rq) == REQ_OP_ZONE_APPEND) { ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks); if (ret) goto fail; } fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0; dix = scsi_prot_sg_count(cmd); dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); dld = sd_cdl_dld(sdkp, cmd); if (dif || dix) protect = sd_setup_protect_cmnd(cmd, dix, dif); else protect = 0; if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || sdp->use_10_for_rw || protect) { ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, protect | fua); } else { ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks, protect | fua); } if (unlikely(ret != BLK_STS_OK)) goto fail; /* * We shouldn't disconnect in the middle of a sector, so with a dumb * host adapter, it's safe to assume that we can at least transfer * this many bytes between each connect / disconnect. */ cmd->transfersize = sdp->sector_size; cmd->underflow = nr_blocks << 9; cmd->allowed = sdkp->max_retries; cmd->sdb.length = nr_blocks * sdp->sector_size; SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, cmd, "%s: block=%llu, count=%d\n", __func__, (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq))); SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, cmd, "%s %d/%u 512 byte blocks.\n", write ? "writing" : "reading", nr_blocks, blk_rq_sectors(rq))); /* * This indicates that the command is ready from our end to be queued. 
*/ return BLK_STS_OK; fail: scsi_free_sgtables(cmd); return ret; } static blk_status_t sd_init_command(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); switch (req_op(rq)) { case REQ_OP_DISCARD: switch (scsi_disk(rq->q->disk)->provisioning_mode) { case SD_LBP_UNMAP: return sd_setup_unmap_cmnd(cmd); case SD_LBP_WS16: return sd_setup_write_same16_cmnd(cmd, true); case SD_LBP_WS10: return sd_setup_write_same10_cmnd(cmd, true); case SD_LBP_ZERO: return sd_setup_write_same10_cmnd(cmd, false); default: return BLK_STS_TARGET; } case REQ_OP_WRITE_ZEROES: return sd_setup_write_zeroes_cmnd(cmd); case REQ_OP_FLUSH: return sd_setup_flush_cmnd(cmd); case REQ_OP_READ: case REQ_OP_WRITE: case REQ_OP_ZONE_APPEND: return sd_setup_read_write_cmnd(cmd); case REQ_OP_ZONE_RESET: return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, false); case REQ_OP_ZONE_RESET_ALL: return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, true); case REQ_OP_ZONE_OPEN: return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false); case REQ_OP_ZONE_CLOSE: return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false); case REQ_OP_ZONE_FINISH: return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false); default: WARN_ON_ONCE(1); return BLK_STS_NOTSUPP; } } static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = scsi_cmd_to_rq(SCpnt); if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) mempool_free(rq->special_vec.bv_page, sd_page_pool); } static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) { if (sdkp->device->removable || sdkp->write_prot) { if (disk_check_media_change(disk)) return true; } /* * Force a full rescan after ioctl(BLKRRPART). While the disk state has * nothing to do with partitions, BLKRRPART is used to force a full * revalidate after things like a format for historical reasons. */ return test_bit(GD_NEED_PART_SCAN, &disk->state); } /** * sd_open - open a scsi disk device * @disk: disk to open * @mode: open mode * * Returns 0 if successful. Returns a negated errno value in case * of error. * * Note: This can be called from a user context (e.g. fsck(1) ) * or from within the kernel (e.g. as a result of a mount(1) ). * In the latter case @inode and @filp carry an abridged amount * of information as noted above. * * Locking: called with disk->open_mutex held. **/ static int sd_open(struct gendisk *disk, blk_mode_t mode) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdev = sdkp->device; int retval; if (scsi_device_get(sdev)) return -ENXIO; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n")); /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. */ retval = -ENXIO; if (!scsi_block_when_processing_errors(sdev)) goto error_out; if (sd_need_revalidate(disk, sdkp)) sd_revalidate_disk(disk); /* * If the drive is empty, just let the open fail. */ retval = -ENOMEDIUM; if (sdev->removable && !sdkp->media_present && !(mode & BLK_OPEN_NDELAY)) goto error_out; /* * If the device has the write protect tab set, have the open fail * if the user expects to be able to write to the thing. */ retval = -EROFS; if (sdkp->write_prot && (mode & BLK_OPEN_WRITE)) goto error_out; /* * It is possible that the disk changing stuff resulted in * the device being taken offline. If this is the case, * report this to the user, and don't pretend that the * open actually succeeded. 
*/ retval = -ENXIO; if (!scsi_device_online(sdev)) goto error_out; if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) { if (scsi_block_when_processing_errors(sdev)) scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); } return 0; error_out: scsi_device_put(sdev); return retval; } /** * sd_release - invoked when the (last) close(2) is called on this * scsi disk. * @disk: disk to release * * Returns 0. * * Note: may block (uninterruptible) if error recovery is underway * on this disk. * * Locking: called with disk->open_mutex held. **/ static void sd_release(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdev = sdkp->device; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { if (scsi_block_when_processing_errors(sdev)) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); } scsi_device_put(sdev); } static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); struct scsi_device *sdp = sdkp->device; struct Scsi_Host *host = sdp->host; sector_t capacity = logical_to_sectors(sdp, sdkp->capacity); int diskinfo[4]; /* default to most commonly used values */ diskinfo[0] = 0x40; /* 1 << 6 */ diskinfo[1] = 0x20; /* 1 << 5 */ diskinfo[2] = capacity >> 11; /* override with calculated, extended default, or driver values */ if (host->hostt->bios_param) host->hostt->bios_param(sdp, bdev, capacity, diskinfo); else scsicam_bios_param(bdev, capacity, diskinfo); geo->heads = diskinfo[0]; geo->sectors = diskinfo[1]; geo->cylinders = diskinfo[2]; return 0; } /** * sd_ioctl - process an ioctl * @bdev: target block device * @mode: open mode * @cmd: ioctl command number * @arg: this is third argument given to ioctl(2) system call. * Often contains a pointer. * * Returns 0 if successful (some ioctls return positive numbers on * success as well). Returns a negated errno value in case of error. * * Note: most ioctls are forward onto the block subsystem or further * down in the scsi subsystem. **/ static int sd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; void __user *p = (void __user *)arg; int error; SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " "cmd=0x%x\n", disk->disk_name, cmd)); if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) return -ENOIOCTLCMD; /* * If we are in the middle of error recovery, don't let anyone * else try and use this device. Also, if error recovery fails, it * may try and take the device offline, in which case all further * access to the device is prohibited. 
*/ error = scsi_ioctl_block_when_processing_errors(sdp, cmd, (mode & BLK_OPEN_NDELAY)); if (error) return error; if (is_sed_ioctl(cmd)) return sed_ioctl(sdkp->opal_dev, cmd, p); return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p); } static void set_media_not_present(struct scsi_disk *sdkp) { if (sdkp->media_present) sdkp->device->changed = 1; if (sdkp->device->removable) { sdkp->media_present = 0; sdkp->capacity = 0; } } static int media_not_present(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { if (!scsi_sense_valid(sshdr)) return 0; /* not invoked for commands that could return deferred errors */ switch (sshdr->sense_key) { case UNIT_ATTENTION: case NOT_READY: /* medium not present */ if (sshdr->asc == 0x3A) { set_media_not_present(sdkp); return 1; } } return 0; } /** * sd_check_events - check media events * @disk: kernel device descriptor * @clearing: disk events currently being cleared * * Returns mask of DISK_EVENT_*. * * Note: this function is invoked from the block subsystem. **/ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { struct scsi_disk *sdkp = disk->private_data; struct scsi_device *sdp; int retval; bool disk_changed; if (!sdkp) return 0; sdp = sdkp->device; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); /* * If the device is offline, don't send any commands - just pretend as * if the command failed. If the device ever comes back online, we * can deal with it then. It is only because of unrecoverable errors * that we would ever take a device offline in the first place. */ if (!scsi_device_online(sdp)) { set_media_not_present(sdkp); goto out; } /* * Using TEST_UNIT_READY enables differentiation between drive with * no cartridge loaded - NOT READY, drive with changed cartridge - * UNIT ATTENTION, or with same cartridge - GOOD STATUS. * * Drives that auto spin down. eg iomega jaz 1G, will be started * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever * sd_revalidate() is called. */ if (scsi_block_when_processing_errors(sdp)) { struct scsi_sense_hdr sshdr = { 0, }; retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries, &sshdr); /* failed to execute TUR, assume media not present */ if (retval < 0 || host_byte(retval)) { set_media_not_present(sdkp); goto out; } if (media_not_present(sdkp, &sshdr)) goto out; } /* * For removable scsi disk we have to recognise the presence * of a disk in the drive. */ if (!sdkp->media_present) sdp->changed = 1; sdkp->media_present = 1; out: /* * sdp->changed is set under the following conditions: * * Medium present state has changed in either direction. * Device has indicated UNIT_ATTENTION. */ disk_changed = sdp->changed; sdp->changed = 0; return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0; } static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { int retries, res; struct scsi_device *sdp = sdkp->device; const int timeout = sdp->request_queue->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; struct scsi_sense_hdr my_sshdr; const struct scsi_exec_args exec_args = { .req_flags = BLK_MQ_REQ_PM, /* caller might not be interested in sense, but we need it */ .sshdr = sshdr ? : &my_sshdr, }; if (!scsi_device_online(sdp)) return -ENODEV; sshdr = exec_args.sshdr; for (retries = 3; retries > 0; --retries) { unsigned char cmd[16] = { 0 }; if (sdp->use_16_for_sync) cmd[0] = SYNCHRONIZE_CACHE_16; else cmd[0] = SYNCHRONIZE_CACHE; /* * Leave the rest of the command zero to indicate * flush everything. 
*/ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout, sdkp->max_retries, &exec_args); if (res == 0) break; } if (res) { sd_print_result(sdkp, "Synchronize Cache(10) failed", res); if (res < 0) return res; if (scsi_status_is_check_condition(res) && scsi_sense_valid(sshdr)) { sd_print_sense_hdr(sdkp, sshdr); /* we need to evaluate the error return */ if (sshdr->asc == 0x3a || /* medium not present */ sshdr->asc == 0x20 || /* invalid command */ (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */ /* this is no error here */ return 0; } switch (host_byte(res)) { /* ignore errors due to racing a disconnection */ case DID_BAD_TARGET: case DID_NO_CONNECT: return 0; /* signal the upper layer it might try again */ case DID_BUS_BUSY: case DID_IMM_RETRY: case DID_REQUEUE: case DID_SOFT_ERROR: return -EBUSY; default: return -EIO; } } return 0; } static void sd_rescan(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); sd_revalidate_disk(sdkp->disk); } static int sd_get_unique_id(struct gendisk *disk, u8 id[16], enum blk_unique_id type) { struct scsi_device *sdev = scsi_disk(disk)->device; const struct scsi_vpd *vpd; const unsigned char *d; int ret = -ENXIO, len; rcu_read_lock(); vpd = rcu_dereference(sdev->vpd_pg83); if (!vpd) goto out_unlock; ret = -EINVAL; for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) { /* we only care about designators with LU association */ if (((d[1] >> 4) & 0x3) != 0x00) continue; if ((d[1] & 0xf) != type) continue; /* * Only exit early if a 16-byte descriptor was found. Otherwise * keep looking as one with more entropy might still show up. */ len = d[3]; if (len != 8 && len != 12 && len != 16) continue; ret = len; memcpy(id, d + 4, len); if (len == 16) break; } out_unlock: rcu_read_unlock(); return ret; } static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) { switch (host_byte(result)) { case DID_TRANSPORT_MARGINAL: case DID_TRANSPORT_DISRUPTED: case DID_BUS_BUSY: return PR_STS_RETRY_PATH_FAILURE; case DID_NO_CONNECT: return PR_STS_PATH_FAILED; case DID_TRANSPORT_FAILFAST: return PR_STS_PATH_FAST_FAILED; } switch (status_byte(result)) { case SAM_STAT_RESERVATION_CONFLICT: return PR_STS_RESERVATION_CONFLICT; case SAM_STAT_CHECK_CONDITION: if (!scsi_sense_valid(sshdr)) return PR_STS_IOERR; if (sshdr->sense_key == ILLEGAL_REQUEST && (sshdr->asc == 0x26 || sshdr->asc == 0x24)) return -EINVAL; fallthrough; default: return PR_STS_IOERR; } } static int sd_pr_in_command(struct block_device *bdev, u8 sa, unsigned char *data, int data_len) { struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); struct scsi_device *sdev = sdkp->device; struct scsi_sense_hdr sshdr; u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa }; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; int result; put_unaligned_be16(data_len, &cmd[7]); result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len, SD_TIMEOUT, sdkp->max_retries, &exec_args); if (scsi_status_is_check_condition(result) && scsi_sense_valid(&sshdr)) { sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); scsi_print_sense_hdr(sdev, NULL, &sshdr); } if (result <= 0) return result; return sd_scsi_to_pr_err(&sshdr, result); } static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) { int result, i, data_offset, num_copy_keys; u32 num_keys = keys_info->num_keys; int data_len = num_keys * 8 + 8; u8 *data; data = kzalloc(data_len, GFP_KERNEL); if (!data) return -ENOMEM; result = sd_pr_in_command(bdev, READ_KEYS, data, 
data_len); if (result) goto free_data; keys_info->generation = get_unaligned_be32(&data[0]); keys_info->num_keys = get_unaligned_be32(&data[4]) / 8; data_offset = 8; num_copy_keys = min(num_keys, keys_info->num_keys); for (i = 0; i < num_copy_keys; i++) { keys_info->keys[i] = get_unaligned_be64(&data[data_offset]); data_offset += 8; } free_data: kfree(data); return result; } static int sd_pr_read_reservation(struct block_device *bdev, struct pr_held_reservation *rsv) { struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); struct scsi_device *sdev = sdkp->device; u8 data[24] = { }; int result, len; result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data)); if (result) return result; len = get_unaligned_be32(&data[4]); if (!len) return 0; /* Make sure we have at least the key and type */ if (len < 14) { sdev_printk(KERN_INFO, sdev, "READ RESERVATION failed due to short return buffer of %d bytes\n", len); return -EINVAL; } rsv->generation = get_unaligned_be32(&data[0]); rsv->key = get_unaligned_be64(&data[8]); rsv->type = scsi_pr_type_to_block(data[21] & 0x0f); return 0; } static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key, u64 sa_key, enum scsi_pr_type type, u8 flags) { struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); struct scsi_device *sdev = sdkp->device; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; int result; u8 cmd[16] = { 0, }; u8 data[24] = { 0, }; cmd[0] = PERSISTENT_RESERVE_OUT; cmd[1] = sa; cmd[2] = type; put_unaligned_be32(sizeof(data), &cmd[5]); put_unaligned_be64(key, &data[0]); put_unaligned_be64(sa_key, &data[8]); data[20] = flags; result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data, sizeof(data), SD_TIMEOUT, sdkp->max_retries, &exec_args); if (scsi_status_is_check_condition(result) && scsi_sense_valid(&sshdr)) { sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); scsi_print_sense_hdr(sdev, NULL, &sshdr); } if (result <= 0) return result; return sd_scsi_to_pr_err(&sshdr, result); } static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, u32 flags) { if (flags & ~PR_FL_IGNORE_KEY) return -EOPNOTSUPP; return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, old_key, new_key, 0, (1 << 0) /* APTPL */); } static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, u32 flags) { if (flags) return -EOPNOTSUPP; return sd_pr_out_command(bdev, 0x01, key, 0, block_pr_type_to_scsi(type), 0); } static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { return sd_pr_out_command(bdev, 0x02, key, 0, block_pr_type_to_scsi(type), 0); } static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) { return sd_pr_out_command(bdev, abort ? 
0x05 : 0x04, old_key, new_key, block_pr_type_to_scsi(type), 0); } static int sd_pr_clear(struct block_device *bdev, u64 key) { return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0); } static const struct pr_ops sd_pr_ops = { .pr_register = sd_pr_register, .pr_reserve = sd_pr_reserve, .pr_release = sd_pr_release, .pr_preempt = sd_pr_preempt, .pr_clear = sd_pr_clear, .pr_read_keys = sd_pr_read_keys, .pr_read_reservation = sd_pr_read_reservation, }; static void scsi_disk_free_disk(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); put_device(&sdkp->disk_dev); } static const struct block_device_operations sd_fops = { .owner = THIS_MODULE, .open = sd_open, .release = sd_release, .ioctl = sd_ioctl, .getgeo = sd_getgeo, .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = sd_check_events, .unlock_native_capacity = sd_unlock_native_capacity, .report_zones = sd_zbc_report_zones, .get_unique_id = sd_get_unique_id, .free_disk = scsi_disk_free_disk, .pr_ops = &sd_pr_ops, }; /** * sd_eh_reset - reset error handling callback * @scmd: sd-issued command that has failed * * This function is called by the SCSI midlayer before starting * SCSI EH. When counting medium access failures we have to be * careful to register it only only once per device and SCSI EH run; * there might be several timed out commands which will cause the * 'max_medium_access_timeouts' counter to trigger after the first * SCSI EH run already and set the device to offline. * So this function resets the internal counter before starting SCSI EH. **/ static void sd_eh_reset(struct scsi_cmnd *scmd) { struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk); /* New SCSI EH run, reset gate variable */ sdkp->ignore_medium_access_errors = false; } /** * sd_eh_action - error handling callback * @scmd: sd-issued command that has failed * @eh_disp: The recovery disposition suggested by the midlayer * * This function is called by the SCSI midlayer upon completion of an * error test command (currently TEST UNIT READY). The result of sending * the eh command is passed in eh_disp. We're looking for devices that * fail medium access commands but are OK with non access commands like * test unit ready (so wrongly see the device as having a successful * recovery) **/ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) { struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk); struct scsi_device *sdev = scmd->device; if (!scsi_device_online(sdev) || !scsi_medium_access_command(scmd) || host_byte(scmd->result) != DID_TIME_OUT || eh_disp != SUCCESS) return eh_disp; /* * The device has timed out executing a medium access command. * However, the TEST UNIT READY command sent during error * handling completed successfully. Either the device is in the * process of recovering or has it suffered an internal failure * that prevents access to the storage medium. */ if (!sdkp->ignore_medium_access_errors) { sdkp->medium_access_timed_out++; sdkp->ignore_medium_access_errors = true; } /* * If the device keeps failing read/write commands but TEST UNIT * READY always completes successfully we assume that medium * access is no longer possible and take the device offline. */ if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) { scmd_printk(KERN_ERR, scmd, "Medium access timeout failure. 
Offlining disk!\n"); mutex_lock(&sdev->state_mutex); scsi_device_set_state(sdev, SDEV_OFFLINE); mutex_unlock(&sdev->state_mutex); return SUCCESS; } return eh_disp; } static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) { struct request *req = scsi_cmd_to_rq(scmd); struct scsi_device *sdev = scmd->device; unsigned int transferred, good_bytes; u64 start_lba, end_lba, bad_lba; /* * Some commands have a payload smaller than the device logical * block size (e.g. INQUIRY on a 4K disk). */ if (scsi_bufflen(scmd) <= sdev->sector_size) return 0; /* Check if we have a 'bad_lba' information */ if (!scsi_get_sense_info_fld(scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &bad_lba)) return 0; /* * If the bad lba was reported incorrectly, we have no idea where * the error is. */ start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); if (bad_lba < start_lba || bad_lba >= end_lba) return 0; /* * resid is optional but mostly filled in. When it's unused, * its value is zero, so we assume the whole buffer transferred */ transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); /* This computation should always be done in terms of the * resolution of the device's medium. */ good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); return min(good_bytes, transferred); } /** * sd_done - bottom half handler: called when the lower level * driver has completed (successfully or otherwise) a scsi command. * @SCpnt: mid-level's per command structure. * * Note: potentially run from within an ISR. Must not block. **/ static int sd_done(struct scsi_cmnd *SCpnt) { int result = SCpnt->result; unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); unsigned int sector_size = SCpnt->device->sector_size; unsigned int resid; struct scsi_sense_hdr sshdr; struct request *req = scsi_cmd_to_rq(SCpnt); struct scsi_disk *sdkp = scsi_disk(req->q->disk); int sense_valid = 0; int sense_deferred = 0; switch (req_op(req)) { case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_RESET_ALL: case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_CLOSE: case REQ_OP_ZONE_FINISH: if (!result) { good_bytes = blk_rq_bytes(req); scsi_set_resid(SCpnt, 0); } else { good_bytes = 0; scsi_set_resid(SCpnt, blk_rq_bytes(req)); } break; default: /* * In case of bogus fw or device, we could end up having * an unaligned partial completion. Check this here and force * alignment. */ resid = scsi_get_resid(SCpnt); if (resid & (sector_size - 1)) { sd_printk(KERN_INFO, sdkp, "Unaligned partial completion (resid=%u, sector_sz=%u)\n", resid, sector_size); scsi_print_command(SCpnt); resid = min(scsi_bufflen(SCpnt), round_up(resid, sector_size)); scsi_set_resid(SCpnt, resid); } } if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); if (sense_valid) sense_deferred = scsi_sense_is_deferred(&sshdr); } sdkp->medium_access_timed_out = 0; if (!scsi_status_is_check_condition(result) && (!sense_valid || sense_deferred)) goto out; switch (sshdr.sense_key) { case HARDWARE_ERROR: case MEDIUM_ERROR: good_bytes = sd_completed_bytes(SCpnt); break; case RECOVERED_ERROR: good_bytes = scsi_bufflen(SCpnt); break; case NO_SENSE: /* This indicates a false check condition, so ignore it. An * unknown amount of data was transferred so treat it as an * error. 
*/ SCpnt->result = 0; memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); break; case ABORTED_COMMAND: if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ good_bytes = sd_completed_bytes(SCpnt); break; case ILLEGAL_REQUEST: switch (sshdr.asc) { case 0x10: /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); break; case 0x20: /* INVALID COMMAND OPCODE */ case 0x24: /* INVALID FIELD IN CDB */ switch (SCpnt->cmnd[0]) { case UNMAP: sd_config_discard(sdkp, SD_LBP_DISABLE); break; case WRITE_SAME_16: case WRITE_SAME: if (SCpnt->cmnd[1] & 8) { /* UNMAP */ sd_config_discard(sdkp, SD_LBP_DISABLE); } else { sdkp->device->no_write_same = 1; sd_config_write_same(sdkp); req->rq_flags |= RQF_QUIET; } break; } } break; default: break; } out: if (sd_is_zoned(sdkp)) good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, "sd_done: completed %d of %d bytes\n", good_bytes, scsi_bufflen(SCpnt))); return good_bytes; } /* * spinup disk - called only in sd_revalidate_disk() */ static void sd_spinup_disk(struct scsi_disk *sdkp) { unsigned char cmd[10]; unsigned long spintime_expire = 0; int retries, spintime; unsigned int the_result; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; int sense_valid = 0; spintime = 0; /* Spin up drives, as required. Only do this at boot time */ /* Spinup needs to be done for module loads too. */ do { retries = 0; do { bool media_was_present = sdkp->media_present; cmd[0] = TEST_UNIT_READY; memset((void *) &cmd[1], 0, 9); the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT, sdkp->max_retries, &exec_args); /* * If the drive has indicated to us that it * doesn't have any media in it, don't bother * with any more polling. */ if (media_not_present(sdkp, &sshdr)) { if (media_was_present) sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); return; } if (the_result) sense_valid = scsi_sense_valid(&sshdr); retries++; } while (retries < 3 && (!scsi_status_is_good(the_result) || (scsi_status_is_check_condition(the_result) && sense_valid && sshdr.sense_key == UNIT_ATTENTION))); if (!scsi_status_is_check_condition(the_result)) { /* no sense, TUR either succeeded or failed * with a status error */ if(!spintime && !scsi_status_is_good(the_result)) { sd_print_result(sdkp, "Test Unit Ready failed", the_result); } break; } /* * The device does not want the automatic start to be issued. */ if (sdkp->device->no_start_on_add) break; if (sense_valid && sshdr.sense_key == NOT_READY) { if (sshdr.asc == 4 && sshdr.ascq == 3) break; /* manual intervention required */ if (sshdr.asc == 4 && sshdr.ascq == 0xb) break; /* standby */ if (sshdr.asc == 4 && sshdr.ascq == 0xc) break; /* unavailable */ if (sshdr.asc == 4 && sshdr.ascq == 0x1b) break; /* sanitize in progress */ /* * Issue command to spin up drive when not ready */ if (!spintime) { sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); cmd[0] = START_STOP; cmd[1] = 1; /* Return immediately */ memset((void *) &cmd[2], 0, 8); cmd[4] = 1; /* Start spin cycle */ if (sdkp->device->start_stop_pwr_cond) cmd[4] |= 1 << 4; scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT, sdkp->max_retries, &exec_args); spintime_expire = jiffies + 100 * HZ; spintime = 1; } /* Wait 1 second for next try */ msleep(1000); printk(KERN_CONT "."); /* * Wait for USB flash devices with slow firmware. * Yes, this sense key/ASC combination shouldn't * occur here. 
It's characteristic of these devices. */ } else if (sense_valid && sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x28) { if (!spintime) { spintime_expire = jiffies + 5 * HZ; spintime = 1; } /* Wait 1 second for next try */ msleep(1000); } else { /* we don't understand the sense code, so it's * probably pointless to loop */ if(!spintime) { sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); sd_print_sense_hdr(sdkp, &sshdr); } break; } } while (spintime && time_before_eq(jiffies, spintime_expire)); if (spintime) { if (scsi_status_is_good(the_result)) printk(KERN_CONT "ready\n"); else printk(KERN_CONT "not responding...\n"); } } /* * Determine whether disk supports Data Integrity Field. */ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) { struct scsi_device *sdp = sdkp->device; u8 type; if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { sdkp->protection_type = 0; return 0; } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ if (type > T10_PI_TYPE3_PROTECTION) { sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \ " protection type %u. Disabling disk!\n", type); sdkp->protection_type = 0; return -ENODEV; } sdkp->protection_type = type; return 0; } static void sd_config_protection(struct scsi_disk *sdkp) { struct scsi_device *sdp = sdkp->device; sd_dif_config_host(sdkp); if (!sdkp->protection_type) return; if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling DIF Type %u protection\n", sdkp->protection_type); sdkp->protection_type = 0; } sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n", sdkp->protection_type); } static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, struct scsi_sense_hdr *sshdr, int sense_valid, int the_result) { if (sense_valid) sd_print_sense_hdr(sdkp, sshdr); else sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); /* * Set dirty bit for removable devices if not ready - * sometimes drives will not report this properly. */ if (sdp->removable && sense_valid && sshdr->sense_key == NOT_READY) set_media_not_present(sdkp); /* * We used to set media_present to 0 here to indicate no media * in the drive, but some drives fail read capacity even with * media present, so we can't do that. 
*/ sdkp->capacity = 0; /* unknown mapped to zero - as usual */ } #define RC16_LEN 32 #if RC16_LEN > SD_BUF_SIZE #error RC16_LEN must not be more than SD_BUF_SIZE #endif #define READ_CAPACITY_RETRIES_ON_RESET 10 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, unsigned char *buffer) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; int sense_valid = 0; int the_result; int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; unsigned int alignment; unsigned long long lba; unsigned sector_size; if (sdp->no_read_capacity_16) return -EINVAL; do { memset(cmd, 0, 16); cmd[0] = SERVICE_ACTION_IN_16; cmd[1] = SAI_READ_CAPACITY_16; cmd[13] = RC16_LEN; memset(buffer, 0, RC16_LEN); the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer, RC16_LEN, SD_TIMEOUT, sdkp->max_retries, &exec_args); if (media_not_present(sdkp, &sshdr)) return -ENODEV; if (the_result > 0) { sense_valid = scsi_sense_valid(&sshdr); if (sense_valid && sshdr.sense_key == ILLEGAL_REQUEST && (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) /* Invalid Command Operation Code or * Invalid Field in CDB, just retry * silently with RC10 */ return -EINVAL; if (sense_valid && sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29 && sshdr.ascq == 0x00) /* Device reset might occur several times, * give it one more chance */ if (--reset_retries > 0) continue; } retries--; } while (the_result && retries); if (the_result) { sd_print_result(sdkp, "Read Capacity(16) failed", the_result); read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); return -EINVAL; } sector_size = get_unaligned_be32(&buffer[8]); lba = get_unaligned_be64(&buffer[0]); if (sd_read_protection_type(sdkp, buffer) < 0) { sdkp->capacity = 0; return -ENODEV; } /* Logical blocks per physical block exponent */ sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; /* RC basis */ sdkp->rc_basis = (buffer[12] >> 4) & 0x3; /* Lowest aligned logical block */ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; blk_queue_alignment_offset(sdp->request_queue, alignment); if (alignment && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "physical block alignment offset: %u\n", alignment); if (buffer[14] & 0x80) { /* LBPME */ sdkp->lbpme = 1; if (buffer[14] & 0x40) /* LBPRZ */ sdkp->lbprz = 1; sd_config_discard(sdkp, SD_LBP_WS16); } sdkp->capacity = lba + 1; return sector_size; } static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, unsigned char *buffer) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; int sense_valid = 0; int the_result; int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET; sector_t lba; unsigned sector_size; do { cmd[0] = READ_CAPACITY; memset(&cmd[1], 0, 9); memset(buffer, 0, 8); the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer, 8, SD_TIMEOUT, sdkp->max_retries, &exec_args); if (media_not_present(sdkp, &sshdr)) return -ENODEV; if (the_result > 0) { sense_valid = scsi_sense_valid(&sshdr); if (sense_valid && sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29 && sshdr.ascq == 0x00) /* Device reset might occur several times, * give it one more chance */ if (--reset_retries > 0) continue; } retries--; } while (the_result && retries); if (the_result) { sd_print_result(sdkp, "Read Capacity(10) failed", the_result); read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); return -EINVAL; } 
sector_size = get_unaligned_be32(&buffer[4]); lba = get_unaligned_be32(&buffer[0]); if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) { /* Some buggy (usb cardreader) devices return an lba of 0xffffffff when the want to report a size of 0 (with which they really mean no media is present) */ sdkp->capacity = 0; sdkp->physical_block_size = sector_size; return sector_size; } sdkp->capacity = lba + 1; sdkp->physical_block_size = sector_size; return sector_size; } static int sd_try_rc16_first(struct scsi_device *sdp) { if (sdp->host->max_cmd_len < 16) return 0; if (sdp->try_rc_10_first) return 0; if (sdp->scsi_level > SCSI_SPC_2) return 1; if (scsi_device_protection(sdp)) return 1; return 0; } /* * read disk capacity */ static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) { int sector_size; struct scsi_device *sdp = sdkp->device; if (sd_try_rc16_first(sdp)) { sector_size = read_capacity_16(sdkp, sdp, buffer); if (sector_size == -EOVERFLOW) goto got_data; if (sector_size == -ENODEV) return; if (sector_size < 0) sector_size = read_capacity_10(sdkp, sdp, buffer); if (sector_size < 0) return; } else { sector_size = read_capacity_10(sdkp, sdp, buffer); if (sector_size == -EOVERFLOW) goto got_data; if (sector_size < 0) return; if ((sizeof(sdkp->capacity) > 4) && (sdkp->capacity > 0xffffffffULL)) { int old_sector_size = sector_size; sd_printk(KERN_NOTICE, sdkp, "Very big device. " "Trying to use READ CAPACITY(16).\n"); sector_size = read_capacity_16(sdkp, sdp, buffer); if (sector_size < 0) { sd_printk(KERN_NOTICE, sdkp, "Using 0xffffffff as device size\n"); sdkp->capacity = 1 + (sector_t) 0xffffffff; sector_size = old_sector_size; goto got_data; } /* Remember that READ CAPACITY(16) succeeded */ sdp->try_rc_10_first = 0; } } /* Some devices are known to return the total number of blocks, * not the highest block number. Some devices have versions * which do this and others which do not. Some devices we might * suspect of doing this but we don't know for certain. * * If we know the reported capacity is wrong, decrement it. If * we can only guess, then assume the number of blocks is even * (usually true but not always) and err on the side of lowering * the capacity. */ if (sdp->fix_capacity || (sdp->guess_capacity && (sdkp->capacity & 0x01))) { sd_printk(KERN_INFO, sdkp, "Adjusting the sector count " "from its reported value: %llu\n", (unsigned long long) sdkp->capacity); --sdkp->capacity; } got_data: if (sector_size == 0) { sector_size = 512; sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, " "assuming 512.\n"); } if (sector_size != 512 && sector_size != 1024 && sector_size != 2048 && sector_size != 4096) { sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", sector_size); /* * The user might want to re-format the drive with * a supported sectorsize. Once this happens, it * would be relatively trivial to set the thing up. * For this reason, we leave the thing in the table. 
*/ sdkp->capacity = 0; /* * set a bogus sector size so the normal read/write * logic in the block layer will eventually refuse any * request on this device without tripping over power * of two sector size assumptions */ sector_size = 512; } blk_queue_logical_block_size(sdp->request_queue, sector_size); blk_queue_physical_block_size(sdp->request_queue, sdkp->physical_block_size); sdkp->device->sector_size = sector_size; if (sdkp->capacity > 0xffffffff) sdp->use_16_for_rw = 1; } /* * Print disk capacity */ static void sd_print_capacity(struct scsi_disk *sdkp, sector_t old_capacity) { int sector_size = sdkp->device->sector_size; char cap_str_2[10], cap_str_10[10]; if (!sdkp->first_scan && old_capacity == sdkp->capacity) return; string_get_size(sdkp->capacity, sector_size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); string_get_size(sdkp->capacity, sector_size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); sd_printk(KERN_NOTICE, sdkp, "%llu %d-byte logical blocks: (%s/%s)\n", (unsigned long long)sdkp->capacity, sector_size, cap_str_10, cap_str_2); if (sdkp->physical_block_size != sector_size) sd_printk(KERN_NOTICE, sdkp, "%u-byte physical blocks\n", sdkp->physical_block_size); } /* called with buffer of length 512 */ static inline int sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, unsigned char *buffer, int len, struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { /* * If we must use MODE SENSE(10), make sure that the buffer length * is at least 8 bytes so that the mode sense header fits. */ if (sdkp->device->use_10_for_ms && len < 8) len = 8; return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len, SD_TIMEOUT, sdkp->max_retries, data, sshdr); } /* * read write protect setting, if possible - called only in sd_revalidate_disk() * called with buffer of length SD_BUF_SIZE */ static void sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) { int res; struct scsi_device *sdp = sdkp->device; struct scsi_mode_data data; int old_wp = sdkp->write_prot; set_disk_ro(sdkp->disk, 0); if (sdp->skip_ms_page_3f) { sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); return; } if (sdp->use_192_bytes_for_3f) { res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL); } else { /* * First attempt: ask for all pages (0x3F), but only 4 bytes. * We have to start carefully: some devices hang if we ask * for more than is available. */ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL); /* * Second attempt: ask for page 0 When only page 0 is * implemented, a request for page 3F may return Sense Key * 5: Illegal Request, Sense Code 24: Invalid field in * CDB. */ if (res < 0) res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL); /* * Third attempt: ask 255 bytes, as we did earlier. */ if (res < 0) res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255, &data, NULL); } if (res < 0) { sd_first_printk(KERN_WARNING, sdkp, "Test WP failed, assume Write Enabled\n"); } else { sdkp->write_prot = ((data.device_specific & 0x80) != 0); set_disk_ro(sdkp->disk, sdkp->write_prot); if (sdkp->first_scan || old_wp != sdkp->write_prot) { sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", sdkp->write_prot ? 
"on" : "off"); sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); } } } /* * sd_read_cache_type - called only from sd_revalidate_disk() * called with buffer of length SD_BUF_SIZE */ static void sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) { int len = 0, res; struct scsi_device *sdp = sdkp->device; int dbd; int modepage; int first_len; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; int old_wce = sdkp->WCE; int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; if (sdkp->cache_override) return; first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) goto defaults; else { if (sdp->skip_ms_page_3f) goto defaults; modepage = 0x3F; if (sdp->use_192_bytes_for_3f) first_len = 192; dbd = 0; } } else if (sdp->type == TYPE_RBC) { modepage = 6; dbd = 8; } else { modepage = 8; dbd = 0; } /* cautiously ask */ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len, &data, &sshdr); if (res < 0) goto bad_sense; if (!data.header_length) { modepage = 6; first_len = 0; sd_first_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n"); } /* that went OK, now ask for the proper length */ len = data.length; /* * We're only interested in the first three bytes, actually. * But the data cache page is defined for the first 20. */ if (len < 3) goto bad_sense; else if (len > SD_BUF_SIZE) { sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " "data from %d to %d bytes\n", len, SD_BUF_SIZE); len = SD_BUF_SIZE; } if (modepage == 0x3F && sdp->use_192_bytes_for_3f) len = 192; /* Get the data */ if (len > first_len) res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len, &data, &sshdr); if (!res) { int offset = data.header_length + data.block_descriptor_length; while (offset < len) { u8 page_code = buffer[offset] & 0x3F; u8 spf = buffer[offset] & 0x40; if (page_code == 8 || page_code == 6) { /* We're interested only in the first 3 bytes. */ if (len - offset <= 2) { sd_first_printk(KERN_ERR, sdkp, "Incomplete mode parameter " "data\n"); goto defaults; } else { modepage = page_code; goto Page_found; } } else { /* Go to the next page */ if (spf && len - offset > 3) offset += 4 + (buffer[offset+2] << 8) + buffer[offset+3]; else if (!spf && len - offset > 1) offset += 2 + buffer[offset+1]; else { sd_first_printk(KERN_ERR, sdkp, "Incomplete mode " "parameter data\n"); goto defaults; } } } sd_first_printk(KERN_WARNING, sdkp, "No Caching mode page found\n"); goto defaults; Page_found: if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); } else { sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0); sdkp->RCD = 0; } sdkp->DPOFUA = (data.device_specific & 0x10) != 0; if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; } /* No cache flush allowed for write protected devices */ if (sdkp->WCE && sdkp->write_prot) sdkp->WCE = 0; if (sdkp->first_scan || old_wce != sdkp->WCE || old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) sd_printk(KERN_NOTICE, sdkp, "Write cache: %s, read cache: %s, %s\n", sdkp->WCE ? "enabled" : "disabled", sdkp->RCD ? "disabled" : "enabled", sdkp->DPOFUA ? 
"supports DPO and FUA" : "doesn't support DPO or FUA"); return; } bad_sense: if (scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST && sshdr.asc == 0x24 && sshdr.ascq == 0x0) /* Invalid field in CDB */ sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); else sd_first_printk(KERN_ERR, sdkp, "Asking for cache data failed\n"); defaults: if (sdp->wce_default_on) { sd_first_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n"); sdkp->WCE = 1; } else { sd_first_printk(KERN_WARNING, sdkp, "Assuming drive cache: write through\n"); sdkp->WCE = 0; } sdkp->RCD = 0; sdkp->DPOFUA = 0; } /* * The ATO bit indicates whether the DIF application tag is available * for use by the operating system. */ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) { int res, offset; struct scsi_device *sdp = sdkp->device; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return; if (sdkp->protection_type == 0) return; res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT, sdkp->max_retries, &data, &sshdr); if (res < 0 || !data.header_length || data.length < 6) { sd_first_printk(KERN_WARNING, sdkp, "getting Control mode page failed, assume no ATO\n"); if (scsi_sense_valid(&sshdr)) sd_print_sense_hdr(sdkp, &sshdr); return; } offset = data.header_length + data.block_descriptor_length; if ((buffer[offset] & 0x3f) != 0x0a) { sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); return; } if ((buffer[offset + 5] & 0x80) == 0) return; sdkp->ATO = 1; return; } /** * sd_read_block_limits - Query disk device for preferred I/O sizes. * @sdkp: disk to query */ static void sd_read_block_limits(struct scsi_disk *sdkp) { struct scsi_vpd *vpd; rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb0); if (!vpd || vpd->len < 16) goto out; sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]); sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]); sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]); if (vpd->len >= 64) { unsigned int lba_count, desc_count; sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); if (!sdkp->lbpme) goto out; lba_count = get_unaligned_be32(&vpd->data[20]); desc_count = get_unaligned_be32(&vpd->data[24]); if (lba_count && desc_count) sdkp->max_unmap_blocks = lba_count; sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]); if (vpd->data[32] & 0x80) sdkp->unmap_alignment = get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ if (sdkp->max_unmap_blocks) sd_config_discard(sdkp, SD_LBP_UNMAP); else sd_config_discard(sdkp, SD_LBP_WS16); } else { /* LBP VPD page tells us what to use */ if (sdkp->lbpu && sdkp->max_unmap_blocks) sd_config_discard(sdkp, SD_LBP_UNMAP); else if (sdkp->lbpws) sd_config_discard(sdkp, SD_LBP_WS16); else if (sdkp->lbpws10) sd_config_discard(sdkp, SD_LBP_WS10); else sd_config_discard(sdkp, SD_LBP_DISABLE); } } out: rcu_read_unlock(); } /** * sd_read_block_characteristics - Query block dev. 
characteristics * @sdkp: disk to query */ static void sd_read_block_characteristics(struct scsi_disk *sdkp) { struct request_queue *q = sdkp->disk->queue; struct scsi_vpd *vpd; u16 rot; u8 zoned; rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb1); if (!vpd || vpd->len < 8) { rcu_read_unlock(); return; } rot = get_unaligned_be16(&vpd->data[4]); zoned = (vpd->data[8] >> 4) & 3; rcu_read_unlock(); if (rot == 1) { blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { /* * Host-managed: Per ZBC and ZAC specifications, writes in * sequential write required zones of host-managed devices must * be aligned to the device physical block size. */ disk_set_zoned(sdkp->disk, BLK_ZONED_HM); blk_queue_zone_write_granularity(q, sdkp->physical_block_size); } else { sdkp->zoned = zoned; if (sdkp->zoned == 1) { /* Host-aware */ disk_set_zoned(sdkp->disk, BLK_ZONED_HA); } else { /* Regular disk or drive managed disk */ disk_set_zoned(sdkp->disk, BLK_ZONED_NONE); } } if (!sdkp->first_scan) return; if (blk_queue_is_zoned(q)) { sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); } else { if (sdkp->zoned == 1) sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n"); else if (sdkp->zoned == 2) sd_printk(KERN_NOTICE, sdkp, "Drive-managed SMR disk\n"); } } /** * sd_read_block_provisioning - Query provisioning VPD page * @sdkp: disk to query */ static void sd_read_block_provisioning(struct scsi_disk *sdkp) { struct scsi_vpd *vpd; if (sdkp->lbpme == 0) return; rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb2); if (!vpd || vpd->len < 8) { rcu_read_unlock(); return; } sdkp->lbpvpd = 1; sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */ sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */ sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */ rcu_read_unlock(); } static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) { struct scsi_device *sdev = sdkp->device; if (sdev->host->no_write_same) { sdev->no_write_same = 1; return; } if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) { struct scsi_vpd *vpd; sdev->no_report_opcodes = 1; /* Disable WRITE SAME if REPORT SUPPORTED OPERATION * CODES is unsupported and the device has an ATA * Information VPD page (SAT). 
*/ rcu_read_lock(); vpd = rcu_dereference(sdev->vpd_pg89); if (vpd) sdev->no_write_same = 1; rcu_read_unlock(); } if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1) sdkp->ws16 = 1; if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1) sdkp->ws10 = 1; } static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) { struct scsi_device *sdev = sdkp->device; if (!sdev->security_supported) return; if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, SECURITY_PROTOCOL_IN, 0) == 1 && scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, SECURITY_PROTOCOL_OUT, 0) == 1) sdkp->security = 1; } static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf) { return logical_to_sectors(sdkp->device, get_unaligned_be64(buf)); } /** * sd_read_cpr - Query concurrent positioning ranges * @sdkp: disk to query */ static void sd_read_cpr(struct scsi_disk *sdkp) { struct blk_independent_access_ranges *iars = NULL; unsigned char *buffer = NULL; unsigned int nr_cpr = 0; int i, vpd_len, buf_len = SD_BUF_SIZE; u8 *desc; /* * We need to have the capacity set first for the block layer to be * able to check the ranges. */ if (sdkp->first_scan) return; if (!sdkp->capacity) goto out; /* * Concurrent Positioning Ranges VPD: there can be at most 256 ranges, * leading to a maximum page size of 64 + 256*32 bytes. */ buf_len = 64 + 256*32; buffer = kmalloc(buf_len, GFP_KERNEL); if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len)) goto out; /* We must have at least a 64B header and one 32B range descriptor */ vpd_len = get_unaligned_be16(&buffer[2]) + 4; if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { sd_printk(KERN_ERR, sdkp, "Invalid Concurrent Positioning Ranges VPD page\n"); goto out; } nr_cpr = (vpd_len - 64) / 32; if (nr_cpr == 1) { nr_cpr = 0; goto out; } iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr); if (!iars) { nr_cpr = 0; goto out; } desc = &buffer[64]; for (i = 0; i < nr_cpr; i++, desc += 32) { if (desc[0] != i) { sd_printk(KERN_ERR, sdkp, "Invalid Concurrent Positioning Range number\n"); nr_cpr = 0; break; } iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8); iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16); } out: disk_set_independent_access_ranges(sdkp->disk, iars); if (nr_cpr && sdkp->nr_actuators != nr_cpr) { sd_printk(KERN_NOTICE, sdkp, "%u concurrent positioning ranges\n", nr_cpr); sdkp->nr_actuators = nr_cpr; } kfree(buffer); } static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp) { struct scsi_device *sdp = sdkp->device; unsigned int min_xfer_bytes = logical_to_bytes(sdp, sdkp->min_xfer_blocks); if (sdkp->min_xfer_blocks == 0) return false; if (min_xfer_bytes & (sdkp->physical_block_size - 1)) { sd_first_printk(KERN_WARNING, sdkp, "Preferred minimum I/O size %u bytes not a " \ "multiple of physical block size (%u bytes)\n", min_xfer_bytes, sdkp->physical_block_size); sdkp->min_xfer_blocks = 0; return false; } sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n", min_xfer_bytes); return true; } /* * Determine the device's preferred I/O size for reads and writes * unless the reported value is unreasonably small, large, not a * multiple of the physical block size, or simply garbage. 
*/ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, unsigned int dev_max) { struct scsi_device *sdp = sdkp->device; unsigned int opt_xfer_bytes = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); unsigned int min_xfer_bytes = logical_to_bytes(sdp, sdkp->min_xfer_blocks); if (sdkp->opt_xfer_blocks == 0) return false; if (sdkp->opt_xfer_blocks > dev_max) { sd_first_printk(KERN_WARNING, sdkp, "Optimal transfer size %u logical blocks " \ "> dev_max (%u logical blocks)\n", sdkp->opt_xfer_blocks, dev_max); return false; } if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { sd_first_printk(KERN_WARNING, sdkp, "Optimal transfer size %u logical blocks " \ "> sd driver limit (%u logical blocks)\n", sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); return false; } if (opt_xfer_bytes < PAGE_SIZE) { sd_first_printk(KERN_WARNING, sdkp, "Optimal transfer size %u bytes < " \ "PAGE_SIZE (%u bytes)\n", opt_xfer_bytes, (unsigned int)PAGE_SIZE); return false; } if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) { sd_first_printk(KERN_WARNING, sdkp, "Optimal transfer size %u bytes not a " \ "multiple of preferred minimum block " \ "size (%u bytes)\n", opt_xfer_bytes, min_xfer_bytes); return false; } if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { sd_first_printk(KERN_WARNING, sdkp, "Optimal transfer size %u bytes not a " \ "multiple of physical block size (%u bytes)\n", opt_xfer_bytes, sdkp->physical_block_size); return false; } sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", opt_xfer_bytes); return true; } /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. * @disk: struct gendisk we care about **/ static int sd_revalidate_disk(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; struct request_queue *q = sdkp->disk->queue; sector_t old_capacity = sdkp->capacity; unsigned char *buffer; unsigned int dev_max, rw_max; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); /* * If the device is offline, don't try and read capacity or any * of the other niceties. */ if (!scsi_device_online(sdp)) goto out; buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); if (!buffer) { sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " "allocation failure.\n"); goto out; } sd_spinup_disk(sdkp); /* * Without media there is no reason to ask; moreover, some devices * react badly if we do. */ if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); /* * set the default to rotational. All non-rotational devices * support the block characteristics VPD page, which will * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); sd_zbc_read_zones(sdkp, buffer); sd_read_cpr(sdkp); } sd_print_capacity(sdkp, old_capacity); sd_read_write_protect_flag(sdkp, buffer); sd_read_cache_type(sdkp, buffer); sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); sd_config_protection(sdkp); } /* * We now have all cache related info, determine how we deal * with flush requests. */ sd_set_flush_flag(sdkp); /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ dev_max = sdp->use_16_for_rw ? 
SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; /* Some devices report a maximum block count for READ/WRITE requests. */ dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); if (sd_validate_min_xfer_size(sdkp)) blk_queue_io_min(sdkp->disk->queue, logical_to_bytes(sdp, sdkp->min_xfer_blocks)); else blk_queue_io_min(sdkp->disk->queue, 0); if (sd_validate_opt_xfer_size(sdkp, dev_max)) { q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); } else { q->limits.io_opt = 0; rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), (sector_t)BLK_DEF_MAX_SECTORS); } /* * Limit default to SCSI host optimal sector limit if set. There may be * an impact on performance for when the size of a request exceeds this * host limit. */ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors); /* Do not exceed controller limit */ rw_max = min(rw_max, queue_max_hw_sectors(q)); /* * Only update max_sectors if previously unset or if the current value * exceeds the capabilities of the hardware. */ if (sdkp->first_scan || q->limits.max_sectors > q->limits.max_dev_sectors || q->limits.max_sectors > q->limits.max_hw_sectors) q->limits.max_sectors = rw_max; sdkp->first_scan = 0; set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); sd_config_write_same(sdkp); kfree(buffer); /* * For a zoned drive, revalidating the zones can be done only once * the gendisk capacity is set. So if this fails, set back the gendisk * capacity to 0. */ if (sd_zbc_revalidate_zones(sdkp)) set_capacity_and_notify(disk, 0); out: return 0; } /** * sd_unlock_native_capacity - unlock native capacity * @disk: struct gendisk to set capacity for * * Block layer calls this function if it detects that partitions * on @disk reach beyond the end of the device. If the SCSI host * implements ->unlock_native_capacity() method, it's invoked to * give it a chance to adjust the device capacity. * * CONTEXT: * Defined by block layer. Might sleep. */ static void sd_unlock_native_capacity(struct gendisk *disk) { struct scsi_device *sdev = scsi_disk(disk)->device; if (sdev->host->hostt->unlock_native_capacity) sdev->host->hostt->unlock_native_capacity(sdev); } /** * sd_format_disk_name - format disk name * @prefix: name prefix - ie. "sd" for SCSI disks * @index: index of the disk to format name for * @buf: output buffer * @buflen: length of the output buffer * * SCSI disk names starts at sda. The 26th device is sdz and the * 27th is sdaa. The last one for two lettered suffix is sdzz * which is followed by sdaaa. * * This is basically 26 base counting with one extra 'nil' entry * at the beginning from the second digit on and can be * determined using similar method as 26 base conversion with the * index shifted -1 after each digit is computed. * * CONTEXT: * Don't care. * * RETURNS: * 0 on success, -errno on failure. */ static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) { const int base = 'z' - 'a' + 1; char *begin = buf + strlen(prefix); char *end = buf + buflen; char *p; int unit; p = end - 1; *p = '\0'; unit = base; do { if (p == begin) return -EINVAL; *--p = 'a' + (index % unit); index = (index / unit) - 1; } while (index >= 0); memmove(begin, p, end - p); memcpy(buf, prefix, strlen(prefix)); return 0; } /** * sd_probe - called during driver initialization and whenever a * new scsi device is attached to the system. It is called once * for each scsi device (not just disks) present. 
* @dev: pointer to device object * * Returns 0 if successful (or not interested in this scsi device * (e.g. scanner)); 1 when there is an error. * * Note: this function is invoked from the scsi mid-level. * This function sets up the mapping between a given * <host,channel,id,lun> (found in sdp) and new device name * (e.g. /dev/sda). More precisely it is the block device major * and minor number that is chosen here. * * Assume sd_probe is not re-entrant (for time being) * Also think about sd_probe() and sd_remove() running coincidentally. **/ static int sd_probe(struct device *dev) { struct scsi_device *sdp = to_scsi_device(dev); struct scsi_disk *sdkp; struct gendisk *gd; int index; int error; scsi_autopm_get_device(sdp); error = -ENODEV; if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC) goto out; if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) { sdev_printk(KERN_WARNING, sdp, "Unsupported ZBC host-managed device.\n"); goto out; } SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, "sd_probe\n")); error = -ENOMEM; sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); if (!sdkp) goto out; gd = blk_mq_alloc_disk_for_queue(sdp->request_queue, &sd_bio_compl_lkclass); if (!gd) goto out_free; index = ida_alloc(&sd_index_ida, GFP_KERNEL); if (index < 0) { sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); goto out_put; } error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); if (error) { sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); goto out_free_index; } sdkp->device = sdp; sdkp->disk = gd; sdkp->index = index; sdkp->max_retries = SD_MAX_RETRIES; atomic_set(&sdkp->openers, 0); atomic_set(&sdkp->device->ioerr_cnt, 0); if (!sdp->request_queue->rq_timeout) { if (sdp->type != TYPE_MOD) blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); else blk_queue_rq_timeout(sdp->request_queue, SD_MOD_TIMEOUT); } device_initialize(&sdkp->disk_dev); sdkp->disk_dev.parent = get_device(dev); sdkp->disk_dev.class = &sd_disk_class; dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev)); error = device_add(&sdkp->disk_dev); if (error) { put_device(&sdkp->disk_dev); goto out; } dev_set_drvdata(dev, sdkp); gd->major = sd_major((index & 0xf0) >> 4); gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); gd->minors = SD_MINORS; gd->fops = &sd_fops; gd->private_data = sdkp; /* defaults, until the device tells us otherwise */ sdp->sector_size = 512; sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; sdkp->first_scan = 1; sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; sd_revalidate_disk(gd); if (sdp->removable) { gd->flags |= GENHD_FL_REMOVABLE; gd->events |= DISK_EVENT_MEDIA_CHANGE; gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT; } blk_pm_runtime_init(sdp->request_queue, dev); if (sdp->rpm_autosuspend) { pm_runtime_set_autosuspend_delay(dev, sdp->host->hostt->rpm_autosuspend_delay); } error = device_add_disk(dev, gd, NULL); if (error) { put_device(&sdkp->disk_dev); put_disk(gd); goto out; } if (sdkp->security) { sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit); if (sdkp->opal_dev) sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n"); } sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
"removable " : ""); scsi_autopm_put_device(sdp); return 0; out_free_index: ida_free(&sd_index_ida, index); out_put: put_disk(gd); out_free: kfree(sdkp); out: scsi_autopm_put_device(sdp); return error; } /** * sd_remove - called whenever a scsi disk (previously recognized by * sd_probe) is detached from the system. It is called (potentially * multiple times) during sd module unload. * @dev: pointer to device object * * Note: this function is invoked from the scsi mid-level. * This function potentially frees up a device name (e.g. /dev/sdc) * that could be re-used by a subsequent sd_probe(). * This function is not called when the built-in sd driver is "exit-ed". **/ static int sd_remove(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); scsi_autopm_get_device(sdkp->device); device_del(&sdkp->disk_dev); del_gendisk(sdkp->disk); sd_shutdown(dev); put_disk(sdkp->disk); return 0; } static void scsi_disk_release(struct device *dev) { struct scsi_disk *sdkp = to_scsi_disk(dev); ida_free(&sd_index_ida, sdkp->index); sd_zbc_free_zone_info(sdkp); put_device(&sdkp->device->sdev_gendev); free_opal_dev(sdkp->opal_dev); kfree(sdkp); } static int sd_start_stop_device(struct scsi_disk *sdkp, int start) { unsigned char cmd[6] = { START_STOP }; /* START_VALID */ struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, .req_flags = BLK_MQ_REQ_PM, }; struct scsi_device *sdp = sdkp->device; int res; if (start) cmd[4] |= 1; /* START */ if (sdp->start_stop_pwr_cond) cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */ if (!scsi_device_online(sdp)) return -ENODEV; res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT, sdkp->max_retries, &exec_args); if (res) { sd_print_result(sdkp, "Start/Stop Unit failed", res); if (res > 0 && scsi_sense_valid(&sshdr)) { sd_print_sense_hdr(sdkp, &sshdr); /* 0x3a is medium not present */ if (sshdr.asc == 0x3a) res = 0; } } /* SCSI error codes must not go to the generic layer */ if (res) return -EIO; return 0; } /* * Send a SYNCHRONIZE CACHE instruction down to the device through * the normal SCSI command structure. Wait for the command to * complete. */ static void sd_shutdown(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); if (!sdkp) return; /* this can happen */ if (pm_runtime_suspended(dev)) return; if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); sd_sync_cache(sdkp, NULL); } if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_start_stop_device(sdkp, 0); } } static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) { struct scsi_disk *sdkp = dev_get_drvdata(dev); struct scsi_sense_hdr sshdr; int ret = 0; if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ return 0; if (sdkp->WCE && sdkp->media_present) { if (!sdkp->device->silence_suspend) sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); ret = sd_sync_cache(sdkp, &sshdr); if (ret) { /* ignore OFFLINE device */ if (ret == -ENODEV) return 0; if (!scsi_sense_valid(&sshdr) || sshdr.sense_key != ILLEGAL_REQUEST) return ret; /* * sshdr.sense_key == ILLEGAL_REQUEST means this drive * doesn't support sync. There's not much to do and * suspend shouldn't fail. 
*/ ret = 0; } } if (sdkp->device->manage_start_stop) { if (!sdkp->device->silence_suspend) sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); /* an error is not worth aborting a system sleep */ ret = sd_start_stop_device(sdkp, 0); if (ignore_stop_errors) ret = 0; } return ret; } static int sd_suspend_system(struct device *dev) { if (pm_runtime_suspended(dev)) return 0; return sd_suspend_common(dev, true); } static int sd_suspend_runtime(struct device *dev) { return sd_suspend_common(dev, false); } static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); int ret = 0; if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; if (!sdkp->device->manage_start_stop) return 0; if (!sdkp->device->no_start_on_resume) { sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ret = sd_start_stop_device(sdkp, 1); } if (!ret) opal_unlock_from_suspend(sdkp->opal_dev); return ret; } static int sd_resume_system(struct device *dev) { if (pm_runtime_suspended(dev)) return 0; return sd_resume(dev); } static int sd_resume_runtime(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); struct scsi_device *sdp; if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; sdp = sdkp->device; if (sdp->ignore_media_change) { /* clear the device's sense data */ static const u8 cmd[10] = { REQUEST_SENSE }; const struct scsi_exec_args exec_args = { .req_flags = BLK_MQ_REQ_PM, }; if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, sdp->request_queue->rq_timeout, 1, &exec_args)) sd_printk(KERN_NOTICE, sdkp, "Failed to clear sense data\n"); } return sd_resume(dev); } static const struct dev_pm_ops sd_pm_ops = { .suspend = sd_suspend_system, .resume = sd_resume_system, .poweroff = sd_suspend_system, .restore = sd_resume_system, .runtime_suspend = sd_suspend_runtime, .runtime_resume = sd_resume_runtime, }; static struct scsi_driver sd_template = { .gendrv = { .name = "sd", .owner = THIS_MODULE, .probe = sd_probe, .probe_type = PROBE_PREFER_ASYNCHRONOUS, .remove = sd_remove, .shutdown = sd_shutdown, .pm = &sd_pm_ops, }, .rescan = sd_rescan, .init_command = sd_init_command, .uninit_command = sd_uninit_command, .done = sd_done, .eh_action = sd_eh_action, .eh_reset = sd_eh_reset, }; /** * init_sd - entry point for this driver (both when built in or when * a module). * * Note: this function registers this driver with the scsi mid-level. **/ static int __init init_sd(void) { int majors = 0, i, err; SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); for (i = 0; i < SD_MAJORS; i++) { if (__register_blkdev(sd_major(i), "sd", sd_default_probe)) continue; majors++; } if (!majors) return -ENODEV; err = class_register(&sd_disk_class); if (err) goto err_out; sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); if (!sd_page_pool) { printk(KERN_ERR "sd: can't init discard page pool\n"); err = -ENOMEM; goto err_out_class; } err = scsi_register_driver(&sd_template.gendrv); if (err) goto err_out_driver; return 0; err_out_driver: mempool_destroy(sd_page_pool); err_out_class: class_unregister(&sd_disk_class); err_out: for (i = 0; i < SD_MAJORS; i++) unregister_blkdev(sd_major(i), "sd"); return err; } /** * exit_sd - exit point for this driver (when it is a module). * * Note: this function unregisters this driver from the scsi mid-level. 
**/ static void __exit exit_sd(void) { int i; SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); scsi_unregister_driver(&sd_template.gendrv); mempool_destroy(sd_page_pool); class_unregister(&sd_disk_class); for (i = 0; i < SD_MAJORS; i++) unregister_blkdev(sd_major(i), "sd"); } module_init(init_sd); module_exit(exit_sd); void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { scsi_print_sense_hdr(sdkp->device, sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); } void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) { const char *hb_string = scsi_hostbyte_string(result); if (hb_string) sd_printk(KERN_INFO, sdkp, "%s: Result: hostbyte=%s driverbyte=%s\n", msg, hb_string ? hb_string : "invalid", "DRIVER_OK"); else sd_printk(KERN_INFO, sdkp, "%s: Result: hostbyte=0x%02x driverbyte=%s\n", msg, host_byte(result), "DRIVER_OK"); }
linux-master
drivers/scsi/sd.c
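The sd.c record above wires the block layer's pr_ops hooks (sd_pr_register, sd_pr_reserve, sd_pr_release, ...) onto SCSI PERSISTENT RESERVE IN/OUT commands. The snippet below is a minimal illustrative sketch, not part of the kernel tree or of this corpus: it shows how that path is typically exercised from userspace through the generic persistent-reservation ioctls in <linux/pr.h>. The device node /dev/sdX and the key value 0x1234abcd are placeholders, not values taken from the driver.

/*
 * Hypothetical userspace sketch: drive the sd_pr_ops path via the generic
 * block-layer PR ioctls.  /dev/sdX and the reservation key are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg;
	struct pr_reservation rsv;
	int fd;

	fd = open("/dev/sdX", O_RDWR);	/* placeholder device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Register a key: reaches sd_pr_register() -> PR OUT (REGISTER). */
	memset(&reg, 0, sizeof(reg));
	reg.new_key = 0x1234abcd;
	reg.flags = PR_FL_IGNORE_KEY;	/* REGISTER AND IGNORE EXISTING KEY */
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");

	/* Take a write-exclusive reservation: sd_pr_reserve() -> PR OUT (RESERVE). */
	memset(&rsv, 0, sizeof(rsv));
	rsv.key = 0x1234abcd;
	rsv.type = PR_WRITE_EXCLUSIVE;
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");

	/* Drop the reservation again: sd_pr_release() -> PR OUT (RELEASE). */
	if (ioctl(fd, IOC_PR_RELEASE, &rsv))
		perror("IOC_PR_RELEASE");

	close(fd);
	return 0;
}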
// SPDX-License-Identifier: GPL-2.0-only /* * SCSI Media Changer device driver for Linux 2.6 * * (c) 1996-2003 Gerd Knorr <[email protected]> * */ #define VERSION "0.25" #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/major.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/compat.h> #include <linux/chio.h> /* here are all the ioctls */ #include <linux/mutex.h> #include <linux/idr.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> #define CH_DT_MAX 16 #define CH_TYPES 8 #define CH_MAX_DEVS 128 MODULE_DESCRIPTION("device driver for scsi media changer devices"); MODULE_AUTHOR("Gerd Knorr <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR); MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER); static int init = 1; module_param(init, int, 0444); MODULE_PARM_DESC(init, \ "initialize element status on driver load (default: on)"); static int timeout_move = 300; module_param(timeout_move, int, 0644); MODULE_PARM_DESC(timeout_move,"timeout for move commands " "(default: 300 seconds)"); static int timeout_init = 3600; module_param(timeout_init, int, 0644); MODULE_PARM_DESC(timeout_init,"timeout for INITIALIZE ELEMENT STATUS " "(default: 3600 seconds)"); static int verbose = 1; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose,"be verbose (default: on)"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more " "detailed sense codes on scsi errors (default: off)"); static int dt_id[CH_DT_MAX] = { [ 0 ... (CH_DT_MAX-1) ] = -1 }; static int dt_lun[CH_DT_MAX]; module_param_array(dt_id, int, NULL, 0444); module_param_array(dt_lun, int, NULL, 0444); /* tell the driver about vendor-specific slots */ static int vendor_firsts[CH_TYPES-4]; static int vendor_counts[CH_TYPES-4]; module_param_array(vendor_firsts, int, NULL, 0444); module_param_array(vendor_counts, int, NULL, 0444); static const char * vendor_labels[CH_TYPES-4] = { "v0", "v1", "v2", "v3" }; // module_param_string_array(vendor_labels, NULL, 0444); #define ch_printk(prefix, ch, fmt, a...) \ sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a) #define DPRINTK(fmt, arg...) \ do { \ if (debug) \ ch_printk(KERN_DEBUG, ch, fmt, ##arg); \ } while (0) #define VPRINTK(level, fmt, arg...) \ do { \ if (verbose) \ ch_printk(level, ch, fmt, ##arg); \ } while (0) /* ------------------------------------------------------------------- */ #define MAX_RETRIES 1 static struct class * ch_sysfs_class; typedef struct { struct kref ref; struct list_head list; int minor; char name[8]; struct scsi_device *device; struct scsi_device **dt; /* ptrs to data transfer elements */ u_int firsts[CH_TYPES]; u_int counts[CH_TYPES]; u_int unit_attention; u_int voltags; struct mutex lock; } scsi_changer; static DEFINE_IDR(ch_index_idr); static DEFINE_SPINLOCK(ch_index_lock); static const struct { unsigned char sense; unsigned char asc; unsigned char ascq; int errno; } ch_err[] = { /* Just filled in what looks right. Hav'nt checked any standard paper for these errno assignments, so they may be wrong... 
*/ { .sense = ILLEGAL_REQUEST, .asc = 0x21, .ascq = 0x01, .errno = EBADSLT, /* Invalid element address */ },{ .sense = ILLEGAL_REQUEST, .asc = 0x28, .ascq = 0x01, .errno = EBADE, /* Import or export element accessed */ },{ .sense = ILLEGAL_REQUEST, .asc = 0x3B, .ascq = 0x0D, .errno = EXFULL, /* Medium destination element full */ },{ .sense = ILLEGAL_REQUEST, .asc = 0x3B, .ascq = 0x0E, .errno = EBADE, /* Medium source element empty */ },{ .sense = ILLEGAL_REQUEST, .asc = 0x20, .ascq = 0x00, .errno = EBADRQC, /* Invalid command operation code */ },{ /* end of list */ } }; /* ------------------------------------------------------------------- */ static int ch_find_errno(struct scsi_sense_hdr *sshdr) { int i,errno = 0; /* Check to see if additional sense information is available */ if (scsi_sense_valid(sshdr) && sshdr->asc != 0) { for (i = 0; ch_err[i].errno != 0; i++) { if (ch_err[i].sense == sshdr->sense_key && ch_err[i].asc == sshdr->asc && ch_err[i].ascq == sshdr->ascq) { errno = -ch_err[i].errno; break; } } } if (errno == 0) errno = -EIO; return errno; } static int ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, void *buffer, unsigned int buflength, enum req_op op) { int errno, retries = 0, timeout, result; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS) ? timeout_init : timeout_move; retry: errno = 0; result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength, timeout * HZ, MAX_RETRIES, &exec_args); if (result < 0) return result; if (scsi_sense_valid(&sshdr)) { if (debug) scsi_print_sense_hdr(ch->device, ch->name, &sshdr); errno = ch_find_errno(&sshdr); switch(sshdr.sense_key) { case UNIT_ATTENTION: ch->unit_attention = 1; if (retries++ < 3) goto retry; break; } } return errno; } /* ------------------------------------------------------------------------ */ static int ch_elem_to_typecode(scsi_changer *ch, u_int elem) { int i; for (i = 0; i < CH_TYPES; i++) { if (elem >= ch->firsts[i] && elem < ch->firsts[i] + ch->counts[i]) return i+1; } return 0; } static int ch_read_element_status(scsi_changer *ch, u_int elem, char *data) { u_char cmd[12]; u_char *buffer; int result; buffer = kmalloc(512, GFP_KERNEL); if(!buffer) return -ENOMEM; retry: memset(cmd,0,sizeof(cmd)); cmd[0] = READ_ELEMENT_STATUS; cmd[1] = ((ch->device->lun & 0x7) << 5) | (ch->voltags ? 0x10 : 0) | ch_elem_to_typecode(ch,elem); cmd[2] = (elem >> 8) & 0xff; cmd[3] = elem & 0xff; cmd[5] = 1; cmd[9] = 255; if (0 == (result = ch_do_scsi(ch, cmd, 12, buffer, 256, REQ_OP_DRV_IN))) { if (((buffer[16] << 8) | buffer[17]) != elem) { DPRINTK("asked for element 0x%02x, got 0x%02x\n", elem,(buffer[16] << 8) | buffer[17]); kfree(buffer); return -EIO; } memcpy(data,buffer+16,16); } else { if (ch->voltags) { ch->voltags = 0; VPRINTK(KERN_INFO, "device has no volume tag support\n"); goto retry; } DPRINTK("READ ELEMENT STATUS for element 0x%x failed\n",elem); } kfree(buffer); return result; } static int ch_init_elem(scsi_changer *ch) { int err; u_char cmd[6]; VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n"); memset(cmd,0,sizeof(cmd)); cmd[0] = INITIALIZE_ELEMENT_STATUS; cmd[1] = (ch->device->lun & 0x7) << 5; err = ch_do_scsi(ch, cmd, 6, NULL, 0, REQ_OP_DRV_IN); VPRINTK(KERN_INFO, "... 
finished\n"); return err; } static int ch_readconfig(scsi_changer *ch) { u_char cmd[10], data[16]; u_char *buffer; int result,id,lun,i; u_int elem; buffer = kzalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; memset(cmd,0,sizeof(cmd)); cmd[0] = MODE_SENSE; cmd[1] = (ch->device->lun & 0x7) << 5; cmd[2] = 0x1d; cmd[4] = 255; result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN); if (0 != result) { cmd[1] |= (1<<3); result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN); } if (0 == result) { ch->firsts[CHET_MT] = (buffer[buffer[3]+ 6] << 8) | buffer[buffer[3]+ 7]; ch->counts[CHET_MT] = (buffer[buffer[3]+ 8] << 8) | buffer[buffer[3]+ 9]; ch->firsts[CHET_ST] = (buffer[buffer[3]+10] << 8) | buffer[buffer[3]+11]; ch->counts[CHET_ST] = (buffer[buffer[3]+12] << 8) | buffer[buffer[3]+13]; ch->firsts[CHET_IE] = (buffer[buffer[3]+14] << 8) | buffer[buffer[3]+15]; ch->counts[CHET_IE] = (buffer[buffer[3]+16] << 8) | buffer[buffer[3]+17]; ch->firsts[CHET_DT] = (buffer[buffer[3]+18] << 8) | buffer[buffer[3]+19]; ch->counts[CHET_DT] = (buffer[buffer[3]+20] << 8) | buffer[buffer[3]+21]; VPRINTK(KERN_INFO, "type #1 (mt): 0x%x+%d [medium transport]\n", ch->firsts[CHET_MT], ch->counts[CHET_MT]); VPRINTK(KERN_INFO, "type #2 (st): 0x%x+%d [storage]\n", ch->firsts[CHET_ST], ch->counts[CHET_ST]); VPRINTK(KERN_INFO, "type #3 (ie): 0x%x+%d [import/export]\n", ch->firsts[CHET_IE], ch->counts[CHET_IE]); VPRINTK(KERN_INFO, "type #4 (dt): 0x%x+%d [data transfer]\n", ch->firsts[CHET_DT], ch->counts[CHET_DT]); } else { VPRINTK(KERN_INFO, "reading element address assignment page failed!\n"); } /* vendor specific element types */ for (i = 0; i < 4; i++) { if (0 == vendor_counts[i]) continue; if (NULL == vendor_labels[i]) continue; ch->firsts[CHET_V1+i] = vendor_firsts[i]; ch->counts[CHET_V1+i] = vendor_counts[i]; VPRINTK(KERN_INFO, "type #%d (v%d): 0x%x+%d [%s, vendor specific]\n", i+5,i+1,vendor_firsts[i],vendor_counts[i], vendor_labels[i]); } /* look up the devices of the data transfer elements */ ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt), GFP_KERNEL); if (!ch->dt) { kfree(buffer); return -ENOMEM; } for (elem = 0; elem < ch->counts[CHET_DT]; elem++) { id = -1; lun = 0; if (elem < CH_DT_MAX && -1 != dt_id[elem]) { id = dt_id[elem]; lun = dt_lun[elem]; VPRINTK(KERN_INFO, "dt 0x%x: [insmod option] ", elem+ch->firsts[CHET_DT]); } else if (0 != ch_read_element_status (ch,elem+ch->firsts[CHET_DT],data)) { VPRINTK(KERN_INFO, "dt 0x%x: READ ELEMENT STATUS failed\n", elem+ch->firsts[CHET_DT]); } else { VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]); if (data[6] & 0x80) { VPRINTK(KERN_CONT, "not this SCSI bus\n"); ch->dt[elem] = NULL; } else if (0 == (data[6] & 0x30)) { VPRINTK(KERN_CONT, "ID/LUN unknown\n"); ch->dt[elem] = NULL; } else { id = ch->device->id; lun = 0; if (data[6] & 0x20) id = data[7]; if (data[6] & 0x10) lun = data[6] & 7; } } if (-1 != id) { VPRINTK(KERN_CONT, "ID %i, LUN %i, ",id,lun); ch->dt[elem] = scsi_device_lookup(ch->device->host, ch->device->channel, id,lun); if (!ch->dt[elem]) { /* should not happen */ VPRINTK(KERN_CONT, "Huh? 
device not found!\n"); } else { VPRINTK(KERN_CONT, "name: %8.8s %16.16s %4.4s\n", ch->dt[elem]->vendor, ch->dt[elem]->model, ch->dt[elem]->rev); } } } ch->voltags = 1; kfree(buffer); return 0; } /* ------------------------------------------------------------------------ */ static int ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate) { u_char cmd[10]; DPRINTK("position: 0x%x\n",elem); if (0 == trans) trans = ch->firsts[CHET_MT]; memset(cmd,0,sizeof(cmd)); cmd[0] = POSITION_TO_ELEMENT; cmd[1] = (ch->device->lun & 0x7) << 5; cmd[2] = (trans >> 8) & 0xff; cmd[3] = trans & 0xff; cmd[4] = (elem >> 8) & 0xff; cmd[5] = elem & 0xff; cmd[8] = rotate ? 1 : 0; return ch_do_scsi(ch, cmd, 10, NULL, 0, REQ_OP_DRV_IN); } static int ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate) { u_char cmd[12]; DPRINTK("move: 0x%x => 0x%x\n",src,dest); if (0 == trans) trans = ch->firsts[CHET_MT]; memset(cmd,0,sizeof(cmd)); cmd[0] = MOVE_MEDIUM; cmd[1] = (ch->device->lun & 0x7) << 5; cmd[2] = (trans >> 8) & 0xff; cmd[3] = trans & 0xff; cmd[4] = (src >> 8) & 0xff; cmd[5] = src & 0xff; cmd[6] = (dest >> 8) & 0xff; cmd[7] = dest & 0xff; cmd[10] = rotate ? 1 : 0; return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN); } static int ch_exchange(scsi_changer *ch, u_int trans, u_int src, u_int dest1, u_int dest2, int rotate1, int rotate2) { u_char cmd[12]; DPRINTK("exchange: 0x%x => 0x%x => 0x%x\n", src,dest1,dest2); if (0 == trans) trans = ch->firsts[CHET_MT]; memset(cmd,0,sizeof(cmd)); cmd[0] = EXCHANGE_MEDIUM; cmd[1] = (ch->device->lun & 0x7) << 5; cmd[2] = (trans >> 8) & 0xff; cmd[3] = trans & 0xff; cmd[4] = (src >> 8) & 0xff; cmd[5] = src & 0xff; cmd[6] = (dest1 >> 8) & 0xff; cmd[7] = dest1 & 0xff; cmd[8] = (dest2 >> 8) & 0xff; cmd[9] = dest2 & 0xff; cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0); return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN); } static void ch_check_voltag(char *tag) { int i; for (i = 0; i < 32; i++) { /* restrict to ascii */ if (tag[i] >= 0x7f || tag[i] < 0x20) tag[i] = ' '; /* don't allow search wildcards */ if (tag[i] == '?' || tag[i] == '*') tag[i] = ' '; } } static int ch_set_voltag(scsi_changer *ch, u_int elem, int alternate, int clear, u_char *tag) { u_char cmd[12]; u_char *buffer; int result; buffer = kzalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; DPRINTK("%s %s voltag: 0x%x => \"%s\"\n", clear ? "clear" : "set", alternate ? "alternate" : "primary", elem, tag); memset(cmd,0,sizeof(cmd)); cmd[0] = SEND_VOLUME_TAG; cmd[1] = ((ch->device->lun & 0x7) << 5) | ch_elem_to_typecode(ch,elem); cmd[2] = (elem >> 8) & 0xff; cmd[3] = elem & 0xff; cmd[5] = clear ? (alternate ? 0x0d : 0x0c) : (alternate ? 
0x0b : 0x0a); cmd[9] = 255; memcpy(buffer,tag,32); ch_check_voltag(buffer); result = ch_do_scsi(ch, cmd, 12, buffer, 256, REQ_OP_DRV_OUT); kfree(buffer); return result; } static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest) { int retval = 0; u_char data[16]; unsigned int i; mutex_lock(&ch->lock); for (i = 0; i < ch->counts[type]; i++) { if (0 != ch_read_element_status (ch, ch->firsts[type]+i,data)) { retval = -EIO; break; } put_user(data[2], dest+i); if (data[2] & CESTATUS_EXCEPT) VPRINTK(KERN_INFO, "element 0x%x: asc=0x%x, ascq=0x%x\n", ch->firsts[type]+i, (int)data[4],(int)data[5]); retval = ch_read_element_status (ch, ch->firsts[type]+i,data); if (0 != retval) break; } mutex_unlock(&ch->lock); return retval; } /* ------------------------------------------------------------------------ */ static void ch_destroy(struct kref *ref) { scsi_changer *ch = container_of(ref, scsi_changer, ref); ch->device = NULL; kfree(ch->dt); kfree(ch); } static int ch_release(struct inode *inode, struct file *file) { scsi_changer *ch = file->private_data; scsi_device_put(ch->device); file->private_data = NULL; kref_put(&ch->ref, ch_destroy); return 0; } static int ch_open(struct inode *inode, struct file *file) { scsi_changer *ch; int minor = iminor(inode); spin_lock(&ch_index_lock); ch = idr_find(&ch_index_idr, minor); if (ch == NULL || !kref_get_unless_zero(&ch->ref)) { spin_unlock(&ch_index_lock); return -ENXIO; } spin_unlock(&ch_index_lock); if (scsi_device_get(ch->device)) { kref_put(&ch->ref, ch_destroy); return -ENXIO; } /* Synchronize with ch_probe() */ mutex_lock(&ch->lock); file->private_data = ch; mutex_unlock(&ch->lock); return 0; } static int ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit) { if (type >= CH_TYPES || unit >= ch->counts[type]) return -1; return 0; } struct changer_element_status32 { int ces_type; compat_uptr_t ces_data; }; #define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32) static long ch_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { scsi_changer *ch = file->private_data; int retval; void __user *argp = (void __user *)arg; retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, file->f_flags & O_NDELAY); if (retval) return retval; switch (cmd) { case CHIOGPARAMS: { struct changer_params params; params.cp_curpicker = 0; params.cp_npickers = ch->counts[CHET_MT]; params.cp_nslots = ch->counts[CHET_ST]; params.cp_nportals = ch->counts[CHET_IE]; params.cp_ndrives = ch->counts[CHET_DT]; if (copy_to_user(argp, &params, sizeof(params))) return -EFAULT; return 0; } case CHIOGVPARAMS: { struct changer_vendor_params vparams; memset(&vparams,0,sizeof(vparams)); if (ch->counts[CHET_V1]) { vparams.cvp_n1 = ch->counts[CHET_V1]; strncpy(vparams.cvp_label1,vendor_labels[0],16); } if (ch->counts[CHET_V2]) { vparams.cvp_n2 = ch->counts[CHET_V2]; strncpy(vparams.cvp_label2,vendor_labels[1],16); } if (ch->counts[CHET_V3]) { vparams.cvp_n3 = ch->counts[CHET_V3]; strncpy(vparams.cvp_label3,vendor_labels[2],16); } if (ch->counts[CHET_V4]) { vparams.cvp_n4 = ch->counts[CHET_V4]; strncpy(vparams.cvp_label4,vendor_labels[3],16); } if (copy_to_user(argp, &vparams, sizeof(vparams))) return -EFAULT; return 0; } case CHIOPOSITION: { struct changer_position pos; if (copy_from_user(&pos, argp, sizeof (pos))) return -EFAULT; if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) { DPRINTK("CHIOPOSITION: invalid parameter\n"); return -EBADSLT; } mutex_lock(&ch->lock); retval = ch_position(ch,0, ch->firsts[pos.cp_type] + 
pos.cp_unit, pos.cp_flags & CP_INVERT); mutex_unlock(&ch->lock); return retval; } case CHIOMOVE: { struct changer_move mv; if (copy_from_user(&mv, argp, sizeof (mv))) return -EFAULT; if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) || 0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit )) { DPRINTK("CHIOMOVE: invalid parameter\n"); return -EBADSLT; } mutex_lock(&ch->lock); retval = ch_move(ch,0, ch->firsts[mv.cm_fromtype] + mv.cm_fromunit, ch->firsts[mv.cm_totype] + mv.cm_tounit, mv.cm_flags & CM_INVERT); mutex_unlock(&ch->lock); return retval; } case CHIOEXCHANGE: { struct changer_exchange mv; if (copy_from_user(&mv, argp, sizeof (mv))) return -EFAULT; if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) || 0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) || 0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) { DPRINTK("CHIOEXCHANGE: invalid parameter\n"); return -EBADSLT; } mutex_lock(&ch->lock); retval = ch_exchange (ch,0, ch->firsts[mv.ce_srctype] + mv.ce_srcunit, ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit, ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit, mv.ce_flags & CE_INVERT1, mv.ce_flags & CE_INVERT2); mutex_unlock(&ch->lock); return retval; } case CHIOGSTATUS: { struct changer_element_status ces; if (copy_from_user(&ces, argp, sizeof (ces))) return -EFAULT; if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES) return -EINVAL; return ch_gstatus(ch, ces.ces_type, ces.ces_data); } #ifdef CONFIG_COMPAT case CHIOGSTATUS32: { struct changer_element_status32 ces32; if (copy_from_user(&ces32, argp, sizeof(ces32))) return -EFAULT; if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) return -EINVAL; return ch_gstatus(ch, ces32.ces_type, compat_ptr(ces32.ces_data)); } #endif case CHIOGELEM: { struct changer_get_element cge; u_char ch_cmd[12]; u_char *buffer; unsigned int elem; int result,i; if (copy_from_user(&cge, argp, sizeof (cge))) return -EFAULT; if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit)) return -EINVAL; elem = ch->firsts[cge.cge_type] + cge.cge_unit; buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; mutex_lock(&ch->lock); voltag_retry: memset(ch_cmd, 0, sizeof(ch_cmd)); ch_cmd[0] = READ_ELEMENT_STATUS; ch_cmd[1] = ((ch->device->lun & 0x7) << 5) | (ch->voltags ? 
0x10 : 0) | ch_elem_to_typecode(ch,elem); ch_cmd[2] = (elem >> 8) & 0xff; ch_cmd[3] = elem & 0xff; ch_cmd[5] = 1; ch_cmd[9] = 255; result = ch_do_scsi(ch, ch_cmd, 12, buffer, 256, REQ_OP_DRV_IN); if (!result) { cge.cge_status = buffer[18]; cge.cge_flags = 0; if (buffer[18] & CESTATUS_EXCEPT) { cge.cge_errno = EIO; } if (buffer[25] & 0x80) { cge.cge_flags |= CGE_SRC; if (buffer[25] & 0x40) cge.cge_flags |= CGE_INVERT; elem = (buffer[26]<<8) | buffer[27]; for (i = 0; i < 4; i++) { if (elem >= ch->firsts[i] && elem < ch->firsts[i] + ch->counts[i]) { cge.cge_srctype = i; cge.cge_srcunit = elem-ch->firsts[i]; } } } if ((buffer[22] & 0x30) == 0x30) { cge.cge_flags |= CGE_IDLUN; cge.cge_id = buffer[23]; cge.cge_lun = buffer[22] & 7; } if (buffer[9] & 0x80) { cge.cge_flags |= CGE_PVOLTAG; memcpy(cge.cge_pvoltag,buffer+28,36); } if (buffer[9] & 0x40) { cge.cge_flags |= CGE_AVOLTAG; memcpy(cge.cge_avoltag,buffer+64,36); } } else if (ch->voltags) { ch->voltags = 0; VPRINTK(KERN_INFO, "device has no volume tag support\n"); goto voltag_retry; } kfree(buffer); mutex_unlock(&ch->lock); if (copy_to_user(argp, &cge, sizeof (cge))) return -EFAULT; return result; } case CHIOINITELEM: { mutex_lock(&ch->lock); retval = ch_init_elem(ch); mutex_unlock(&ch->lock); return retval; } case CHIOSVOLTAG: { struct changer_set_voltag csv; int elem; if (copy_from_user(&csv, argp, sizeof(csv))) return -EFAULT; if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) { DPRINTK("CHIOSVOLTAG: invalid parameter\n"); return -EBADSLT; } elem = ch->firsts[csv.csv_type] + csv.csv_unit; mutex_lock(&ch->lock); retval = ch_set_voltag(ch, elem, csv.csv_flags & CSV_AVOLTAG, csv.csv_flags & CSV_CLEARTAG, csv.csv_voltag); mutex_unlock(&ch->lock); return retval; } default: return scsi_ioctl(ch->device, file->f_mode & FMODE_WRITE, cmd, argp); } } /* ------------------------------------------------------------------------ */ static int ch_probe(struct device *dev) { struct scsi_device *sd = to_scsi_device(dev); struct device *class_dev; int ret; scsi_changer *ch; if (sd->type != TYPE_MEDIUM_CHANGER) return -ENODEV; ch = kzalloc(sizeof(*ch), GFP_KERNEL); if (NULL == ch) return -ENOMEM; idr_preload(GFP_KERNEL); spin_lock(&ch_index_lock); ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT); spin_unlock(&ch_index_lock); idr_preload_end(); if (ret < 0) { if (ret == -ENOSPC) ret = -ENODEV; goto free_ch; } ch->minor = ret; sprintf(ch->name,"ch%d",ch->minor); ret = scsi_device_get(sd); if (ret) { sdev_printk(KERN_WARNING, sd, "ch%d: failed to get device\n", ch->minor); goto remove_idr; } mutex_init(&ch->lock); kref_init(&ch->ref); ch->device = sd; class_dev = device_create(ch_sysfs_class, dev, MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch, "s%s", ch->name); if (IS_ERR(class_dev)) { sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n", ch->minor); ret = PTR_ERR(class_dev); goto put_device; } mutex_lock(&ch->lock); ret = ch_readconfig(ch); if (ret) { mutex_unlock(&ch->lock); goto destroy_dev; } if (init) ch_init_elem(ch); mutex_unlock(&ch->lock); dev_set_drvdata(dev, ch); sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); return 0; destroy_dev: device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor)); put_device: scsi_device_put(sd); remove_idr: idr_remove(&ch_index_idr, ch->minor); free_ch: kfree(ch); return ret; } static int ch_remove(struct device *dev) { scsi_changer *ch = dev_get_drvdata(dev); spin_lock(&ch_index_lock); idr_remove(&ch_index_idr, ch->minor); dev_set_drvdata(dev, NULL); 
spin_unlock(&ch_index_lock); device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); scsi_device_put(ch->device); kref_put(&ch->ref, ch_destroy); return 0; } static struct scsi_driver ch_template = { .gendrv = { .name = "ch", .owner = THIS_MODULE, .probe = ch_probe, .remove = ch_remove, }, }; static const struct file_operations changer_fops = { .owner = THIS_MODULE, .open = ch_open, .release = ch_release, .unlocked_ioctl = ch_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; static int __init init_ch_module(void) { int rc; printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n"); ch_sysfs_class = class_create("scsi_changer"); if (IS_ERR(ch_sysfs_class)) { rc = PTR_ERR(ch_sysfs_class); return rc; } rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops); if (rc < 0) { printk("Unable to get major %d for SCSI-Changer\n", SCSI_CHANGER_MAJOR); goto fail1; } rc = scsi_register_driver(&ch_template.gendrv); if (rc < 0) goto fail2; return 0; fail2: unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); fail1: class_destroy(ch_sysfs_class); return rc; } static void __exit exit_ch_module(void) { scsi_unregister_driver(&ch_template.gendrv); unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); class_destroy(ch_sysfs_class); idr_destroy(&ch_index_idr); } module_init(init_ch_module); module_exit(exit_ch_module);
linux-master
drivers/scsi/ch.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Linux MegaRAID device driver * * Copyright (c) 2002 LSI Logic Corporation. * * Copyright (c) 2002 Red Hat, Inc. All rights reserved. * - fixes * - speed-ups (list handling fixes, issued_list, optimizations.) * - lots of cleanups. * * Copyright (c) 2003 Christoph Hellwig <[email protected]> * - new-style, hotplug-aware pci probing and scsi registration * * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju * <[email protected]> * * Description: Linux device driver for LSI Logic MegaRAID controller * * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493 * 518, 520, 531, 532 * * This driver is supported by LSI Logic, with assistance from Red Hat, Dell, * and others. Please send updates to the mailing list * [email protected] . */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/uaccess.h> #include <asm/io.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/reboot.h> #include <linux/module.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/mutex.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include "megaraid.h" #define MEGARAID_MODULE_VERSION "2.00.4" MODULE_AUTHOR ("[email protected]"); MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver"); MODULE_LICENSE ("GPL"); MODULE_VERSION(MEGARAID_MODULE_VERSION); static DEFINE_MUTEX(megadev_mutex); static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; module_param(max_cmd_per_lun, uint, 0); MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO; module_param(max_sectors_per_io, ushort, 0); MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; module_param(max_mbox_busy_wait, ushort, 0); MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20) #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C) #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20) #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C) /* * Global variables */ static int hba_count; static adapter_t *hba_soft_state[MAX_CONTROLLERS]; static struct proc_dir_entry *mega_proc_dir_entry; /* For controller re-ordering */ static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; static long megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); /* * The File Operations structure for the serial/ioctl interface of the driver */ static const struct file_operations megadev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = megadev_unlocked_ioctl, .open = megadev_open, .llseek = noop_llseek, }; /* * Array to structures for storing the information about the controllers. This * information is sent to the user level applications, when they do an ioctl * for this information. 
*/ static struct mcontroller mcontroller[MAX_CONTROLLERS]; /* The current driver version */ static u32 driver_ver = 0x02000000; /* major number used by the device for character interface */ static int major; #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) /* * Debug variable to print some diagnostic messages */ static int trace_level; /** * mega_setup_mailbox() * @adapter: pointer to our soft state * * Allocates a 8 byte aligned memory for the handshake mailbox. */ static int mega_setup_mailbox(adapter_t *adapter) { unsigned long align; adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev, sizeof(mbox64_t), &adapter->una_mbox64_dma, GFP_KERNEL); if( !adapter->una_mbox64 ) return -1; adapter->mbox = &adapter->una_mbox64->mbox; adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) & (~0UL ^ 0xFUL)); adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8); align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox); adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align; /* * Register the mailbox if the controller is an io-mapped controller */ if( adapter->flag & BOARD_IOMAP ) { outb(adapter->mbox_dma & 0xFF, adapter->host->io_port + MBOX_PORT0); outb((adapter->mbox_dma >> 8) & 0xFF, adapter->host->io_port + MBOX_PORT1); outb((adapter->mbox_dma >> 16) & 0xFF, adapter->host->io_port + MBOX_PORT2); outb((adapter->mbox_dma >> 24) & 0xFF, adapter->host->io_port + MBOX_PORT3); outb(ENABLE_MBOX_BYTE, adapter->host->io_port + ENABLE_MBOX_REGION); irq_ack(adapter); irq_enable(adapter); } return 0; } /* * mega_query_adapter() * @adapter - pointer to our soft state * * Issue the adapter inquiry commands to the controller and find out * information and parameter about the devices attached */ static int mega_query_adapter(adapter_t *adapter) { dma_addr_t prod_info_dma_handle; mega_inquiry3 *inquiry3; struct mbox_out mbox; u8 *raw_mbox = (u8 *)&mbox; int retval; /* Initialize adapter inquiry mailbox */ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); memset(&mbox, 0, sizeof(mbox)); /* * Try to issue Inquiry3 command * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and * update enquiry3 structure */ mbox.xferaddr = (u32)adapter->buf_dma_handle; inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */ raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 
0x02 */ /* Issue a blocking command to the card */ if ((retval = issue_scb_block(adapter, raw_mbox))) { /* the adapter does not support 40ld */ mraid_ext_inquiry *ext_inq; mraid_inquiry *inq; dma_addr_t dma_handle; ext_inq = dma_alloc_coherent(&adapter->dev->dev, sizeof(mraid_ext_inquiry), &dma_handle, GFP_KERNEL); if( ext_inq == NULL ) return -1; inq = &ext_inq->raid_inq; mbox.xferaddr = (u32)dma_handle; /*issue old 0x04 command to adapter */ mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ; issue_scb_block(adapter, raw_mbox); /* * update Enquiry3 and ProductInfo structures with * mraid_inquiry structure */ mega_8_to_40ld(inq, inquiry3, (mega_product_info *)&adapter->product_info); dma_free_coherent(&adapter->dev->dev, sizeof(mraid_ext_inquiry), ext_inq, dma_handle); } else { /*adapter supports 40ld */ adapter->flag |= BOARD_40LD; /* * get product_info, which is static information and will be * unchanged */ prod_info_dma_handle = dma_map_single(&adapter->dev->dev, (void *)&adapter->product_info, sizeof(mega_product_info), DMA_FROM_DEVICE); mbox.xferaddr = prod_info_dma_handle; raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ if ((retval = issue_scb_block(adapter, raw_mbox))) dev_warn(&adapter->dev->dev, "Product_info cmd failed with error: %d\n", retval); dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle, sizeof(mega_product_info), DMA_FROM_DEVICE); } /* * kernel scans the channels from 0 to <= max_channel */ adapter->host->max_channel = adapter->product_info.nchannels + NVIRT_CHAN -1; adapter->host->max_id = 16; /* max targets per channel */ adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */ adapter->host->cmd_per_lun = max_cmd_per_lun; adapter->numldrv = inquiry3->num_ldrv; adapter->max_cmds = adapter->product_info.max_commands; if(adapter->max_cmds > MAX_COMMANDS) adapter->max_cmds = MAX_COMMANDS; adapter->host->can_queue = adapter->max_cmds - 1; /* * Get the maximum number of scatter-gather elements supported by this * firmware */ mega_get_max_sgl(adapter); adapter->host->sg_tablesize = adapter->sglen; /* use HP firmware and bios version encoding Note: fw_version[0|1] and bios_version[0|1] were originally shifted right 8 bits making them zero. This 0 value was hardcoded to fix sparse warnings. */ if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { snprintf(adapter->fw_version, sizeof(adapter->fw_version), "%c%d%d.%d%d", adapter->product_info.fw_version[2], 0, adapter->product_info.fw_version[1] & 0x0f, 0, adapter->product_info.fw_version[0] & 0x0f); snprintf(adapter->bios_version, sizeof(adapter->fw_version), "%c%d%d.%d%d", adapter->product_info.bios_version[2], 0, adapter->product_info.bios_version[1] & 0x0f, 0, adapter->product_info.bios_version[0] & 0x0f); } else { memcpy(adapter->fw_version, (char *)adapter->product_info.fw_version, 4); adapter->fw_version[4] = 0; memcpy(adapter->bios_version, (char *)adapter->product_info.bios_version, 4); adapter->bios_version[4] = 0; } dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n", adapter->fw_version, adapter->bios_version, adapter->numldrv); /* * Do we support extended (>10 bytes) cdbs */ adapter->support_ext_cdb = mega_support_ext_cdb(adapter); if (adapter->support_ext_cdb) dev_notice(&adapter->dev->dev, "supports extended CDBs\n"); return 0; } /** * mega_runpendq() * @adapter: pointer to our soft state * * Runs through the list of pending requests. 
*/ static inline void mega_runpendq(adapter_t *adapter) { if(!list_empty(&adapter->pending_list)) __mega_runpendq(adapter); } /* * megaraid_queue() * @scmd - Issue this scsi command * @done - the callback hook into the scsi mid-layer * * The command queuing entry point for the mid-layer. */ static int megaraid_queue_lck(struct scsi_cmnd *scmd) { adapter_t *adapter; scb_t *scb; int busy=0; unsigned long flags; adapter = (adapter_t *)scmd->device->host->hostdata; /* * Allocate and build a SCB request * busy flag will be set if mega_build_cmd() command could not * allocate scb. We will return non-zero status in that case. * NOTE: scb can be null even though certain commands completed * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would * return 0 in that case. */ spin_lock_irqsave(&adapter->lock, flags); scb = mega_build_cmd(adapter, scmd, &busy); if (!scb) goto out; scb->state |= SCB_PENDQ; list_add_tail(&scb->list, &adapter->pending_list); /* * Check if the HBA is in quiescent state, e.g., during a * delete logical drive opertion. If it is, don't run * the pending_list. */ if (atomic_read(&adapter->quiescent) == 0) mega_runpendq(adapter); busy = 0; out: spin_unlock_irqrestore(&adapter->lock, flags); return busy; } static DEF_SCSI_QCMD(megaraid_queue) /** * mega_allocate_scb() * @adapter: pointer to our soft state * @cmd: scsi command from the mid-layer * * Allocate a SCB structure. This is the central structure for controller * commands. */ static inline scb_t * mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd) { struct list_head *head = &adapter->free_list; scb_t *scb; /* Unlink command from Free List */ if( !list_empty(head) ) { scb = list_entry(head->next, scb_t, list); list_del_init(head->next); scb->state = SCB_ACTIVE; scb->cmd = cmd; scb->dma_type = MEGA_DMA_TYPE_NONE; return scb; } return NULL; } /** * mega_get_ldrv_num() * @adapter: pointer to our soft state * @cmd: scsi mid layer command * @channel: channel on the controller * * Calculate the logical drive number based on the information in scsi command * and the channel number. */ static inline int mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) { int tgt; int ldrv_num; tgt = cmd->device->id; if ( tgt > adapter->this_id ) tgt--; /* we do not get inquires for initiator id */ ldrv_num = (channel * 15) + tgt; /* * If we have a logical drive with boot enabled, project it first */ if( adapter->boot_ldrv_enabled ) { if( ldrv_num == 0 ) { ldrv_num = adapter->boot_ldrv; } else { if( ldrv_num <= adapter->boot_ldrv ) { ldrv_num--; } } } /* * If "delete logical drive" feature is enabled on this controller. * Do only if at least one delete logical drive operation was done. * * Also, after logical drive deletion, instead of logical drive number, * the value returned should be 0x80+logical drive id. * * These is valid only for IO commands. */ if (adapter->support_random_del && adapter->read_ldidmap ) switch (cmd->cmnd[0]) { case READ_6: case WRITE_6: case READ_10: case WRITE_10: ldrv_num += 0x80; } return ldrv_num; } /** * mega_build_cmd() * @adapter: pointer to our soft state * @cmd: Prepare using this scsi command * @busy: busy flag if no resources * * Prepares a command and scatter gather list for the controller. This routine * also finds out if the commands is intended for a logical drive or a * physical device and prepares the controller command accordingly. * * We also re-order the logical drives and physical devices based on their * boot settings. 
*/ static scb_t * mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) { mega_passthru *pthru; scb_t *scb; mbox_t *mbox; u32 seg; char islogical; int max_ldrv_num; int channel = 0; int target = 0; int ldrv_num = 0; /* logical drive number */ /* * We know what channels our logical drives are on - mega_find_card() */ islogical = adapter->logdrv_chan[cmd->device->channel]; /* * The theory: If physical drive is chosen for boot, all the physical * devices are exported before the logical drives, otherwise physical * devices are pushed after logical drives, in which case - Kernel sees * the physical devices on virtual channel which is obviously converted * to actual channel on the HBA. */ if( adapter->boot_pdrv_enabled ) { if( islogical ) { /* logical channel */ channel = cmd->device->channel - adapter->product_info.nchannels; } else { /* this is physical channel */ channel = cmd->device->channel; target = cmd->device->id; /* * boot from a physical disk, that disk needs to be * exposed first IF both the channels are SCSI, then * booting from the second channel is not allowed. */ if( target == 0 ) { target = adapter->boot_pdrv_tgt; } else if( target == adapter->boot_pdrv_tgt ) { target = 0; } } } else { if( islogical ) { /* this is the logical channel */ channel = cmd->device->channel; } else { /* physical channel */ channel = cmd->device->channel - NVIRT_CHAN; target = cmd->device->id; } } if(islogical) { /* have just LUN 0 for each target on virtual channels */ if (cmd->device->lun) { cmd->result = (DID_BAD_TARGET << 16); scsi_done(cmd); return NULL; } ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); max_ldrv_num = (adapter->flag & BOARD_40LD) ? MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; /* * max_ldrv_num increases by 0x80 if some logical drive was * deleted. 
*/ if(adapter->read_ldidmap) max_ldrv_num += 0x80; if(ldrv_num > max_ldrv_num ) { cmd->result = (DID_BAD_TARGET << 16); scsi_done(cmd); return NULL; } } else { if( cmd->device->lun > 7) { /* * Do not support lun >7 for physically accessed * devices */ cmd->result = (DID_BAD_TARGET << 16); scsi_done(cmd); return NULL; } } /* * * Logical drive commands * */ if(islogical) { switch (cmd->cmnd[0]) { case TEST_UNIT_READY: #if MEGA_HAVE_CLUSTERING /* * Do we support clustering and is the support enabled * If no, return success always */ if( !adapter->has_cluster ) { cmd->result = (DID_OK << 16); scsi_done(cmd); return NULL; } if(!(scb = mega_allocate_scb(adapter, cmd))) { *busy = 1; return NULL; } scb->raw_mbox[0] = MEGA_CLUSTER_CMD; scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; scb->raw_mbox[3] = ldrv_num; scb->dma_direction = DMA_NONE; return scb; #else cmd->result = (DID_OK << 16); scsi_done(cmd); return NULL; #endif case MODE_SENSE: { char *buf; struct scatterlist *sg; sg = scsi_sglist(cmd); buf = kmap_atomic(sg_page(sg)) + sg->offset; memset(buf, 0, cmd->cmnd[4]); kunmap_atomic(buf - sg->offset); cmd->result = (DID_OK << 16); scsi_done(cmd); return NULL; } case READ_CAPACITY: case INQUIRY: if(!(adapter->flag & (1L << cmd->device->channel))) { dev_notice(&adapter->dev->dev, "scsi%d: scanning scsi channel %d " "for logical drives\n", adapter->host->host_no, cmd->device->channel); adapter->flag |= (1L << cmd->device->channel); } /* Allocate a SCB and initialize passthru */ if(!(scb = mega_allocate_scb(adapter, cmd))) { *busy = 1; return NULL; } pthru = scb->pthru; mbox = (mbox_t *)scb->raw_mbox; memset(mbox, 0, sizeof(scb->raw_mbox)); memset(pthru, 0, sizeof(mega_passthru)); pthru->timeout = 0; pthru->ars = 1; pthru->reqsenselen = 14; pthru->islogical = 1; pthru->logdrv = ldrv_num; pthru->cdblen = cmd->cmd_len; memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); if( adapter->has_64bit_addr ) { mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; } else { mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; } scb->dma_direction = DMA_FROM_DEVICE; pthru->numsgelements = mega_build_sglist(adapter, scb, &pthru->dataxferaddr, &pthru->dataxferlen); mbox->m_out.xferaddr = scb->pthru_dma_addr; return scb; case READ_6: case WRITE_6: case READ_10: case WRITE_10: case READ_12: case WRITE_12: /* Allocate a SCB and initialize mailbox */ if(!(scb = mega_allocate_scb(adapter, cmd))) { *busy = 1; return NULL; } mbox = (mbox_t *)scb->raw_mbox; memset(mbox, 0, sizeof(scb->raw_mbox)); mbox->m_out.logdrv = ldrv_num; /* * A little hack: 2nd bit is zero for all scsi read * commands and is set for all scsi write commands */ if( adapter->has_64bit_addr ) { mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? MEGA_MBOXCMD_LWRITE64: MEGA_MBOXCMD_LREAD64 ; } else { mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 
MEGA_MBOXCMD_LWRITE: MEGA_MBOXCMD_LREAD ; } /* * 6-byte READ(0x08) or WRITE(0x0A) cdb */ if( cmd->cmd_len == 6 ) { mbox->m_out.numsectors = (u32) cmd->cmnd[4]; mbox->m_out.lba = ((u32)cmd->cmnd[1] << 16) | ((u32)cmd->cmnd[2] << 8) | (u32)cmd->cmnd[3]; mbox->m_out.lba &= 0x1FFFFF; #if MEGA_HAVE_STATS /* * Take modulo 0x80, since the logical drive * number increases by 0x80 when a logical * drive was deleted */ if (*cmd->cmnd == READ_6) { adapter->nreads[ldrv_num%0x80]++; adapter->nreadblocks[ldrv_num%0x80] += mbox->m_out.numsectors; } else { adapter->nwrites[ldrv_num%0x80]++; adapter->nwriteblocks[ldrv_num%0x80] += mbox->m_out.numsectors; } #endif } /* * 10-byte READ(0x28) or WRITE(0x2A) cdb */ if( cmd->cmd_len == 10 ) { mbox->m_out.numsectors = (u32)cmd->cmnd[8] | ((u32)cmd->cmnd[7] << 8); mbox->m_out.lba = ((u32)cmd->cmnd[2] << 24) | ((u32)cmd->cmnd[3] << 16) | ((u32)cmd->cmnd[4] << 8) | (u32)cmd->cmnd[5]; #if MEGA_HAVE_STATS if (*cmd->cmnd == READ_10) { adapter->nreads[ldrv_num%0x80]++; adapter->nreadblocks[ldrv_num%0x80] += mbox->m_out.numsectors; } else { adapter->nwrites[ldrv_num%0x80]++; adapter->nwriteblocks[ldrv_num%0x80] += mbox->m_out.numsectors; } #endif } /* * 12-byte READ(0xA8) or WRITE(0xAA) cdb */ if( cmd->cmd_len == 12 ) { mbox->m_out.lba = ((u32)cmd->cmnd[2] << 24) | ((u32)cmd->cmnd[3] << 16) | ((u32)cmd->cmnd[4] << 8) | (u32)cmd->cmnd[5]; mbox->m_out.numsectors = ((u32)cmd->cmnd[6] << 24) | ((u32)cmd->cmnd[7] << 16) | ((u32)cmd->cmnd[8] << 8) | (u32)cmd->cmnd[9]; #if MEGA_HAVE_STATS if (*cmd->cmnd == READ_12) { adapter->nreads[ldrv_num%0x80]++; adapter->nreadblocks[ldrv_num%0x80] += mbox->m_out.numsectors; } else { adapter->nwrites[ldrv_num%0x80]++; adapter->nwriteblocks[ldrv_num%0x80] += mbox->m_out.numsectors; } #endif } /* * If it is a read command */ if( (*cmd->cmnd & 0x0F) == 0x08 ) { scb->dma_direction = DMA_FROM_DEVICE; } else { scb->dma_direction = DMA_TO_DEVICE; } /* Calculate Scatter-Gather info */ mbox->m_out.numsgelements = mega_build_sglist(adapter, scb, (u32 *)&mbox->m_out.xferaddr, &seg); return scb; #if MEGA_HAVE_CLUSTERING case RESERVE: case RELEASE: /* * Do we support clustering and is the support enabled */ if( ! adapter->has_cluster ) { cmd->result = (DID_BAD_TARGET << 16); scsi_done(cmd); return NULL; } /* Allocate a SCB and initialize mailbox */ if(!(scb = mega_allocate_scb(adapter, cmd))) { *busy = 1; return NULL; } scb->raw_mbox[0] = MEGA_CLUSTER_CMD; scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 
MEGA_RESERVE_LD : MEGA_RELEASE_LD; scb->raw_mbox[3] = ldrv_num; scb->dma_direction = DMA_NONE; return scb; #endif default: cmd->result = (DID_BAD_TARGET << 16); scsi_done(cmd); return NULL; } } /* * Passthru drive commands */ else { /* Allocate a SCB and initialize passthru */ if(!(scb = mega_allocate_scb(adapter, cmd))) { *busy = 1; return NULL; } mbox = (mbox_t *)scb->raw_mbox; memset(mbox, 0, sizeof(scb->raw_mbox)); if( adapter->support_ext_cdb ) { mega_prepare_extpassthru(adapter, scb, cmd, channel, target); mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU; mbox->m_out.xferaddr = scb->epthru_dma_addr; } else { pthru = mega_prepare_passthru(adapter, scb, cmd, channel, target); /* Initialize mailbox */ if( adapter->has_64bit_addr ) { mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; } else { mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; } mbox->m_out.xferaddr = scb->pthru_dma_addr; } return scb; } return NULL; } /** * mega_prepare_passthru() * @adapter: pointer to our soft state * @scb: our scsi control block * @cmd: scsi command from the mid-layer * @channel: actual channel on the controller * @target: actual id on the controller. * * prepare a command for the scsi physical devices. */ static mega_passthru * mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, int channel, int target) { mega_passthru *pthru; pthru = scb->pthru; memset(pthru, 0, sizeof (mega_passthru)); /* 0=6sec/1=60sec/2=10min/3=3hrs */ pthru->timeout = 2; pthru->ars = 1; pthru->reqsenselen = 14; pthru->islogical = 0; pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; pthru->target = (adapter->flag & BOARD_40LD) ? (channel << 4) | target : target; pthru->cdblen = cmd->cmd_len; pthru->logdrv = cmd->device->lun; memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); /* Not sure about the direction */ scb->dma_direction = DMA_BIDIRECTIONAL; /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ switch (cmd->cmnd[0]) { case INQUIRY: case READ_CAPACITY: if(!(adapter->flag & (1L << cmd->device->channel))) { dev_notice(&adapter->dev->dev, "scsi%d: scanning scsi channel %d [P%d] " "for physical devices\n", adapter->host->host_no, cmd->device->channel, channel); adapter->flag |= (1L << cmd->device->channel); } fallthrough; default: pthru->numsgelements = mega_build_sglist(adapter, scb, &pthru->dataxferaddr, &pthru->dataxferlen); break; } return pthru; } /** * mega_prepare_extpassthru() * @adapter: pointer to our soft state * @scb: our scsi control block * @cmd: scsi command from the mid-layer * @channel: actual channel on the controller * @target: actual id on the controller. * * prepare a command for the scsi physical devices. This rountine prepares * commands for devices which can take extended CDBs (>10 bytes) */ static mega_ext_passthru * mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, int channel, int target) { mega_ext_passthru *epthru; epthru = scb->epthru; memset(epthru, 0, sizeof(mega_ext_passthru)); /* 0=6sec/1=60sec/2=10min/3=3hrs */ epthru->timeout = 2; epthru->ars = 1; epthru->reqsenselen = 14; epthru->islogical = 0; epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; epthru->target = (adapter->flag & BOARD_40LD) ? 
(channel << 4) | target : target; epthru->cdblen = cmd->cmd_len; epthru->logdrv = cmd->device->lun; memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); /* Not sure about the direction */ scb->dma_direction = DMA_BIDIRECTIONAL; switch(cmd->cmnd[0]) { case INQUIRY: case READ_CAPACITY: if(!(adapter->flag & (1L << cmd->device->channel))) { dev_notice(&adapter->dev->dev, "scsi%d: scanning scsi channel %d [P%d] " "for physical devices\n", adapter->host->host_no, cmd->device->channel, channel); adapter->flag |= (1L << cmd->device->channel); } fallthrough; default: epthru->numsgelements = mega_build_sglist(adapter, scb, &epthru->dataxferaddr, &epthru->dataxferlen); break; } return epthru; } static void __mega_runpendq(adapter_t *adapter) { scb_t *scb; struct list_head *pos, *next; /* Issue any pending commands to the card */ list_for_each_safe(pos, next, &adapter->pending_list) { scb = list_entry(pos, scb_t, list); if( !(scb->state & SCB_ISSUED) ) { if( issue_scb(adapter, scb) != 0 ) return; } } return; } /** * issue_scb() * @adapter: pointer to our soft state * @scb: scsi control block * * Post a command to the card if the mailbox is available, otherwise return * busy. We also take the scb from the pending list if the mailbox is * available. */ static int issue_scb(adapter_t *adapter, scb_t *scb) { volatile mbox64_t *mbox64 = adapter->mbox64; volatile mbox_t *mbox = adapter->mbox; unsigned int i = 0; if(unlikely(mbox->m_in.busy)) { do { udelay(1); i++; } while( mbox->m_in.busy && (i < max_mbox_busy_wait) ); if(mbox->m_in.busy) return -1; } /* Copy mailbox data into host structure */ memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox, sizeof(struct mbox_out)); mbox->m_out.cmdid = scb->idx; /* Set cmdid */ mbox->m_in.busy = 1; /* Set busy */ /* * Increment the pending queue counter */ atomic_inc(&adapter->pend_cmds); switch (mbox->m_out.cmd) { case MEGA_MBOXCMD_LREAD64: case MEGA_MBOXCMD_LWRITE64: case MEGA_MBOXCMD_PASSTHRU64: case MEGA_MBOXCMD_EXTPTHRU: mbox64->xfer_segment_lo = mbox->m_out.xferaddr; mbox64->xfer_segment_hi = 0; mbox->m_out.xferaddr = 0xFFFFFFFF; break; default: mbox64->xfer_segment_lo = 0; mbox64->xfer_segment_hi = 0; } /* * post the command */ scb->state |= SCB_ISSUED; if( likely(adapter->flag & BOARD_MEMMAP) ) { mbox->m_in.poll = 0; mbox->m_in.ack = 0; WRINDOOR(adapter, adapter->mbox_dma | 0x1); } else { irq_enable(adapter); issue_command(adapter); } return 0; } /* * Wait until the controller's mailbox is available */ static inline int mega_busywait_mbox (adapter_t *adapter) { if (adapter->mbox->m_in.busy) return __mega_busywait_mbox(adapter); return 0; } /** * issue_scb_block() * @adapter: pointer to our soft state * @raw_mbox: the mailbox * * Issue a scb in synchronous and non-interrupt mode */ static int issue_scb_block(adapter_t *adapter, u_char *raw_mbox) { volatile mbox64_t *mbox64 = adapter->mbox64; volatile mbox_t *mbox = adapter->mbox; u8 byte; /* Wait until mailbox is free */ if(mega_busywait_mbox (adapter)) goto bug_blocked_mailbox; /* Copy mailbox data into host structure */ memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out)); mbox->m_out.cmdid = 0xFE; mbox->m_in.busy = 1; switch (raw_mbox[0]) { case MEGA_MBOXCMD_LREAD64: case MEGA_MBOXCMD_LWRITE64: case MEGA_MBOXCMD_PASSTHRU64: case MEGA_MBOXCMD_EXTPTHRU: mbox64->xfer_segment_lo = mbox->m_out.xferaddr; mbox64->xfer_segment_hi = 0; mbox->m_out.xferaddr = 0xFFFFFFFF; break; default: mbox64->xfer_segment_lo = 0; mbox64->xfer_segment_hi = 0; } if( likely(adapter->flag & BOARD_MEMMAP) ) { mbox->m_in.poll = 0; 
mbox->m_in.ack = 0; mbox->m_in.numstatus = 0xFF; mbox->m_in.status = 0xFF; WRINDOOR(adapter, adapter->mbox_dma | 0x1); while((volatile u8)mbox->m_in.numstatus == 0xFF) cpu_relax(); mbox->m_in.numstatus = 0xFF; while( (volatile u8)mbox->m_in.poll != 0x77 ) cpu_relax(); mbox->m_in.poll = 0; mbox->m_in.ack = 0x77; WRINDOOR(adapter, adapter->mbox_dma | 0x2); while(RDINDOOR(adapter) & 0x2) cpu_relax(); } else { irq_disable(adapter); issue_command(adapter); while (!((byte = irq_state(adapter)) & INTR_VALID)) cpu_relax(); set_irq_state(adapter, byte); irq_enable(adapter); irq_ack(adapter); } return mbox->m_in.status; bug_blocked_mailbox: dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n"); udelay (1000); return -1; } /** * megaraid_isr_iomapped() * @irq: irq * @devp: pointer to our soft state * * Interrupt service routine for io-mapped controllers. * Find out if our device is interrupting. If yes, acknowledge the interrupt * and service the completed commands. */ static irqreturn_t megaraid_isr_iomapped(int irq, void *devp) { adapter_t *adapter = devp; unsigned long flags; u8 status; u8 nstatus; u8 completed[MAX_FIRMWARE_STATUS]; u8 byte; int handled = 0; /* * loop till F/W has more commands for us to complete. */ spin_lock_irqsave(&adapter->lock, flags); do { /* Check if a valid interrupt is pending */ byte = irq_state(adapter); if( (byte & VALID_INTR_BYTE) == 0 ) { /* * No more pending commands */ goto out_unlock; } set_irq_state(adapter, byte); while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) == 0xFF) cpu_relax(); adapter->mbox->m_in.numstatus = 0xFF; status = adapter->mbox->m_in.status; /* * decrement the pending queue counter */ atomic_sub(nstatus, &adapter->pend_cmds); memcpy(completed, (void *)adapter->mbox->m_in.completed, nstatus); /* Acknowledge interrupt */ irq_ack(adapter); mega_cmd_done(adapter, completed, nstatus, status); mega_rundoneq(adapter); handled = 1; /* Loop through any pending requests */ if(atomic_read(&adapter->quiescent) == 0) { mega_runpendq(adapter); } } while(1); out_unlock: spin_unlock_irqrestore(&adapter->lock, flags); return IRQ_RETVAL(handled); } /** * megaraid_isr_memmapped() * @irq: irq * @devp: pointer to our soft state * * Interrupt service routine for memory-mapped controllers. * Find out if our device is interrupting. If yes, acknowledge the interrupt * and service the completed commands. */ static irqreturn_t megaraid_isr_memmapped(int irq, void *devp) { adapter_t *adapter = devp; unsigned long flags; u8 status; u32 dword = 0; u8 nstatus; u8 completed[MAX_FIRMWARE_STATUS]; int handled = 0; /* * loop till F/W has more commands for us to complete. 
*/ spin_lock_irqsave(&adapter->lock, flags); do { /* Check if a valid interrupt is pending */ dword = RDOUTDOOR(adapter); if(dword != 0x10001234) { /* * No more pending commands */ goto out_unlock; } WROUTDOOR(adapter, 0x10001234); while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) == 0xFF) { cpu_relax(); } adapter->mbox->m_in.numstatus = 0xFF; status = adapter->mbox->m_in.status; /* * decrement the pending queue counter */ atomic_sub(nstatus, &adapter->pend_cmds); memcpy(completed, (void *)adapter->mbox->m_in.completed, nstatus); /* Acknowledge interrupt */ WRINDOOR(adapter, 0x2); handled = 1; while( RDINDOOR(adapter) & 0x02 ) cpu_relax(); mega_cmd_done(adapter, completed, nstatus, status); mega_rundoneq(adapter); /* Loop through any pending requests */ if(atomic_read(&adapter->quiescent) == 0) { mega_runpendq(adapter); } } while(1); out_unlock: spin_unlock_irqrestore(&adapter->lock, flags); return IRQ_RETVAL(handled); } /** * mega_cmd_done() * @adapter: pointer to our soft state * @completed: array of ids of completed commands * @nstatus: number of completed commands * @status: status of the last command completed * * Complete the commands and call the scsi mid-layer callback hooks. */ static void mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) { mega_ext_passthru *epthru = NULL; struct scatterlist *sgl; struct scsi_cmnd *cmd = NULL; mega_passthru *pthru = NULL; mbox_t *mbox = NULL; u8 c; scb_t *scb; int islogical; int cmdid; int i; /* * for all the commands completed, call the mid-layer callback routine * and free the scb. */ for( i = 0; i < nstatus; i++ ) { cmdid = completed[i]; /* * Only free SCBs for the commands coming down from the * mid-layer, not for which were issued internally * * For internal command, restore the status returned by the * firmware so that user can interpret it. */ if (cmdid == CMDID_INT_CMDS) { scb = &adapter->int_scb; cmd = scb->cmd; list_del_init(&scb->list); scb->state = SCB_FREE; adapter->int_status = status; complete(&adapter->int_waitq); } else { scb = &adapter->scb_list[cmdid]; /* * Make sure f/w has completed a valid command */ if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) { dev_crit(&adapter->dev->dev, "invalid command " "Id %d, scb->state:%x, scsi cmd:%p\n", cmdid, scb->state, scb->cmd); continue; } /* * Was a abort issued for this command */ if( scb->state & SCB_ABORT ) { dev_warn(&adapter->dev->dev, "aborted cmd [%x] complete\n", scb->idx); scb->cmd->result = (DID_ABORT << 16); list_add_tail(SCSI_LIST(scb->cmd), &adapter->completed_list); mega_free_scb(adapter, scb); continue; } /* * Was a reset issued for this command */ if( scb->state & SCB_RESET ) { dev_warn(&adapter->dev->dev, "reset cmd [%x] complete\n", scb->idx); scb->cmd->result = (DID_RESET << 16); list_add_tail(SCSI_LIST(scb->cmd), &adapter->completed_list); mega_free_scb (adapter, scb); continue; } cmd = scb->cmd; pthru = scb->pthru; epthru = scb->epthru; mbox = (mbox_t *)scb->raw_mbox; #if MEGA_HAVE_STATS { int logdrv = mbox->m_out.logdrv; islogical = adapter->logdrv_chan[cmd->channel]; /* * Maintain an error counter for the logical drive. 
* Some application like SNMP agent need such * statistics */ if( status && islogical && (cmd->cmnd[0] == READ_6 || cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12)) { /* * Logical drive number increases by 0x80 when * a logical drive is deleted */ adapter->rd_errors[logdrv%0x80]++; } if( status && islogical && (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 || cmd->cmnd[0] == WRITE_12)) { /* * Logical drive number increases by 0x80 when * a logical drive is deleted */ adapter->wr_errors[logdrv%0x80]++; } } #endif } /* * Do not return the presence of hard disk on the channel so, * inquiry sent, and returned data==hard disk or removable * hard disk and not logical, request should return failure! - * PJ */ islogical = adapter->logdrv_chan[cmd->device->channel]; if( cmd->cmnd[0] == INQUIRY && !islogical ) { sgl = scsi_sglist(cmd); if( sg_page(sgl) ) { c = *(unsigned char *) sg_virt(&sgl[0]); } else { dev_warn(&adapter->dev->dev, "invalid sg\n"); c = 0; } if(IS_RAID_CH(adapter, cmd->device->channel) && ((c & 0x1F ) == TYPE_DISK)) { status = 0xF0; } } /* clear result; otherwise, success returns corrupt value */ cmd->result = 0; /* Convert MegaRAID status to Linux error code */ switch (status) { case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ cmd->result |= (DID_OK << 16); break; case 0x02: /* ERROR_ABORTED, i.e. SCSI_STATUS_CHECK_CONDITION */ /* set sense_buffer and result fields */ if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU || mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) { memcpy(cmd->sense_buffer, pthru->reqsensearea, 14); cmd->result = SAM_STAT_CHECK_CONDITION; } else { if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) { memcpy(cmd->sense_buffer, epthru->reqsensearea, 14); cmd->result = SAM_STAT_CHECK_CONDITION; } else scsi_build_sense(cmd, 0, ABORTED_COMMAND, 0, 0); } break; case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. SCSI_STATUS_BUSY */ cmd->result |= (DID_BUS_BUSY << 16) | status; break; default: #if MEGA_HAVE_CLUSTERING /* * If TEST_UNIT_READY fails, we know * MEGA_RESERVATION_STATUS failed */ if( cmd->cmnd[0] == TEST_UNIT_READY ) { cmd->result |= (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT; } else /* * Error code returned is 1 if Reserve or Release * failed or the input parameter is invalid */ if( status == 1 && (cmd->cmnd[0] == RESERVE || cmd->cmnd[0] == RELEASE) ) { cmd->result |= (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT; } else #endif cmd->result |= (DID_BAD_TARGET << 16)|status; } mega_free_scb(adapter, scb); /* Add Scsi_Command to end of completed queue */ list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); } } /* * mega_runpendq() * * Run through the list of completed requests and finish it */ static void mega_rundoneq (adapter_t *adapter) { struct megaraid_cmd_priv *cmd_priv; list_for_each_entry(cmd_priv, &adapter->completed_list, entry) scsi_done(megaraid_to_scsi_cmd(cmd_priv)); INIT_LIST_HEAD(&adapter->completed_list); } /* * Free a SCB structure * Note: We assume the scsi commands associated with this scb is not free yet. 
*/ static void mega_free_scb(adapter_t *adapter, scb_t *scb) { switch( scb->dma_type ) { case MEGA_DMA_TYPE_NONE: break; case MEGA_SGLIST: scsi_dma_unmap(scb->cmd); break; default: break; } /* * Remove from the pending list */ list_del_init(&scb->list); /* Link the scb back into free list */ scb->state = SCB_FREE; scb->cmd = NULL; list_add(&scb->list, &adapter->free_list); } static int __mega_busywait_mbox (adapter_t *adapter) { volatile mbox_t *mbox = adapter->mbox; long counter; for (counter = 0; counter < 10000; counter++) { if (!mbox->m_in.busy) return 0; udelay(100); cond_resched(); } return -1; /* give up after 1 second */ } /* * Copies data to SGLIST * Note: For 64 bit cards, we need a minimum of one SG element for read/write */ static int mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) { struct scatterlist *sg; struct scsi_cmnd *cmd; int sgcnt; int idx; cmd = scb->cmd; /* * Copy Scatter-Gather list info into controller structure. * * The number of sg elements returned must not exceed our limit */ sgcnt = scsi_dma_map(cmd); scb->dma_type = MEGA_SGLIST; BUG_ON(sgcnt > adapter->sglen || sgcnt < 0); *len = 0; if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) { sg = scsi_sglist(cmd); scb->dma_h_bulkdata = sg_dma_address(sg); *buf = (u32)scb->dma_h_bulkdata; *len = sg_dma_len(sg); return 0; } scsi_for_each_sg(cmd, sg, sgcnt, idx) { if (adapter->has_64bit_addr) { scb->sgl64[idx].address = sg_dma_address(sg); *len += scb->sgl64[idx].length = sg_dma_len(sg); } else { scb->sgl[idx].address = sg_dma_address(sg); *len += scb->sgl[idx].length = sg_dma_len(sg); } } /* Reset pointer and length fields */ *buf = scb->sgl_dma_addr; /* Return count of SG requests */ return sgcnt; } /* * mega_8_to_40ld() * * takes all info in AdapterInquiry structure and puts it into ProductInfo and * Enquiry3 structures for later use */ static void mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, mega_product_info *product_info) { int i; product_info->max_commands = inquiry->adapter_info.max_commands; enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; product_info->nchannels = inquiry->adapter_info.nchannels; for (i = 0; i < 4; i++) { product_info->fw_version[i] = inquiry->adapter_info.fw_version[i]; product_info->bios_version[i] = inquiry->adapter_info.bios_version[i]; } enquiry3->cache_flush_interval = inquiry->adapter_info.cache_flush_interval; product_info->dram_size = inquiry->adapter_info.dram_size; enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; } for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; } static inline void mega_free_sgl(adapter_t *adapter) { scb_t *scb; int i; for(i = 0; i < adapter->max_cmds; i++) { scb = &adapter->scb_list[i]; if( scb->sgl64 ) { dma_free_coherent(&adapter->dev->dev, sizeof(mega_sgl64) * adapter->sglen, scb->sgl64, scb->sgl_dma_addr); scb->sgl64 = NULL; } if( scb->pthru ) { dma_free_coherent(&adapter->dev->dev, sizeof(mega_passthru), scb->pthru, scb->pthru_dma_addr); scb->pthru = NULL; } if( scb->epthru ) { dma_free_coherent(&adapter->dev->dev, sizeof(mega_ext_passthru), scb->epthru, scb->epthru_dma_addr); scb->epthru = NULL; } } } /* * Get information about the card/driver */ const char * megaraid_info(struct Scsi_Host *host) { static char 
buffer[512]; adapter_t *adapter; adapter = (adapter_t *)host->hostdata; sprintf (buffer, "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", adapter->fw_version, adapter->product_info.max_commands, adapter->host->max_id, adapter->host->max_channel, (u32)adapter->host->max_lun); return buffer; } /* * Abort a previous SCSI request. Only commands on the pending list can be * aborted. All the commands issued to the F/W must complete. */ static int megaraid_abort(struct scsi_cmnd *cmd) { adapter_t *adapter; int rval; adapter = (adapter_t *)cmd->device->host->hostdata; rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT); /* * This is required here to complete any completed requests * to be communicated over to the mid layer. */ mega_rundoneq(adapter); return rval; } static int megaraid_reset(struct scsi_cmnd *cmd) { adapter_t *adapter; megacmd_t mc; int rval; adapter = (adapter_t *)cmd->device->host->hostdata; #if MEGA_HAVE_CLUSTERING mc.cmd = MEGA_CLUSTER_CMD; mc.opcode = MEGA_RESET_RESERVATIONS; if( mega_internal_command(adapter, &mc, NULL) != 0 ) { dev_warn(&adapter->dev->dev, "reservation reset failed\n"); } else { dev_info(&adapter->dev->dev, "reservation reset\n"); } #endif spin_lock_irq(&adapter->lock); rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET); /* * This is required here to complete any completed requests * to be communicated over to the mid layer. */ mega_rundoneq(adapter); spin_unlock_irq(&adapter->lock); return rval; } /** * megaraid_abort_and_reset() * @adapter: megaraid soft state * @cmd: scsi command to be aborted or reset * @aor: abort or reset flag * * Try to locate the scsi command in the pending queue. If found and is not * issued to the controller, abort/reset it. Otherwise return failure */ static int megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor) { struct list_head *pos, *next; scb_t *scb; dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n", (aor == SCB_ABORT)? "ABORTING":"RESET", cmd->cmnd[0], cmd->device->channel, cmd->device->id, (u32)cmd->device->lun); if(list_empty(&adapter->pending_list)) return FAILED; list_for_each_safe(pos, next, &adapter->pending_list) { scb = list_entry(pos, scb_t, list); if (scb->cmd == cmd) { /* Found command */ scb->state |= aor; /* * Check if this command has firmware ownership. If * yes, we cannot reset this command. Whenever f/w * completes this command, we will return appropriate * status from ISR. */ if( scb->state & SCB_ISSUED ) { dev_warn(&adapter->dev->dev, "%s[%x], fw owner\n", (aor==SCB_ABORT) ? "ABORTING":"RESET", scb->idx); return FAILED; } else { /* * Not yet issued! Remove from the pending * list */ dev_warn(&adapter->dev->dev, "%s-[%x], driver owner\n", (aor==SCB_ABORT) ? 
"ABORTING":"RESET", scb->idx); mega_free_scb(adapter, scb); if( aor == SCB_ABORT ) { cmd->result = (DID_ABORT << 16); } else { cmd->result = (DID_RESET << 16); } list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); return SUCCESS; } } } return FAILED; } static inline int make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) { *pdev = pci_alloc_dev(NULL); if( *pdev == NULL ) return -1; memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) { kfree(*pdev); return -1; } return 0; } static inline void free_local_pdev(struct pci_dev *pdev) { kfree(pdev); } /** * mega_allocate_inquiry() * @dma_handle: handle returned for dma address * @pdev: handle to pci device * * allocates memory for inquiry structure */ static inline void * mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) { return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3), dma_handle, GFP_KERNEL); } static inline void mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) { dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry, dma_handle); } #ifdef CONFIG_PROC_FS /* Following code handles /proc fs */ /** * proc_show_config() * @m: Synthetic file construction data * @v: File iterator * * Display configuration information about the controller. */ static int proc_show_config(struct seq_file *m, void *v) { adapter_t *adapter = m->private; seq_puts(m, MEGARAID_VERSION); if(adapter->product_info.product_name[0]) seq_printf(m, "%s\n", adapter->product_info.product_name); seq_puts(m, "Controller Type: "); if( adapter->flag & BOARD_MEMMAP ) seq_puts(m, "438/466/467/471/493/518/520/531/532\n"); else seq_puts(m, "418/428/434\n"); if(adapter->flag & BOARD_40LD) seq_puts(m, "Controller Supports 40 Logical Drives\n"); if(adapter->flag & BOARD_64BIT) seq_puts(m, "Controller capable of 64-bit memory addressing\n"); if( adapter->has_64bit_addr ) seq_puts(m, "Controller using 64-bit memory addressing\n"); else seq_puts(m, "Controller is not using 64-bit memory addressing\n"); seq_printf(m, "Base = %08lx, Irq = %d, ", adapter->base, adapter->host->irq); seq_printf(m, "Logical Drives = %d, Channels = %d\n", adapter->numldrv, adapter->product_info.nchannels); seq_printf(m, "Version =%s:%s, DRAM = %dMb\n", adapter->fw_version, adapter->bios_version, adapter->product_info.dram_size); seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n", adapter->product_info.max_commands, adapter->max_cmds); seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb); seq_printf(m, "support_random_del = %d\n", adapter->support_random_del); seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled); seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv); seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled); seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch); seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt); seq_printf(m, "quiescent = %d\n", atomic_read(&adapter->quiescent)); seq_printf(m, "has_cluster = %d\n", adapter->has_cluster); seq_puts(m, "\nModule Parameters:\n"); seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun); seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io); return 0; } /** * proc_show_stat() * @m: Synthetic file construction data * @v: File iterator * * Display statistical information about the I/O activity. 
*/ static int proc_show_stat(struct seq_file *m, void *v) { adapter_t *adapter = m->private; #if MEGA_HAVE_STATS int i; #endif seq_puts(m, "Statistical Information for this controller\n"); seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds)); #if MEGA_HAVE_STATS for(i = 0; i < adapter->numldrv; i++) { seq_printf(m, "Logical Drive %d:\n", i); seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n", adapter->nreads[i], adapter->nwrites[i]); seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n", adapter->nreadblocks[i], adapter->nwriteblocks[i]); seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n", adapter->rd_errors[i], adapter->wr_errors[i]); } #else seq_puts(m, "IO and error counters not compiled in driver.\n"); #endif return 0; } /** * proc_show_mbox() * @m: Synthetic file construction data * @v: File iterator * * Display mailbox information for the last command issued. This information * is good for debugging. */ static int proc_show_mbox(struct seq_file *m, void *v) { adapter_t *adapter = m->private; volatile mbox_t *mbox = adapter->mbox; seq_puts(m, "Contents of Mail Box Structure\n"); seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd); seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid); seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors); seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba); seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr); seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv); seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements); seq_printf(m, " Busy = %01x\n", mbox->m_in.busy); seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status); return 0; } /** * proc_show_rebuild_rate() * @m: Synthetic file construction data * @v: File iterator * * Display current rebuild rate */ static int proc_show_rebuild_rate(struct seq_file *m, void *v) { adapter_t *adapter = m->private; dma_addr_t dma_handle; caddr_t inquiry; struct pci_dev *pdev; if( make_local_pdev(adapter, &pdev) != 0 ) return 0; if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { seq_puts(m, "Adapter inquiry failed.\n"); dev_warn(&adapter->dev->dev, "inquiry failed\n"); goto free_inquiry; } if( adapter->flag & BOARD_40LD ) seq_printf(m, "Rebuild Rate: [%d%%]\n", ((mega_inquiry3 *)inquiry)->rebuild_rate); else seq_printf(m, "Rebuild Rate: [%d%%]\n", ((mraid_ext_inquiry *) inquiry)->raid_inq.adapter_info.rebuild_rate); free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); free_pdev: free_local_pdev(pdev); return 0; } /** * proc_show_battery() * @m: Synthetic file construction data * @v: File iterator * * Display information about the battery module on the controller. 
*/ static int proc_show_battery(struct seq_file *m, void *v) { adapter_t *adapter = m->private; dma_addr_t dma_handle; caddr_t inquiry; struct pci_dev *pdev; u8 battery_status; if( make_local_pdev(adapter, &pdev) != 0 ) return 0; if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { seq_puts(m, "Adapter inquiry failed.\n"); dev_warn(&adapter->dev->dev, "inquiry failed\n"); goto free_inquiry; } if( adapter->flag & BOARD_40LD ) { battery_status = ((mega_inquiry3 *)inquiry)->battery_status; } else { battery_status = ((mraid_ext_inquiry *)inquiry)-> raid_inq.adapter_info.battery_status; } /* * Decode the battery status */ seq_printf(m, "Battery Status:[%d]", battery_status); if(battery_status == MEGA_BATT_CHARGE_DONE) seq_puts(m, " Charge Done"); if(battery_status & MEGA_BATT_MODULE_MISSING) seq_puts(m, " Module Missing"); if(battery_status & MEGA_BATT_LOW_VOLTAGE) seq_puts(m, " Low Voltage"); if(battery_status & MEGA_BATT_TEMP_HIGH) seq_puts(m, " Temperature High"); if(battery_status & MEGA_BATT_PACK_MISSING) seq_puts(m, " Pack Missing"); if(battery_status & MEGA_BATT_CHARGE_INPROG) seq_puts(m, " Charge In-progress"); if(battery_status & MEGA_BATT_CHARGE_FAIL) seq_puts(m, " Charge Fail"); if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) seq_puts(m, " Cycles Exceeded"); seq_putc(m, '\n'); free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); free_pdev: free_local_pdev(pdev); return 0; } /* * Display scsi inquiry */ static void mega_print_inquiry(struct seq_file *m, char *scsi_inq) { int i; seq_puts(m, " Vendor: "); seq_write(m, scsi_inq + 8, 8); seq_puts(m, " Model: "); seq_write(m, scsi_inq + 16, 16); seq_puts(m, " Rev: "); seq_write(m, scsi_inq + 32, 4); seq_putc(m, '\n'); i = scsi_inq[0] & 0x1f; seq_printf(m, " Type: %s ", scsi_device_type(i)); seq_printf(m, " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) seq_puts(m, " CCS\n"); else seq_putc(m, '\n'); } /** * proc_show_pdrv() * @m: Synthetic file construction data * @adapter: pointer to our soft state * @channel: channel * * Display information about the physical drives. 
*/ static int proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) { dma_addr_t dma_handle; char *scsi_inq; dma_addr_t scsi_inq_dma_handle; caddr_t inquiry; struct pci_dev *pdev; u8 *pdrv_state; u8 state; int tgt; int max_channels; int i; if( make_local_pdev(adapter, &pdev) != 0 ) return 0; if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { seq_puts(m, "Adapter inquiry failed.\n"); dev_warn(&adapter->dev->dev, "inquiry failed\n"); goto free_inquiry; } scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, GFP_KERNEL); if( scsi_inq == NULL ) { seq_puts(m, "memory not available for scsi inq.\n"); goto free_inquiry; } if( adapter->flag & BOARD_40LD ) { pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; } else { pdrv_state = ((mraid_ext_inquiry *)inquiry)-> raid_inq.pdrv_info.pdrv_state; } max_channels = adapter->product_info.nchannels; if( channel >= max_channels ) { goto free_pci; } for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { i = channel*16 + tgt; state = *(pdrv_state + i); switch( state & 0x0F ) { case PDRV_ONLINE: seq_printf(m, "Channel:%2d Id:%2d State: Online", channel, tgt); break; case PDRV_FAILED: seq_printf(m, "Channel:%2d Id:%2d State: Failed", channel, tgt); break; case PDRV_RBLD: seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", channel, tgt); break; case PDRV_HOTSPARE: seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", channel, tgt); break; default: seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", channel, tgt); break; } /* * This interface displays inquiries for disk drives * only. Inquries for logical drives and non-disk * devices are available through /proc/scsi/scsi */ memset(scsi_inq, 0, 256); if( mega_internal_dev_inquiry(adapter, channel, tgt, scsi_inq_dma_handle) || (scsi_inq[0] & 0x1F) != TYPE_DISK ) { continue; } /* * Check for overflow. We print less than 240 * characters for inquiry */ seq_puts(m, ".\n"); mega_print_inquiry(m, scsi_inq); } free_pci: dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); free_pdev: free_local_pdev(pdev); return 0; } /** * proc_show_pdrv_ch0() * @m: Synthetic file construction data * @v: File iterator * * Display information about the physical drives on physical channel 0. */ static int proc_show_pdrv_ch0(struct seq_file *m, void *v) { return proc_show_pdrv(m, m->private, 0); } /** * proc_show_pdrv_ch1() * @m: Synthetic file construction data * @v: File iterator * * Display information about the physical drives on physical channel 1. */ static int proc_show_pdrv_ch1(struct seq_file *m, void *v) { return proc_show_pdrv(m, m->private, 1); } /** * proc_show_pdrv_ch2() * @m: Synthetic file construction data * @v: File iterator * * Display information about the physical drives on physical channel 2. */ static int proc_show_pdrv_ch2(struct seq_file *m, void *v) { return proc_show_pdrv(m, m->private, 2); } /** * proc_show_pdrv_ch3() * @m: Synthetic file construction data * @v: File iterator * * Display information about the physical drives on physical channel 3. 
*/ static int proc_show_pdrv_ch3(struct seq_file *m, void *v) { return proc_show_pdrv(m, m->private, 3); } /** * proc_show_rdrv() * @m: Synthetic file construction data * @adapter: pointer to our soft state * @start: starting logical drive to display * @end: ending logical drive to display * * We do not print the inquiry information since its already available through * /proc/scsi/scsi interface */ static int proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) { dma_addr_t dma_handle; logdrv_param *lparam; megacmd_t mc; char *disk_array; dma_addr_t disk_array_dma_handle; caddr_t inquiry; struct pci_dev *pdev; u8 *rdrv_state; int num_ldrv; u32 array_sz; int i; if( make_local_pdev(adapter, &pdev) != 0 ) return 0; if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { seq_puts(m, "Adapter inquiry failed.\n"); dev_warn(&adapter->dev->dev, "inquiry failed\n"); goto free_inquiry; } memset(&mc, 0, sizeof(megacmd_t)); if( adapter->flag & BOARD_40LD ) { array_sz = sizeof(disk_array_40ld); rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; } else { array_sz = sizeof(disk_array_8ld); rdrv_state = ((mraid_ext_inquiry *)inquiry)-> raid_inq.logdrv_info.ldrv_state; num_ldrv = ((mraid_ext_inquiry *)inquiry)-> raid_inq.logdrv_info.num_ldrv; } disk_array = dma_alloc_coherent(&pdev->dev, array_sz, &disk_array_dma_handle, GFP_KERNEL); if( disk_array == NULL ) { seq_puts(m, "memory not available.\n"); goto free_inquiry; } mc.xferaddr = (u32)disk_array_dma_handle; if( adapter->flag & BOARD_40LD ) { mc.cmd = FC_NEW_CONFIG; mc.opcode = OP_DCMD_READ_CONFIG; if( mega_internal_command(adapter, &mc, NULL) ) { seq_puts(m, "40LD read config failed.\n"); goto free_pci; } } else { mc.cmd = NEW_READ_CONFIG_8LD; if( mega_internal_command(adapter, &mc, NULL) ) { mc.cmd = READ_CONFIG_8LD; if( mega_internal_command(adapter, &mc, NULL) ) { seq_puts(m, "8LD read config failed.\n"); goto free_pci; } } } for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { if( adapter->flag & BOARD_40LD ) { lparam = &((disk_array_40ld *)disk_array)->ldrv[i].lparam; } else { lparam = &((disk_array_8ld *)disk_array)->ldrv[i].lparam; } /* * Check for overflow. We print less than 240 characters for * information about each logical drive. */ seq_printf(m, "Logical drive:%2d:, ", i); switch( rdrv_state[i] & 0x0F ) { case RDRV_OFFLINE: seq_puts(m, "state: offline"); break; case RDRV_DEGRADED: seq_puts(m, "state: degraded"); break; case RDRV_OPTIMAL: seq_puts(m, "state: optimal"); break; case RDRV_DELETED: seq_puts(m, "state: deleted"); break; default: seq_puts(m, "state: unknown"); break; } /* * Check if check consistency or initialization is going on * for this logical drive. */ if( (rdrv_state[i] & 0xF0) == 0x20 ) seq_puts(m, ", check-consistency in progress"); else if( (rdrv_state[i] & 0xF0) == 0x10 ) seq_puts(m, ", initialization in progress"); seq_putc(m, '\n'); seq_printf(m, "Span depth:%3d, ", lparam->span_depth); seq_printf(m, "RAID level:%3d, ", lparam->level); seq_printf(m, "Stripe size:%3d, ", lparam->stripe_sz ? 
lparam->stripe_sz/2: 128); seq_printf(m, "Row size:%3d\n", lparam->row_size); seq_puts(m, "Read Policy: "); switch(lparam->read_ahead) { case NO_READ_AHEAD: seq_puts(m, "No read ahead, "); break; case READ_AHEAD: seq_puts(m, "Read ahead, "); break; case ADAP_READ_AHEAD: seq_puts(m, "Adaptive, "); break; } seq_puts(m, "Write Policy: "); switch(lparam->write_mode) { case WRMODE_WRITE_THRU: seq_puts(m, "Write thru, "); break; case WRMODE_WRITE_BACK: seq_puts(m, "Write back, "); break; } seq_puts(m, "Cache Policy: "); switch(lparam->direct_io) { case CACHED_IO: seq_puts(m, "Cached IO\n\n"); break; case DIRECT_IO: seq_puts(m, "Direct IO\n\n"); break; } } free_pci: dma_free_coherent(&pdev->dev, array_sz, disk_array, disk_array_dma_handle); free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); free_pdev: free_local_pdev(pdev); return 0; } /** * proc_show_rdrv_10() * @m: Synthetic file construction data * @v: File iterator * * Display real time information about the logical drives 0 through 9. */ static int proc_show_rdrv_10(struct seq_file *m, void *v) { return proc_show_rdrv(m, m->private, 0, 9); } /** * proc_show_rdrv_20() * @m: Synthetic file construction data * @v: File iterator * * Display real time information about the logical drives 10 through 19. */ static int proc_show_rdrv_20(struct seq_file *m, void *v) { return proc_show_rdrv(m, m->private, 10, 19); } /** * proc_show_rdrv_30() * @m: Synthetic file construction data * @v: File iterator * * Display real time information about the logical drives 20 through 29. */ static int proc_show_rdrv_30(struct seq_file *m, void *v) { return proc_show_rdrv(m, m->private, 20, 29); } /** * proc_show_rdrv_40() * @m: Synthetic file construction data * @v: File iterator * * Display real time information about the logical drives 30 through 39. */ static int proc_show_rdrv_40(struct seq_file *m, void *v) { return proc_show_rdrv(m, m->private, 30, 39); } /** * mega_create_proc_entry() * @index: index in soft state array * @parent: parent node for this /proc entry * * Creates /proc entries for our controllers. 
*/ static void mega_create_proc_entry(int index, struct proc_dir_entry *parent) { adapter_t *adapter = hba_soft_state[index]; struct proc_dir_entry *dir; u8 string[16]; sprintf(string, "hba%d", adapter->host->host_no); dir = proc_mkdir_data(string, 0, parent, adapter); if (!dir) { dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); return; } proc_create_single_data("config", S_IRUSR, dir, proc_show_config, adapter); proc_create_single_data("stat", S_IRUSR, dir, proc_show_stat, adapter); proc_create_single_data("mailbox", S_IRUSR, dir, proc_show_mbox, adapter); #if MEGA_HAVE_ENH_PROC proc_create_single_data("rebuild-rate", S_IRUSR, dir, proc_show_rebuild_rate, adapter); proc_create_single_data("battery-status", S_IRUSR, dir, proc_show_battery, adapter); proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, proc_show_pdrv_ch0, adapter); proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, proc_show_pdrv_ch1, adapter); proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, proc_show_pdrv_ch2, adapter); proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, proc_show_pdrv_ch3, adapter); proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, proc_show_rdrv_10, adapter); proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, proc_show_rdrv_20, adapter); proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, proc_show_rdrv_30, adapter); proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, proc_show_rdrv_40, adapter); #endif } #else static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) { } #endif /* * megaraid_biosparam() * * Return the disk geometry for a particular disk */ static int megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { adapter_t *adapter; int heads; int sectors; int cylinders; /* Get pointer to host config structure */ adapter = (adapter_t *)sdev->host->hostdata; if (IS_RAID_CH(adapter, sdev->channel)) { /* Default heads (64) & sectors (32) */ heads = 64; sectors = 32; cylinders = (ulong)capacity / (heads * sectors); /* * Handle extended translation size for logical drives * > 1Gb */ if ((ulong)capacity >= 0x200000) { heads = 255; sectors = 63; cylinders = (ulong)capacity / (heads * sectors); } /* return result */ geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; } else { if (scsi_partsize(bdev, capacity, geom)) return 0; dev_info(&adapter->dev->dev, "invalid partition on this disk on channel %d\n", sdev->channel); /* Default heads (64) & sectors (32) */ heads = 64; sectors = 32; cylinders = (ulong)capacity / (heads * sectors); /* Handle extended translation size for logical drives > 1Gb */ if ((ulong)capacity >= 0x200000) { heads = 255; sectors = 63; cylinders = (ulong)capacity / (heads * sectors); } /* return result */ geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; } return 0; } /** * mega_init_scb() * @adapter: pointer to our soft state * * Allocate memory for the various pointers in the scb structures: * scatter-gather list pointer, passthru and extended passthru structure * pointers. 
*/ static int mega_init_scb(adapter_t *adapter) { scb_t *scb; int i; for( i = 0; i < adapter->max_cmds; i++ ) { scb = &adapter->scb_list[i]; scb->sgl64 = NULL; scb->sgl = NULL; scb->pthru = NULL; scb->epthru = NULL; } for( i = 0; i < adapter->max_cmds; i++ ) { scb = &adapter->scb_list[i]; scb->idx = i; scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, sizeof(mega_sgl64) * adapter->sglen, &scb->sgl_dma_addr, GFP_KERNEL); scb->sgl = (mega_sglist *)scb->sgl64; if( !scb->sgl ) { dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); mega_free_sgl(adapter); return -1; } scb->pthru = dma_alloc_coherent(&adapter->dev->dev, sizeof(mega_passthru), &scb->pthru_dma_addr, GFP_KERNEL); if( !scb->pthru ) { dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); mega_free_sgl(adapter); return -1; } scb->epthru = dma_alloc_coherent(&adapter->dev->dev, sizeof(mega_ext_passthru), &scb->epthru_dma_addr, GFP_KERNEL); if( !scb->epthru ) { dev_warn(&adapter->dev->dev, "Can't allocate extended passthru\n"); mega_free_sgl(adapter); return -1; } scb->dma_type = MEGA_DMA_TYPE_NONE; /* * Link to free list * lock not required since we are loading the driver, so no * commands possible right now. */ scb->state = SCB_FREE; scb->cmd = NULL; list_add(&scb->list, &adapter->free_list); } return 0; } /** * megadev_open() * @inode: unused * @filep: unused * * Routines for the character/ioctl interface to the driver. Find out if this * is a valid open. */ static int megadev_open (struct inode *inode, struct file *filep) { /* * Only allow superuser to access private ioctl interface */ if( !capable(CAP_SYS_ADMIN) ) return -EACCES; return 0; } /** * megadev_ioctl() * @filep: Our device file * @cmd: ioctl command * @arg: user buffer * * ioctl entry point for our private ioctl interface. We move the data in from * the user space, prepare the command (if necessary, convert the old MIMD * ioctl to new ioctl command), and issue a synchronous command to the * controller. */ static int megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { adapter_t *adapter; nitioctl_t uioc; int adapno; int rval; mega_passthru __user *upthru; /* user address for passthru */ mega_passthru *pthru; /* copy user passthru here */ dma_addr_t pthru_dma_hndl; void *data = NULL; /* data to be transferred */ dma_addr_t data_dma_hndl; /* dma handle for data xfer area */ megacmd_t mc; #if MEGA_HAVE_STATS megastat_t __user *ustats = NULL; int num_ldrv = 0; #endif u32 uxferaddr = 0; struct pci_dev *pdev; /* * Make sure only USCSICMD are issued through this interface. * MIMD application would still fire different command. */ if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) { return -EINVAL; } /* * Check and convert a possible MIMD command to NIT command. * mega_m_to_n() copies the data from the user space, so we do not * have to do it here. * NOTE: We will need some user address to copyout the data, therefore * the inteface layer will also provide us with the required user * addresses. */ memset(&uioc, 0, sizeof(nitioctl_t)); if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 ) return rval; switch( uioc.opcode ) { case GET_DRIVER_VER: if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) ) return (-EFAULT); break; case GET_N_ADAP: if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) ) return (-EFAULT); /* * Shucks. MIMD interface returns a positive value for number * of adapters. TODO: Change it to return 0 when there is no * applicatio using mimd interface. 
*/ return hba_count; case GET_ADAP_INFO: /* * Which adapter */ if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) return (-ENODEV); if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, sizeof(struct mcontroller)) ) return (-EFAULT); break; #if MEGA_HAVE_STATS case GET_STATS: /* * Which adapter */ if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) return (-ENODEV); adapter = hba_soft_state[adapno]; ustats = uioc.uioc_uaddr; if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) return (-EFAULT); /* * Check for the validity of the logical drive number */ if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; if( copy_to_user(ustats->nreads, adapter->nreads, num_ldrv*sizeof(u32)) ) return -EFAULT; if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, num_ldrv*sizeof(u32)) ) return -EFAULT; if( copy_to_user(ustats->nwrites, adapter->nwrites, num_ldrv*sizeof(u32)) ) return -EFAULT; if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, num_ldrv*sizeof(u32)) ) return -EFAULT; if( copy_to_user(ustats->rd_errors, adapter->rd_errors, num_ldrv*sizeof(u32)) ) return -EFAULT; if( copy_to_user(ustats->wr_errors, adapter->wr_errors, num_ldrv*sizeof(u32)) ) return -EFAULT; return 0; #endif case MBOX_CMD: /* * Which adapter */ if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) return (-ENODEV); adapter = hba_soft_state[adapno]; /* * Deletion of logical drive is a special case. The adapter * should be quiescent before this command is issued. */ if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { /* * Do we support this feature */ if( !adapter->support_random_del ) { dev_warn(&adapter->dev->dev, "logdrv " "delete on non-supporting F/W\n"); return (-EINVAL); } rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); if( rval == 0 ) { memset(&mc, 0, sizeof(megacmd_t)); mc.status = rval; rval = mega_n_to_m((void __user *)arg, &mc); } return rval; } /* * This interface only support the regular passthru commands. * Reject extended passthru and 64-bit passthru */ if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { dev_warn(&adapter->dev->dev, "rejected passthru\n"); return (-EINVAL); } /* * For all internal commands, the buffer must be allocated in * <4GB address range */ if( make_local_pdev(adapter, &pdev) != 0 ) return -EIO; /* Is it a passthru command or a DCMD */ if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { /* Passthru commands */ pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), &pthru_dma_hndl, GFP_KERNEL); if( pthru == NULL ) { free_local_pdev(pdev); return (-ENOMEM); } /* * The user passthru structure */ upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; /* * Copy in the user passthru here. 
*/ if( copy_from_user(pthru, upthru, sizeof(mega_passthru)) ) { dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, pthru_dma_hndl); free_local_pdev(pdev); return (-EFAULT); } /* * Is there a data transfer */ if( pthru->dataxferlen ) { data = dma_alloc_coherent(&pdev->dev, pthru->dataxferlen, &data_dma_hndl, GFP_KERNEL); if( data == NULL ) { dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, pthru_dma_hndl); free_local_pdev(pdev); return (-ENOMEM); } /* * Save the user address and point the kernel * address at just allocated memory */ uxferaddr = pthru->dataxferaddr; pthru->dataxferaddr = data_dma_hndl; } /* * Is data coming down-stream */ if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { /* * Get the user data */ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, pthru->dataxferlen) ) { rval = (-EFAULT); goto freemem_and_return; } } memset(&mc, 0, sizeof(megacmd_t)); mc.cmd = MEGA_MBOXCMD_PASSTHRU; mc.xferaddr = (u32)pthru_dma_hndl; /* * Issue the command */ mega_internal_command(adapter, &mc, pthru); rval = mega_n_to_m((void __user *)arg, &mc); if( rval ) goto freemem_and_return; /* * Is data going up-stream */ if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, pthru->dataxferlen) ) { rval = (-EFAULT); } } /* * Send the request sense data also, irrespective of * whether the user has asked for it or not. */ if (copy_to_user(upthru->reqsensearea, pthru->reqsensearea, 14)) rval = -EFAULT; freemem_and_return: if( pthru->dataxferlen ) { dma_free_coherent(&pdev->dev, pthru->dataxferlen, data, data_dma_hndl); } dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, pthru_dma_hndl); free_local_pdev(pdev); return rval; } else { /* DCMD commands */ /* * Is there a data transfer */ if( uioc.xferlen ) { data = dma_alloc_coherent(&pdev->dev, uioc.xferlen, &data_dma_hndl, GFP_KERNEL); if( data == NULL ) { free_local_pdev(pdev); return (-ENOMEM); } uxferaddr = MBOX(uioc)->xferaddr; } /* * Is data coming down-stream */ if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { /* * Get the user data */ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, uioc.xferlen) ) { dma_free_coherent(&pdev->dev, uioc.xferlen, data, data_dma_hndl); free_local_pdev(pdev); return (-EFAULT); } } memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); mc.xferaddr = (u32)data_dma_hndl; /* * Issue the command */ mega_internal_command(adapter, &mc, NULL); rval = mega_n_to_m((void __user *)arg, &mc); if( rval ) { if( uioc.xferlen ) { dma_free_coherent(&pdev->dev, uioc.xferlen, data, data_dma_hndl); } free_local_pdev(pdev); return rval; } /* * Is data going up-stream */ if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, uioc.xferlen) ) { rval = (-EFAULT); } } if( uioc.xferlen ) { dma_free_coherent(&pdev->dev, uioc.xferlen, data, data_dma_hndl); } free_local_pdev(pdev); return rval; } default: return (-EINVAL); } return 0; } static long megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&megadev_mutex); ret = megadev_ioctl(filep, cmd, arg); mutex_unlock(&megadev_mutex); return ret; } /** * mega_m_to_n() * @arg: user address * @uioc: new ioctl structure * * A thin layer to convert older mimd interface ioctl structure to NIT ioctl * structure * * Converts the older mimd ioctl structure to newer NIT structure */ static int mega_m_to_n(void __user *arg, nitioctl_t *uioc) { struct uioctl_t uioc_mimd; char signature[8] = 
{0}; u8 opcode; u8 subopcode; /* * check is the application conforms to NIT. We do not have to do much * in that case. * We exploit the fact that the signature is stored in the very * beginning of the structure. */ if( copy_from_user(signature, arg, 7) ) return (-EFAULT); if( memcmp(signature, "MEGANIT", 7) == 0 ) { /* * NOTE NOTE: The nit ioctl is still under flux because of * change of mailbox definition, in HPE. No applications yet * use this interface and let's not have applications use this * interface till the new specifitions are in place. */ return -EINVAL; #if 0 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) return (-EFAULT); return 0; #endif } /* * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t * * Get the user ioctl structure */ if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) return (-EFAULT); /* * Get the opcode and subopcode for the commands */ opcode = uioc_mimd.ui.fcs.opcode; subopcode = uioc_mimd.ui.fcs.subopcode; switch (opcode) { case 0x82: switch (subopcode) { case MEGAIOC_QDRVRVER: /* Query driver version */ uioc->opcode = GET_DRIVER_VER; uioc->uioc_uaddr = uioc_mimd.data; break; case MEGAIOC_QNADAP: /* Get # of adapters */ uioc->opcode = GET_N_ADAP; uioc->uioc_uaddr = uioc_mimd.data; break; case MEGAIOC_QADAPINFO: /* Get adapter information */ uioc->opcode = GET_ADAP_INFO; uioc->adapno = uioc_mimd.ui.fcs.adapno; uioc->uioc_uaddr = uioc_mimd.data; break; default: return(-EINVAL); } break; case 0x81: uioc->opcode = MBOX_CMD; uioc->adapno = uioc_mimd.ui.fcs.adapno; memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); uioc->xferlen = uioc_mimd.ui.fcs.length; if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; break; case 0x80: uioc->opcode = MBOX_CMD; uioc->adapno = uioc_mimd.ui.fcs.adapno; memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); /* * Choose the xferlen bigger of input and output data */ uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? uioc_mimd.outlen : uioc_mimd.inlen; if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; break; default: return (-EINVAL); } return 0; } /* * mega_n_to_m() * @arg: user address * @mc: mailbox command * * Updates the status information to the application, depending on application * conforms to older mimd ioctl interface or newer NIT ioctl interface */ static int mega_n_to_m(void __user *arg, megacmd_t *mc) { nitioctl_t __user *uiocp; megacmd_t __user *umc; mega_passthru __user *upthru; struct uioctl_t __user *uioc_mimd; char signature[8] = {0}; /* * check is the application conforms to NIT. */ if( copy_from_user(signature, arg, 7) ) return -EFAULT; if( memcmp(signature, "MEGANIT", 7) == 0 ) { uiocp = arg; if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) return (-EFAULT); if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { umc = MBOX_P(uiocp); if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) return -EFAULT; if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) return (-EFAULT); } } else { uioc_mimd = arg; if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) return (-EFAULT); if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { umc = (megacmd_t __user *)uioc_mimd->mbox; if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) return (-EFAULT); if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) return (-EFAULT); } } return 0; } /* * MEGARAID 'FW' commands. 
*/ /** * mega_is_bios_enabled() * @adapter: pointer to our soft state * * issue command to find out if the BIOS is enabled for this controller */ static int mega_is_bios_enabled(adapter_t *adapter) { struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; memset(&mbox, 0, sizeof(mbox)); memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); mbox.xferaddr = (u32)adapter->buf_dma_handle; raw_mbox[0] = IS_BIOS_ENABLED; raw_mbox[2] = GET_BIOS; issue_scb_block(adapter, raw_mbox); return *(char *)adapter->mega_buffer; } /** * mega_enum_raid_scsi() * @adapter: pointer to our soft state * * Find out what channels are RAID/SCSI. This information is used to * differentiate the virtual channels and physical channels and to support * ROMB feature and non-disk devices. */ static void mega_enum_raid_scsi(adapter_t *adapter) { struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; int i; memset(&mbox, 0, sizeof(mbox)); /* * issue command to find out what channels are raid/scsi */ raw_mbox[0] = CHNL_CLASS; raw_mbox[2] = GET_CHNL_CLASS; memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); mbox.xferaddr = (u32)adapter->buf_dma_handle; /* * Non-ROMB firmware fails this command, so all channels * must be shown as RAID */ adapter->mega_ch_class = 0xFF; if(!issue_scb_block(adapter, raw_mbox)) { adapter->mega_ch_class = *((char *)adapter->mega_buffer); } for( i = 0; i < adapter->product_info.nchannels; i++ ) { if( (adapter->mega_ch_class >> i) & 0x01 ) { dev_info(&adapter->dev->dev, "channel[%d] is raid\n", i); } else { dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", i); } } return; } /** * mega_get_boot_drv() * @adapter: pointer to our soft state * * Find out which device is the boot device. Note, any logical drive or any * physical device (e.g., a CDROM) can be designated as a boot device. 
*/ static void mega_get_boot_drv(adapter_t *adapter) { struct private_bios_data *prv_bios_data; struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; u16 cksum = 0; u8 *cksum_p; u8 boot_pdrv; int i; memset(&mbox, 0, sizeof(mbox)); raw_mbox[0] = BIOS_PVT_DATA; raw_mbox[2] = GET_BIOS_PVT_DATA; memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); mbox.xferaddr = (u32)adapter->buf_dma_handle; adapter->boot_ldrv_enabled = 0; adapter->boot_ldrv = 0; adapter->boot_pdrv_enabled = 0; adapter->boot_pdrv_ch = 0; adapter->boot_pdrv_tgt = 0; if(issue_scb_block(adapter, raw_mbox) == 0) { prv_bios_data = (struct private_bios_data *)adapter->mega_buffer; cksum = 0; cksum_p = (char *)prv_bios_data; for (i = 0; i < 14; i++ ) { cksum += (u16)(*cksum_p++); } if (prv_bios_data->cksum == (u16)(0-cksum) ) { /* * If MSB is set, a physical drive is set as boot * device */ if( prv_bios_data->boot_drv & 0x80 ) { adapter->boot_pdrv_enabled = 1; boot_pdrv = prv_bios_data->boot_drv & 0x7F; adapter->boot_pdrv_ch = boot_pdrv / 16; adapter->boot_pdrv_tgt = boot_pdrv % 16; } else { adapter->boot_ldrv_enabled = 1; adapter->boot_ldrv = prv_bios_data->boot_drv; } } } } /** * mega_support_random_del() * @adapter: pointer to our soft state * * Find out if this controller supports random deletion and addition of * logical drives */ static int mega_support_random_del(adapter_t *adapter) { struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; int rval; memset(&mbox, 0, sizeof(mbox)); /* * issue command */ raw_mbox[0] = FC_DEL_LOGDRV; raw_mbox[2] = OP_SUP_DEL_LOGDRV; rval = issue_scb_block(adapter, raw_mbox); return !rval; } /** * mega_support_ext_cdb() * @adapter: pointer to our soft state * * Find out if this firmware support cdblen > 10 */ static int mega_support_ext_cdb(adapter_t *adapter) { struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; int rval; memset(&mbox, 0, sizeof(mbox)); /* * issue command to find out if controller supports extended CDBs. */ raw_mbox[0] = 0xA4; raw_mbox[2] = 0x16; rval = issue_scb_block(adapter, raw_mbox); return !rval; } /** * mega_del_logdrv() * @adapter: pointer to our soft state * @logdrv: logical drive to be deleted * * Delete the specified logical drive. It is the responsibility of the user * app to let the OS know about this operation. */ static int mega_del_logdrv(adapter_t *adapter, int logdrv) { unsigned long flags; scb_t *scb; int rval; /* * Stop sending commands to the controller, queue them internally. * When deletion is complete, ISR will flush the queue. */ atomic_set(&adapter->quiescent, 1); /* * Wait till all the issued commands are complete and there are no * commands in the pending queue */ while (atomic_read(&adapter->pend_cmds) > 0 || !list_empty(&adapter->pending_list)) msleep(1000); /* sleep for 1s */ rval = mega_do_del_logdrv(adapter, logdrv); spin_lock_irqsave(&adapter->lock, flags); /* * If delete operation was successful, add 0x80 to the logical drive * ids for commands in the pending queue. 
*/ if (adapter->read_ldidmap) { struct list_head *pos; list_for_each(pos, &adapter->pending_list) { scb = list_entry(pos, scb_t, list); if (scb->pthru->logdrv < 0x80 ) scb->pthru->logdrv += 0x80; } } atomic_set(&adapter->quiescent, 0); mega_runpendq(adapter); spin_unlock_irqrestore(&adapter->lock, flags); return rval; } static int mega_do_del_logdrv(adapter_t *adapter, int logdrv) { megacmd_t mc; int rval; memset( &mc, 0, sizeof(megacmd_t)); mc.cmd = FC_DEL_LOGDRV; mc.opcode = OP_DEL_LOGDRV; mc.subopcode = logdrv; rval = mega_internal_command(adapter, &mc, NULL); /* log this event */ if(rval) { dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); return rval; } /* * After deleting first logical drive, the logical drives must be * addressed by adding 0x80 to the logical drive id. */ adapter->read_ldidmap = 1; return rval; } /** * mega_get_max_sgl() * @adapter: pointer to our soft state * * Find out the maximum number of scatter-gather elements supported by this * version of the firmware */ static void mega_get_max_sgl(adapter_t *adapter) { struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; memset(&mbox, 0, sizeof(mbox)); memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); mbox.xferaddr = (u32)adapter->buf_dma_handle; raw_mbox[0] = MAIN_MISC_OPCODE; raw_mbox[2] = GET_MAX_SG_SUPPORT; if( issue_scb_block(adapter, raw_mbox) ) { /* * f/w does not support this command. Choose the default value */ adapter->sglen = MIN_SGLIST; } else { adapter->sglen = *((char *)adapter->mega_buffer); /* * Make sure this is not more than the resources we are * planning to allocate */ if ( adapter->sglen > MAX_SGLIST ) adapter->sglen = MAX_SGLIST; } return; } /** * mega_support_cluster() * @adapter: pointer to our soft state * * Find out if this firmware support cluster calls. */ static int mega_support_cluster(adapter_t *adapter) { struct mbox_out mbox; unsigned char *raw_mbox = (u8 *)&mbox; memset(&mbox, 0, sizeof(mbox)); memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); mbox.xferaddr = (u32)adapter->buf_dma_handle; /* * Try to get the initiator id. This command will succeed iff the * clustering is available on this HBA. */ raw_mbox[0] = MEGA_GET_TARGET_ID; if( issue_scb_block(adapter, raw_mbox) == 0 ) { /* * Cluster support available. Get the initiator target id. * Tell our id to mid-layer too. */ adapter->this_id = *(u32 *)adapter->mega_buffer; adapter->host->this_id = adapter->this_id; return 1; } return 0; } #ifdef CONFIG_PROC_FS /** * mega_adapinq() * @adapter: pointer to our soft state * @dma_handle: DMA address of the buffer * * Issue internal commands while interrupts are available. * We only issue direct mailbox commands from within the driver. ioctl() * interface using these routines can issue passthru commands. */ static int mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) { megacmd_t mc; memset(&mc, 0, sizeof(megacmd_t)); if( adapter->flag & BOARD_40LD ) { mc.cmd = FC_NEW_CONFIG; mc.opcode = NC_SUBOP_ENQUIRY3; mc.subopcode = ENQ3_GET_SOLICITED_FULL; } else { mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; } mc.xferaddr = (u32)dma_handle; if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { return -1; } return 0; } /** * mega_internal_dev_inquiry() * @adapter: pointer to our soft state * @ch: channel for this device * @tgt: ID of this device * @buf_dma_handle: DMA address of the buffer * * Issue the scsi inquiry for the specified device. 
*/ static int mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, dma_addr_t buf_dma_handle) { mega_passthru *pthru; dma_addr_t pthru_dma_handle; megacmd_t mc; int rval; struct pci_dev *pdev; /* * For all internal commands, the buffer must be allocated in <4GB * address range */ if( make_local_pdev(adapter, &pdev) != 0 ) return -1; pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), &pthru_dma_handle, GFP_KERNEL); if( pthru == NULL ) { free_local_pdev(pdev); return -1; } pthru->timeout = 2; pthru->ars = 1; pthru->reqsenselen = 14; pthru->islogical = 0; pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; pthru->cdblen = 6; pthru->cdb[0] = INQUIRY; pthru->cdb[1] = 0; pthru->cdb[2] = 0; pthru->cdb[3] = 0; pthru->cdb[4] = 255; pthru->cdb[5] = 0; pthru->dataxferaddr = (u32)buf_dma_handle; pthru->dataxferlen = 256; memset(&mc, 0, sizeof(megacmd_t)); mc.cmd = MEGA_MBOXCMD_PASSTHRU; mc.xferaddr = (u32)pthru_dma_handle; rval = mega_internal_command(adapter, &mc, pthru); dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, pthru_dma_handle); free_local_pdev(pdev); return rval; } #endif /** * mega_internal_command() * @adapter: pointer to our soft state * @mc: the mailbox command * @pthru: Passthru structure for DCDB commands * * Issue the internal commands in interrupt mode. * The last argument is the address of the passthru structure if the command * to be fired is a passthru command * * Note: parameter 'pthru' is null for non-passthru commands. */ static int mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) { unsigned long flags; scb_t *scb; int rval; /* * The internal commands share one command id and hence are * serialized. This is so because we want to reserve maximum number of * available command ids for the I/O commands. */ mutex_lock(&adapter->int_mtx); scb = &adapter->int_scb; memset(scb, 0, sizeof(scb_t)); scb->idx = CMDID_INT_CMDS; scb->state |= SCB_ACTIVE | SCB_PENDQ; memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); /* * Is it a passthru command */ if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) scb->pthru = pthru; spin_lock_irqsave(&adapter->lock, flags); list_add_tail(&scb->list, &adapter->pending_list); /* * Check if the HBA is in quiescent state, e.g., during a * delete logical drive opertion. If it is, don't run * the pending_list. */ if (atomic_read(&adapter->quiescent) == 0) mega_runpendq(adapter); spin_unlock_irqrestore(&adapter->lock, flags); wait_for_completion(&adapter->int_waitq); mc->status = rval = adapter->int_status; /* * Print a debug message for all failed commands. Applications can use * this information. 
*/ if (rval && trace_level) { dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", mc->cmd, mc->opcode, mc->subopcode, rval); } mutex_unlock(&adapter->int_mtx); return rval; } static const struct scsi_host_template megaraid_template = { .module = THIS_MODULE, .name = "MegaRAID", .proc_name = "megaraid_legacy", .info = megaraid_info, .queuecommand = megaraid_queue, .bios_param = megaraid_biosparam, .max_sectors = MAX_SECTORS_PER_IO, .can_queue = MAX_COMMANDS, .this_id = DEFAULT_INITIATOR_ID, .sg_tablesize = MAX_SGLIST, .cmd_per_lun = DEF_CMD_PER_LUN, .eh_abort_handler = megaraid_abort, .eh_device_reset_handler = megaraid_reset, .eh_bus_reset_handler = megaraid_reset, .eh_host_reset_handler = megaraid_reset, .no_write_same = 1, .cmd_size = sizeof(struct megaraid_cmd_priv), }; static int megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *host; adapter_t *adapter; unsigned long mega_baseport, tbase, flag = 0; u16 subsysid, subsysvid; u8 pci_bus, pci_dev_func; int irq, i, j; int error = -ENODEV; if (hba_count >= MAX_CONTROLLERS) goto out; if (pci_enable_device(pdev)) goto out; pci_set_master(pdev); pci_bus = pdev->bus->number; pci_dev_func = pdev->devfn; /* * The megaraid3 stuff reports the ID of the Intel part which is not * remotely specific to the megaraid */ if (pdev->vendor == PCI_VENDOR_ID_INTEL) { u16 magic; /* * Don't fall over the Compaq management cards using the same * PCI identifier */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && pdev->subsystem_device == 0xC000) goto out_disable_device; /* Now check the magic signature byte */ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) goto out_disable_device; /* Ok it is probably a megaraid */ } /* * For these vendor and device ids, signature offsets are not * valid and 64 bit is implicit */ if (id->driver_data & BOARD_64BIT) flag |= BOARD_64BIT; else { u32 magic64; pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); if (magic64 == HBA_SIGNATURE_64BIT) flag |= BOARD_64BIT; } subsysvid = pdev->subsystem_vendor; subsysid = pdev->subsystem_device; dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", id->vendor, id->device); /* Read the base port and IRQ from PCI */ mega_baseport = pci_resource_start(pdev, 0); irq = pdev->irq; tbase = mega_baseport; if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { flag |= BOARD_MEMMAP; if (!request_mem_region(mega_baseport, 128, "megaraid")) { dev_warn(&pdev->dev, "mem region busy!\n"); goto out_disable_device; } mega_baseport = (unsigned long)ioremap(mega_baseport, 128); if (!mega_baseport) { dev_warn(&pdev->dev, "could not map hba memory\n"); goto out_release_region; } } else { flag |= BOARD_IOMAP; mega_baseport += 0x10; if (!request_region(mega_baseport, 16, "megaraid")) goto out_disable_device; } /* Initialize SCSI Host structure */ host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); if (!host) goto out_iounmap; adapter = (adapter_t *)host->hostdata; memset(adapter, 0, sizeof(adapter_t)); dev_notice(&pdev->dev, "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", host->host_no, mega_baseport, irq); adapter->base = mega_baseport; if (flag & BOARD_MEMMAP) adapter->mmio_base = (void __iomem *) mega_baseport; INIT_LIST_HEAD(&adapter->free_list); INIT_LIST_HEAD(&adapter->pending_list); INIT_LIST_HEAD(&adapter->completed_list); adapter->flag = flag; spin_lock_init(&adapter->lock); host->cmd_per_lun = max_cmd_per_lun; host->max_sectors = max_sectors_per_io; adapter->dev = 
pdev; adapter->host = host; adapter->host->irq = irq; if (flag & BOARD_MEMMAP) adapter->host->base = tbase; else { adapter->host->io_port = tbase; adapter->host->n_io_port = 16; } adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; /* * Allocate buffer to issue internal commands. */ adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, &adapter->buf_dma_handle, GFP_KERNEL); if (!adapter->mega_buffer) { dev_warn(&pdev->dev, "out of RAM\n"); goto out_host_put; } adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), GFP_KERNEL); if (!adapter->scb_list) { dev_warn(&pdev->dev, "out of RAM\n"); goto out_free_cmd_buffer; } if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? megaraid_isr_memmapped : megaraid_isr_iomapped, IRQF_SHARED, "megaraid", adapter)) { dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); goto out_free_scb_list; } if (mega_setup_mailbox(adapter)) goto out_free_irq; if (mega_query_adapter(adapter)) goto out_free_mbox; /* * Have checks for some buggy f/w */ if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { /* * Which firmware */ if (!strcmp(adapter->fw_version, "3.00") || !strcmp(adapter->fw_version, "3.01")) { dev_warn(&pdev->dev, "Your card is a Dell PERC " "2/SC RAID controller with " "firmware\nmegaraid: 3.00 or 3.01. " "This driver is known to have " "corruption issues\nmegaraid: with " "those firmware versions on this " "specific card. In order\nmegaraid: " "to protect your data, please upgrade " "your firmware to version\nmegaraid: " "3.10 or later, available from the " "Dell Technical Support web\n" "megaraid: site at\nhttp://support." "dell.com/us/en/filelib/download/" "index.asp?fileid=2940\n" ); } } /* * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit * support, since this firmware cannot handle 64 bit * addressing */ if ((subsysvid == PCI_VENDOR_ID_HP) && ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { /* * which firmware */ if (!strcmp(adapter->fw_version, "H01.07") || !strcmp(adapter->fw_version, "H01.08") || !strcmp(adapter->fw_version, "H01.09") ) { dev_warn(&pdev->dev, "Firmware H.01.07, " "H.01.08, and H.01.09 on 1M/2M " "controllers\n" "do not support 64 bit " "addressing.\nDISABLING " "64 bit support.\n"); adapter->flag &= ~BOARD_64BIT; } } if (mega_is_bios_enabled(adapter)) mega_hbas[hba_count].is_bios_enabled = 1; mega_hbas[hba_count].hostdata_addr = adapter; /* * Find out which channel is raid and which is scsi. This is * for ROMB support. */ mega_enum_raid_scsi(adapter); /* * Find out if a logical drive is set as the boot drive. If * there is one, will make that as the first logical drive. * ROMB: Do we have to boot from a physical drive. Then all * the physical drives would appear before the logical disks. * Else, all the physical drives would be exported to the mid * layer after logical drives. 
*/ mega_get_boot_drv(adapter); if (adapter->boot_pdrv_enabled) { j = adapter->product_info.nchannels; for( i = 0; i < j; i++ ) adapter->logdrv_chan[i] = 0; for( i = j; i < NVIRT_CHAN + j; i++ ) adapter->logdrv_chan[i] = 1; } else { for (i = 0; i < NVIRT_CHAN; i++) adapter->logdrv_chan[i] = 1; for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) adapter->logdrv_chan[i] = 0; adapter->mega_ch_class <<= NVIRT_CHAN; } /* * Do we support random deletion and addition of logical * drives */ adapter->read_ldidmap = 0; /* set it after first logdrv delete cmd */ adapter->support_random_del = mega_support_random_del(adapter); /* Initialize SCBs */ if (mega_init_scb(adapter)) goto out_free_mbox; /* * Reset the pending commands counter */ atomic_set(&adapter->pend_cmds, 0); /* * Reset the adapter quiescent flag */ atomic_set(&adapter->quiescent, 0); hba_soft_state[hba_count] = adapter; /* * Fill in the structure which needs to be passed back to the * application when it does an ioctl() for controller related * information. */ i = hba_count; mcontroller[i].base = mega_baseport; mcontroller[i].irq = irq; mcontroller[i].numldrv = adapter->numldrv; mcontroller[i].pcibus = pci_bus; mcontroller[i].pcidev = id->device; mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); mcontroller[i].pciid = -1; mcontroller[i].pcivendor = id->vendor; mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; /* Set the Mode of addressing to 64 bit if we can */ if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); adapter->has_64bit_addr = 1; } else { dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); adapter->has_64bit_addr = 0; } mutex_init(&adapter->int_mtx); init_completion(&adapter->int_waitq); adapter->this_id = DEFAULT_INITIATOR_ID; adapter->host->this_id = DEFAULT_INITIATOR_ID; #if MEGA_HAVE_CLUSTERING /* * Is cluster support enabled on this controller * Note: In a cluster the HBAs ( the initiators ) will have * different target IDs and we cannot assume it to be 7. 
Call * to mega_support_cluster() will get the target ids also if * the cluster support is available */ adapter->has_cluster = mega_support_cluster(adapter); if (adapter->has_cluster) { dev_notice(&pdev->dev, "Cluster driver, initiator id:%d\n", adapter->this_id); } #endif pci_set_drvdata(pdev, host); mega_create_proc_entry(hba_count, mega_proc_dir_entry); error = scsi_add_host(host, &pdev->dev); if (error) goto out_free_mbox; scsi_scan_host(host); hba_count++; return 0; out_free_mbox: dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), adapter->una_mbox64, adapter->una_mbox64_dma); out_free_irq: free_irq(adapter->host->irq, adapter); out_free_scb_list: kfree(adapter->scb_list); out_free_cmd_buffer: dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, adapter->mega_buffer, adapter->buf_dma_handle); out_host_put: scsi_host_put(host); out_iounmap: if (flag & BOARD_MEMMAP) iounmap((void *)mega_baseport); out_release_region: if (flag & BOARD_MEMMAP) release_mem_region(tbase, 128); else release_region(mega_baseport, 16); out_disable_device: pci_disable_device(pdev); out: return error; } static void __megaraid_shutdown(adapter_t *adapter) { u_char raw_mbox[sizeof(struct mbox_out)]; mbox_t *mbox = (mbox_t *)raw_mbox; int i; /* Flush adapter cache */ memset(&mbox->m_out, 0, sizeof(raw_mbox)); raw_mbox[0] = FLUSH_ADAPTER; free_irq(adapter->host->irq, adapter); /* Issue a blocking (interrupts disabled) command to the card */ issue_scb_block(adapter, raw_mbox); /* Flush disks cache */ memset(&mbox->m_out, 0, sizeof(raw_mbox)); raw_mbox[0] = FLUSH_SYSTEM; /* Issue a blocking (interrupts disabled) command to the card */ issue_scb_block(adapter, raw_mbox); if (atomic_read(&adapter->pend_cmds) > 0) dev_warn(&adapter->dev->dev, "pending commands!!\n"); /* * Have a deliberate delay to make sure all the caches are * actually flushed. 
*/ for (i = 0; i <= 10; i++) mdelay(1000); } static void megaraid_remove_one(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); adapter_t *adapter = (adapter_t *)host->hostdata; char buf[12] = { 0 }; scsi_remove_host(host); __megaraid_shutdown(adapter); /* Free our resources */ if (adapter->flag & BOARD_MEMMAP) { iounmap((void *)adapter->base); release_mem_region(adapter->host->base, 128); } else release_region(adapter->base, 16); mega_free_sgl(adapter); sprintf(buf, "hba%d", adapter->host->host_no); remove_proc_subtree(buf, mega_proc_dir_entry); dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, adapter->mega_buffer, adapter->buf_dma_handle); kfree(adapter->scb_list); dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), adapter->una_mbox64, adapter->una_mbox64_dma); scsi_host_put(host); pci_disable_device(pdev); hba_count--; } static void megaraid_shutdown(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); adapter_t *adapter = (adapter_t *)host->hostdata; __megaraid_shutdown(adapter); } static struct pci_device_id megaraid_pci_tbl[] = { {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); static struct pci_driver megaraid_pci_driver = { .name = "megaraid_legacy", .id_table = megaraid_pci_tbl, .probe = megaraid_probe_one, .remove = megaraid_remove_one, .shutdown = megaraid_shutdown, }; static int __init megaraid_init(void) { int error; if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN)) max_cmd_per_lun = MAX_CMD_PER_LUN; if (max_mbox_busy_wait > MBOX_BUSY_WAIT) max_mbox_busy_wait = MBOX_BUSY_WAIT; #ifdef CONFIG_PROC_FS mega_proc_dir_entry = proc_mkdir("megaraid", NULL); if (!mega_proc_dir_entry) { printk(KERN_WARNING "megaraid: failed to create megaraid root\n"); } #endif error = pci_register_driver(&megaraid_pci_driver); if (error) { #ifdef CONFIG_PROC_FS remove_proc_entry("megaraid", NULL); #endif return error; } /* * Register the driver as a character device, for applications * to access it for ioctls. * First argument (major) to register_chrdev implies a dynamic * major number allocation. */ major = register_chrdev(0, "megadev_legacy", &megadev_fops); if (major < 0) { printk(KERN_WARNING "megaraid: failed to register char device\n"); } return 0; } static void __exit megaraid_exit(void) { /* * Unregister the character device interface to the driver. */ unregister_chrdev(major, "megadev_legacy"); pci_unregister_driver(&megaraid_pci_driver); #ifdef CONFIG_PROC_FS remove_proc_entry("megaraid", NULL); #endif } module_init(megaraid_init); module_exit(megaraid_exit); /* vi: set ts=8 sw=8 tw=78: */
linux-master
drivers/scsi/megaraid.c
// SPDX-License-Identifier: GPL-2.0-only /* mac_esp.c: ESP front-end for Macintosh Quadra systems. * * Adapted from jazz_esp.c and the old mac_esp.c. * * The pseudo DMA algorithm is based on the one used in NetBSD. * See sys/arch/mac68k/obio/esp.c for some background information. * * Copyright (C) 2007-2008 Finn Thain */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/nubus.h> #include <linux/slab.h> #include <asm/irq.h> #include <asm/dma.h> #include <asm/macints.h> #include <asm/macintosh.h> #include <asm/mac_via.h> #include <scsi/scsi_host.h> #include "esp_scsi.h" #define DRV_MODULE_NAME "mac_esp" #define PFX DRV_MODULE_NAME ": " #define DRV_VERSION "1.000" #define DRV_MODULE_RELDATE "Sept 15, 2007" #define MAC_ESP_IO_BASE 0x50F00000 #define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000) #define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000) #define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000) #define MAC_ESP_REGS_SPACING 0x402 #define MAC_ESP_PDMA_REG 0xF9800024 #define MAC_ESP_PDMA_REG_SPACING 0x4 #define MAC_ESP_PDMA_IO_OFFSET 0x100 #define esp_read8(REG) mac_esp_read8(esp, REG) #define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG) struct mac_esp_priv { struct esp *esp; void __iomem *pdma_regs; void __iomem *pdma_io; }; static struct esp *esp_chips[2]; static DEFINE_SPINLOCK(esp_chips_lock); #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ dev_get_drvdata((esp)->dev)) static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg) { nubus_writeb(val, esp->regs + reg * 16); } static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg) { return nubus_readb(esp->regs + reg * 16); } static void mac_esp_reset_dma(struct esp *esp) { /* Nothing to do. */ } static void mac_esp_dma_drain(struct esp *esp) { /* Nothing to do. */ } static void mac_esp_dma_invalidate(struct esp *esp) { /* Nothing to do. 
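* Transfers are done by pseudo DMA (CPU-driven moves), so there is no DMA mapping state to invalidate here.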
*/ } static int mac_esp_dma_error(struct esp *esp) { return esp->send_cmd_error; } static inline int mac_esp_wait_for_empty_fifo(struct esp *esp) { int i = 500000; do { if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)) return 0; if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) return 1; udelay(2); } while (--i); printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n", esp_read8(ESP_STATUS)); esp->send_cmd_error = 1; return 1; } static inline int mac_esp_wait_for_dreq(struct esp *esp) { struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); int i = 500000; do { if (mep->pdma_regs == NULL) { if (via2_scsi_drq_pending()) return 0; } else { if (nubus_readl(mep->pdma_regs) & 0x200) return 0; } if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) return 1; udelay(2); } while (--i); printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n", esp_read8(ESP_STATUS)); esp->send_cmd_error = 1; return 1; } #define MAC_ESP_PDMA_LOOP(operands) \ asm volatile ( \ " tstw %1 \n" \ " jbeq 20f \n" \ "1: movew " operands " \n" \ "2: movew " operands " \n" \ "3: movew " operands " \n" \ "4: movew " operands " \n" \ "5: movew " operands " \n" \ "6: movew " operands " \n" \ "7: movew " operands " \n" \ "8: movew " operands " \n" \ "9: movew " operands " \n" \ "10: movew " operands " \n" \ "11: movew " operands " \n" \ "12: movew " operands " \n" \ "13: movew " operands " \n" \ "14: movew " operands " \n" \ "15: movew " operands " \n" \ "16: movew " operands " \n" \ " subqw #1,%1 \n" \ " jbne 1b \n" \ "20: tstw %2 \n" \ " jbeq 30f \n" \ "21: movew " operands " \n" \ " subqw #1,%2 \n" \ " jbne 21b \n" \ "30: tstw %3 \n" \ " jbeq 40f \n" \ "31: moveb " operands " \n" \ "32: nop \n" \ "40: \n" \ " \n" \ " .section __ex_table,\"a\" \n" \ " .align 4 \n" \ " .long 1b,40b \n" \ " .long 2b,40b \n" \ " .long 3b,40b \n" \ " .long 4b,40b \n" \ " .long 5b,40b \n" \ " .long 6b,40b \n" \ " .long 7b,40b \n" \ " .long 8b,40b \n" \ " .long 9b,40b \n" \ " .long 10b,40b \n" \ " .long 11b,40b \n" \ " .long 12b,40b \n" \ " .long 13b,40b \n" \ " .long 14b,40b \n" \ " .long 15b,40b \n" \ " .long 16b,40b \n" \ " .long 21b,40b \n" \ " .long 31b,40b \n" \ " .long 32b,40b \n" \ " .previous \n" \ : "+a" (addr), "+r" (count32), "+r" (count2) \ : "g" (count1), "a" (mep->pdma_io)) static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); esp->send_cmd_error = 0; if (!write) scsi_esp_cmd(esp, ESP_CMD_FLUSH); esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW); esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED); scsi_esp_cmd(esp, cmd); do { unsigned int count32 = esp_count >> 5; unsigned int count2 = (esp_count & 0x1F) >> 1; unsigned int count1 = esp_count & 1; unsigned int start_addr = addr; if (mac_esp_wait_for_dreq(esp)) break; if (write) { MAC_ESP_PDMA_LOOP("%4@,%0@+"); esp_count -= addr - start_addr; } else { unsigned int n; MAC_ESP_PDMA_LOOP("%0@+,%4@"); if (mac_esp_wait_for_empty_fifo(esp)) break; n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW); addr = start_addr + esp_count - n; esp_count = n; } } while (esp_count); } static int mac_esp_irq_pending(struct esp *esp) { if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) return 1; return 0; } static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { return dma_len > 0xFFFF ? 0xFFFF : dma_len; } static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id) { int got_intr; /* * This is an edge triggered IRQ, so we have to be careful to * avoid missing a transition when it is shared by two ESP devices. 
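* Re-polling both chips in a loop until neither has ESP_STAT_INTR set ensures that an edge raised by one chip while the other is being serviced is not lost.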
*/ do { got_intr = 0; if (esp_chips[0] && (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) { (void)scsi_esp_intr(irq, esp_chips[0]); got_intr = 1; } if (esp_chips[1] && (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) { (void)scsi_esp_intr(irq, esp_chips[1]); got_intr = 1; } } while (got_intr); return IRQ_HANDLED; } static struct esp_driver_ops mac_esp_ops = { .esp_write8 = mac_esp_write8, .esp_read8 = mac_esp_read8, .irq_pending = mac_esp_irq_pending, .dma_length_limit = mac_esp_dma_length_limit, .reset_dma = mac_esp_reset_dma, .dma_drain = mac_esp_dma_drain, .dma_invalidate = mac_esp_dma_invalidate, .send_dma_cmd = mac_esp_send_pdma_cmd, .dma_error = mac_esp_dma_error, }; static int esp_mac_probe(struct platform_device *dev) { const struct scsi_host_template *tpnt = &scsi_esp_template; struct Scsi_Host *host; struct esp *esp; int err; struct mac_esp_priv *mep; if (!MACH_IS_MAC) return -ENODEV; if (dev->id > 1) return -ENODEV; host = scsi_host_alloc(tpnt, sizeof(struct esp)); err = -ENOMEM; if (!host) goto fail; host->max_id = 8; host->dma_boundary = PAGE_SIZE - 1; esp = shost_priv(host); esp->host = host; esp->dev = &dev->dev; esp->command_block = kzalloc(16, GFP_KERNEL); if (!esp->command_block) goto fail_unlink; esp->command_block_dma = (dma_addr_t)esp->command_block; esp->scsi_id = 7; host->this_id = esp->scsi_id; esp->scsi_id_mask = 1 << esp->scsi_id; mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL); if (!mep) goto fail_free_command_block; mep->esp = esp; platform_set_drvdata(dev, mep); switch (macintosh_config->scsi_type) { case MAC_SCSI_QUADRA: esp->cfreq = 16500000; esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA; mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET; mep->pdma_regs = NULL; break; case MAC_SCSI_QUADRA2: esp->cfreq = 25000000; esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 + dev->id * MAC_ESP_REGS_SPACING); mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET; mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG + dev->id * MAC_ESP_PDMA_REG_SPACING); nubus_writel(0x1d1, mep->pdma_regs); break; case MAC_SCSI_QUADRA3: /* These quadras have a real DMA controller (the PSC) but we * don't know how to drive it so we must use PIO instead. */ esp->cfreq = 25000000; esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3; mep->pdma_io = NULL; mep->pdma_regs = NULL; break; } esp->fifo_reg = esp->regs + ESP_FDATA * 16; esp->ops = &mac_esp_ops; esp->flags = ESP_FLAG_NO_DMA_MAP; if (mep->pdma_io == NULL) { printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id); esp_write8(0, ESP_TCLOW); esp_write8(0, ESP_TCMED); esp->flags |= ESP_FLAG_DISABLE_SYNC; mac_esp_ops.send_dma_cmd = esp_send_pio_cmd; } else { printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id); } host->irq = IRQ_MAC_SCSI; /* The request_irq() call is intended to succeed for the first device * and fail for the second device. 
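* Both chips sit on IRQ_MAC_SCSI and the IRQ is not requested with IRQF_SHARED, so the second probe's request_irq() normally returns -EBUSY; the handler installed by the first probe then services both chips through esp_chips[].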
*/ err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); spin_lock(&esp_chips_lock); if (err < 0 && esp_chips[!dev->id] == NULL) { spin_unlock(&esp_chips_lock); goto fail_free_priv; } esp_chips[dev->id] = esp; spin_unlock(&esp_chips_lock); err = scsi_esp_register(esp); if (err) goto fail_free_irq; return 0; fail_free_irq: spin_lock(&esp_chips_lock); esp_chips[dev->id] = NULL; if (esp_chips[!dev->id] == NULL) { spin_unlock(&esp_chips_lock); free_irq(host->irq, NULL); } else spin_unlock(&esp_chips_lock); fail_free_priv: kfree(mep); fail_free_command_block: kfree(esp->command_block); fail_unlink: scsi_host_put(host); fail: return err; } static int esp_mac_remove(struct platform_device *dev) { struct mac_esp_priv *mep = platform_get_drvdata(dev); struct esp *esp = mep->esp; unsigned int irq = esp->host->irq; scsi_esp_unregister(esp); spin_lock(&esp_chips_lock); esp_chips[dev->id] = NULL; if (esp_chips[!dev->id] == NULL) { spin_unlock(&esp_chips_lock); free_irq(irq, NULL); } else spin_unlock(&esp_chips_lock); kfree(mep); kfree(esp->command_block); scsi_host_put(esp->host); return 0; } static struct platform_driver esp_mac_driver = { .probe = esp_mac_probe, .remove = esp_mac_remove, .driver = { .name = DRV_MODULE_NAME, }, }; module_platform_driver(esp_mac_driver); MODULE_DESCRIPTION("Mac ESP SCSI driver"); MODULE_AUTHOR("Finn Thain"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_MODULE_NAME);
linux-master
drivers/scsi/mac_esp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 1996 John Shifflett, GeoLog Consulting * [email protected] * [email protected] */ /* * Drew Eckhardt's excellent 'Generic NCR5380' sources from Linux-PC * provided much of the inspiration and some of the code for this * driver. Everything I know about Amiga DMA was gleaned from careful * reading of Hamish Mcdonald's original wd33c93 driver; in fact, I * borrowed shamelessly from all over that source. Thanks Hamish! * * _This_ driver is (I feel) an improvement over the old one in * several respects: * * - Target Disconnection/Reconnection is now supported. Any * system with more than one device active on the SCSI bus * will benefit from this. The driver defaults to what I * call 'adaptive disconnect' - meaning that each command * is evaluated individually as to whether or not it should * be run with the option to disconnect/reselect (if the * device chooses), or as a "SCSI-bus-hog". * * - Synchronous data transfers are now supported. Because of * a few devices that choke after telling the driver that * they can do sync transfers, we don't automatically use * this faster protocol - it can be enabled via the command- * line on a device-by-device basis. * * - Runtime operating parameters can now be specified through * the 'amiboot' or the 'insmod' command line. For amiboot do: * "amiboot [usual stuff] wd33c93=blah,blah,blah" * The defaults should be good for most people. See the comment * for 'setup_strings' below for more details. * * - The old driver relied exclusively on what the Western Digital * docs call "Combination Level 2 Commands", which are a great * idea in that the CPU is relieved of a lot of interrupt * overhead. However, by accepting a certain (user-settable) * amount of additional interrupts, this driver achieves * better control over the SCSI bus, and data transfers are * almost as fast while being much easier to define, track, * and debug. * * * TODO: * more speed. linked commands. * * * People with bug reports, wish-lists, complaints, comments, * or improvements are asked to pah-leeez email me (John Shifflett) * at [email protected] or [email protected]! I'm anxious to get * this thing into as good a shape as possible, and I'm positive * there are lots of lurking bugs and "Stupid Places". * * Updates: * * Added support for pre -A chips, which don't have advanced features * and will generate CSR_RESEL rather than CSR_RESEL_AM. * Richard Hirst <[email protected]> August 2000 * * Added support for Burst Mode DMA and Fast SCSI. Enabled the use of * default_sx_per for asynchronous data transfers. Added adjustment * of transfer periods in sx_table to the actual input-clock. * peter fuerst <[email protected]> February 2007 */ #include <linux/module.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <asm/irq.h> #include "wd33c93.h" #define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns #define WD33C93_VERSION "1.26++" #define WD33C93_DATE "10/Feb/2007" MODULE_AUTHOR("John Shifflett"); MODULE_DESCRIPTION("Generic WD33C93 SCSI driver"); MODULE_LICENSE("GPL"); /* * 'setup_strings' is a single string used to pass operating parameters and * settings from the kernel/module command-line to the driver. 'setup_args[]' * is an array of strings that define the compile-time default values for * these settings. 
If Linux boots with an amiboot or insmod command-line, * those settings are combined with 'setup_args[]'. Note that amiboot * command-lines are prefixed with "wd33c93=" while insmod uses a * "setup_strings=" prefix. The driver recognizes the following keywords * (lower case required) and arguments: * * - nosync:bitmask -bitmask is a byte where the 1st 7 bits correspond with * the 7 possible SCSI devices. Set a bit to negotiate for * asynchronous transfers on that device. To maintain * backwards compatibility, a command-line such as * "wd33c93=255" will be automatically translated to * "wd33c93=nosync:0xff". * - nodma:x -x = 1 to disable DMA, x = 0 to enable it. Argument is * optional - if not present, same as "nodma:1". * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer * period. Default is 500; acceptable values are 250 - 1000. * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them. * x = 1 does 'adaptive' disconnects, which is the default * and generally the best choice. * - debug:x -If 'DEBUGGING_ON' is defined, x is a bit mask that causes * various types of debug output to printed - see the DB_xxx * defines in wd33c93.h * - clock:x -x = clock input in MHz for WD33c93 chip. Normal values * would be from 8 through 20. Default is 8. * - burst:x -x = 1 to use Burst Mode (or Demand-Mode) DMA, x = 0 to use * Single Byte DMA, which is the default. Argument is * optional - if not present, same as "burst:1". * - fast:x -x = 1 to enable Fast SCSI, which is only effective with * input-clock divisor 4 (WD33C93_FS_16_20), x = 0 to disable * it, which is the default. Argument is optional - if not * present, same as "fast:1". * - next -No argument. Used to separate blocks of keywords when * there's more than one host adapter in the system. * * Syntax Notes: * - Numeric arguments can be decimal or the '0x' form of hex notation. There * _must_ be a colon between a keyword and its numeric argument, with no * spaces. * - Keywords are separated by commas, no spaces, in the standard kernel * command-line manner. * - A keyword in the 'nth' comma-separated command-line member will overwrite * the 'nth' element of setup_args[]. A blank command-line member (in * other words, a comma with no preceding keyword) will _not_ overwrite * the corresponding setup_args[] element. * - If a keyword is used more than once, the first one applies to the first * SCSI host found, the second to the second card, etc, unless the 'next' * keyword is used to change the order. 
* * Some amiboot examples (for insmod, use 'setup_strings' instead of 'wd33c93'): * - wd33c93=nosync:255 * - wd33c93=nodma * - wd33c93=nodma:1 * - wd33c93=disconnect:2,nosync:0x08,period:250 * - wd33c93=debug:0x1c */ /* Normally, no defaults are specified */ static char *setup_args[] = { "", "", "", "", "", "", "", "", "", "" }; static char *setup_strings; module_param(setup_strings, charp, 0); static void wd33c93_execute(struct Scsi_Host *instance); static inline uchar read_wd33c93(const wd33c93_regs regs, uchar reg_num) { *regs.SASR = reg_num; mb(); return (*regs.SCMD); } static unsigned long read_wd33c93_count(const wd33c93_regs regs) { unsigned long value; *regs.SASR = WD_TRANSFER_COUNT_MSB; mb(); value = *regs.SCMD << 16; value |= *regs.SCMD << 8; value |= *regs.SCMD; mb(); return value; } static inline uchar read_aux_stat(const wd33c93_regs regs) { return *regs.SASR; } static inline void write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value) { *regs.SASR = reg_num; mb(); *regs.SCMD = value; mb(); } static void write_wd33c93_count(const wd33c93_regs regs, unsigned long value) { *regs.SASR = WD_TRANSFER_COUNT_MSB; mb(); *regs.SCMD = value >> 16; *regs.SCMD = value >> 8; *regs.SCMD = value; mb(); } static inline void write_wd33c93_cmd(const wd33c93_regs regs, uchar cmd) { *regs.SASR = WD_COMMAND; mb(); *regs.SCMD = cmd; mb(); } static inline void write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[]) { int i; *regs.SASR = WD_CDB_1; for (i = 0; i < len; i++) *regs.SCMD = cmnd[i]; } static inline uchar read_1_byte(const wd33c93_regs regs) { uchar asr; uchar x = 0; write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO | 0x80); do { asr = read_aux_stat(regs); if (asr & ASR_DBR) x = read_wd33c93(regs, WD_DATA); } while (!(asr & ASR_INT)); return x; } static int round_period(unsigned int period, const struct sx_period *sx_table) { int x; for (x = 1; sx_table[x].period_ns; x++) { if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) { return x; } } return 7; } /* * Calculate Synchronous Transfer Register value from SDTR code. */ static uchar calc_sync_xfer(unsigned int period, unsigned int offset, unsigned int fast, const struct sx_period *sx_table) { /* When doing Fast SCSI synchronous data transfers, the corresponding * value in 'sx_table' is two times the actually used transfer period. */ uchar result; if (offset && fast) { fast = STR_FSS; period *= 2; } else { fast = 0; } period *= 4; /* convert SDTR code to ns */ result = sx_table[round_period(period,sx_table)].reg_value; result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF; result |= fast; return result; } /* * Calculate SDTR code bytes [3],[4] from period and offset. */ static inline void calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast, uchar msg[2]) { /* 'period' is a "normal"-mode value, like the ones in 'sx_table'. The * actually used transfer period for Fast SCSI synchronous data * transfers is half that value. 
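* The SDTR message expresses the period in units of 4 ns, hence the divide by 4 below; with Fast SCSI enabled the advertised period is halved once more.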
*/ period /= 4; if (offset && fast) period /= 2; msg[0] = period; msg[1] = offset; } static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); struct WD33C93_hostdata *hostdata; struct scsi_cmnd *tmp; hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; DB(DB_QUEUE_COMMAND, printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0])) /* Set up a few fields in the scsi_cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue * - result is what you'd expect */ cmd->host_scribble = NULL; cmd->result = 0; /* We use the Scsi_Pointer structure that's included with each command * as a scratchpad (as it's intended to be used!). The handy thing about * the SCp.xxx fields is that they're always associated with a given * cmd, and are preserved across disconnect-reselect. This means we * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages * if we keep all the critical pointers and counters in SCp: * - SCp.ptr is the pointer into the RAM buffer * - SCp.this_residual is the size of that buffer * - SCp.buffer points to the current scatter-gather buffer * - SCp.buffers_residual tells us how many S.G. buffers there are * - SCp.have_data_in is not used * - SCp.sent_command is not used * - SCp.phase records this command's SRCID_ER bit setting */ if (scsi_bufflen(cmd)) { scsi_pointer->buffer = scsi_sglist(cmd); scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1; scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); scsi_pointer->this_residual = scsi_pointer->buffer->length; } else { scsi_pointer->buffer = NULL; scsi_pointer->buffers_residual = 0; scsi_pointer->ptr = NULL; scsi_pointer->this_residual = 0; } /* WD docs state that at the conclusion of a "LEVEL2" command, the * status byte can be retrieved from the LUN register. Apparently, * this is the case only for *uninterrupted* LEVEL2 commands! If * there are any unexpected phases entered, even if they are 100% * legal (different devices may choose to do things differently), * the LEVEL2 command sequence is exited. This often occurs prior * to receiving the status byte, in which case the driver does a * status phase interrupt and gets the status byte on its own. * While such a command can then be "resumed" (ie restarted to * finish up as a LEVEL2 command), the LUN register will NOT be * a valid status byte at the command's conclusion, and we must * use the byte obtained during the earlier interrupt. Here, we * preset SCp.Status to an illegal value (0xff) so that when * this command finally completes, we can tell where the actual * status byte is stored. */ scsi_pointer->Status = ILLEGAL_STATUS_BYTE; /* * Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE * commands are added to the head of the queue so that the desired * sense data is not lost before REQUEST_SENSE executes. */ spin_lock_irq(&hostdata->lock); if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) { cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { /* find the end of the queue */ for (tmp = (struct scsi_cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble) ; tmp->host_scribble = (uchar *) cmd; } /* We know that there's at least one command in 'input_Q' now. * Go see if any of them are runnable! 
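* (The queue lock is still held at this point; wd33c93_execute() returns immediately if a command is already connected or selecting.)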
*/ wd33c93_execute(cmd->device->host); DB(DB_QUEUE_COMMAND, printk(")Q ")) spin_unlock_irq(&hostdata->lock); return 0; } DEF_SCSI_QCMD(wd33c93_queuecommand) /* * This routine attempts to start a scsi command. If the host_card is * already connected, we give up immediately. Otherwise, look through * the input_Q, using the first command we find that's intended * for a currently non-busy target/lun. * * wd33c93_execute() is always called with interrupts disabled or from * the wd33c93_intr itself, which means that a wd33c93 interrupt * cannot occur while we are in here. */ static void wd33c93_execute(struct Scsi_Host *instance) { struct scsi_pointer *scsi_pointer; struct WD33C93_hostdata *hostdata = (struct WD33C93_hostdata *) instance->hostdata; const wd33c93_regs regs = hostdata->regs; struct scsi_cmnd *cmd, *prev; DB(DB_EXECUTE, printk("EX(")) if (hostdata->selecting || hostdata->connected) { DB(DB_EXECUTE, printk(")EX-0 ")) return; } /* * Search through the input_Q for a command destined * for an idle target/lun. */ cmd = (struct scsi_cmnd *) hostdata->input_Q; prev = NULL; while (cmd) { if (!(hostdata->busy[cmd->device->id] & (1 << (cmd->device->lun & 0xff)))) break; prev = cmd; cmd = (struct scsi_cmnd *) cmd->host_scribble; } /* quit if queue empty or all possible targets are busy */ if (!cmd) { DB(DB_EXECUTE, printk(")EX-1 ")) return; } /* remove command from queue */ if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (struct scsi_cmnd *) cmd->host_scribble; #ifdef PROC_STATISTICS hostdata->cmd_cnt[cmd->device->id]++; #endif /* * Start the selection process */ if (cmd->sc_data_direction == DMA_TO_DEVICE) write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id); else write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); /* Now we need to figure out whether or not this command is a good * candidate for disconnect/reselect. We guess to the best of our * ability, based on a set of hierarchical rules. When several * devices are operating simultaneously, disconnects are usually * an advantage. In a single device system, or if only 1 device * is being accessed, transfers usually go faster if disconnects * are not allowed: * * + Commands should NEVER disconnect if hostdata->disconnect = * DIS_NEVER (this holds for tape drives also), and ALWAYS * disconnect if hostdata->disconnect = DIS_ALWAYS. * + Tape drive commands should always be allowed to disconnect. * + Disconnect should be allowed if disconnected_Q isn't empty. * + Commands should NOT disconnect if input_Q is empty. * + Disconnect should be allowed if there are commands in input_Q * for a different target/lun. In this case, the other commands * should be made disconnect-able, if not already. * * I know, I know - this code would flunk me out of any * "C Programming 101" class ever offered. But it's easy * to change around and experiment with for now. */ scsi_pointer = WD33C93_scsi_pointer(cmd); scsi_pointer->phase = 0; /* assume no disconnect */ if (hostdata->disconnect == DIS_NEVER) goto no; if (hostdata->disconnect == DIS_ALWAYS) goto yes; if (cmd->device->type == 1) /* tape drive? */ goto yes; if (hostdata->disconnected_Q) /* other commands disconnected? */ goto yes; if (!(hostdata->input_Q)) /* input_Q empty? 
*/ goto no; for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev; prev = (struct scsi_cmnd *) prev->host_scribble) { if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) { for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev; prev = (struct scsi_cmnd *) prev->host_scribble) WD33C93_scsi_pointer(prev)->phase = 1; goto yes; } } goto no; yes: scsi_pointer->phase = 1; #ifdef PROC_STATISTICS hostdata->disc_allowed_cnt[cmd->device->id]++; #endif no: write_wd33c93(regs, WD_SOURCE_ID, scsi_pointer->phase ? SRCID_ER : 0); write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun); write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); if ((hostdata->level2 == L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { /* * Do a 'Select-With-ATN' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * CSR_SELECT: success - proceed. */ hostdata->selecting = cmd; /* Every target has its own synchronous transfer setting, kept in the * sync_xfer array, and a corresponding status byte in sync_stat[]. * Each target's sync_stat[] entry is initialized to SX_UNSET, and its * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET * means that the parameters are undetermined as yet, and that we * need to send an SDTR message to this device after selection is * complete: We set SS_FIRST to tell the interrupt routine to do so. * If we've been asked not to try synchronous transfers on this * target (and _all_ luns within it), we'll still send the SDTR message * later, but at that time we'll negotiate for async by specifying a * sync fifo depth of 0. */ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) hostdata->sync_stat[cmd->device->id] = SS_FIRST; hostdata->state = S_SELECTING; write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN); } else { /* * Do a 'Select-With-ATN-Xfer' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * anything else: success - proceed. */ hostdata->connected = cmd; write_wd33c93(regs, WD_COMMAND_PHASE, 0); /* copy command_descriptor_block into WD chip * (take advantage of auto-incrementing) */ write_wd33c93_cdb(regs, cmd->cmd_len, cmd->cmnd); /* The wd33c93 only knows about Group 0, 1, and 5 commands when * it's doing a 'select-and-transfer'. To be safe, we write the * size of the CDB into the OWN_ID register for every case. This * way there won't be problems with vendor-unique, audio, etc. */ write_wd33c93(regs, WD_OWN_ID, cmd->cmd_len); /* When doing a non-disconnect command with DMA, we can save * ourselves a DATA phase interrupt later by setting everything * up ahead of time. */ if (scsi_pointer->phase == 0 && hostdata->no_dma == 0) { if (hostdata->dma_setup(cmd, (cmd->sc_data_direction == DMA_TO_DEVICE) ? 
DATA_OUT_DIR : DATA_IN_DIR)) write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ else { write_wd33c93_count(regs, scsi_pointer->this_residual); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode); hostdata->dma = D_DMA_RUNNING; } } else write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ hostdata->state = S_RUNNING_LEVEL2; write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); } /* * Since the SCSI bus can handle only 1 connection at a time, * we get out of here now. If the selection fails, or when * the command disconnects, we'll come back to this routine * to search the input_Q again... */ DB(DB_EXECUTE, printk("%s)EX-2 ", scsi_pointer->phase ? "d:" : "")) } static void transfer_pio(const wd33c93_regs regs, uchar * buf, int cnt, int data_in_dir, struct WD33C93_hostdata *hostdata) { uchar asr; DB(DB_TRANSFER, printk("(%p,%d,%s:", buf, cnt, data_in_dir ? "in" : "out")) write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93_count(regs, cnt); write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO); if (data_in_dir) { do { asr = read_aux_stat(regs); if (asr & ASR_DBR) *buf++ = read_wd33c93(regs, WD_DATA); } while (!(asr & ASR_INT)); } else { do { asr = read_aux_stat(regs); if (asr & ASR_DBR) write_wd33c93(regs, WD_DATA, *buf++); } while (!(asr & ASR_INT)); } /* Note: we are returning with the interrupt UN-cleared. * Since (presumably) an entire I/O operation has * completed, the bus phase is probably different, and * the interrupt routine will discover this when it * responds to the uncleared int. */ } static void transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd, int data_in_dir) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); struct WD33C93_hostdata *hostdata; unsigned long length; hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; /* Normally, you'd expect 'this_residual' to be non-zero here. * In a series of scatter-gather transfers, however, this * routine will usually be called with 'this_residual' equal * to 0 and 'buffers_residual' non-zero. This means that a * previous transfer completed, clearing 'this_residual', and * now we need to setup the next scatter-gather buffer as the * source or destination for THIS transfer. */ if (!scsi_pointer->this_residual && scsi_pointer->buffers_residual) { scsi_pointer->buffer = sg_next(scsi_pointer->buffer); --scsi_pointer->buffers_residual; scsi_pointer->this_residual = scsi_pointer->buffer->length; scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); } if (!scsi_pointer->this_residual) /* avoid bogus setups */ return; write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); /* 'hostdata->no_dma' is TRUE if we don't even want to try DMA. * Update 'this_residual' and 'ptr' after 'transfer_pio()' returns. */ if (hostdata->no_dma || hostdata->dma_setup(cmd, data_in_dir)) { #ifdef PROC_STATISTICS hostdata->pio_cnt++; #endif transfer_pio(regs, (uchar *) scsi_pointer->ptr, scsi_pointer->this_residual, data_in_dir, hostdata); length = scsi_pointer->this_residual; scsi_pointer->this_residual = read_wd33c93_count(regs); scsi_pointer->ptr += length - scsi_pointer->this_residual; } /* We are able to do DMA (in fact, the Amiga hardware is * already going!), so start up the wd33c93 in DMA mode. * We set 'hostdata->dma' = D_DMA_RUNNING so that when the * transfer completes and causes an interrupt, we're * reminded to tell the Amiga to shut down its end. We'll * postpone the updating of 'this_residual' and 'ptr' * until then. 
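* (The interrupt handler reads the chip's transfer counter at that point to work out how many bytes actually moved.)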
*/ else { #ifdef PROC_STATISTICS hostdata->dma_cnt++; #endif write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode); write_wd33c93_count(regs, scsi_pointer->this_residual); if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && scsi_pointer->phase == 0)) { write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO); hostdata->dma = D_DMA_RUNNING; } } void wd33c93_intr(struct Scsi_Host *instance) { struct scsi_pointer *scsi_pointer; struct WD33C93_hostdata *hostdata = (struct WD33C93_hostdata *) instance->hostdata; const wd33c93_regs regs = hostdata->regs; struct scsi_cmnd *patch, *cmd; uchar asr, sr, phs, id, lun, *ucp, msg; unsigned long length, flags; asr = read_aux_stat(regs); if (!(asr & ASR_INT) || (asr & ASR_BSY)) return; spin_lock_irqsave(&hostdata->lock, flags); #ifdef PROC_STATISTICS hostdata->int_cnt++; #endif cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */ scsi_pointer = WD33C93_scsi_pointer(cmd); sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */ phs = read_wd33c93(regs, WD_COMMAND_PHASE); DB(DB_INTR, printk("{%02x:%02x-", asr, sr)) /* After starting a DMA transfer, the next interrupt * is guaranteed to be in response to completion of * the transfer. Since the Amiga DMA hardware runs in * in an open-ended fashion, it needs to be told when * to stop; do that here if D_DMA_RUNNING is true. * Also, we have to update 'this_residual' and 'ptr' * based on the contents of the TRANSFER_COUNT register, * in case the device decided to do an intermediate * disconnect (a device may do this if it has to do a * seek, or just to be nice and let other devices have * some bus time during long transfers). After doing * whatever is needed, we go on and service the WD3393 * interrupt normally. */ if (hostdata->dma == D_DMA_RUNNING) { DB(DB_TRANSFER, printk("[%p/%d:", scsi_pointer->ptr, scsi_pointer->this_residual)) hostdata->dma_stop(cmd->device->host, cmd, 1); hostdata->dma = D_DMA_OFF; length = scsi_pointer->this_residual; scsi_pointer->this_residual = read_wd33c93_count(regs); scsi_pointer->ptr += length - scsi_pointer->this_residual; DB(DB_TRANSFER, printk("%p/%d]", scsi_pointer->ptr, scsi_pointer->this_residual)) } /* Respond to the specific WD3393 interrupt - there are quite a few! */ switch (sr) { case CSR_TIMEOUT: DB(DB_INTR, printk("TIMEOUT")) if (hostdata->state == S_RUNNING_LEVEL2) hostdata->connected = NULL; else { cmd = (struct scsi_cmnd *) hostdata->selecting; /* get a valid cmd */ hostdata->selecting = NULL; } cmd->result = DID_NO_CONNECT << 16; hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); hostdata->state = S_UNCONNECTED; scsi_done(cmd); /* From esp.c: * There is a window of time within the scsi_done() path * of execution where interrupts are turned back on full * blast and left that way. During that time we could * reconnect to a disconnected command, then we'd bomb * out below. We could also end up executing two commands * at _once_. ...just so you know why the restore_flags() * is here... */ spin_unlock_irqrestore(&hostdata->lock, flags); /* We are not connected to a target - check to see if there * are commands waiting to be executed. 
*/ wd33c93_execute(instance); break; /* Note: this interrupt should not occur in a LEVEL2 command */ case CSR_SELECT: DB(DB_INTR, printk("SELECT")) hostdata->connected = cmd = (struct scsi_cmnd *) hostdata->selecting; hostdata->selecting = NULL; /* construct an IDENTIFY message with correct disconnect bit */ hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun); if (scsi_pointer->phase) hostdata->outgoing_msg[0] |= 0x40; if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) { hostdata->sync_stat[cmd->device->id] = SS_WAITING; /* Tack on a 2nd message to ask about synchronous transfers. If we've * been asked to do only asynchronous transfers on this device, we * request a fifo depth of 0, which is equivalent to async - should * solve the problems some people have had with GVP's Guru ROM. */ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE; hostdata->outgoing_msg[2] = 3; hostdata->outgoing_msg[3] = EXTENDED_SDTR; if (hostdata->no_sync & (1 << cmd->device->id)) { calc_sync_msg(hostdata->default_sx_per, 0, 0, hostdata->outgoing_msg + 4); } else { calc_sync_msg(optimum_sx_per(hostdata), OPTIMUM_SX_OFF, hostdata->fast, hostdata->outgoing_msg + 4); } hostdata->outgoing_len = 6; #ifdef SYNC_DEBUG ucp = hostdata->outgoing_msg + 1; printk(" sending SDTR %02x03%02x%02x%02x ", ucp[0], ucp[2], ucp[3], ucp[4]); #endif } else hostdata->outgoing_len = 1; hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_DATA_IN: case CSR_UNEXP | PHS_DATA_IN: case CSR_SRV_REQ | PHS_DATA_IN: DB(DB_INTR, printk("IN-%d.%d", scsi_pointer->this_residual, scsi_pointer->buffers_residual)) transfer_bytes(regs, cmd, DATA_IN_DIR); if (hostdata->state != S_RUNNING_LEVEL2) hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_DATA_OUT: case CSR_UNEXP | PHS_DATA_OUT: case CSR_SRV_REQ | PHS_DATA_OUT: DB(DB_INTR, printk("OUT-%d.%d", scsi_pointer->this_residual, scsi_pointer->buffers_residual)) transfer_bytes(regs, cmd, DATA_OUT_DIR); if (hostdata->state != S_RUNNING_LEVEL2) hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; /* Note: this interrupt should not occur in a LEVEL2 command */ case CSR_XFER_DONE | PHS_COMMAND: case CSR_UNEXP | PHS_COMMAND: case CSR_SRV_REQ | PHS_COMMAND: DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0])) transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata); hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_STATUS: case CSR_UNEXP | PHS_STATUS: case CSR_SRV_REQ | PHS_STATUS: DB(DB_INTR, printk("STATUS=")) scsi_pointer->Status = read_1_byte(regs); DB(DB_INTR, printk("%02x", scsi_pointer->Status)) if (hostdata->level2 >= L2_BASIC) { sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */ udelay(7); hostdata->state = S_RUNNING_LEVEL2; write_wd33c93(regs, WD_COMMAND_PHASE, 0x50); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); } else { hostdata->state = S_CONNECTED; } spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_MESS_IN: case CSR_UNEXP | PHS_MESS_IN: case CSR_SRV_REQ | PHS_MESS_IN: DB(DB_INTR, printk("MSG_IN=")) msg = read_1_byte(regs); sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */ udelay(7); hostdata->incoming_msg[hostdata->incoming_ptr] = msg; if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE) msg = EXTENDED_MESSAGE; else hostdata->incoming_ptr = 0; scsi_pointer->Message = msg; switch (msg) { case COMMAND_COMPLETE: DB(DB_INTR, 
printk("CCMP")) write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_CMP_DISC; break; case SAVE_POINTERS: DB(DB_INTR, printk("SDP")) write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case RESTORE_POINTERS: DB(DB_INTR, printk("RDP")) if (hostdata->level2 >= L2_BASIC) { write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else { write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; case DISCONNECT: DB(DB_INTR, printk("DIS")) cmd->device->disconnect = 1; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_TMP_DISC; break; case MESSAGE_REJECT: DB(DB_INTR, printk("REJ")) #ifdef SYNC_DEBUG printk("-REJ-"); #endif if (hostdata->sync_stat[cmd->device->id] == SS_WAITING) { hostdata->sync_stat[cmd->device->id] = SS_SET; /* we want default_sx_per, not DEFAULT_SX_PER */ hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0, 0, hostdata->sx_table); } write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case EXTENDED_MESSAGE: DB(DB_INTR, printk("EXT")) ucp = hostdata->incoming_msg; #ifdef SYNC_DEBUG printk("%02x", ucp[hostdata->incoming_ptr]); #endif /* Is this the last byte of the extended message? */ if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) { switch (ucp[2]) { /* what's the EXTENDED code? */ case EXTENDED_SDTR: /* default to default async period */ id = calc_sync_xfer(hostdata-> default_sx_per / 4, 0, 0, hostdata->sx_table); if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) { /* A device has sent an unsolicited SDTR message; rather than go * through the effort of decoding it and then figuring out what * our reply should be, we're just gonna say that we have a * synchronous fifo depth of 0. This will result in asynchronous * transfers - not ideal but so much easier. * Actually, this is OK because it assures us that if we don't * specifically ask for sync transfers, we won't do any. */ write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE; hostdata->outgoing_msg[1] = 3; hostdata->outgoing_msg[2] = EXTENDED_SDTR; calc_sync_msg(hostdata-> default_sx_per, 0, 0, hostdata->outgoing_msg + 3); hostdata->outgoing_len = 5; } else { if (ucp[4]) /* well, sync transfer */ id = calc_sync_xfer(ucp[3], ucp[4], hostdata->fast, hostdata->sx_table); else if (ucp[3]) /* very unlikely... */ id = calc_sync_xfer(ucp[3], ucp[4], 0, hostdata->sx_table); } hostdata->sync_xfer[cmd->device->id] = id; #ifdef SYNC_DEBUG printk(" sync_xfer=%02x\n", hostdata->sync_xfer[cmd->device->id]); #endif hostdata->sync_stat[cmd->device->id] = SS_SET; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case EXTENDED_WDTR: write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ printk("sending WDTR "); hostdata->outgoing_msg[0] = EXTENDED_MESSAGE; hostdata->outgoing_msg[1] = 2; hostdata->outgoing_msg[2] = EXTENDED_WDTR; hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */ hostdata->outgoing_len = 4; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; default: write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ printk ("Rejecting Unknown Extended Message(%02x). 
", ucp[2]); hostdata->outgoing_msg[0] = MESSAGE_REJECT; hostdata->outgoing_len = 1; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; } hostdata->incoming_ptr = 0; } /* We need to read more MESS_IN bytes for the extended message */ else { hostdata->incoming_ptr++; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; default: printk("Rejecting Unknown Message(%02x) ", msg); write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ hostdata->outgoing_msg[0] = MESSAGE_REJECT; hostdata->outgoing_len = 1; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } spin_unlock_irqrestore(&hostdata->lock, flags); break; /* Note: this interrupt will occur only after a LEVEL2 command */ case CSR_SEL_XFER_DONE: /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); if (phs == 0x60) { DB(DB_INTR, printk("SX-DONE")) scsi_pointer->Message = COMMAND_COMPLETE; lun = read_wd33c93(regs, WD_TARGET_LUN); DB(DB_INTR, printk(":%d.%d", scsi_pointer->Status, lun)) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); hostdata->state = S_UNCONNECTED; if (scsi_pointer->Status == ILLEGAL_STATUS_BYTE) scsi_pointer->Status = lun; if (cmd->cmnd[0] == REQUEST_SENSE && scsi_pointer->Status != SAM_STAT_GOOD) { set_host_byte(cmd, DID_ERROR); } else { set_host_byte(cmd, DID_OK); scsi_msg_to_host_byte(cmd, scsi_pointer->Message); set_status_byte(cmd, scsi_pointer->Status); } scsi_done(cmd); /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ spin_unlock_irqrestore(&hostdata->lock, flags); wd33c93_execute(instance); } else { printk ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs); spin_unlock_irqrestore(&hostdata->lock, flags); } break; /* Note: this interrupt will occur only after a LEVEL2 command */ case CSR_SDP: DB(DB_INTR, printk("SDP")) hostdata->state = S_RUNNING_LEVEL2; write_wd33c93(regs, WD_COMMAND_PHASE, 0x41); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_MESS_OUT: case CSR_UNEXP | PHS_MESS_OUT: case CSR_SRV_REQ | PHS_MESS_OUT: DB(DB_INTR, printk("MSG_OUT=")) /* To get here, we've probably requested MESSAGE_OUT and have * already put the correct bytes in outgoing_msg[] and filled * in outgoing_len. We simply send them out to the SCSI bus. * Sometimes we get MESSAGE_OUT phase when we're not expecting * it - like when our SDTR message is rejected by a target. Some * targets send the REJECT before receiving all of the extended * message, and then seem to go back to MESSAGE_OUT for a byte * or two. Not sure why, or if I'm doing something wrong to * cause this to happen. Regardless, it seems that sending * NOP messages in these situations results in no harm and * makes everyone happy. */ if (hostdata->outgoing_len == 0) { hostdata->outgoing_len = 1; hostdata->outgoing_msg[0] = NOP; } transfer_pio(regs, hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata); DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0])) hostdata->outgoing_len = 0; hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_UNEXP_DISC: /* I think I've seen this after a request-sense that was in response * to an error condition, but not sure. 
We certainly need to do * something when we get this interrupt - the question is 'what?'. * Let's think positively, and assume some command has finished * in a legal manner (like a command that provokes a request-sense), * so we treat it as a normal command-complete-disconnect. */ /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); return; } DB(DB_INTR, printk("UNEXP_DISC")) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); hostdata->state = S_UNCONNECTED; if (cmd->cmnd[0] == REQUEST_SENSE && scsi_pointer->Status != SAM_STAT_GOOD) { set_host_byte(cmd, DID_ERROR); } else { set_host_byte(cmd, DID_OK); scsi_msg_to_host_byte(cmd, scsi_pointer->Message); set_status_byte(cmd, scsi_pointer->Status); } scsi_done(cmd); /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ /* look above for comments on scsi_done() */ spin_unlock_irqrestore(&hostdata->lock, flags); wd33c93_execute(instance); break; case CSR_DISC: /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); DB(DB_INTR, printk("DISC")) if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; } switch (hostdata->state) { case S_PRE_CMP_DISC: hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); hostdata->state = S_UNCONNECTED; DB(DB_INTR, printk(":%d", scsi_pointer->Status)) if (cmd->cmnd[0] == REQUEST_SENSE && scsi_pointer->Status != SAM_STAT_GOOD) { set_host_byte(cmd, DID_ERROR); } else { set_host_byte(cmd, DID_OK); scsi_msg_to_host_byte(cmd, scsi_pointer->Message); set_status_byte(cmd, scsi_pointer->Status); } scsi_done(cmd); break; case S_PRE_TMP_DISC: case S_RUNNING_LEVEL2: cmd->host_scribble = (uchar *) hostdata->disconnected_Q; hostdata->disconnected_Q = cmd; hostdata->connected = NULL; hostdata->state = S_UNCONNECTED; #ifdef PROC_STATISTICS hostdata->disc_done_cnt[cmd->device->id]++; #endif break; default: printk("*** Unexpected DISCONNECT interrupt! ***"); hostdata->state = S_UNCONNECTED; } /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ spin_unlock_irqrestore(&hostdata->lock, flags); wd33c93_execute(instance); break; case CSR_RESEL_AM: case CSR_RESEL: DB(DB_INTR, printk("RESEL%s", sr == CSR_RESEL_AM ? "_AM" : "")) /* Old chips (pre -A ???) don't have advanced features and will * generate CSR_RESEL. In that case we have to extract the LUN the * hard way (see below). * First we have to make sure this reselection didn't * happen during Arbitration/Selection of some other device. * If yes, put losing command back on top of input_Q. 
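* (A command that lost arbitration is still recorded in hostdata->selecting; the code below requeues it at the head of input_Q.)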
*/ if (hostdata->level2 <= L2_NONE) { if (hostdata->selecting) { cmd = (struct scsi_cmnd *) hostdata->selecting; hostdata->selecting = NULL; hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } } else { if (cmd) { if (phs == 0x00) { hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { printk ("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs); while (1) printk("\r"); } } } /* OK - find out which device reselected us. */ id = read_wd33c93(regs, WD_SOURCE_ID); id &= SRCID_MASK; /* and extract the lun from the ID message. (Note that we don't * bother to check for a valid message here - I guess this is * not the right way to go, but...) */ if (sr == CSR_RESEL_AM) { lun = read_wd33c93(regs, WD_DATA); if (hostdata->level2 < L2_RESELECT) write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); lun &= 7; } else { /* Old chip; wait for msgin phase to pick up the LUN. */ for (lun = 255; lun; lun--) { if ((asr = read_aux_stat(regs)) & ASR_INT) break; udelay(10); } if (!(asr & ASR_INT)) { printk ("wd33c93: Reselected without IDENTIFY\n"); lun = 0; } else { /* Verify this is a change to MSG_IN and read the message */ sr = read_wd33c93(regs, WD_SCSI_STATUS); udelay(7); if (sr == (CSR_ABORT | PHS_MESS_IN) || sr == (CSR_UNEXP | PHS_MESS_IN) || sr == (CSR_SRV_REQ | PHS_MESS_IN)) { /* Got MSG_IN, grab target LUN */ lun = read_1_byte(regs); /* Now we expect a 'paused with ACK asserted' int.. */ asr = read_aux_stat(regs); if (!(asr & ASR_INT)) { udelay(10); asr = read_aux_stat(regs); if (!(asr & ASR_INT)) printk ("wd33c93: No int after LUN on RESEL (%02x)\n", asr); } sr = read_wd33c93(regs, WD_SCSI_STATUS); udelay(7); if (sr != CSR_MSGIN) printk ("wd33c93: Not paused with ACK on RESEL (%02x)\n", sr); lun &= 7; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); } else { printk ("wd33c93: Not MSG_IN on reselect (%02x)\n", sr); lun = 0; } } } /* Now we look for the command that's reconnecting. */ cmd = (struct scsi_cmnd *) hostdata->disconnected_Q; patch = NULL; while (cmd) { if (id == cmd->device->id && lun == (u8)cmd->device->lun) break; patch = cmd; cmd = (struct scsi_cmnd *) cmd->host_scribble; } /* Hmm. Couldn't find a valid command.... What to do? */ if (!cmd) { printk ("---TROUBLE: target %d.%d not in disconnect queue---", id, (u8)lun); spin_unlock_irqrestore(&hostdata->lock, flags); return; } /* Ok, found the command - now start it up again. */ if (patch) patch->host_scribble = cmd->host_scribble; else hostdata->disconnected_Q = (struct scsi_cmnd *) cmd->host_scribble; hostdata->connected = cmd; /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]' * because these things are preserved over a disconnect. * But we DO need to fix the DPD bit so it's correct for this command. 
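* (DSTID_DPD gives the chip the expected data phase direction: set when we will be reading from the target, clear when writing to it.)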
*/ if (cmd->sc_data_direction == DMA_TO_DEVICE) write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id); else write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); if (hostdata->level2 >= L2_RESELECT) { write_wd33c93_count(regs, 0); /* we want a DATA_PHASE interrupt */ write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; default: printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs); spin_unlock_irqrestore(&hostdata->lock, flags); } DB(DB_INTR, printk("} ")) } static void reset_wd33c93(struct Scsi_Host *instance) { struct WD33C93_hostdata *hostdata = (struct WD33C93_hostdata *) instance->hostdata; const wd33c93_regs regs = hostdata->regs; uchar sr; #ifdef CONFIG_SGI_IP22 { int busycount = 0; extern void sgiwd93_reset(unsigned long); /* wait 'til the chip gets some time for us */ while ((read_aux_stat(regs) & ASR_BSY) && busycount++ < 100) udelay (10); /* * there are scsi devices out there, which manage to lock up * the wd33c93 in a busy condition. In this state it won't * accept the reset command. The only way to solve this is to * give the chip a hardware reset (if possible). The code below * does this for the SGI Indy, where this is possible */ /* still busy ? */ if (read_aux_stat(regs) & ASR_BSY) sgiwd93_reset(instance->base); /* yeah, give it the hard one */ } #endif write_wd33c93(regs, WD_OWN_ID, OWNID_EAF | OWNID_RAF | instance->this_id | hostdata->clock_freq); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF, 0, hostdata->sx_table)); write_wd33c93(regs, WD_COMMAND, WD_CMD_RESET); #ifdef CONFIG_MVME147_SCSI udelay(25); /* The old wd33c93 on MVME147 needs this, at least */ #endif while (!(read_aux_stat(regs) & ASR_INT)) ; sr = read_wd33c93(regs, WD_SCSI_STATUS); hostdata->microcode = read_wd33c93(regs, WD_CDB_1); if (sr == 0x00) hostdata->chip = C_WD33C93; else if (sr == 0x01) { write_wd33c93(regs, WD_QUEUE_TAG, 0xa5); /* any random number */ sr = read_wd33c93(regs, WD_QUEUE_TAG); if (sr == 0xa5) { hostdata->chip = C_WD33C93B; write_wd33c93(regs, WD_QUEUE_TAG, 0); } else hostdata->chip = C_WD33C93A; } else hostdata->chip = C_UNKNOWN_CHIP; if (hostdata->chip != C_WD33C93B) /* Fast SCSI unavailable */ hostdata->fast = 0; write_wd33c93(regs, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); } int wd33c93_host_reset(struct scsi_cmnd * SCpnt) { struct Scsi_Host *instance; struct WD33C93_hostdata *hostdata; int i; instance = SCpnt->device->host; spin_lock_irq(instance->host_lock); hostdata = (struct WD33C93_hostdata *) instance->hostdata; printk("scsi%d: reset. 
", instance->host_no); disable_irq(instance->irq); hostdata->dma_stop(instance, NULL, 0); for (i = 0; i < 8; i++) { hostdata->busy[i] = 0; hostdata->sync_xfer[i] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF, 0, hostdata->sx_table); hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */ } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->dma = D_DMA_OFF; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; reset_wd33c93(instance); SCpnt->result = DID_RESET << 16; enable_irq(instance->irq); spin_unlock_irq(instance->host_lock); return SUCCESS; } int wd33c93_abort(struct scsi_cmnd * cmd) { struct Scsi_Host *instance; struct WD33C93_hostdata *hostdata; wd33c93_regs regs; struct scsi_cmnd *tmp, *prev; disable_irq(cmd->device->host->irq); instance = cmd->device->host; hostdata = (struct WD33C93_hostdata *) instance->hostdata; regs = hostdata->regs; /* * Case 1 : If the command hasn't been issued yet, we simply remove it * from the input_Q. */ tmp = (struct scsi_cmnd *) hostdata->input_Q; prev = NULL; while (tmp) { if (tmp == cmd) { if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (struct scsi_cmnd *) cmd->host_scribble; cmd->host_scribble = NULL; cmd->result = DID_ABORT << 16; printk ("scsi%d: Abort - removing command from input_Q. ", instance->host_no); enable_irq(cmd->device->host->irq); scsi_done(cmd); return SUCCESS; } prev = tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble; } /* * Case 2 : If the command is connected, we're going to fail the abort * and let the high level SCSI driver retry at a later time or * issue a reset. * * Timeouts, and therefore aborted commands, will be highly unlikely * and handling them cleanly in this situation would make the common * case of noresets less efficient, and would pollute our code. So, * we fail. */ if (hostdata->connected == cmd) { uchar sr, asr; unsigned long timeout; printk("scsi%d: Aborting connected command - ", instance->host_no); printk("stopping DMA - "); if (hostdata->dma == D_DMA_RUNNING) { hostdata->dma_stop(instance, cmd, 0); hostdata->dma = D_DMA_OFF; } printk("sending wd33c93 ABORT command - "); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93_cmd(regs, WD_CMD_ABORT); /* Now we have to attempt to flush out the FIFO... */ printk("flushing fifo - "); timeout = 1000000; do { asr = read_aux_stat(regs); if (asr & ASR_DBR) read_wd33c93(regs, WD_DATA); } while (!(asr & ASR_INT) && timeout-- > 0); sr = read_wd33c93(regs, WD_SCSI_STATUS); printk ("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_wd33c93_count(regs), timeout); /* * Abort command processed. * Still connected. * We must disconnect. */ printk("sending wd33c93 DISCONNECT command - "); write_wd33c93_cmd(regs, WD_CMD_DISCONNECT); timeout = 1000000; asr = read_aux_stat(regs); while ((asr & ASR_CIP) && timeout-- > 0) asr = read_aux_stat(regs); sr = read_wd33c93(regs, WD_SCSI_STATUS); printk("asr=%02x, sr=%02x.", asr, sr); hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); hostdata->connected = NULL; hostdata->state = S_UNCONNECTED; cmd->result = DID_ABORT << 16; /* sti();*/ wd33c93_execute(instance); enable_irq(cmd->device->host->irq); scsi_done(cmd); return SUCCESS; } /* * Case 3: If the command is currently disconnected from the bus, * we're not going to expend much effort here: Let's just return * an ABORT_SNOOZE and hope for the best... 
*/ tmp = (struct scsi_cmnd *) hostdata->disconnected_Q; while (tmp) { if (tmp == cmd) { printk ("scsi%d: Abort - command found on disconnected_Q - ", instance->host_no); printk("Abort SNOOZE. "); enable_irq(cmd->device->host->irq); return FAILED; } tmp = (struct scsi_cmnd *) tmp->host_scribble; } /* * Case 4 : If we reached this point, the command was not found in any of * the queues. * * We probably reached this point because of an unlikely race condition * between the command completing successfully and the abortion code, * so we won't panic, but we will notify the user in case something really * broke. */ /* sti();*/ wd33c93_execute(instance); enable_irq(cmd->device->host->irq); printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no); return FAILED; } #define MAX_WD33C93_HOSTS 4 #define MAX_SETUP_ARGS ARRAY_SIZE(setup_args) #define SETUP_BUFFER_SIZE 200 static char setup_buffer[SETUP_BUFFER_SIZE]; static char setup_used[MAX_SETUP_ARGS]; static int done_setup = 0; static int wd33c93_setup(char *str) { int i; char *p1, *p2; /* The kernel does some processing of the command-line before calling * this function: If it begins with any decimal or hex number arguments, * ints[0] = how many numbers found and ints[1] through [n] are the values * themselves. str points to where the non-numeric arguments (if any) * start: We do our own parsing of those. We construct synthetic 'nosync' * keywords out of numeric args (to maintain compatibility with older * versions) and then add the rest of the arguments. */ p1 = setup_buffer; *p1 = '\0'; if (str) strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer)); setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0'; p1 = setup_buffer; i = 0; while (*p1 && (i < MAX_SETUP_ARGS)) { p2 = strchr(p1, ','); if (p2) { *p2 = '\0'; if (p1 != p2) setup_args[i] = p1; p1 = p2 + 1; i++; } else { setup_args[i] = p1; break; } } for (i = 0; i < MAX_SETUP_ARGS; i++) setup_used[i] = 0; done_setup = 1; return 1; } __setup("wd33c93=", wd33c93_setup); /* check_setup_args() returns index if key found, 0 if not */ static int check_setup_args(char *key, int *flags, int *val, char *buf) { int x; char *cp; for (x = 0; x < MAX_SETUP_ARGS; x++) { if (setup_used[x]) continue; if (!strncmp(setup_args[x], key, strlen(key))) break; if (!strncmp(setup_args[x], "next", strlen("next"))) return 0; } if (x == MAX_SETUP_ARGS) return 0; setup_used[x] = 1; cp = setup_args[x] + strlen(key); *val = -1; if (*cp != ':') return ++x; cp++; if ((*cp >= '0') && (*cp <= '9')) { *val = simple_strtoul(cp, NULL, 0); } return ++x; } /* * Calculate internal data-transfer-clock cycle from input-clock * frequency (/MHz) and fill 'sx_table'. * * The original driver used to rely on a fixed sx_table, containing periods * for (only) the lower limits of the respective input-clock-frequency ranges * (8-10/12-15/16-20 MHz). Although it seems, that no problems occurred with * this setting so far, it might be desirable to adjust the transfer periods * closer to the really attached, possibly 25% higher, input-clock, since * - the wd33c93 may really use a significant shorter period, than it has * negotiated (eg. thrashing the target, which expects 4/8MHz, with 5/10MHz * instead). * - the wd33c93 may ask the target for a lower transfer rate, than the target * is capable of (eg. negotiating for an assumed minimum of 252ns instead of * possible 200ns, which indeed shows up in tests as an approx. 10% lower * transfer rate). 
*/ static inline unsigned int round_4(unsigned int x) { switch (x & 3) { case 1: --x; break; case 2: ++x; fallthrough; case 3: ++x; } return x; } static void calc_sx_table(unsigned int mhz, struct sx_period sx_table[9]) { unsigned int d, i; if (mhz < 11) d = 2; /* divisor for 8-10 MHz input-clock */ else if (mhz < 16) d = 3; /* divisor for 12-15 MHz input-clock */ else d = 4; /* divisor for 16-20 MHz input-clock */ d = (100000 * d) / 2 / mhz; /* 100 x DTCC / nanosec */ sx_table[0].period_ns = 1; sx_table[0].reg_value = 0x20; for (i = 1; i < 8; i++) { sx_table[i].period_ns = round_4((i+1)*d / 100); sx_table[i].reg_value = (i+1)*0x10; } sx_table[7].reg_value = 0; sx_table[8].period_ns = 0; sx_table[8].reg_value = 0; } /* * check and, maybe, map an init- or "clock:"- argument. */ static uchar set_clk_freq(int freq, int *mhz) { int x = freq; if (WD33C93_FS_8_10 == freq) freq = 8; else if (WD33C93_FS_12_15 == freq) freq = 12; else if (WD33C93_FS_16_20 == freq) freq = 16; else if (freq > 7 && freq < 11) x = WD33C93_FS_8_10; else if (freq > 11 && freq < 16) x = WD33C93_FS_12_15; else if (freq > 15 && freq < 21) x = WD33C93_FS_16_20; else { /* Hmm, wouldn't it be safer to assume highest freq here? */ x = WD33C93_FS_8_10; freq = 8; } *mhz = freq; return x; } /* * to be used with the resync: fast: ... options */ static inline void set_resync ( struct WD33C93_hostdata *hd, int mask ) { int i; for (i = 0; i < 8; i++) if (mask & (1 << i)) hd->sync_stat[i] = SS_UNSET; } void wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs, dma_setup_t setup, dma_stop_t stop, int clock_freq) { struct WD33C93_hostdata *hostdata; int i; int flags; int val; char buf[32]; if (!done_setup && setup_strings) wd33c93_setup(setup_strings); hostdata = (struct WD33C93_hostdata *) instance->hostdata; hostdata->regs = regs; hostdata->clock_freq = set_clk_freq(clock_freq, &i); calc_sx_table(i, hostdata->sx_table); hostdata->dma_setup = setup; hostdata->dma_stop = stop; hostdata->dma_bounce_buffer = NULL; hostdata->dma_bounce_len = 0; for (i = 0; i < 8; i++) { hostdata->busy[i] = 0; hostdata->sync_xfer[i] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF, 0, hostdata->sx_table); hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */ #ifdef PROC_STATISTICS hostdata->cmd_cnt[i] = 0; hostdata->disc_allowed_cnt[i] = 0; hostdata->disc_done_cnt[i] = 0; #endif } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->dma = D_DMA_OFF; hostdata->level2 = L2_BASIC; hostdata->disconnect = DIS_ADAPTIVE; hostdata->args = DEBUG_DEFAULTS; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; hostdata->default_sx_per = DEFAULT_SX_PER; hostdata->no_dma = 0; /* default is DMA enabled */ #ifdef PROC_INTERFACE hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP; #ifdef PROC_STATISTICS hostdata->dma_cnt = 0; hostdata->pio_cnt = 0; hostdata->int_cnt = 0; #endif #endif if (check_setup_args("clock", &flags, &val, buf)) { hostdata->clock_freq = set_clk_freq(val, &val); calc_sx_table(val, hostdata->sx_table); } if (check_setup_args("nosync", &flags, &val, buf)) hostdata->no_sync = val; if (check_setup_args("nodma", &flags, &val, buf)) hostdata->no_dma = (val == -1) ? 
1 : val; if (check_setup_args("period", &flags, &val, buf)) hostdata->default_sx_per = hostdata->sx_table[round_period((unsigned int) val, hostdata->sx_table)].period_ns; if (check_setup_args("disconnect", &flags, &val, buf)) { if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS)) hostdata->disconnect = val; else hostdata->disconnect = DIS_ADAPTIVE; } if (check_setup_args("level2", &flags, &val, buf)) hostdata->level2 = val; if (check_setup_args("debug", &flags, &val, buf)) hostdata->args = val & DB_MASK; if (check_setup_args("burst", &flags, &val, buf)) hostdata->dma_mode = val ? CTRL_BURST:CTRL_DMA; if (WD33C93_FS_16_20 == hostdata->clock_freq /* divisor 4 */ && check_setup_args("fast", &flags, &val, buf)) hostdata->fast = !!val; if ((i = check_setup_args("next", &flags, &val, buf))) { while (i) setup_used[--i] = 1; } #ifdef PROC_INTERFACE if (check_setup_args("proc", &flags, &val, buf)) hostdata->proc = val; #endif spin_lock_irq(&hostdata->lock); reset_wd33c93(instance); spin_unlock_irq(&hostdata->lock); printk("wd33c93-%d: chip=%s/%d no_sync=0x%x no_dma=%d", instance->host_no, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode, hostdata->no_sync, hostdata->no_dma); #ifdef DEBUGGING_ON printk(" debug_flags=0x%02x\n", hostdata->args); #else printk(" debugging=OFF\n"); #endif printk(" setup_args="); for (i = 0; i < MAX_SETUP_ARGS; i++) printk("%s,", setup_args[i]); printk("\n"); printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE); } int wd33c93_write_info(struct Scsi_Host *instance, char *buf, int len) { #ifdef PROC_INTERFACE char *bp; struct WD33C93_hostdata *hd; int x; hd = (struct WD33C93_hostdata *) instance->hostdata; /* We accept the following * keywords (same format as command-line, but arguments are not optional): * debug * disconnect * period * resync * proc * nodma * level2 * burst * fast * nosync */ buf[len] = '\0'; for (bp = buf; *bp; ) { while (',' == *bp || ' ' == *bp) ++bp; if (!strncmp(bp, "debug:", 6)) { hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK; } else if (!strncmp(bp, "disconnect:", 11)) { x = simple_strtoul(bp+11, &bp, 0); if (x < DIS_NEVER || x > DIS_ALWAYS) x = DIS_ADAPTIVE; hd->disconnect = x; } else if (!strncmp(bp, "period:", 7)) { x = simple_strtoul(bp+7, &bp, 0); hd->default_sx_per = hd->sx_table[round_period((unsigned int) x, hd->sx_table)].period_ns; } else if (!strncmp(bp, "resync:", 7)) { set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0)); } else if (!strncmp(bp, "proc:", 5)) { hd->proc = simple_strtoul(bp+5, &bp, 0); } else if (!strncmp(bp, "nodma:", 6)) { hd->no_dma = simple_strtoul(bp+6, &bp, 0); } else if (!strncmp(bp, "level2:", 7)) { hd->level2 = simple_strtoul(bp+7, &bp, 0); } else if (!strncmp(bp, "burst:", 6)) { hd->dma_mode = simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA; } else if (!strncmp(bp, "fast:", 5)) { x = !!simple_strtol(bp+5, &bp, 0); if (x != hd->fast) set_resync(hd, 0xff); hd->fast = x; } else if (!strncmp(bp, "nosync:", 7)) { x = simple_strtoul(bp+7, &bp, 0); set_resync(hd, x ^ hd->no_sync); hd->no_sync = x; } else { break; /* unknown keyword,syntax-error,... 
*/ } } return len; #else return 0; #endif } int wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) { #ifdef PROC_INTERFACE struct WD33C93_hostdata *hd; struct scsi_cmnd *cmd; int x; hd = (struct WD33C93_hostdata *) instance->hostdata; spin_lock_irq(&hd->lock); if (hd->proc & PR_VERSION) seq_printf(m, "\nVersion %s - %s.", WD33C93_VERSION, WD33C93_DATE); if (hd->proc & PR_INFO) { seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d" " dma_mode=%02x fast=%d", hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); seq_puts(m, "\nsync_xfer[] = "); for (x = 0; x < 7; x++) seq_printf(m, "\t%02x", hd->sync_xfer[x]); seq_puts(m, "\nsync_stat[] = "); for (x = 0; x < 7; x++) seq_printf(m, "\t%02x", hd->sync_stat[x]); } #ifdef PROC_STATISTICS if (hd->proc & PR_STATISTICS) { seq_puts(m, "\ncommands issued: "); for (x = 0; x < 7; x++) seq_printf(m, "\t%ld", hd->cmd_cnt[x]); seq_puts(m, "\ndisconnects allowed:"); for (x = 0; x < 7; x++) seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); seq_puts(m, "\ndisconnects done: "); for (x = 0; x < 7; x++) seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); seq_printf(m, "\ninterrupts: %ld, DATA_PHASE ints: %ld DMA, %ld PIO", hd->int_cnt, hd->dma_cnt, hd->pio_cnt); } #endif if (hd->proc & PR_CONNECTED) { seq_puts(m, "\nconnected: "); if (hd->connected) { cmd = (struct scsi_cmnd *) hd->connected; seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); } } if (hd->proc & PR_INPUTQ) { seq_puts(m, "\ninput_Q: "); cmd = (struct scsi_cmnd *) hd->input_Q; while (cmd) { seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); cmd = (struct scsi_cmnd *) cmd->host_scribble; } } if (hd->proc & PR_DISCQ) { seq_puts(m, "\ndisconnected_Q:"); cmd = (struct scsi_cmnd *) hd->disconnected_Q; while (cmd) { seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); cmd = (struct scsi_cmnd *) cmd->host_scribble; } } seq_putc(m, '\n'); spin_unlock_irq(&hd->lock); #endif /* PROC_INTERFACE */ return 0; } EXPORT_SYMBOL(wd33c93_host_reset); EXPORT_SYMBOL(wd33c93_init); EXPORT_SYMBOL(wd33c93_abort); EXPORT_SYMBOL(wd33c93_queuecommand); EXPORT_SYMBOL(wd33c93_intr); EXPORT_SYMBOL(wd33c93_show_info); EXPORT_SYMBOL(wd33c93_write_info);
linux-master
drivers/scsi/wd33c93.c
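Editor's note: the wd33c93_setup()/check_setup_args() code above parses a comma-separated list of key:value keywords from the "wd33c93=" boot option (or from setup_strings supplied by a board driver), and wd33c93_write_info() accepts the same keywords through the host's /proc write interface. The lines below are only an illustrative sketch of that syntax; the particular values and the proc path placeholders are assumptions, not recommendations from the driver.

    # kernel command line (keywords recognised by check_setup_args)
    wd33c93=nosync:0x20,disconnect:2,period:500,proc:0x3f

    # same keyword syntax written to the SCSI host proc file (wd33c93_write_info)
    echo "period:200,resync:0xff" > /proc/scsi/<proc_name>/<host_no>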
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/page.h> #include <asm/mvme147hw.h> #include <asm/irq.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "wd33c93.h" #include "mvme147.h" static irqreturn_t mvme147_intr(int irq, void *data) { struct Scsi_Host *instance = data; if (irq == MVME147_IRQ_SCSI_PORT) wd33c93_intr(instance); else m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ return IRQ_HANDLED; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); struct Scsi_Host *instance = cmd->device->host; struct WD33C93_hostdata *hdata = shost_priv(instance); unsigned char flags = 0x01; unsigned long addr = virt_to_bus(scsi_pointer->ptr); /* setup dma direction */ if (!dir_in) flags |= 0x04; /* remember direction */ hdata->dma_dir = dir_in; if (dir_in) { /* invalidate any cache */ cache_clear(addr, scsi_pointer->this_residual); } else { /* push any dirty cache */ cache_push(addr, scsi_pointer->this_residual); } /* start DMA */ m147_pcc->dma_bcr = scsi_pointer->this_residual | (1 << 24); m147_pcc->dma_dadr = addr; m147_pcc->dma_cntrl = flags; /* return success */ return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { m147_pcc->dma_cntrl = 0; } static const struct scsi_host_template mvme147_host_template = { .module = THIS_MODULE, .proc_name = "MVME147", .name = "MVME147 built-in SCSI", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_host_reset_handler = wd33c93_host_reset, .show_info = wd33c93_show_info, .write_info = wd33c93_write_info, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .cmd_size = sizeof(struct scsi_pointer), }; static struct Scsi_Host *mvme147_shost; static int __init mvme147_init(void) { wd33c93_regs regs; struct WD33C93_hostdata *hdata; int error = -ENOMEM; if (!MACH_IS_MVME147) return 0; mvme147_shost = scsi_host_alloc(&mvme147_host_template, sizeof(struct WD33C93_hostdata)); if (!mvme147_shost) goto err_out; mvme147_shost->base = 0xfffe4000; mvme147_shost->irq = MVME147_IRQ_SCSI_PORT; regs.SASR = (volatile unsigned char *)0xfffe4000; regs.SCMD = (volatile unsigned char *)0xfffe4001; hdata = shost_priv(mvme147_shost); hdata->no_sync = 0xff; hdata->fast = 0; hdata->dma_mode = CTRL_DMA; wd33c93_init(mvme147_shost, regs, dma_setup, dma_stop, WD33C93_FS_8_10); error = request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_shost); if (error) goto err_unregister; error = request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, "MVME147 SCSI DMA", mvme147_shost); if (error) goto err_free_irq; #if 0 /* Disabled; causes problems booting */ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ udelay(100); m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */ udelay(2000); m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */ #endif m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */ m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ error = scsi_add_host(mvme147_shost, NULL); if (error) goto err_free_irq; scsi_scan_host(mvme147_shost); return 0; err_free_irq: free_irq(MVME147_IRQ_SCSI_PORT, 
mvme147_shost); err_unregister: scsi_host_put(mvme147_shost); err_out: return error; } static void __exit mvme147_exit(void) { scsi_remove_host(mvme147_shost); /* XXX Make sure DMA is stopped! */ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_shost); free_irq(MVME147_IRQ_SCSI_DMA, mvme147_shost); scsi_host_put(mvme147_shost); } module_init(mvme147_init); module_exit(mvme147_exit);
linux-master
drivers/scsi/mvme147.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * sim710.c - Copyright (C) 1999 Richard Hirst <[email protected]> * *---------------------------------------------------------------------------- *---------------------------------------------------------------------------- * * MCA card detection code by Trent McNair. (now deleted) * Fixes to not explicitly nul bss data from Xavier Bestel. * Some multiboard fixes from Rolf Eike Beer. * Auto probing of EISA config space from Trevor Hemsley. * * Rewritten to use 53c700.c by [email protected] */ #include <linux/module.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/init.h> #include <linux/eisa.h> #include <linux/interrupt.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" /* Must be enough for EISA */ #define MAX_SLOTS 8 static __u8 __initdata id_array[MAX_SLOTS] = { [0 ... MAX_SLOTS-1] = 7 }; static char *sim710; /* command line passed by insmod */ MODULE_AUTHOR("Richard Hirst"); MODULE_DESCRIPTION("Simple NCR53C710 driver"); MODULE_LICENSE("GPL"); module_param(sim710, charp, 0); #ifdef MODULE #define ARG_SEP ' ' #else #define ARG_SEP ',' #endif static __init int param_setup(char *str) { char *pos = str, *next; int slot = -1; while(pos != NULL && (next = strchr(pos, ':')) != NULL) { int val = (int)simple_strtoul(++next, NULL, 0); if(!strncmp(pos, "slot:", 5)) slot = val; else if(!strncmp(pos, "id:", 3)) { if(slot == -1) { printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); } else if(slot >= MAX_SLOTS) { printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); } else { id_array[slot] = val; } } if((pos = strchr(pos, ARG_SEP)) != NULL) pos++; } return 1; } __setup("sim710=", param_setup); static struct scsi_host_template sim710_driver_template = { .name = "LSI (Symbios) 710 EISA", .proc_name = "sim710", .this_id = 7, .module = THIS_MODULE, }; static int sim710_probe_common(struct device *dev, unsigned long base_addr, int irq, int clock, int differential, int scsi_id) { struct Scsi_Host * host = NULL; struct NCR_700_Host_Parameters *hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); printk(KERN_NOTICE "sim710: %s\n", dev_name(dev)); printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n", irq, clock, base_addr, scsi_id); if(hostdata == NULL) { printk(KERN_ERR "sim710: Failed to allocate host data\n"); goto out; } if(request_region(base_addr, 64, "sim710") == NULL) { printk(KERN_ERR "sim710: Failed to reserve IO region 0x%lx\n", base_addr); goto out_free; } /* Fill in the three required pieces of hostdata */ hostdata->base = ioport_map(base_addr, 64); hostdata->differential = differential; hostdata->clock = clock; hostdata->chip710 = 1; hostdata->burst_length = 8; /* and register the chip */ if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev)) == NULL) { printk(KERN_ERR "sim710: No host detected; card configuration problem?\n"); goto out_release; } host->this_id = scsi_id; host->base = base_addr; host->irq = irq; if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "sim710", host)) { printk(KERN_ERR "sim710: request_irq failed\n"); goto out_put_host; } dev_set_drvdata(dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_release: release_region(base_addr, 64); out_free: kfree(hostdata); out: return -ENODEV; } static int sim710_device_remove(struct device *dev) { struct Scsi_Host *host = 
dev_get_drvdata(dev); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; scsi_remove_host(host); NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); release_region(host->base, 64); return 0; } #ifdef CONFIG_EISA static struct eisa_device_id sim710_eisa_ids[] = { { "CPQ4410" }, { "CPQ4411" }, { "HWP0C80" }, { "" } }; MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids); static int sim710_eisa_probe(struct device *dev) { struct eisa_device *edev = to_eisa_device(dev); unsigned long io_addr = edev->base_addr; char eisa_cpq_irqs[] = { 11, 14, 15, 10, 9, 0 }; char eisa_hwp_irqs[] = { 3, 4, 5, 7, 12, 10, 11, 0}; char *eisa_irqs; unsigned char irq_index; unsigned char irq, differential = 0, scsi_id = 7; if(strcmp(edev->id.sig, "HWP0C80") == 0) { __u8 val; eisa_irqs = eisa_hwp_irqs; irq_index = (inb(io_addr + 0xc85) & 0x7) - 1; val = inb(io_addr + 0x4); scsi_id = ffs(val) - 1; if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) { printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev)); scsi_id = 7; } } else { eisa_irqs = eisa_cpq_irqs; irq_index = inb(io_addr + 0xc88) & 0x07; } if(irq_index >= strlen(eisa_irqs)) { printk("sim710.c: irq nasty\n"); return -ENODEV; } irq = eisa_irqs[irq_index]; return sim710_probe_common(dev, io_addr, irq, 50, differential, scsi_id); } static struct eisa_driver sim710_eisa_driver = { .id_table = sim710_eisa_ids, .driver = { .name = "sim710", .probe = sim710_eisa_probe, .remove = sim710_device_remove, }, }; #endif /* CONFIG_EISA */ static int __init sim710_init(void) { #ifdef MODULE if (sim710) param_setup(sim710); #endif #ifdef CONFIG_EISA /* * FIXME: We'd really like to return -ENODEV if no devices have actually * been found. However eisa_driver_register() only reports problems * with kobject_register() so simply return success for now. */ eisa_driver_register(&sim710_eisa_driver); #endif return 0; } static void __exit sim710_exit(void) { #ifdef CONFIG_EISA eisa_driver_unregister(&sim710_eisa_driver); #endif } module_init(sim710_init); module_exit(sim710_exit);
linux-master
drivers/scsi/sim710.c
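Editor's note: param_setup() above recognises "slot:" and "id:" tokens, separated by ',' when the driver is built in and by spaces when passed as the sim710= module option (ARG_SEP), and it warns if an id is given before any slot. A small illustrative sketch of that syntax follows; the slot and id values are arbitrary assumptions.

    # built-in driver, kernel command line
    sim710=slot:1,id:6

    # same options when loaded as a module
    modprobe sim710 sim710="slot:1 id:6"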
// SPDX-License-Identifier: GPL-2.0 #include <linux/bsg.h> #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/sg.h> #include "scsi_priv.h" #define uptr64(val) ((void __user *)(uintptr_t)(val)) static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr, bool open_for_write, unsigned int timeout) { struct scsi_cmnd *scmd; struct request *rq; struct bio *bio; int ret; if (hdr->protocol != BSG_PROTOCOL_SCSI || hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD) return -EINVAL; if (hdr->dout_xfer_len && hdr->din_xfer_len) { pr_warn_once("BIDI support in bsg has been removed.\n"); return -EOPNOTSUPP; } rq = scsi_alloc_request(q, hdr->dout_xfer_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); if (IS_ERR(rq)) return PTR_ERR(rq); rq->timeout = timeout; scmd = blk_mq_rq_to_pdu(rq); scmd->cmd_len = hdr->request_len; if (scmd->cmd_len > sizeof(scmd->cmnd)) { ret = -EINVAL; goto out_put_request; } ret = -EFAULT; if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len)) goto out_put_request; ret = -EPERM; if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) goto out_put_request; ret = 0; if (hdr->dout_xfer_len) { ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp), hdr->dout_xfer_len, GFP_KERNEL); } else if (hdr->din_xfer_len) { ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); } if (ret) goto out_put_request; bio = rq->bio; blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); /* * fill in all the output members */ hdr->device_status = scmd->result & 0xff; hdr->transport_status = host_byte(scmd->result); hdr->driver_status = 0; if (scsi_status_is_check_condition(scmd->result)) hdr->driver_status = DRIVER_SENSE; hdr->info = 0; if (hdr->device_status || hdr->transport_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; hdr->response_len = 0; if (scmd->sense_len && hdr->response) { int len = min_t(unsigned int, hdr->max_response_len, scmd->sense_len); if (copy_to_user(uptr64(hdr->response), scmd->sense_buffer, len)) ret = -EFAULT; else hdr->response_len = len; } if (rq_data_dir(rq) == READ) hdr->din_resid = scmd->resid_len; else hdr->dout_resid = scmd->resid_len; blk_rq_unmap_user(bio); out_put_request: blk_mq_free_request(rq); return ret; } struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev) { return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev, dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn); }
linux-master
drivers/scsi/scsi_bsg.c
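Editor's note: a minimal, self-contained userspace sketch of the request path served by scsi_bsg_sg_io_fn() above: it fills a struct sg_io_v4 header and issues a 6-byte INQUIRY through a bsg node with the SG_IO ioctl. The device path, transfer length and timeout are illustrative assumptions, not values taken from the driver.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>       /* SG_IO */
#include <linux/bsg.h>     /* struct sg_io_v4, BSG_PROTOCOL_SCSI */

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96-byte allocation length */
	unsigned char data[96], sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);          /* node name is an assumption */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (uintptr_t)cdb;
	hdr.din_xfer_len = sizeof(data);                     /* device-to-host data */
	hdr.din_xferp = (uintptr_t)data;
	hdr.max_response_len = sizeof(sense);
	hdr.response = (uintptr_t)sense;
	hdr.timeout = 30000;                                  /* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return 1;

	printf("device_status=%u din_resid=%d\n", hdr.device_status, hdr.din_resid);
	return 0;
}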
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005-2006 Dell Inc. * * Serial Attached SCSI (SAS) transport class. * * The SAS transport class contains common code to deal with SAS HBAs, * an aproximated representation of SAS topologies in the driver model, * and various sysfs attributes to expose these topologies and management * interfaces to userspace. * * In addition to the basic SCSI core objects this transport class * introduces two additional intermediate objects: The SAS PHY * as represented by struct sas_phy defines an "outgoing" PHY on * a SAS HBA or Expander, and the SAS remote PHY represented by * struct sas_rphy defines an "incoming" PHY on a SAS Expander or * end device. Note that this is purely a software concept, the * underlying hardware for a PHY and a remote PHY is the exactly * the same. * * There is no concept of a SAS port in this code, users can see * what PHYs form a wide port based on the port_identifier attribute, * which is the same for all PHYs in a port. */ #include <linux/init.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/bsg.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "scsi_sas_internal.h" struct sas_host_attrs { struct list_head rphy_list; struct mutex lock; struct request_queue *q; u32 next_target_id; u32 next_expander_id; int next_port_id; }; #define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) /* * Hack to allow attributes of the same name in different objects. */ #define SAS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) /* * Pretty printing helpers */ #define sas_bitfield_name_match(title, table) \ static ssize_t \ get_sas_##title##_names(u32 table_key, char *buf) \ { \ char *prefix = ""; \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value & table_key) { \ len += sprintf(buf + len, "%s%s", \ prefix, table[i].name); \ prefix = ", "; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } #define sas_bitfield_name_set(title, table) \ static ssize_t \ set_sas_##title##_names(u32 *table_key, const char *buf) \ { \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ len = strlen(table[i].name); \ if (strncmp(buf, table[i].name, len) == 0 && \ (buf[len] == '\n' || buf[len] == '\0')) { \ *table_key = table[i].value; \ return 0; \ } \ } \ return -EINVAL; \ } #define sas_bitfield_name_search(title, table) \ static ssize_t \ get_sas_##title##_names(u32 table_key, char *buf) \ { \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value == table_key) { \ len += sprintf(buf + len, "%s", \ table[i].name); \ break; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } static struct { u32 value; char *name; } sas_device_type_names[] = { { SAS_PHY_UNUSED, "unused" }, { SAS_END_DEVICE, "end device" }, { SAS_EDGE_EXPANDER_DEVICE, "edge expander" }, { SAS_FANOUT_EXPANDER_DEVICE, "fanout expander" }, }; sas_bitfield_name_search(device_type, sas_device_type_names) static struct { u32 value; char *name; } sas_protocol_names[] = { { SAS_PROTOCOL_SATA, "sata" }, { SAS_PROTOCOL_SMP, "smp" }, { SAS_PROTOCOL_STP, "stp" }, { SAS_PROTOCOL_SSP, "ssp" }, }; 
sas_bitfield_name_match(protocol, sas_protocol_names) static struct { u32 value; char *name; } sas_linkspeed_names[] = { { SAS_LINK_RATE_UNKNOWN, "Unknown" }, { SAS_PHY_DISABLED, "Phy disabled" }, { SAS_LINK_RATE_FAILED, "Link Rate failed" }, { SAS_SATA_SPINUP_HOLD, "Spin-up hold" }, { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, { SAS_LINK_RATE_22_5_GBPS, "22.5 Gbit" }, }; sas_bitfield_name_search(linkspeed, sas_linkspeed_names) sas_bitfield_name_set(linkspeed, sas_linkspeed_names) static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev) { struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); struct sas_end_device *rdev; BUG_ON(rphy->identify.device_type != SAS_END_DEVICE); rdev = rphy_to_end_device(rphy); return rdev; } static int sas_smp_dispatch(struct bsg_job *job) { struct Scsi_Host *shost = dev_to_shost(job->dev); struct sas_rphy *rphy = NULL; if (!scsi_is_host_device(job->dev)) rphy = dev_to_rphy(job->dev); if (!job->reply_payload.payload_len) { dev_warn(job->dev, "space for a smp response is missing\n"); bsg_job_done(job, -EINVAL, 0); return 0; } to_sas_internal(shost->transportt)->f->smp_handler(job, shost, rphy); return 0; } static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) { struct request_queue *q; if (!to_sas_internal(shost->transportt)->f->smp_handler) { printk("%s can't handle SMP requests\n", shost->hostt->name); return 0; } if (rphy) { q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); rphy->q = q; } else { char name[20]; snprintf(name, sizeof(name), "sas_host%d", shost->host_no); q = bsg_setup_queue(&shost->shost_gendev, name, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); to_sas_host_attrs(shost)->q = q; } return 0; } /* * SAS host attributes */ static int sas_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct device *dma_dev = shost->dma_dev; INIT_LIST_HEAD(&sas_host->rphy_list); mutex_init(&sas_host->lock); sas_host->next_target_id = 0; sas_host->next_expander_id = 0; sas_host->next_port_id = 0; if (sas_bsg_initialize(shost, NULL)) dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n", shost->host_no); if (dma_dev->dma_mask) { shost->opt_sectors = min_t(unsigned int, shost->max_sectors, dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT); } return 0; } static int sas_host_remove(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct request_queue *q = to_sas_host_attrs(shost)->q; bsg_remove_queue(q); return 0; } static DECLARE_TRANSPORT_CLASS(sas_host_class, "sas_host", sas_host_setup, sas_host_remove, NULL); static int sas_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->t.host_attrs.ac == cont; } static int do_sas_phy_delete(struct device *dev, void *data) { int pass = (int)(unsigned long)data; if (pass == 0 && scsi_is_sas_port(dev)) sas_port_delete(dev_to_sas_port(dev)); else if (pass == 1 && scsi_is_sas_phy(dev)) 
sas_phy_delete(dev_to_phy(dev)); return 0; } /** * sas_remove_children - tear down a devices SAS data structures * @dev: device belonging to the sas object * * Removes all SAS PHYs and remote PHYs for a given object */ void sas_remove_children(struct device *dev) { device_for_each_child(dev, (void *)0, do_sas_phy_delete); device_for_each_child(dev, (void *)1, do_sas_phy_delete); } EXPORT_SYMBOL(sas_remove_children); /** * sas_remove_host - tear down a Scsi_Host's SAS data structures * @shost: Scsi Host that is torn down * * Removes all SAS PHYs and remote PHYs for a given Scsi_Host and remove the * Scsi_Host as well. * * Note: Do not call scsi_remove_host() on the Scsi_Host any more, as it is * already removed. */ void sas_remove_host(struct Scsi_Host *shost) { sas_remove_children(&shost->shost_gendev); scsi_remove_host(shost); } EXPORT_SYMBOL(sas_remove_host); /** * sas_get_address - return the SAS address of the device * @sdev: scsi device * * Returns the SAS address of the scsi device */ u64 sas_get_address(struct scsi_device *sdev) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); return rdev->rphy.identify.sas_address; } EXPORT_SYMBOL(sas_get_address); /** * sas_tlr_supported - checking TLR bit in vpd 0x90 * @sdev: scsi device struct * * Check Transport Layer Retries are supported or not. * If vpd page 0x90 is present, TRL is supported. * */ unsigned int sas_tlr_supported(struct scsi_device *sdev) { const int vpd_len = 32; struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); char *buffer = kzalloc(vpd_len, GFP_KERNEL); int ret = 0; if (!buffer) goto out; if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len)) goto out; /* * Magic numbers: the VPD Protocol page (0x90) * has a 4 byte header and then one entry per device port * the TLR bit is at offset 8 on each port entry * if we take the first port, that's at total offset 12 */ ret = buffer[12] & 0x01; out: kfree(buffer); rdev->tlr_supported = ret; return ret; } EXPORT_SYMBOL_GPL(sas_tlr_supported); /** * sas_disable_tlr - setting TLR flags * @sdev: scsi device struct * * Seting tlr_enabled flag to 0. * */ void sas_disable_tlr(struct scsi_device *sdev) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); rdev->tlr_enabled = 0; } EXPORT_SYMBOL_GPL(sas_disable_tlr); /** * sas_enable_tlr - setting TLR flags * @sdev: scsi device struct * * Seting tlr_enabled flag 1. 
* */ void sas_enable_tlr(struct scsi_device *sdev) { unsigned int tlr_supported = 0; tlr_supported = sas_tlr_supported(sdev); if (tlr_supported) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); rdev->tlr_enabled = 1; } return; } EXPORT_SYMBOL_GPL(sas_enable_tlr); unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); return rdev->tlr_enabled; } EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); /* * SAS Phy attributes */ #define sas_phy_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_phy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ \ return snprintf(buf, 20, format_string, cast phy->field); \ } #define sas_phy_simple_attr(field, name, format_string, type) \ sas_phy_show_simple(field, name, format_string, (type)) \ static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL) #define sas_phy_show_protocol(field, name) \ static ssize_t \ show_sas_phy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ \ if (!phy->field) \ return snprintf(buf, 20, "none\n"); \ return get_sas_protocol_names(phy->field, buf); \ } #define sas_phy_protocol_attr(field, name) \ sas_phy_show_protocol(field, name) \ static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL) #define sas_phy_show_linkspeed(field) \ static ssize_t \ show_sas_phy_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ \ return get_sas_linkspeed_names(phy->field, buf); \ } /* Fudge to tell if we're minimum or maximum */ #define sas_phy_store_linkspeed(field) \ static ssize_t \ store_sas_phy_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ struct sas_internal *i = to_sas_internal(shost->transportt); \ u32 value; \ struct sas_phy_linkrates rates = {0}; \ int error; \ \ error = set_sas_linkspeed_names(&value, buf); \ if (error) \ return error; \ rates.field = value; \ error = i->f->set_phy_speed(phy, &rates); \ \ return error ? error : count; \ } #define sas_phy_linkspeed_rw_attr(field) \ sas_phy_show_linkspeed(field) \ sas_phy_store_linkspeed(field) \ static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \ store_sas_phy_##field) #define sas_phy_linkspeed_attr(field) \ sas_phy_show_linkspeed(field) \ static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) #define sas_phy_show_linkerror(field) \ static ssize_t \ show_sas_phy_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ struct sas_internal *i = to_sas_internal(shost->transportt); \ int error; \ \ error = i->f->get_linkerrors ? 
i->f->get_linkerrors(phy) : 0; \ if (error) \ return error; \ return snprintf(buf, 20, "%u\n", phy->field); \ } #define sas_phy_linkerror_attr(field) \ sas_phy_show_linkerror(field) \ static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) static ssize_t show_sas_device_type(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_phy *phy = transport_class_to_phy(dev); if (!phy->identify.device_type) return snprintf(buf, 20, "none\n"); return get_sas_device_type_names(phy->identify.device_type, buf); } static DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL); static ssize_t do_sas_phy_enable(struct device *dev, size_t count, int enable) { struct sas_phy *phy = transport_class_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); int error; error = i->f->phy_enable(phy, enable); if (error) return error; phy->enabled = enable; return count; }; static ssize_t store_sas_phy_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { if (count < 1) return -EINVAL; switch (buf[0]) { case '0': do_sas_phy_enable(dev, count, 0); break; case '1': do_sas_phy_enable(dev, count, 1); break; default: return -EINVAL; } return count; } static ssize_t show_sas_phy_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_phy *phy = transport_class_to_phy(dev); return snprintf(buf, 20, "%d\n", phy->enabled); } static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable, store_sas_phy_enable); static ssize_t do_sas_phy_reset(struct device *dev, size_t count, int hard_reset) { struct sas_phy *phy = transport_class_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); int error; error = i->f->phy_reset(phy, hard_reset); if (error) return error; phy->enabled = 1; return count; }; static ssize_t store_sas_link_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return do_sas_phy_reset(dev, count, 0); } static DEVICE_ATTR(link_reset, S_IWUSR, NULL, store_sas_link_reset); static ssize_t store_sas_hard_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return do_sas_phy_reset(dev, count, 1); } static DEVICE_ATTR(hard_reset, S_IWUSR, NULL, store_sas_hard_reset); sas_phy_protocol_attr(identify.initiator_port_protocols, initiator_port_protocols); sas_phy_protocol_attr(identify.target_port_protocols, target_port_protocols); sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); sas_phy_linkspeed_attr(negotiated_linkrate); sas_phy_linkspeed_attr(minimum_linkrate_hw); sas_phy_linkspeed_rw_attr(minimum_linkrate); sas_phy_linkspeed_attr(maximum_linkrate_hw); sas_phy_linkspeed_rw_attr(maximum_linkrate); sas_phy_linkerror_attr(invalid_dword_count); sas_phy_linkerror_attr(running_disparity_error_count); sas_phy_linkerror_attr(loss_of_dword_sync_count); sas_phy_linkerror_attr(phy_reset_problem_count); static int sas_phy_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct sas_phy *phy = dev_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); if (i->f->phy_setup) i->f->phy_setup(phy); return 0; } static DECLARE_TRANSPORT_CLASS(sas_phy_class, "sas_phy", sas_phy_setup, NULL, NULL); 
static int sas_phy_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_sas_phy(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->phy_attr_cont.ac == cont; } static void sas_phy_release(struct device *dev) { struct sas_phy *phy = dev_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); if (i->f->phy_release) i->f->phy_release(phy); put_device(dev->parent); kfree(phy); } /** * sas_phy_alloc - allocates and initialize a SAS PHY structure * @parent: Parent device * @number: Phy index * * Allocates an SAS PHY structure. It will be added in the device tree * below the device specified by @parent, which has to be either a Scsi_Host * or sas_rphy. * * Returns: * SAS PHY allocated or %NULL if the allocation failed. */ struct sas_phy *sas_phy_alloc(struct device *parent, int number) { struct Scsi_Host *shost = dev_to_shost(parent); struct sas_phy *phy; phy = kzalloc(sizeof(*phy), GFP_KERNEL); if (!phy) return NULL; phy->number = number; phy->enabled = 1; device_initialize(&phy->dev); phy->dev.parent = get_device(parent); phy->dev.release = sas_phy_release; INIT_LIST_HEAD(&phy->port_siblings); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no, rphy->scsi_target_id, number); } else dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number); transport_setup_device(&phy->dev); return phy; } EXPORT_SYMBOL(sas_phy_alloc); /** * sas_phy_add - add a SAS PHY to the device hierarchy * @phy: The PHY to be added * * Publishes a SAS PHY to the rest of the system. */ int sas_phy_add(struct sas_phy *phy) { int error; error = device_add(&phy->dev); if (error) return error; error = transport_add_device(&phy->dev); if (error) { device_del(&phy->dev); return error; } transport_configure_device(&phy->dev); return 0; } EXPORT_SYMBOL(sas_phy_add); /** * sas_phy_free - free a SAS PHY * @phy: SAS PHY to free * * Frees the specified SAS PHY. * * Note: * This function must only be called on a PHY that has not * successfully been added using sas_phy_add(). */ void sas_phy_free(struct sas_phy *phy) { transport_destroy_device(&phy->dev); put_device(&phy->dev); } EXPORT_SYMBOL(sas_phy_free); /** * sas_phy_delete - remove SAS PHY * @phy: SAS PHY to remove * * Removes the specified SAS PHY. If the SAS PHY has an * associated remote PHY it is removed before. 
*/ void sas_phy_delete(struct sas_phy *phy) { struct device *dev = &phy->dev; /* this happens if the phy is still part of a port when deleted */ BUG_ON(!list_empty(&phy->port_siblings)); transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL(sas_phy_delete); /** * scsi_is_sas_phy - check if a struct device represents a SAS PHY * @dev: device to check * * Returns: * %1 if the device represents a SAS PHY, %0 else */ int scsi_is_sas_phy(const struct device *dev) { return dev->release == sas_phy_release; } EXPORT_SYMBOL(scsi_is_sas_phy); /* * SAS Port attributes */ #define sas_port_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_port_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_port *port = transport_class_to_sas_port(dev); \ \ return snprintf(buf, 20, format_string, cast port->field); \ } #define sas_port_simple_attr(field, name, format_string, type) \ sas_port_show_simple(field, name, format_string, (type)) \ static DEVICE_ATTR(name, S_IRUGO, show_sas_port_##name, NULL) sas_port_simple_attr(num_phys, num_phys, "%d\n", int); static DECLARE_TRANSPORT_CLASS(sas_port_class, "sas_port", NULL, NULL, NULL); static int sas_port_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_sas_port(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->port_attr_cont.ac == cont; } static void sas_port_release(struct device *dev) { struct sas_port *port = dev_to_sas_port(dev); BUG_ON(!list_empty(&port->phy_list)); put_device(dev->parent); kfree(port); } static void sas_port_create_link(struct sas_port *port, struct sas_phy *phy) { int res; res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, dev_name(&phy->dev)); if (res) goto err; res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); if (res) goto err; return; err: printk(KERN_ERR "%s: Cannot create port links, err=%d\n", __func__, res); } static void sas_port_delete_link(struct sas_port *port, struct sas_phy *phy) { sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev)); sysfs_remove_link(&phy->dev.kobj, "port"); } /** sas_port_alloc - allocate and initialize a SAS port structure * * @parent: parent device * @port_id: port number * * Allocates a SAS port structure. It will be added to the device tree * below the device specified by @parent which must be either a Scsi_Host * or a sas_expander_device. 
* * Returns %NULL on error */ struct sas_port *sas_port_alloc(struct device *parent, int port_id) { struct Scsi_Host *shost = dev_to_shost(parent); struct sas_port *port; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return NULL; port->port_identifier = port_id; device_initialize(&port->dev); port->dev.parent = get_device(parent); port->dev.release = sas_port_release; mutex_init(&port->phy_list_mutex); INIT_LIST_HEAD(&port->phy_list); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no, rphy->scsi_target_id, port->port_identifier); } else dev_set_name(&port->dev, "port-%d:%d", shost->host_no, port->port_identifier); transport_setup_device(&port->dev); return port; } EXPORT_SYMBOL(sas_port_alloc); /** sas_port_alloc_num - allocate and initialize a SAS port structure * * @parent: parent device * * Allocates a SAS port structure and a number to go with it. This * interface is really for adapters where the port number has no * meansing, so the sas class should manage them. It will be added to * the device tree below the device specified by @parent which must be * either a Scsi_Host or a sas_expander_device. * * Returns %NULL on error */ struct sas_port *sas_port_alloc_num(struct device *parent) { int index; struct Scsi_Host *shost = dev_to_shost(parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); /* FIXME: use idr for this eventually */ mutex_lock(&sas_host->lock); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); struct sas_expander_device *exp = rphy_to_expander_device(rphy); index = exp->next_port_id++; } else index = sas_host->next_port_id++; mutex_unlock(&sas_host->lock); return sas_port_alloc(parent, index); } EXPORT_SYMBOL(sas_port_alloc_num); /** * sas_port_add - add a SAS port to the device hierarchy * @port: port to be added * * publishes a port to the rest of the system */ int sas_port_add(struct sas_port *port) { int error; /* No phys should be added until this is made visible */ BUG_ON(!list_empty(&port->phy_list)); error = device_add(&port->dev); if (error) return error; transport_add_device(&port->dev); transport_configure_device(&port->dev); return 0; } EXPORT_SYMBOL(sas_port_add); /** * sas_port_free - free a SAS PORT * @port: SAS PORT to free * * Frees the specified SAS PORT. * * Note: * This function must only be called on a PORT that has not * successfully been added using sas_port_add(). */ void sas_port_free(struct sas_port *port) { transport_destroy_device(&port->dev); put_device(&port->dev); } EXPORT_SYMBOL(sas_port_free); /** * sas_port_delete - remove SAS PORT * @port: SAS PORT to remove * * Removes the specified SAS PORT. If the SAS PORT has an * associated phys, unlink them from the port as well. 
*/ void sas_port_delete(struct sas_port *port) { struct device *dev = &port->dev; struct sas_phy *phy, *tmp_phy; if (port->rphy) { sas_rphy_delete(port->rphy); port->rphy = NULL; } mutex_lock(&port->phy_list_mutex); list_for_each_entry_safe(phy, tmp_phy, &port->phy_list, port_siblings) { sas_port_delete_link(port, phy); list_del_init(&phy->port_siblings); } mutex_unlock(&port->phy_list_mutex); if (port->is_backlink) { struct device *parent = port->dev.parent; sysfs_remove_link(&port->dev.kobj, dev_name(parent)); port->is_backlink = 0; } transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL(sas_port_delete); /** * scsi_is_sas_port - check if a struct device represents a SAS port * @dev: device to check * * Returns: * %1 if the device represents a SAS Port, %0 else */ int scsi_is_sas_port(const struct device *dev) { return dev->release == sas_port_release; } EXPORT_SYMBOL(scsi_is_sas_port); /** * sas_port_get_phy - try to take a reference on a port member * @port: port to check */ struct sas_phy *sas_port_get_phy(struct sas_port *port) { struct sas_phy *phy; mutex_lock(&port->phy_list_mutex); if (list_empty(&port->phy_list)) phy = NULL; else { struct list_head *ent = port->phy_list.next; phy = list_entry(ent, typeof(*phy), port_siblings); get_device(&phy->dev); } mutex_unlock(&port->phy_list_mutex); return phy; } EXPORT_SYMBOL(sas_port_get_phy); /** * sas_port_add_phy - add another phy to a port to form a wide port * @port: port to add the phy to * @phy: phy to add * * When a port is initially created, it is empty (has no phys). All * ports must have at least one phy to operated, and all wide ports * must have at least two. The current code makes no difference * between ports and wide ports, but the only object that can be * connected to a remote device is a port, so ports must be formed on * all devices with phys if they're connected to anything. */ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy) { mutex_lock(&port->phy_list_mutex); if (unlikely(!list_empty(&phy->port_siblings))) { /* make sure we're already on this port */ struct sas_phy *tmp; list_for_each_entry(tmp, &port->phy_list, port_siblings) if (tmp == phy) break; /* If this trips, you added a phy that was already * part of a different port */ if (unlikely(tmp != phy)) { dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", dev_name(&phy->dev)); BUG(); } } else { sas_port_create_link(port, phy); list_add_tail(&phy->port_siblings, &port->phy_list); port->num_phys++; } mutex_unlock(&port->phy_list_mutex); } EXPORT_SYMBOL(sas_port_add_phy); /** * sas_port_delete_phy - remove a phy from a port or wide port * @port: port to remove the phy from * @phy: phy to remove * * This operation is used for tearing down ports again. It must be * done to every port or wide port before calling sas_port_delete. 
*/ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy) { mutex_lock(&port->phy_list_mutex); sas_port_delete_link(port, phy); list_del_init(&phy->port_siblings); port->num_phys--; mutex_unlock(&port->phy_list_mutex); } EXPORT_SYMBOL(sas_port_delete_phy); void sas_port_mark_backlink(struct sas_port *port) { int res; struct device *parent = port->dev.parent->parent->parent; if (port->is_backlink) return; port->is_backlink = 1; res = sysfs_create_link(&port->dev.kobj, &parent->kobj, dev_name(parent)); if (res) goto err; return; err: printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", __func__, res); } EXPORT_SYMBOL(sas_port_mark_backlink); /* * SAS remote PHY attributes. */ #define sas_rphy_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_rphy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ \ return snprintf(buf, 20, format_string, cast rphy->field); \ } #define sas_rphy_simple_attr(field, name, format_string, type) \ sas_rphy_show_simple(field, name, format_string, (type)) \ static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \ show_sas_rphy_##name, NULL) #define sas_rphy_show_protocol(field, name) \ static ssize_t \ show_sas_rphy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ \ if (!rphy->field) \ return snprintf(buf, 20, "none\n"); \ return get_sas_protocol_names(rphy->field, buf); \ } #define sas_rphy_protocol_attr(field, name) \ sas_rphy_show_protocol(field, name) \ static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \ show_sas_rphy_##name, NULL) static ssize_t show_sas_rphy_device_type(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_rphy *rphy = transport_class_to_rphy(dev); if (!rphy->identify.device_type) return snprintf(buf, 20, "none\n"); return get_sas_device_type_names( rphy->identify.device_type, buf); } static SAS_DEVICE_ATTR(rphy, device_type, S_IRUGO, show_sas_rphy_device_type, NULL); static ssize_t show_sas_rphy_enclosure_identifier(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_rphy *rphy = transport_class_to_rphy(dev); struct sas_phy *phy = dev_to_phy(rphy->dev.parent); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); u64 identifier; int error; error = i->f->get_enclosure_identifier(rphy, &identifier); if (error) return error; return sprintf(buf, "0x%llx\n", (unsigned long long)identifier); } static SAS_DEVICE_ATTR(rphy, enclosure_identifier, S_IRUGO, show_sas_rphy_enclosure_identifier, NULL); static ssize_t show_sas_rphy_bay_identifier(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_rphy *rphy = transport_class_to_rphy(dev); struct sas_phy *phy = dev_to_phy(rphy->dev.parent); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); int val; val = i->f->get_bay_identifier(rphy); if (val < 0) return val; return sprintf(buf, "%d\n", val); } static SAS_DEVICE_ATTR(rphy, bay_identifier, S_IRUGO, show_sas_rphy_bay_identifier, NULL); sas_rphy_protocol_attr(identify.initiator_port_protocols, initiator_port_protocols); sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols); sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 
sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32); /* only need 8 bytes of data plus header (4 or 8) */ #define BUF_SIZE 64 int sas_read_port_mode_page(struct scsi_device *sdev) { char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); struct scsi_mode_data mode_data; int error; if (!buffer) return -ENOMEM; error = scsi_mode_sense(sdev, 1, 0x19, 0, buffer, BUF_SIZE, 30*HZ, 3, &mode_data, NULL); if (error) goto out; msdata = buffer + mode_data.header_length + mode_data.block_descriptor_length; if (msdata - buffer > BUF_SIZE - 8) goto out; error = 0; rdev->ready_led_meaning = msdata[2] & 0x10 ? 1 : 0; rdev->I_T_nexus_loss_timeout = (msdata[4] << 8) + msdata[5]; rdev->initiator_response_timeout = (msdata[6] << 8) + msdata[7]; out: kfree(buffer); return error; } EXPORT_SYMBOL(sas_read_port_mode_page); static DECLARE_TRANSPORT_CLASS(sas_end_dev_class, "sas_end_device", NULL, NULL, NULL); #define sas_end_dev_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_end_dev_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ struct sas_end_device *rdev = rphy_to_end_device(rphy); \ \ return snprintf(buf, 20, format_string, cast rdev->field); \ } #define sas_end_dev_simple_attr(field, name, format_string, type) \ sas_end_dev_show_simple(field, name, format_string, (type)) \ static SAS_DEVICE_ATTR(end_dev, name, S_IRUGO, \ show_sas_end_dev_##name, NULL) sas_end_dev_simple_attr(ready_led_meaning, ready_led_meaning, "%d\n", int); sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout, "%d\n", int); sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, "%d\n", int); sas_end_dev_simple_attr(tlr_supported, tlr_supported, "%d\n", int); sas_end_dev_simple_attr(tlr_enabled, tlr_enabled, "%d\n", int); static DECLARE_TRANSPORT_CLASS(sas_expander_class, "sas_expander", NULL, NULL, NULL); #define sas_expander_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_expander_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ struct sas_expander_device *edev = rphy_to_expander_device(rphy); \ \ return snprintf(buf, 20, format_string, cast edev->field); \ } #define sas_expander_simple_attr(field, name, format_string, type) \ sas_expander_show_simple(field, name, format_string, (type)) \ static SAS_DEVICE_ATTR(expander, name, S_IRUGO, \ show_sas_expander_##name, NULL) sas_expander_simple_attr(vendor_id, vendor_id, "%s\n", char *); sas_expander_simple_attr(product_id, product_id, "%s\n", char *); sas_expander_simple_attr(product_rev, product_rev, "%s\n", char *); sas_expander_simple_attr(component_vendor_id, component_vendor_id, "%s\n", char *); sas_expander_simple_attr(component_id, component_id, "%u\n", unsigned int); sas_expander_simple_attr(component_revision_id, component_revision_id, "%u\n", unsigned int); sas_expander_simple_attr(level, level, "%d\n", int); static DECLARE_TRANSPORT_CLASS(sas_rphy_class, "sas_device", NULL, NULL, NULL); static int sas_rphy_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_sas_rphy(dev)) return 0; shost = dev_to_shost(dev->parent->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return 
&i->rphy_attr_cont.ac == cont; } static int sas_end_dev_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; struct sas_rphy *rphy; if (!scsi_is_sas_rphy(dev)) return 0; shost = dev_to_shost(dev->parent->parent); rphy = dev_to_rphy(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->end_dev_attr_cont.ac == cont && rphy->identify.device_type == SAS_END_DEVICE; } static int sas_expander_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; struct sas_rphy *rphy; if (!scsi_is_sas_rphy(dev)) return 0; shost = dev_to_shost(dev->parent->parent); rphy = dev_to_rphy(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->expander_attr_cont.ac == cont && (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE); } static void sas_expander_release(struct device *dev) { struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_expander_device *edev = rphy_to_expander_device(rphy); put_device(dev->parent); kfree(edev); } static void sas_end_device_release(struct device *dev) { struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_end_device *edev = rphy_to_end_device(rphy); put_device(dev->parent); kfree(edev); } /** * sas_rphy_initialize - common rphy initialization * @rphy: rphy to initialise * * Used by both sas_end_device_alloc() and sas_expander_alloc() to * initialise the common rphy component of each. */ static void sas_rphy_initialize(struct sas_rphy *rphy) { INIT_LIST_HEAD(&rphy->list); } /** * sas_end_device_alloc - allocate an rphy for an end device * @parent: which port * * Allocates an SAS remote PHY structure, connected to @parent. * * Returns: * SAS PHY allocated or %NULL if the allocation failed. */ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent) { struct Scsi_Host *shost = dev_to_shost(&parent->dev); struct sas_end_device *rdev; rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { return NULL; } device_initialize(&rdev->rphy.dev); rdev->rphy.dev.parent = get_device(&parent->dev); rdev->rphy.dev.release = sas_end_device_release; if (scsi_is_sas_expander_device(parent->dev.parent)) { struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent); dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d", shost->host_no, rphy->scsi_target_id, parent->port_identifier); } else dev_set_name(&rdev->rphy.dev, "end_device-%d:%d", shost->host_no, parent->port_identifier); rdev->rphy.identify.device_type = SAS_END_DEVICE; sas_rphy_initialize(&rdev->rphy); transport_setup_device(&rdev->rphy.dev); return &rdev->rphy; } EXPORT_SYMBOL(sas_end_device_alloc); /** * sas_expander_alloc - allocate an rphy for an end device * @parent: which port * @type: SAS_EDGE_EXPANDER_DEVICE or SAS_FANOUT_EXPANDER_DEVICE * * Allocates an SAS remote PHY structure, connected to @parent. * * Returns: * SAS PHY allocated or %NULL if the allocation failed. 
*/ struct sas_rphy *sas_expander_alloc(struct sas_port *parent, enum sas_device_type type) { struct Scsi_Host *shost = dev_to_shost(&parent->dev); struct sas_expander_device *rdev; struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); BUG_ON(type != SAS_EDGE_EXPANDER_DEVICE && type != SAS_FANOUT_EXPANDER_DEVICE); rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { return NULL; } device_initialize(&rdev->rphy.dev); rdev->rphy.dev.parent = get_device(&parent->dev); rdev->rphy.dev.release = sas_expander_release; mutex_lock(&sas_host->lock); rdev->rphy.scsi_target_id = sas_host->next_expander_id++; mutex_unlock(&sas_host->lock); dev_set_name(&rdev->rphy.dev, "expander-%d:%d", shost->host_no, rdev->rphy.scsi_target_id); rdev->rphy.identify.device_type = type; sas_rphy_initialize(&rdev->rphy); transport_setup_device(&rdev->rphy.dev); return &rdev->rphy; } EXPORT_SYMBOL(sas_expander_alloc); /** * sas_rphy_add - add a SAS remote PHY to the device hierarchy * @rphy: The remote PHY to be added * * Publishes a SAS remote PHY to the rest of the system. */ int sas_rphy_add(struct sas_rphy *rphy) { struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_identify *identify = &rphy->identify; int error; if (parent->rphy) return -ENXIO; parent->rphy = rphy; error = device_add(&rphy->dev); if (error) return error; transport_add_device(&rphy->dev); transport_configure_device(&rphy->dev); if (sas_bsg_initialize(shost, rphy)) printk("fail to a bsg device %s\n", dev_name(&rphy->dev)); mutex_lock(&sas_host->lock); list_add_tail(&rphy->list, &sas_host->rphy_list); if (identify->device_type == SAS_END_DEVICE && (identify->target_port_protocols & (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA))) rphy->scsi_target_id = sas_host->next_target_id++; else if (identify->device_type == SAS_END_DEVICE) rphy->scsi_target_id = -1; mutex_unlock(&sas_host->lock); if (identify->device_type == SAS_END_DEVICE && rphy->scsi_target_id != -1) { int lun; if (identify->target_port_protocols & SAS_PROTOCOL_SSP) lun = SCAN_WILD_CARD; else lun = 0; scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, SCSI_SCAN_INITIAL); } return 0; } EXPORT_SYMBOL(sas_rphy_add); /** * sas_rphy_free - free a SAS remote PHY * @rphy: SAS remote PHY to free * * Frees the specified SAS remote PHY. * * Note: * This function must only be called on a remote * PHY that has not successfully been added using * sas_rphy_add() (or has been sas_rphy_remove()'d) */ void sas_rphy_free(struct sas_rphy *rphy) { struct device *dev = &rphy->dev; struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); mutex_lock(&sas_host->lock); list_del(&rphy->list); mutex_unlock(&sas_host->lock); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL(sas_rphy_free); /** * sas_rphy_delete - remove and free SAS remote PHY * @rphy: SAS remote PHY to remove and free * * Removes the specified SAS remote PHY and frees it. 
*/ void sas_rphy_delete(struct sas_rphy *rphy) { sas_rphy_remove(rphy); sas_rphy_free(rphy); } EXPORT_SYMBOL(sas_rphy_delete); /** * sas_rphy_unlink - unlink SAS remote PHY * @rphy: SAS remote phy to unlink from its parent port * * Removes port reference to an rphy */ void sas_rphy_unlink(struct sas_rphy *rphy) { struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); parent->rphy = NULL; } EXPORT_SYMBOL(sas_rphy_unlink); /** * sas_rphy_remove - remove SAS remote PHY * @rphy: SAS remote phy to remove * * Removes the specified SAS remote PHY. */ void sas_rphy_remove(struct sas_rphy *rphy) { struct device *dev = &rphy->dev; switch (rphy->identify.device_type) { case SAS_END_DEVICE: scsi_remove_target(dev); break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: sas_remove_children(dev); break; default: break; } sas_rphy_unlink(rphy); bsg_remove_queue(rphy->q); transport_remove_device(dev); device_del(dev); } EXPORT_SYMBOL(sas_rphy_remove); /** * scsi_is_sas_rphy - check if a struct device represents a SAS remote PHY * @dev: device to check * * Returns: * %1 if the device represents a SAS remote PHY, %0 else */ int scsi_is_sas_rphy(const struct device *dev) { return dev->release == sas_end_device_release || dev->release == sas_expander_release; } EXPORT_SYMBOL(scsi_is_sas_rphy); /* * SCSI scan helper */ static int sas_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_rphy *rphy; mutex_lock(&sas_host->lock); list_for_each_entry(rphy, &sas_host->rphy_list, list) { if (rphy->identify.device_type != SAS_END_DEVICE || rphy->scsi_target_id == -1) continue; if ((channel == SCAN_WILD_CARD || channel == 0) && (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, SCSI_SCAN_MANUAL); } } mutex_unlock(&sas_host->lock); return 0; } /* * Setup / Teardown code */ #define SETUP_TEMPLATE(attrb, field, perm, test) \ i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ #define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \ i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ if (ro_test) { \ i->private_##attrb[count].attr.mode = ro_perm; \ i->private_##attrb[count].store = NULL; \ } \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ #define SETUP_RPORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) #define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func) #define SETUP_PHY_ATTRIBUTE(field) \ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) #define SETUP_PHY_ATTRIBUTE_RW(field) \ SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ !i->f->set_phy_speed, S_IRUGO) #define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func) \ SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ !i->f->func, S_IRUGO) #define SETUP_PORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) #define SETUP_OPTIONAL_PHY_ATTRIBUTE(field, func) \ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) #define SETUP_PHY_ATTRIBUTE_WRONLY(field) \ SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1) #define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \ SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func) #define SETUP_END_DEV_ATTRIBUTE(field) \ SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1) #define 
SETUP_EXPANDER_ATTRIBUTE(field) \ SETUP_TEMPLATE(expander_attrs, expander_##field, S_IRUGO, 1) /** * sas_attach_transport - instantiate SAS transport template * @ft: SAS transport class function template */ struct scsi_transport_template * sas_attach_transport(struct sas_function_template *ft) { struct sas_internal *i; int count; i = kzalloc(sizeof(struct sas_internal), GFP_KERNEL); if (!i) return NULL; i->t.user_scan = sas_user_scan; i->t.host_attrs.ac.attrs = &i->host_attrs[0]; i->t.host_attrs.ac.class = &sas_host_class.class; i->t.host_attrs.ac.match = sas_host_match; transport_container_register(&i->t.host_attrs); i->t.host_size = sizeof(struct sas_host_attrs); i->phy_attr_cont.ac.class = &sas_phy_class.class; i->phy_attr_cont.ac.attrs = &i->phy_attrs[0]; i->phy_attr_cont.ac.match = sas_phy_match; transport_container_register(&i->phy_attr_cont); i->port_attr_cont.ac.class = &sas_port_class.class; i->port_attr_cont.ac.attrs = &i->port_attrs[0]; i->port_attr_cont.ac.match = sas_port_match; transport_container_register(&i->port_attr_cont); i->rphy_attr_cont.ac.class = &sas_rphy_class.class; i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0]; i->rphy_attr_cont.ac.match = sas_rphy_match; transport_container_register(&i->rphy_attr_cont); i->end_dev_attr_cont.ac.class = &sas_end_dev_class.class; i->end_dev_attr_cont.ac.attrs = &i->end_dev_attrs[0]; i->end_dev_attr_cont.ac.match = sas_end_dev_match; transport_container_register(&i->end_dev_attr_cont); i->expander_attr_cont.ac.class = &sas_expander_class.class; i->expander_attr_cont.ac.attrs = &i->expander_attrs[0]; i->expander_attr_cont.ac.match = sas_expander_match; transport_container_register(&i->expander_attr_cont); i->f = ft; count = 0; SETUP_PHY_ATTRIBUTE(initiator_port_protocols); SETUP_PHY_ATTRIBUTE(target_port_protocols); SETUP_PHY_ATTRIBUTE(device_type); SETUP_PHY_ATTRIBUTE(sas_address); SETUP_PHY_ATTRIBUTE(phy_identifier); SETUP_PHY_ATTRIBUTE(negotiated_linkrate); SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate); SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw); SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate); SETUP_PHY_ATTRIBUTE(invalid_dword_count); SETUP_PHY_ATTRIBUTE(running_disparity_error_count); SETUP_PHY_ATTRIBUTE(loss_of_dword_sync_count); SETUP_PHY_ATTRIBUTE(phy_reset_problem_count); SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset); SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset); SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable); i->phy_attrs[count] = NULL; count = 0; SETUP_PORT_ATTRIBUTE(num_phys); i->port_attrs[count] = NULL; count = 0; SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols); SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols); SETUP_RPORT_ATTRIBUTE(rphy_device_type); SETUP_RPORT_ATTRIBUTE(rphy_sas_address); SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier); SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier, get_enclosure_identifier); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier, get_bay_identifier); i->rphy_attrs[count] = NULL; count = 0; SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported); SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled); i->end_dev_attrs[count] = NULL; count = 0; SETUP_EXPANDER_ATTRIBUTE(vendor_id); SETUP_EXPANDER_ATTRIBUTE(product_id); SETUP_EXPANDER_ATTRIBUTE(product_rev); SETUP_EXPANDER_ATTRIBUTE(component_vendor_id); 
SETUP_EXPANDER_ATTRIBUTE(component_id); SETUP_EXPANDER_ATTRIBUTE(component_revision_id); SETUP_EXPANDER_ATTRIBUTE(level); i->expander_attrs[count] = NULL; return &i->t; } EXPORT_SYMBOL(sas_attach_transport); /** * sas_release_transport - release SAS transport template instance * @t: transport template instance */ void sas_release_transport(struct scsi_transport_template *t) { struct sas_internal *i = to_sas_internal(t); transport_container_unregister(&i->t.host_attrs); transport_container_unregister(&i->phy_attr_cont); transport_container_unregister(&i->port_attr_cont); transport_container_unregister(&i->rphy_attr_cont); transport_container_unregister(&i->end_dev_attr_cont); transport_container_unregister(&i->expander_attr_cont); kfree(i); } EXPORT_SYMBOL(sas_release_transport); static __init int sas_transport_init(void) { int error; error = transport_class_register(&sas_host_class); if (error) goto out; error = transport_class_register(&sas_phy_class); if (error) goto out_unregister_transport; error = transport_class_register(&sas_port_class); if (error) goto out_unregister_phy; error = transport_class_register(&sas_rphy_class); if (error) goto out_unregister_port; error = transport_class_register(&sas_end_dev_class); if (error) goto out_unregister_rphy; error = transport_class_register(&sas_expander_class); if (error) goto out_unregister_end_dev; return 0; out_unregister_end_dev: transport_class_unregister(&sas_end_dev_class); out_unregister_rphy: transport_class_unregister(&sas_rphy_class); out_unregister_port: transport_class_unregister(&sas_port_class); out_unregister_phy: transport_class_unregister(&sas_phy_class); out_unregister_transport: transport_class_unregister(&sas_host_class); out: return error; } static void __exit sas_transport_exit(void) { transport_class_unregister(&sas_host_class); transport_class_unregister(&sas_phy_class); transport_class_unregister(&sas_port_class); transport_class_unregister(&sas_rphy_class); transport_class_unregister(&sas_end_dev_class); transport_class_unregister(&sas_expander_class); } MODULE_AUTHOR("Christoph Hellwig"); MODULE_DESCRIPTION("SAS Transport Attributes"); MODULE_LICENSE("GPL"); module_init(sas_transport_init); module_exit(sas_transport_exit);
linux-master
drivers/scsi/scsi_transport_sas.c
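The file above exports the interface that SAS low-level drivers use to publish remote devices (sas_end_device_alloc(), sas_rphy_add(), sas_rphy_free(), sas_rphy_delete(), sas_attach_transport(), ...). As an illustration only, the sketch below shows how a hypothetical driver might publish a directly attached SSP end device once it already holds an added struct sas_port; every name prefixed with example_ is an assumption and is not part of the kernel tree.

#include <linux/errno.h>
#include <linux/types.h>
#include <scsi/scsi_transport_sas.h>

/*
 * Illustrative sketch, not kernel code: publish one SSP disk behind @port.
 * @port is assumed to have been allocated and added by the driver already.
 */
static int example_publish_ssp_disk(struct sas_port *port, u64 sas_addr,
				    u8 phy_id)
{
	struct sas_rphy *rphy;
	int error;

	rphy = sas_end_device_alloc(port);	/* sets device_type = SAS_END_DEVICE */
	if (!rphy)
		return -ENOMEM;

	/* These identify fields back the rphy sysfs attributes defined above. */
	rphy->identify.sas_address = sas_addr;
	rphy->identify.phy_identifier = phy_id;
	rphy->identify.target_port_protocols = SAS_PROTOCOL_SSP;

	/* Assigns a scsi_target_id and kicks off the SCSI target scan. */
	error = sas_rphy_add(rphy);
	if (error)
		sas_rphy_free(rphy);	/* only legal because it was never added */

	return error;
}

Teardown of a device published this way goes through sas_rphy_delete(), which removes and then frees the rphy, as implemented in the file above.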
// SPDX-License-Identifier: GPL-2.0-only /* * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux. * * Based on work by Alan Hourihane and Kars de Jong * * Rewritten to use 53c700.c by Richard Hirst <[email protected]> */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <asm/bvme6000hw.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("Richard Hirst <[email protected]>"); MODULE_DESCRIPTION("BVME6000 NCR53C710 driver"); MODULE_LICENSE("GPL"); static struct scsi_host_template bvme6000_scsi_driver_template = { .name = "BVME6000 NCR53c710 SCSI", .proc_name = "BVME6000", .this_id = 7, .module = THIS_MODULE, }; static struct platform_device *bvme6000_scsi_device; static int bvme6000_probe(struct platform_device *dev) { struct Scsi_Host *host; struct NCR_700_Host_Parameters *hostdata; if (!MACH_IS_BVME6000) goto out; hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); if (!hostdata) { printk(KERN_ERR "bvme6000-scsi: " "Failed to allocate host data\n"); goto out; } /* Fill in the required pieces of hostdata */ hostdata->base = (void __iomem *)BVME_NCR53C710_BASE; hostdata->clock = 40; /* XXX - depends on the CPU clock! */ hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; hostdata->dcntl_extra = EA_710; hostdata->ctest7_extra = CTEST7_TT1; /* and register the chip */ host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, &dev->dev); if (!host) { printk(KERN_ERR "bvme6000-scsi: No host detected; " "board configuration problem?\n"); goto out_free; } host->base = BVME_NCR53C710_BASE; host->this_id = 7; host->irq = BVME_IRQ_SCSI; if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi", host)) { printk(KERN_ERR "bvme6000-scsi: request_irq failed\n"); goto out_put_host; } platform_set_drvdata(dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_free: kfree(hostdata); out: return -ENODEV; } static int bvme6000_device_remove(struct platform_device *dev) { struct Scsi_Host *host = platform_get_drvdata(dev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); scsi_remove_host(host); NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); return 0; } static struct platform_driver bvme6000_scsi_driver = { .driver = { .name = "bvme6000-scsi", }, .probe = bvme6000_probe, .remove = bvme6000_device_remove, }; static int __init bvme6000_scsi_init(void) { int err; err = platform_driver_register(&bvme6000_scsi_driver); if (err) return err; bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi", -1, NULL, 0); if (IS_ERR(bvme6000_scsi_device)) { platform_driver_unregister(&bvme6000_scsi_driver); return PTR_ERR(bvme6000_scsi_device); } return 0; } static void __exit bvme6000_scsi_exit(void) { platform_device_unregister(bvme6000_scsi_device); platform_driver_unregister(&bvme6000_scsi_driver); } module_init(bvme6000_scsi_init); module_exit(bvme6000_scsi_exit);
linux-master
drivers/scsi/bvme6000_scsi.c
// SPDX-License-Identifier: GPL-2.0-only /* * SCSI low-level driver for the 53c94 SCSI bus adaptor found * on Power Macintosh computers, controlling the external SCSI chain. * We assume the 53c94 is connected to a DBDMA (descriptor-based DMA) * controller. * * Paul Mackerras, August 1996. * Copyright (C) 1996 Paul Mackerras. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pgtable.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/macio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "mac53c94.h" enum fsc_phase { idle, selecting, dataing, completing, busfreeing, }; struct fsc_state { struct mac53c94_regs __iomem *regs; int intr; struct dbdma_regs __iomem *dma; int dmaintr; int clk_freq; struct Scsi_Host *host; struct scsi_cmnd *request_q; struct scsi_cmnd *request_qtail; struct scsi_cmnd *current_req; /* req we're currently working on */ enum fsc_phase phase; /* what we're currently trying to do */ struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ void *dma_cmd_space; struct pci_dev *pdev; dma_addr_t dma_addr; struct macio_dev *mdev; }; static void mac53c94_init(struct fsc_state *); static void mac53c94_start(struct fsc_state *); static void mac53c94_interrupt(int, void *); static irqreturn_t do_mac53c94_interrupt(int, void *); static void cmd_done(struct fsc_state *, int result); static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); static int mac53c94_queue_lck(struct scsi_cmnd *cmd) { struct fsc_state *state; #if 0 if (cmd->sc_data_direction == DMA_TO_DEVICE) { int i; printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd); for (i = 0; i < cmd->cmd_len; ++i) printk(KERN_CONT " %.2x", cmd->cmnd[i]); printk(KERN_CONT "\n"); printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd)); } #endif cmd->host_scribble = NULL; state = (struct fsc_state *) cmd->device->host->hostdata; if (state->request_q == NULL) state->request_q = cmd; else state->request_qtail->host_scribble = (void *) cmd; state->request_qtail = cmd; if (state->phase == idle) mac53c94_start(state); return 0; } static DEF_SCSI_QCMD(mac53c94_queue) static int mac53c94_host_reset(struct scsi_cmnd *cmd) { struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; unsigned long flags; spin_lock_irqsave(cmd->device->host->host_lock, flags); writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); writeb(CMD_SCSI_RESET, &regs->command); /* assert RST */ udelay(100); /* leave it on for a while (>= 25us) */ writeb(CMD_RESET, &regs->command); udelay(20); mac53c94_init(state); writeb(CMD_NOP, &regs->command); spin_unlock_irqrestore(cmd->device->host->host_lock, flags); return SUCCESS; } static void mac53c94_init(struct fsc_state *state) { struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1); writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */ writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor); writeb(CF2_FEATURE_EN, &regs->config2); writeb(0, 
&regs->config3); writeb(0, &regs->sync_period); writeb(0, &regs->sync_offset); (void)readb(&regs->interrupt); writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); } /* * Start the next command for a 53C94. * Should be called with interrupts disabled. */ static void mac53c94_start(struct fsc_state *state) { struct scsi_cmnd *cmd; struct mac53c94_regs __iomem *regs = state->regs; int i; if (state->phase != idle || state->current_req != NULL) panic("inappropriate mac53c94_start (state=%p)", state); if (state->request_q == NULL) return; state->current_req = cmd = state->request_q; state->request_q = (struct scsi_cmnd *) cmd->host_scribble; /* Off we go */ writeb(0, &regs->count_lo); writeb(0, &regs->count_mid); writeb(0, &regs->count_hi); writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); udelay(1); writeb(CMD_FLUSH, &regs->command); udelay(1); writeb(cmd->device->id, &regs->dest_id); writeb(0, &regs->sync_period); writeb(0, &regs->sync_offset); /* load the command into the FIFO */ for (i = 0; i < cmd->cmd_len; ++i) writeb(cmd->cmnd[i], &regs->fifo); /* do select without ATN XXX */ writeb(CMD_SELECT, &regs->command); state->phase = selecting; set_dma_cmds(state, cmd); } static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host; spin_lock_irqsave(dev->host_lock, flags); mac53c94_interrupt(irq, dev_id); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void mac53c94_interrupt(int irq, void *dev_id) { struct fsc_state *state = (struct fsc_state *) dev_id; struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; struct scsi_cmnd *const cmd = state->current_req; struct mac53c94_cmd_priv *const mcmd = mac53c94_priv(cmd); int nb, stat, seq, intr; static int mac53c94_errors; /* * Apparently, reading the interrupt register unlatches * the status and sequence step registers. */ seq = readb(&regs->seqstep); stat = readb(&regs->status); intr = readb(&regs->interrupt); #if 0 printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); #endif if (intr & INTR_RESET) { /* SCSI bus was reset */ printk(KERN_INFO "external SCSI bus reset detected\n"); writeb(CMD_NOP, &regs->command); writel(RUN << 16, &dma->control); /* stop dma */ cmd_done(state, DID_RESET << 16); return; } if (intr & INTR_ILL_CMD) { printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); cmd_done(state, DID_ERROR << 16); return; } if (stat & STAT_ERROR) { #if 0 /* XXX these seem to be harmless? 
*/ printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); #endif ++mac53c94_errors; writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); } if (!cmd) { printk(KERN_DEBUG "53c94: interrupt with no command active?\n"); return; } if (stat & STAT_PARITY) { printk(KERN_ERR "mac53c94: parity error\n"); cmd_done(state, DID_PARITY << 16); return; } switch (state->phase) { case selecting: if (intr & INTR_DISCONNECT) { /* selection timed out */ cmd_done(state, DID_BAD_TARGET << 16); return; } if (intr != INTR_BUS_SERV + INTR_DONE) { printk(KERN_DEBUG "got intr %x during selection\n", intr); cmd_done(state, DID_ERROR << 16); return; } if ((seq & SS_MASK) != SS_DONE) { printk(KERN_DEBUG "seq step %x after command\n", seq); cmd_done(state, DID_ERROR << 16); return; } writeb(CMD_NOP, &regs->command); /* set DMA controller going if any data to transfer */ if ((stat & (STAT_MSG|STAT_CD)) == 0 && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) { nb = mcmd->this_residual; if (nb > 0xfff0) nb = 0xfff0; mcmd->this_residual -= nb; writeb(nb, &regs->count_lo); writeb(nb >> 8, &regs->count_mid); writeb(CMD_DMA_MODE + CMD_NOP, &regs->command); writel(virt_to_phys(state->dma_cmds), &dma->cmdptr); writel((RUN << 16) | RUN, &dma->control); writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command); state->phase = dataing; break; } else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) { /* up to status phase already */ writeb(CMD_I_COMPLETE, &regs->command); state->phase = completing; } else { printk(KERN_DEBUG "in unexpected phase %x after cmd\n", stat & STAT_PHASE); cmd_done(state, DID_ERROR << 16); return; } break; case dataing: if (intr != INTR_BUS_SERV) { printk(KERN_DEBUG "got intr %x before status\n", intr); cmd_done(state, DID_ERROR << 16); return; } if (mcmd->this_residual != 0 && (stat & (STAT_MSG|STAT_CD)) == 0) { /* Set up the count regs to transfer more */ nb = mcmd->this_residual; if (nb > 0xfff0) nb = 0xfff0; mcmd->this_residual -= nb; writeb(nb, &regs->count_lo); writeb(nb >> 8, &regs->count_mid); writeb(CMD_DMA_MODE + CMD_NOP, &regs->command); writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command); break; } if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) { printk(KERN_DEBUG "intr %x before data xfer complete\n", intr); } writel(RUN << 16, &dma->control); /* stop dma */ scsi_dma_unmap(cmd); /* should check dma status */ writeb(CMD_I_COMPLETE, &regs->command); state->phase = completing; break; case completing: if (intr != INTR_DONE) { printk(KERN_DEBUG "got intr %x on completion\n", intr); cmd_done(state, DID_ERROR << 16); return; } mcmd->status = readb(&regs->fifo); mcmd->message = readb(&regs->fifo); writeb(CMD_ACCEPT_MSG, &regs->command); state->phase = busfreeing; break; case busfreeing: if (intr != INTR_DISCONNECT) { printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr); } cmd_done(state, (DID_OK << 16) + (mcmd->message << 8) + mcmd->status); break; default: printk(KERN_DEBUG "don't know about phase %d\n", state->phase); } } static void cmd_done(struct fsc_state *state, int result) { struct scsi_cmnd *cmd; cmd = state->current_req; if (cmd) { cmd->result = result; scsi_done(cmd); state->current_req = NULL; } state->phase = idle; mac53c94_start(state); } /* * Set up DMA commands for transferring data. 
*/ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) { int i, dma_cmd, total, nseg; struct scatterlist *scl; struct dbdma_cmd *dcmds; dma_addr_t dma_addr; u32 dma_len; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (!nseg) return; dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ? OUTPUT_MORE : INPUT_MORE; dcmds = state->dma_cmds; total = 0; scsi_for_each_sg(cmd, scl, nseg, i) { dma_addr = sg_dma_address(scl); dma_len = sg_dma_len(scl); if (dma_len > 0xffff) panic("mac53c94: scatterlist element >= 64k"); total += dma_len; dcmds->req_count = cpu_to_le16(dma_len); dcmds->command = cpu_to_le16(dma_cmd); dcmds->phy_addr = cpu_to_le32(dma_addr); dcmds->xfer_status = 0; ++dcmds; } dma_cmd += OUTPUT_LAST - OUTPUT_MORE; dcmds[-1].command = cpu_to_le16(dma_cmd); dcmds->command = cpu_to_le16(DBDMA_STOP); mac53c94_priv(cmd)->this_residual = total; } static const struct scsi_host_template mac53c94_template = { .proc_name = "53c94", .name = "53C94", .queuecommand = mac53c94_queue, .eh_host_reset_handler = mac53c94_host_reset, .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, .max_segment_size = 65535, .cmd_size = sizeof(struct mac53c94_cmd_priv), }; static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) { struct device_node *node = macio_get_of_node(mdev); struct pci_dev *pdev = macio_get_pci_dev(mdev); struct fsc_state *state; struct Scsi_Host *host; void *dma_cmd_space; const unsigned char *clkprop; int proplen, rc = -ENODEV; if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { printk(KERN_ERR "mac53c94: expected 2 addrs and intrs" " (got %d/%d)\n", macio_resource_count(mdev), macio_irq_count(mdev)); return -ENODEV; } if (macio_request_resources(mdev, "mac53c94") != 0) { printk(KERN_ERR "mac53c94: unable to request memory resources"); return -EBUSY; } host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state)); if (host == NULL) { printk(KERN_ERR "mac53c94: couldn't register host"); rc = -ENOMEM; goto out_release; } state = (struct fsc_state *) host->hostdata; macio_set_drvdata(mdev, state); state->host = host; state->pdev = pdev; state->mdev = mdev; state->regs = (struct mac53c94_regs __iomem *) ioremap(macio_resource_start(mdev, 0), 0x1000); state->intr = macio_irq(mdev, 0); state->dma = (struct dbdma_regs __iomem *) ioremap(macio_resource_start(mdev, 1), 0x1000); state->dmaintr = macio_irq(mdev, 1); if (state->regs == NULL || state->dma == NULL) { printk(KERN_ERR "mac53c94: ioremap failed for %pOF\n", node); goto out_free; } clkprop = of_get_property(node, "clock-frequency", &proplen); if (clkprop == NULL || proplen != sizeof(int)) { printk(KERN_ERR "%pOF: can't get clock frequency, " "assuming 25MHz\n", node); state->clk_freq = 25000000; } else state->clk_freq = *(int *)clkprop; /* Space for dma command list: +1 for stop command, * +1 to allow for aligning. 
* XXX FIXME: Use DMA consistent routines */ dma_cmd_space = kmalloc_array(host->sg_tablesize + 2, sizeof(struct dbdma_cmd), GFP_KERNEL); if (!dma_cmd_space) { printk(KERN_ERR "mac53c94: couldn't allocate dma " "command space for %pOF\n", node); rc = -ENOMEM; goto out_free; } state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space); memset(state->dma_cmds, 0, (host->sg_tablesize + 1) * sizeof(struct dbdma_cmd)); state->dma_cmd_space = dma_cmd_space; mac53c94_init(state); if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) { printk(KERN_ERR "mac53C94: can't get irq %d for %pOF\n", state->intr, node); goto out_free_dma; } rc = scsi_add_host(host, &mdev->ofdev.dev); if (rc != 0) goto out_release_irq; scsi_scan_host(host); return 0; out_release_irq: free_irq(state->intr, state); out_free_dma: kfree(state->dma_cmd_space); out_free: if (state->dma != NULL) iounmap(state->dma); if (state->regs != NULL) iounmap(state->regs); scsi_host_put(host); out_release: macio_release_resources(mdev); return rc; } static int mac53c94_remove(struct macio_dev *mdev) { struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev); struct Scsi_Host *host = fp->host; scsi_remove_host(host); free_irq(fp->intr, fp); if (fp->regs) iounmap(fp->regs); if (fp->dma) iounmap(fp->dma); kfree(fp->dma_cmd_space); scsi_host_put(host); macio_release_resources(mdev); return 0; } static struct of_device_id mac53c94_match[] = { { .name = "53c94", }, {}, }; MODULE_DEVICE_TABLE (of, mac53c94_match); static struct macio_driver mac53c94_driver = { .driver = { .name = "mac53c94", .owner = THIS_MODULE, .of_match_table = mac53c94_match, }, .probe = mac53c94_probe, .remove = mac53c94_remove, }; static int __init init_mac53c94(void) { return macio_register_driver(&mac53c94_driver); } static void __exit exit_mac53c94(void) { return macio_unregister_driver(&mac53c94_driver); } module_init(init_mac53c94); module_exit(exit_mac53c94); MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver"); MODULE_AUTHOR("Paul Mackerras <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/mac53c94.c
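Both mac53c94 above (via .cmd_size and mac53c94_priv()) and the aha152x driver that follows (via aha152x_priv()) rely on the midlayer's per-command private data: setting .cmd_size in the scsi_host_template makes the SCSI core allocate that many extra bytes behind every scsi_cmnd, and scsi_cmd_priv() returns a pointer to that area. The fragment below is a minimal, driver-agnostic sketch of the pattern; all example_* names are hypothetical and the template is never registered.

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Illustrative sketch only: per-command state kept behind each scsi_cmnd. */
struct example_cmd_priv {
	int	this_residual;	/* bytes still to transfer */
	u8	status;		/* SCSI status byte returned by the target */
};

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	priv->this_residual = scsi_bufflen(cmd);
	priv->status = 0;
	/* ...program the hardware here; completion would call scsi_done(cmd)... */
	return 0;
}

static const struct scsi_host_template example_template = {
	.name		= "example",
	.queuecommand	= example_queuecommand,
	.can_queue	= 1,
	.this_id	= 7,
	.cmd_size	= sizeof(struct example_cmd_priv),	/* allocated per command */
};

aha152x_priv() in the next file is exactly this kind of thin wrapper around scsi_cmd_priv(); mac53c94_priv() is defined in mac53c94.h (not shown here) and is assumed to follow the same pattern.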
// SPDX-License-Identifier: GPL-2.0-or-later /* aha152x.c -- Adaptec AHA-152x driver * Author: Jürgen E. Fischer, [email protected] * Copyright 1993-2004 Jürgen E. Fischer * * $Id: aha152x.c,v 2.7 2004/01/24 11:42:59 fischer Exp $ * * $Log: aha152x.c,v $ * Revision 2.7 2004/01/24 11:42:59 fischer * - gather code that is not used by PCMCIA at the end * - move request_region for !PCMCIA case to detection * - migration to new scsi host api (remove legacy code) * - free host scribble before scsi_done * - fix error handling * - one isapnp device added to id_table * * Revision 2.6 2003/10/30 20:52:47 fischer * - interfaces changes for kernel 2.6 * - aha152x_probe_one introduced for pcmcia stub * - fixed pnpdev handling * - instead of allocation a new one, reuse command for request sense after check condition and reset * - fixes race in is_complete * * Revision 2.5 2002/04/14 11:24:53 fischer * - isapnp support * - abort fixed * - 2.5 support * * Revision 2.4 2000/12/16 12:53:56 fischer * - allow REQUEST SENSE to be queued * - handle shared PCI interrupts * * Revision 2.3 2000/11/04 16:40:26 fischer * - handle data overruns * - extend timeout for data phases * * Revision 2.2 2000/08/08 19:54:53 fischer * - minor changes * * Revision 2.1 2000/05/17 16:23:17 fischer * - signature update * - fix for data out w/o scatter gather * * Revision 2.0 1999/12/25 15:07:32 fischer * - interrupt routine completly reworked * - basic support for new eh code * * Revision 1.21 1999/11/10 23:46:36 fischer * - default to synchronous operation * - synchronous negotiation fixed * - added timeout to loops * - debugging output can be controlled through procfs * * Revision 1.20 1999/11/07 18:37:31 fischer * - synchronous operation works * - resid support for sg driver * * Revision 1.19 1999/11/02 22:39:59 fischer * - moved leading comments to README.aha152x * - new additional module parameters * - updates for 2.3 * - support for the Tripace TC1550 controller * - interrupt handling changed * * Revision 1.18 1996/09/07 20:10:40 fischer * - fixed can_queue handling (multiple outstanding commands working again) * * Revision 1.17 1996/08/17 16:05:14 fischer * - biosparam improved * - interrupt verification * - updated documentation * - cleanups * * Revision 1.16 1996/06/09 00:04:56 root * - added configuration symbols for insmod (aha152x/aha152x1) * * Revision 1.15 1996/04/30 14:52:06 fischer * - proc info fixed * - support for extended translation for >1GB disks * * Revision 1.14 1996/01/17 15:11:20 fischer * - fixed lockup in MESSAGE IN phase after reconnection * * Revision 1.13 1996/01/09 02:15:53 fischer * - some cleanups * - moved request_irq behind controller initialization * (to avoid spurious interrupts) * * Revision 1.12 1995/12/16 12:26:07 fischer * - barrier()s added * - configurable RESET delay added * * Revision 1.11 1995/12/06 21:18:35 fischer * - some minor updates * * Revision 1.10 1995/07/22 19:18:45 fischer * - support for 2 controllers * - started synchronous data transfers (not working yet) * * Revision 1.9 1995/03/18 09:20:24 root * - patches for PCMCIA and modules * * Revision 1.8 1995/01/21 22:07:19 root * - snarf_region => request_region * - aha152x_intr interface change * * Revision 1.7 1995/01/02 23:19:36 root * - updated COMMAND_SIZE to cmd_len * - changed sti() to restore_flags() * - fixed some #ifdef which generated warnings * * Revision 1.6 1994/11/24 20:35:27 root * - problem with odd number of bytes in fifo fixed * * Revision 1.5 1994/10/30 14:39:56 root * - abort code fixed * - debugging 
improved * * Revision 1.4 1994/09/12 11:33:01 root * - irqaction to request_irq * - abortion updated * * Revision 1.3 1994/08/04 13:53:05 root * - updates for mid-level-driver changes * - accept unexpected BUSFREE phase as error condition * - parity check now configurable * * Revision 1.2 1994/07/03 12:56:36 root * - cleaned up debugging code * - more tweaking on reset delays * - updated abort/reset code (pretty untested...) * * Revision 1.1 1994/05/28 21:18:49 root * - update for mid-level interface change (abort-reset) * - delays after resets adjusted for some slow devices * * Revision 1.0 1994/03/25 12:52:00 root * - Fixed "more data than expected" problem * - added new BIOS signatures * * Revision 0.102 1994/01/31 20:44:12 root * - minor changes in insw/outsw handling * * Revision 0.101 1993/12/13 01:16:27 root * - fixed STATUS phase (non-GOOD stati were dropped sometimes; * fixes problems with CD-ROM sector size detection & media change) * * Revision 0.100 1993/12/10 16:58:47 root * - fix for unsuccessful selections in case of non-continuous id assignments * on the scsi bus. * * Revision 0.99 1993/10/24 16:19:59 root * - fixed DATA IN (rare read errors gone) * * Revision 0.98 1993/10/17 12:54:44 root * - fixed some recent fixes (shame on me) * - moved initialization of scratch area to aha152x_queue * * Revision 0.97 1993/10/09 18:53:53 root * - DATA IN fixed. Rarely left data in the fifo. * * Revision 0.96 1993/10/03 00:53:59 root * - minor changes on DATA IN * * Revision 0.95 1993/09/24 10:36:01 root * - change handling of MSGI after reselection * - fixed sti/cli * - minor changes * * Revision 0.94 1993/09/18 14:08:22 root * - fixed bug in multiple outstanding command code * - changed detection * - support for kernel command line configuration * - reset corrected * - changed message handling * * Revision 0.93 1993/09/15 20:41:19 root * - fixed bugs with multiple outstanding commands * * Revision 0.92 1993/09/13 02:46:33 root * - multiple outstanding commands work (no problems with IBM drive) * * Revision 0.91 1993/09/12 20:51:46 root * added multiple outstanding commands * (some problem with this $%&? IBM device remain) * * Revision 0.9 1993/09/12 11:11:22 root * - corrected auto-configuration * - changed the auto-configuration (added some '#define's) * - added support for dis-/reconnection * * Revision 0.8 1993/09/06 23:09:39 root * - added support for the drive activity light * - minor changes * * Revision 0.7 1993/09/05 14:30:15 root * - improved phase detection * - now using the new snarf_region code of 0.99pl13 * * Revision 0.6 1993/09/02 11:01:38 root * first public release; added some signatures and biosparam() * * Revision 0.5 1993/08/30 10:23:30 root * fixed timing problems with my IBM drive * * Revision 0.4 1993/08/29 14:06:52 root * fixed some problems with timeouts due incomplete commands * * Revision 0.3 1993/08/28 15:55:03 root * writing data works too. mounted and worked on a dos partition * * Revision 0.2 1993/08/27 22:42:07 root * reading data works. Mounted a msdos partition. * * Revision 0.1 1993/08/25 13:38:30 root * first "damn thing doesn't work" version * * Revision 0.0 1993/08/14 19:54:25 root * empty function bodies; detect() works. 
* ************************************************************************** see Documentation/scsi/aha152x.rst for configuration details **************************************************************************/ #include <linux/module.h> #include <asm/irq.h> #include <linux/io.h> #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/wait.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/isapnp.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/list.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_spi.h> #include <scsi/scsicam.h> #include "aha152x.h" static LIST_HEAD(aha152x_host_list); /* DEFINES */ /* For PCMCIA cards, always use AUTOCONF */ #if defined(AHA152X_PCMCIA) || defined(MODULE) #if !defined(AUTOCONF) #define AUTOCONF #endif #endif #if !defined(AUTOCONF) && !defined(SETUP0) #error define AUTOCONF or SETUP0 #endif #define DO_LOCK(flags) spin_lock_irqsave(&QLOCK,flags) #define DO_UNLOCK(flags) spin_unlock_irqrestore(&QLOCK,flags) #define LEAD "(scsi%d:%d:%d) " #define INFO_LEAD KERN_INFO LEAD #define CMDINFO(cmd) \ (cmd) ? ((cmd)->device->host->host_no) : -1, \ (cmd) ? ((cmd)->device->id & 0x0f) : -1, \ (cmd) ? ((u8)(cmd)->device->lun & 0x07) : -1 static inline void CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) { scsi_set_resid(cmd, scsi_get_resid(cmd) + inc); } #define DELAY_DEFAULT 1000 #if defined(AHA152X_PCMCIA) #define IRQ_MIN 0 #define IRQ_MAX 16 #else #define IRQ_MIN 9 #if defined(__PPC) #define IRQ_MAX (nr_irqs-1) #else #define IRQ_MAX 12 #endif #endif enum { not_issued = 0x0001, /* command not yet issued */ selecting = 0x0002, /* target is being selected */ identified = 0x0004, /* IDENTIFY was sent */ disconnected = 0x0008, /* target disconnected */ completed = 0x0010, /* target sent COMMAND COMPLETE */ aborted = 0x0020, /* ABORT was sent */ resetted = 0x0040, /* BUS DEVICE RESET was sent */ spiordy = 0x0080, /* waiting for SPIORDY to raise */ syncneg = 0x0100, /* synchronous negotiation in progress */ aborting = 0x0200, /* ABORT is pending */ resetting = 0x0400, /* BUS DEVICE RESET is pending */ check_condition = 0x0800, /* requesting sense after CHECK CONDITION */ }; struct aha152x_cmd_priv { char *ptr; int this_residual; struct scatterlist *buffer; int status; int message; int sent_command; int phase; }; static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd) { return scsi_cmd_priv(cmd); } MODULE_AUTHOR("Jürgen Fischer"); MODULE_DESCRIPTION(AHA152X_REVID); MODULE_LICENSE("GPL"); #if !defined(AHA152X_PCMCIA) #if defined(MODULE) static int io[] = {0, 0}; module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io,"base io address of controller"); static int irq[] = {0, 0}; module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq,"interrupt for controller"); static int scsiid[] = {7, 7}; module_param_array(scsiid, int, NULL, 0); MODULE_PARM_DESC(scsiid,"scsi id of controller"); static int reconnect[] = {1, 1}; module_param_array(reconnect, int, NULL, 0); MODULE_PARM_DESC(reconnect,"allow targets to disconnect"); static int parity[] = {1, 1}; module_param_array(parity, int, NULL, 0); MODULE_PARM_DESC(parity,"use scsi parity"); 
static int sync[] = {1, 1}; module_param_array(sync, int, NULL, 0); MODULE_PARM_DESC(sync,"use synchronous transfers"); static int delay[] = {DELAY_DEFAULT, DELAY_DEFAULT}; module_param_array(delay, int, NULL, 0); MODULE_PARM_DESC(delay,"scsi reset delay"); static int exttrans[] = {0, 0}; module_param_array(exttrans, int, NULL, 0); MODULE_PARM_DESC(exttrans,"use extended translation"); static int aha152x[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; module_param_array(aha152x, int, NULL, 0); MODULE_PARM_DESC(aha152x, "parameters for first controller"); static int aha152x1[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; module_param_array(aha152x1, int, NULL, 0); MODULE_PARM_DESC(aha152x1, "parameters for second controller"); #endif /* MODULE */ #ifdef __ISAPNP__ static struct isapnp_device_id id_table[] = { { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 }, { ISAPNP_DEVICE_SINGLE_END, } }; MODULE_DEVICE_TABLE(isapnp, id_table); #endif /* ISAPNP */ #endif /* !AHA152X_PCMCIA */ static const struct scsi_host_template aha152x_driver_template; /* * internal states of the host * */ enum aha152x_state { idle=0, unknown, seldo, seldi, selto, busfree, msgo, cmd, msgi, status, datai, datao, parerr, rsti, maxstate }; /* * current state information of the host * */ struct aha152x_hostdata { struct scsi_cmnd *issue_SC; /* pending commands to issue */ struct scsi_cmnd *current_SC; /* current command on the bus */ struct scsi_cmnd *disconnected_SC; /* commands that disconnected */ struct scsi_cmnd *done_SC; /* command that was completed */ spinlock_t lock; /* host lock */ #if defined(AHA152X_STAT) int total_commands; int disconnections; int busfree_without_any_action; int busfree_without_old_command; int busfree_without_new_command; int busfree_without_done_command; int busfree_with_check_condition; int count[maxstate]; int count_trans[maxstate]; unsigned long time[maxstate]; #endif int commands; /* current number of commands */ int reconnect; /* disconnection allowed */ int parity; /* parity checking enabled */ int synchronous; /* synchronous transferes enabled */ int delay; /* reset out delay */ int ext_trans; /* extended translation enabled */ int swint; /* software-interrupt was fired during detect() */ int service; /* bh needs to be run */ int in_intr; /* bh is running */ /* current state, previous state, last state different from current state */ enum aha152x_state state, prevstate, 
laststate; int target; /* reconnecting target */ unsigned char syncrate[8]; /* current synchronous transfer agreements */ unsigned char syncneg[8]; /* 0: no negotiation; * 1: negotiation in progress; * 2: negotiation completed */ int cmd_i; /* number of sent bytes of current command */ int msgi_len; /* number of received message bytes */ unsigned char msgi[256]; /* received message bytes */ int msgo_i, msgo_len; /* number of sent bytes and length of current messages */ unsigned char msgo[256]; /* pending messages */ int data_len; /* number of sent/received bytes in dataphase */ unsigned long io_port0; unsigned long io_port1; #ifdef __ISAPNP__ struct pnp_dev *pnpdev; #endif struct list_head host_list; }; /* * host specific command extension * */ struct aha152x_scdata { struct scsi_cmnd *next; /* next sc in queue */ struct completion *done;/* semaphore to block on */ struct scsi_eh_save ses; }; /* access macros for hostdata */ #define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata) #define HOSTNO ((shpnt)->host_no) #define CURRENT_SC (HOSTDATA(shpnt)->current_SC) #define DONE_SC (HOSTDATA(shpnt)->done_SC) #define ISSUE_SC (HOSTDATA(shpnt)->issue_SC) #define DISCONNECTED_SC (HOSTDATA(shpnt)->disconnected_SC) #define QLOCK (HOSTDATA(shpnt)->lock) #define QLOCKER (HOSTDATA(shpnt)->locker) #define QLOCKERL (HOSTDATA(shpnt)->lockerl) #define STATE (HOSTDATA(shpnt)->state) #define PREVSTATE (HOSTDATA(shpnt)->prevstate) #define LASTSTATE (HOSTDATA(shpnt)->laststate) #define RECONN_TARGET (HOSTDATA(shpnt)->target) #define CMD_I (HOSTDATA(shpnt)->cmd_i) #define MSGO(i) (HOSTDATA(shpnt)->msgo[i]) #define MSGO_I (HOSTDATA(shpnt)->msgo_i) #define MSGOLEN (HOSTDATA(shpnt)->msgo_len) #define ADDMSGO(x) (MSGOLEN<256 ? (void)(MSGO(MSGOLEN++)=x) : aha152x_error(shpnt,"MSGO overflow")) #define MSGI(i) (HOSTDATA(shpnt)->msgi[i]) #define MSGILEN (HOSTDATA(shpnt)->msgi_len) #define ADDMSGI(x) (MSGILEN<256 ? 
(void)(MSGI(MSGILEN++)=x) : aha152x_error(shpnt,"MSGI overflow")) #define DATA_LEN (HOSTDATA(shpnt)->data_len) #define SYNCRATE (HOSTDATA(shpnt)->syncrate[CURRENT_SC->device->id]) #define SYNCNEG (HOSTDATA(shpnt)->syncneg[CURRENT_SC->device->id]) #define DELAY (HOSTDATA(shpnt)->delay) #define EXT_TRANS (HOSTDATA(shpnt)->ext_trans) #define TC1550 (HOSTDATA(shpnt)->tc1550) #define RECONNECT (HOSTDATA(shpnt)->reconnect) #define PARITY (HOSTDATA(shpnt)->parity) #define SYNCHRONOUS (HOSTDATA(shpnt)->synchronous) #define HOSTIOPORT0 (HOSTDATA(shpnt)->io_port0) #define HOSTIOPORT1 (HOSTDATA(shpnt)->io_port1) #define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble) #define SCNEXT(SCpnt) SCDATA(SCpnt)->next #define SCSEM(SCpnt) SCDATA(SCpnt)->done #define SG_ADDRESS(buffer) ((char *) sg_virt((buffer))) /* state handling */ static void seldi_run(struct Scsi_Host *shpnt); static void seldo_run(struct Scsi_Host *shpnt); static void selto_run(struct Scsi_Host *shpnt); static void busfree_run(struct Scsi_Host *shpnt); static void msgo_init(struct Scsi_Host *shpnt); static void msgo_run(struct Scsi_Host *shpnt); static void msgo_end(struct Scsi_Host *shpnt); static void cmd_init(struct Scsi_Host *shpnt); static void cmd_run(struct Scsi_Host *shpnt); static void cmd_end(struct Scsi_Host *shpnt); static void datai_init(struct Scsi_Host *shpnt); static void datai_run(struct Scsi_Host *shpnt); static void datai_end(struct Scsi_Host *shpnt); static void datao_init(struct Scsi_Host *shpnt); static void datao_run(struct Scsi_Host *shpnt); static void datao_end(struct Scsi_Host *shpnt); static void status_run(struct Scsi_Host *shpnt); static void msgi_run(struct Scsi_Host *shpnt); static void msgi_end(struct Scsi_Host *shpnt); static void parerr_run(struct Scsi_Host *shpnt); static void rsti_run(struct Scsi_Host *shpnt); static void is_complete(struct Scsi_Host *shpnt); /* * driver states * */ static struct { char *name; void (*init)(struct Scsi_Host *); void (*run)(struct Scsi_Host *); void (*end)(struct Scsi_Host *); int spio; } states[] = { { "idle", NULL, NULL, NULL, 0}, { "unknown", NULL, NULL, NULL, 0}, { "seldo", NULL, seldo_run, NULL, 0}, { "seldi", NULL, seldi_run, NULL, 0}, { "selto", NULL, selto_run, NULL, 0}, { "busfree", NULL, busfree_run, NULL, 0}, { "msgo", msgo_init, msgo_run, msgo_end, 1}, { "cmd", cmd_init, cmd_run, cmd_end, 1}, { "msgi", NULL, msgi_run, msgi_end, 1}, { "status", NULL, status_run, NULL, 1}, { "datai", datai_init, datai_run, datai_end, 0}, { "datao", datao_init, datao_run, datao_end, 0}, { "parerr", NULL, parerr_run, NULL, 0}, { "rsti", NULL, rsti_run, NULL, 0}, }; /* setup & interrupt */ static irqreturn_t intr(int irq, void *dev_id); static void reset_ports(struct Scsi_Host *shpnt); static void aha152x_error(struct Scsi_Host *shpnt, char *msg); static void done(struct Scsi_Host *shpnt, unsigned char status_byte, unsigned char host_byte); /* diagnostics */ static void show_command(struct scsi_cmnd * ptr); static void show_queues(struct Scsi_Host *shpnt); static void disp_enintr(struct Scsi_Host *shpnt); /* * queue services: * */ static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) { struct scsi_cmnd *end; SCNEXT(new_SC) = NULL; if (!*SC) *SC = new_SC; else { for (end = *SC; SCNEXT(end); end = SCNEXT(end)) ; SCNEXT(end) = new_SC; } } static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd ** SC) { struct scsi_cmnd *ptr; ptr = *SC; if (ptr) { *SC = SCNEXT(*SC); SCNEXT(ptr)=NULL; } return ptr; } static inline struct scsi_cmnd 
*remove_lun_SC(struct scsi_cmnd ** SC, int target, int lun) { struct scsi_cmnd *ptr, *prev; for (ptr = *SC, prev = NULL; ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); prev = ptr, ptr = SCNEXT(ptr)) ; if (ptr) { if (prev) SCNEXT(prev) = SCNEXT(ptr); else *SC = SCNEXT(ptr); SCNEXT(ptr)=NULL; } return ptr; } static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, struct scsi_cmnd *SCp) { struct scsi_cmnd *ptr, *prev; for (ptr = *SC, prev = NULL; ptr && SCp!=ptr; prev = ptr, ptr = SCNEXT(ptr)) ; if (ptr) { if (prev) SCNEXT(prev) = SCNEXT(ptr); else *SC = SCNEXT(ptr); SCNEXT(ptr)=NULL; } return ptr; } static irqreturn_t swintr(int irqno, void *dev_id) { struct Scsi_Host *shpnt = dev_id; HOSTDATA(shpnt)->swint++; SETPORT(DMACNTRL0, INTEN); return IRQ_HANDLED; } struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) { struct Scsi_Host *shpnt; shpnt = scsi_host_alloc(&aha152x_driver_template, sizeof(struct aha152x_hostdata)); if (!shpnt) { printk(KERN_ERR "aha152x: scsi_host_alloc failed\n"); return NULL; } memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt)); INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list); /* need to have host registered before triggering any interrupt */ list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list); shpnt->io_port = setup->io_port; shpnt->n_io_port = IO_RANGE; shpnt->irq = setup->irq; if (!setup->tc1550) { HOSTIOPORT0 = setup->io_port; HOSTIOPORT1 = setup->io_port; } else { HOSTIOPORT0 = setup->io_port+0x10; HOSTIOPORT1 = setup->io_port-0x10; } spin_lock_init(&QLOCK); RECONNECT = setup->reconnect; SYNCHRONOUS = setup->synchronous; PARITY = setup->parity; DELAY = setup->delay; EXT_TRANS = setup->ext_trans; SETPORT(SCSIID, setup->scsiid << 4); shpnt->this_id = setup->scsiid; if (setup->reconnect) shpnt->can_queue = AHA152X_MAXQUEUE; /* RESET OUT */ printk("aha152x: resetting bus...\n"); SETPORT(SCSISEQ, SCSIRSTO); mdelay(256); SETPORT(SCSISEQ, 0); mdelay(DELAY); reset_ports(shpnt); printk(KERN_INFO "aha152x%d%s: " "vital data: rev=%x, " "io=0x%03lx (0x%03lx/0x%03lx), " "irq=%d, " "scsiid=%d, " "reconnect=%s, " "parity=%s, " "synchronous=%s, " "delay=%d, " "extended translation=%s\n", shpnt->host_no, setup->tc1550 ? " (tc1550 mode)" : "", GETPORT(REV) & 0x7, shpnt->io_port, HOSTIOPORT0, HOSTIOPORT1, shpnt->irq, shpnt->this_id, RECONNECT ? "enabled" : "disabled", PARITY ? "enabled" : "disabled", SYNCHRONOUS ? "enabled" : "disabled", DELAY, EXT_TRANS ? "enabled" : "disabled"); /* not expecting any interrupts */ SETPORT(SIMODE0, 0); SETPORT(SIMODE1, 0); if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) { printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq); goto out_host_put; } HOSTDATA(shpnt)->swint = 0; printk(KERN_INFO "aha152x%d: trying software interrupt, ", shpnt->host_no); mb(); SETPORT(DMACNTRL0, SWINT|INTEN); mdelay(1000); free_irq(shpnt->irq, shpnt); if (!HOSTDATA(shpnt)->swint) { if (TESTHI(DMASTAT, INTSTAT)) { printk("lost.\n"); } else { printk("failed.\n"); } SETPORT(DMACNTRL0, INTEN); printk(KERN_ERR "aha152x%d: irq %d possibly wrong. 
" "Please verify.\n", shpnt->host_no, shpnt->irq); goto out_host_put; } printk("ok.\n"); /* clear interrupts */ SETPORT(SSTAT0, 0x7f); SETPORT(SSTAT1, 0xef); if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) { printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq); goto out_host_put; } if( scsi_add_host(shpnt, NULL) ) { free_irq(shpnt->irq, shpnt); printk(KERN_ERR "aha152x%d: failed to add host.\n", shpnt->host_no); goto out_host_put; } scsi_scan_host(shpnt); return shpnt; out_host_put: list_del(&HOSTDATA(shpnt)->host_list); scsi_host_put(shpnt); return NULL; } void aha152x_release(struct Scsi_Host *shpnt) { if (!shpnt) return; scsi_remove_host(shpnt); if (shpnt->irq) free_irq(shpnt->irq, shpnt); #if !defined(AHA152X_PCMCIA) if (shpnt->io_port) release_region(shpnt->io_port, IO_RANGE); #endif #ifdef __ISAPNP__ if (HOSTDATA(shpnt)->pnpdev) pnp_device_detach(HOSTDATA(shpnt)->pnpdev); #endif list_del(&HOSTDATA(shpnt)->host_list); scsi_host_put(shpnt); } /* * setup controller to generate interrupts depending * on current state (lock has to be acquired) * */ static int setup_expected_interrupts(struct Scsi_Host *shpnt) { if(CURRENT_SC) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); acp->phase |= 1 << 16; if (acp->phase & selecting) { SETPORT(SSTAT1, SELTO); SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0)); SETPORT(SIMODE1, ENSELTIMO); } else { SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0); SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); } } else if(STATE==seldi) { SETPORT(SIMODE0, 0); SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); } else { SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0); SETPORT(SIMODE1, ENSCSIRST | ( (ISSUE_SC||DONE_SC) ? ENBUSFREE : 0)); } if(!HOSTDATA(shpnt)->in_intr) SETBITS(DMACNTRL0, INTEN); return TESTHI(DMASTAT, INTSTAT); } /* * Queue a command and setup interrupts for a free bus. */ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, struct completion *complete, int phase) { struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt); struct Scsi_Host *shpnt = SCpnt->device->host; unsigned long flags; acp->phase = not_issued | phase; acp->status = 0x1; /* Illegal status by SCSI standard */ acp->message = 0; acp->sent_command = 0; if (acp->phase & (resetting | check_condition)) { if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) { scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n"); return FAILED; } } else { SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); if(!SCpnt->host_scribble) { scmd_printk(KERN_ERR, SCpnt, "allocation failed\n"); return FAILED; } } SCNEXT(SCpnt) = NULL; SCSEM(SCpnt) = complete; /* setup scratch area SCp.ptr : buffer pointer SCp.this_residual : buffer length SCp.buffer : next buffer SCp.phase : current state of the command */ if ((phase & resetting) || !scsi_sglist(SCpnt)) { acp->ptr = NULL; acp->this_residual = 0; scsi_set_resid(SCpnt, 0); acp->buffer = NULL; } else { scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); acp->buffer = scsi_sglist(SCpnt); acp->ptr = SG_ADDRESS(acp->buffer); acp->this_residual = acp->buffer->length; } DO_LOCK(flags); #if defined(AHA152X_STAT) HOSTDATA(shpnt)->total_commands++; #endif /* Turn led on, when this is the first command. 
*/ HOSTDATA(shpnt)->commands++; if (HOSTDATA(shpnt)->commands==1) SETPORT(PORTA, 1); append_SC(&ISSUE_SC, SCpnt); if(!HOSTDATA(shpnt)->in_intr) setup_expected_interrupts(shpnt); DO_UNLOCK(flags); return 0; } /* * queue a command * */ static int aha152x_queue_lck(struct scsi_cmnd *SCpnt) { return aha152x_internal_queue(SCpnt, NULL, 0); } static DEF_SCSI_QCMD(aha152x_queue) /* * */ static void reset_done(struct scsi_cmnd *SCpnt) { if(SCSEM(SCpnt)) { complete(SCSEM(SCpnt)); } else { printk(KERN_ERR "aha152x: reset_done w/o completion\n"); } } static void aha152x_scsi_done(struct scsi_cmnd *SCpnt) { if (aha152x_priv(SCpnt)->phase & resetting) reset_done(SCpnt); else scsi_done(SCpnt); } /* * Abort a command * */ static int aha152x_abort(struct scsi_cmnd *SCpnt) { struct Scsi_Host *shpnt = SCpnt->device->host; struct scsi_cmnd *ptr; unsigned long flags; DO_LOCK(flags); ptr=remove_SC(&ISSUE_SC, SCpnt); if(ptr) { HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); DO_UNLOCK(flags); kfree(SCpnt->host_scribble); SCpnt->host_scribble=NULL; return SUCCESS; } DO_UNLOCK(flags); /* * FIXME: * for current command: queue ABORT for message out and raise ATN * for disconnected command: pseudo SC with ABORT message or ABORT on reselection? * */ scmd_printk(KERN_ERR, SCpnt, "cannot abort running or disconnected command\n"); return FAILED; } /* * Reset a device * */ static int aha152x_device_reset(struct scsi_cmnd * SCpnt) { struct Scsi_Host *shpnt = SCpnt->device->host; DECLARE_COMPLETION(done); int ret, issued, disconnected; unsigned char old_cmd_len = SCpnt->cmd_len; unsigned long flags; unsigned long timeleft; if(CURRENT_SC==SCpnt) { scmd_printk(KERN_ERR, SCpnt, "cannot reset current device\n"); return FAILED; } DO_LOCK(flags); issued = remove_SC(&ISSUE_SC, SCpnt) == NULL; disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt); DO_UNLOCK(flags); SCpnt->cmd_len = 0; aha152x_internal_queue(SCpnt, &done, resetting); timeleft = wait_for_completion_timeout(&done, 100*HZ); if (!timeleft) { /* remove command from issue queue */ DO_LOCK(flags); remove_SC(&ISSUE_SC, SCpnt); DO_UNLOCK(flags); } SCpnt->cmd_len = old_cmd_len; DO_LOCK(flags); if (aha152x_priv(SCpnt)->phase & resetted) { HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); kfree(SCpnt->host_scribble); SCpnt->host_scribble=NULL; ret = SUCCESS; } else { /* requeue */ if(!issued) { append_SC(&ISSUE_SC, SCpnt); } else if(disconnected) { append_SC(&DISCONNECTED_SC, SCpnt); } ret = FAILED; } DO_UNLOCK(flags); return ret; } static void free_hard_reset_SCs(struct Scsi_Host *shpnt, struct scsi_cmnd **SCs) { struct scsi_cmnd *ptr; ptr=*SCs; while(ptr) { struct scsi_cmnd *next; if(SCDATA(ptr)) { next = SCNEXT(ptr); } else { scmd_printk(KERN_DEBUG, ptr, "queue corrupted at %p\n", ptr); next = NULL; } if (!ptr->device->soft_reset) { remove_SC(SCs, ptr); HOSTDATA(shpnt)->commands--; kfree(ptr->host_scribble); ptr->host_scribble=NULL; } ptr = next; } } /* * Reset the bus * * AIC-6260 has a hard reset (MRST signal), but apparently * one cannot trigger it via software. So live with * a soft reset; no-one seemed to have cared. 
*/ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt) { unsigned long flags; DO_LOCK(flags); free_hard_reset_SCs(shpnt, &ISSUE_SC); free_hard_reset_SCs(shpnt, &DISCONNECTED_SC); SETPORT(SCSISEQ, SCSIRSTO); mdelay(256); SETPORT(SCSISEQ, 0); mdelay(DELAY); setup_expected_interrupts(shpnt); if(HOSTDATA(shpnt)->commands==0) SETPORT(PORTA, 0); DO_UNLOCK(flags); return SUCCESS; } /* * Reset the bus * */ static int aha152x_bus_reset(struct scsi_cmnd *SCpnt) { return aha152x_bus_reset_host(SCpnt->device->host); } /* * Restore default values to the AIC-6260 registers and reset the fifos * */ static void reset_ports(struct Scsi_Host *shpnt) { unsigned long flags; /* disable interrupts */ SETPORT(DMACNTRL0, RSTFIFO); SETPORT(SCSISEQ, 0); SETPORT(SXFRCTL1, 0); SETPORT(SCSISIG, 0); SETRATE(0); /* clear all interrupt conditions */ SETPORT(SSTAT0, 0x7f); SETPORT(SSTAT1, 0xef); SETPORT(SSTAT4, SYNCERR | FWERR | FRERR); SETPORT(DMACNTRL0, 0); SETPORT(DMACNTRL1, 0); SETPORT(BRSTCNTRL, 0xf1); /* clear SCSI fifos and transfer count */ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); SETPORT(SXFRCTL0, CH1); DO_LOCK(flags); setup_expected_interrupts(shpnt); DO_UNLOCK(flags); } /* * Reset the host (bus and controller) * */ int aha152x_host_reset_host(struct Scsi_Host *shpnt) { aha152x_bus_reset_host(shpnt); reset_ports(shpnt); return SUCCESS; } /* * Return the "logical geometry" * */ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info_array) { struct Scsi_Host *shpnt = sdev->host; /* try default translation */ info_array[0] = 64; info_array[1] = 32; info_array[2] = (unsigned long)capacity / (64 * 32); /* for disks >1GB do some guessing */ if (info_array[2] >= 1024) { int info[3]; /* try to figure out the geometry from the partition table */ if (scsicam_bios_param(bdev, capacity, info) < 0 || !((info[0] == 64 && info[1] == 32) || (info[0] == 255 && info[1] == 63))) { if (EXT_TRANS) { printk(KERN_NOTICE "aha152x: unable to verify geometry for disk with >1GB.\n" " using extended translation.\n"); info_array[0] = 255; info_array[1] = 63; info_array[2] = (unsigned long)capacity / (255 * 63); } else { printk(KERN_NOTICE "aha152x: unable to verify geometry for disk with >1GB.\n" " Using default translation. Please verify yourself.\n" " Perhaps you need to enable extended translation in the driver.\n" " See Documentation/scsi/aha152x.rst for details.\n"); } } else { info_array[0] = info[0]; info_array[1] = info[1]; info_array[2] = info[2]; if (info[0] == 255 && !EXT_TRANS) { printk(KERN_NOTICE "aha152x: current partition table is using extended translation.\n" " using it also, although it's not explicitly enabled.\n"); } } } return 0; } /* * Internal done function * */ static void done(struct Scsi_Host *shpnt, unsigned char status_byte, unsigned char host_byte) { if (CURRENT_SC) { if(DONE_SC) scmd_printk(KERN_ERR, CURRENT_SC, "there's already a completed command %p " "- will cause abort\n", DONE_SC); DONE_SC = CURRENT_SC; CURRENT_SC = NULL; set_status_byte(DONE_SC, status_byte); set_host_byte(DONE_SC, host_byte); } else printk(KERN_ERR "aha152x: done() called outside of command\n"); } static struct work_struct aha152x_tq; /* * Run service completions on the card with interrupts enabled. 
* */ static void run(struct work_struct *work) { struct aha152x_hostdata *hd; list_for_each_entry(hd, &aha152x_host_list, host_list) { struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata); is_complete(shost); } } /* * Interrupt handler * */ static irqreturn_t intr(int irqno, void *dev_id) { struct Scsi_Host *shpnt = dev_id; unsigned long flags; unsigned char rev, dmacntrl0; /* * Read a couple of registers that are known to not be all 1's. If * we read all 1's (-1), that means that either: * * a. The host adapter chip has gone bad, and we cannot control it, * OR * b. The host adapter is a PCMCIA card that has been ejected * * In either case, we cannot do anything with the host adapter at * this point in time. So just ignore the interrupt and return. * In the latter case, the interrupt might actually be meant for * someone else sharing this IRQ, and that driver will handle it. */ rev = GETPORT(REV); dmacntrl0 = GETPORT(DMACNTRL0); if ((rev == 0xFF) && (dmacntrl0 == 0xFF)) return IRQ_NONE; if( TESTLO(DMASTAT, INTSTAT) ) return IRQ_NONE; /* no more interrupts from the controller, while we're busy. INTEN is restored by the BH handler */ CLRBITS(DMACNTRL0, INTEN); DO_LOCK(flags); if( HOSTDATA(shpnt)->service==0 ) { HOSTDATA(shpnt)->service=1; /* Poke the BH handler */ INIT_WORK(&aha152x_tq, run); schedule_work(&aha152x_tq); } DO_UNLOCK(flags); return IRQ_HANDLED; } /* * busfree phase * - handle completition/disconnection/error of current command * - start selection for next command (if any) */ static void busfree_run(struct Scsi_Host *shpnt) { unsigned long flags; #if defined(AHA152X_STAT) int action=0; #endif SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); SETPORT(SXFRCTL0, CH1); SETPORT(SSTAT1, CLRBUSFREE); if(CURRENT_SC) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); #if defined(AHA152X_STAT) action++; #endif acp->phase &= ~syncneg; if (acp->phase & completed) { /* target sent COMMAND COMPLETE */ done(shpnt, acp->status, DID_OK); } else if (acp->phase & aborted) { done(shpnt, acp->status, DID_ABORT); } else if (acp->phase & resetted) { done(shpnt, acp->status, DID_RESET); } else if (acp->phase & disconnected) { /* target sent DISCONNECT */ #if defined(AHA152X_STAT) HOSTDATA(shpnt)->disconnections++; #endif append_SC(&DISCONNECTED_SC, CURRENT_SC); acp->phase |= 1 << 16; CURRENT_SC = NULL; } else { done(shpnt, SAM_STAT_GOOD, DID_ERROR); } #if defined(AHA152X_STAT) } else { HOSTDATA(shpnt)->busfree_without_old_command++; #endif } DO_LOCK(flags); if(DONE_SC) { #if defined(AHA152X_STAT) action++; #endif if (aha152x_priv(DONE_SC)->phase & check_condition) { struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; struct aha152x_scdata *sc = SCDATA(cmd); scsi_eh_restore_cmnd(cmd, &sc->ses); aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION; HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); /* turn led off */ } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) { #if defined(AHA152X_STAT) HOSTDATA(shpnt)->busfree_with_check_condition++; #endif if (!(aha152x_priv(DONE_SC)->phase & not_issued)) { struct aha152x_scdata *sc; struct scsi_cmnd *ptr = DONE_SC; DONE_SC=NULL; sc = SCDATA(ptr); /* It was allocated in aha152x_internal_queue? 
*/ BUG_ON(!sc); scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0); DO_UNLOCK(flags); aha152x_internal_queue(ptr, NULL, check_condition); DO_LOCK(flags); } } if (DONE_SC) { struct scsi_cmnd *ptr = DONE_SC; DONE_SC=NULL; /* turn led off, when no commands are in the driver */ HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); /* turn led off */ if (!(aha152x_priv(ptr)->phase & resetting)) { kfree(ptr->host_scribble); ptr->host_scribble=NULL; } DO_UNLOCK(flags); aha152x_scsi_done(ptr); DO_LOCK(flags); } DONE_SC=NULL; #if defined(AHA152X_STAT) } else { HOSTDATA(shpnt)->busfree_without_done_command++; #endif } if(ISSUE_SC) CURRENT_SC = remove_first_SC(&ISSUE_SC); DO_UNLOCK(flags); if(CURRENT_SC) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); #if defined(AHA152X_STAT) action++; #endif acp->phase |= selecting; /* clear selection timeout */ SETPORT(SSTAT1, SELTO); SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->device->id); SETPORT(SXFRCTL1, (PARITY ? ENSPCHK : 0 ) | ENSTIMER); SETPORT(SCSISEQ, ENSELO | ENAUTOATNO | (DISCONNECTED_SC ? ENRESELI : 0)); } else { #if defined(AHA152X_STAT) HOSTDATA(shpnt)->busfree_without_new_command++; #endif SETPORT(SCSISEQ, DISCONNECTED_SC ? ENRESELI : 0); } #if defined(AHA152X_STAT) if(!action) HOSTDATA(shpnt)->busfree_without_any_action++; #endif } /* * Selection done (OUT) * - queue IDENTIFY message and SDTR to selected target for message out * (ATN asserted automagically via ENAUTOATNO in busfree()) */ static void seldo_run(struct Scsi_Host *shpnt) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); SETPORT(SCSISIG, 0); SETPORT(SSTAT1, CLRBUSFREE); SETPORT(SSTAT1, CLRPHASECHG); acp->phase &= ~(selecting | not_issued); SETPORT(SCSISEQ, 0); if (TESTLO(SSTAT0, SELDO)) { scmd_printk(KERN_ERR, CURRENT_SC, "aha152x: passing bus free condition\n"); done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT); return; } SETPORT(SSTAT0, CLRSELDO); ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); if (acp->phase & aborting) { ADDMSGO(ABORT); } else if (acp->phase & resetting) { ADDMSGO(BUS_DEVICE_RESET); } else if (SYNCNEG==0 && SYNCHRONOUS) { acp->phase |= syncneg; MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8); SYNCNEG=1; /* negotiation in progress */ } SETRATE(SYNCRATE); } /* * Selection timeout * - return command to mid-level with failure cause * */ static void selto_run(struct Scsi_Host *shpnt) { struct aha152x_cmd_priv *acp; SETPORT(SCSISEQ, 0); SETPORT(SSTAT1, CLRSELTIMO); if (!CURRENT_SC) return; acp = aha152x_priv(CURRENT_SC); acp->phase &= ~selecting; if (acp->phase & aborted) done(shpnt, SAM_STAT_GOOD, DID_ABORT); else if (TESTLO(SSTAT0, SELINGO)) done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY); else /* ARBITRATION won, but SELECTION failed */ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT); } /* * Selection in done * - put current command back to issue queue * (reconnection of a disconnected nexus instead * of successful selection out) * */ static void seldi_run(struct Scsi_Host *shpnt) { int selid; int target; unsigned long flags; SETPORT(SCSISIG, 0); SETPORT(SSTAT0, CLRSELDI); SETPORT(SSTAT1, CLRBUSFREE); SETPORT(SSTAT1, CLRPHASECHG); if(CURRENT_SC) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); if (!(acp->phase & not_issued)) scmd_printk(KERN_ERR, CURRENT_SC, "command should not have been issued yet\n"); DO_LOCK(flags); append_SC(&ISSUE_SC, CURRENT_SC); DO_UNLOCK(flags); CURRENT_SC = NULL; } if (!DISCONNECTED_SC) return; RECONN_TARGET=-1; selid = GETPORT(SELID) & ~(1 << shpnt->this_id); if (selid==0) { 
shost_printk(KERN_INFO, shpnt, "target id unknown (%02x)\n", selid); return; } for(target=7; !(selid & (1 << target)); target--) ; if(selid & ~(1 << target)) { shost_printk(KERN_INFO, shpnt, "multiple targets reconnected (%02x)\n", selid); } SETPORT(SCSIID, (shpnt->this_id << OID_) | target); SETPORT(SCSISEQ, 0); SETRATE(HOSTDATA(shpnt)->syncrate[target]); RECONN_TARGET=target; } /* * message in phase * - handle initial message after reconnection to identify * reconnecting nexus * - queue command on DISCONNECTED_SC on DISCONNECT message * - set completed flag on COMMAND COMPLETE * (other completition code moved to busfree_run) * - handle response to SDTR * - clear synchronous transfer agreements on BUS RESET * * FIXME: what about SAVE POINTERS, RESTORE POINTERS? * */ static void msgi_run(struct Scsi_Host *shpnt) { for(;;) { struct aha152x_cmd_priv *acp; int sstat1 = GETPORT(SSTAT1); if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT)) return; if (TESTLO(SSTAT0, SPIORDY)) return; ADDMSGI(GETPORT(SCSIDAT)); if(!CURRENT_SC) { if(LASTSTATE!=seldi) { shost_printk(KERN_ERR, shpnt, "message in w/o current command" " not after reselection\n"); } /* * Handle reselection */ if(!(MSGI(0) & IDENTIFY_BASE)) { shost_printk(KERN_ERR, shpnt, "target didn't identify after reselection\n"); continue; } CURRENT_SC = remove_lun_SC(&DISCONNECTED_SC, RECONN_TARGET, MSGI(0) & 0x3f); if (!CURRENT_SC) { show_queues(shpnt); shost_printk(KERN_ERR, shpnt, "no disconnected command" " for target %d/%d\n", RECONN_TARGET, MSGI(0) & 0x3f); continue; } acp = aha152x_priv(CURRENT_SC); acp->message = MSGI(0); acp->phase &= ~disconnected; MSGILEN=0; /* next message if any */ continue; } acp = aha152x_priv(CURRENT_SC); acp->message = MSGI(0); switch (MSGI(0)) { case DISCONNECT: if (!RECONNECT) scmd_printk(KERN_WARNING, CURRENT_SC, "target was not allowed to disconnect\n"); acp->phase |= disconnected; break; case COMMAND_COMPLETE: acp->phase |= completed; break; case MESSAGE_REJECT: if (SYNCNEG==1) { scmd_printk(KERN_INFO, CURRENT_SC, "Synchronous Data Transfer Request" " was rejected\n"); SYNCNEG=2; /* negotiation completed */ } else scmd_printk(KERN_INFO, CURRENT_SC, "inbound message (MESSAGE REJECT)\n"); break; case SAVE_POINTERS: break; case RESTORE_POINTERS: break; case EXTENDED_MESSAGE: if(MSGILEN<2 || MSGILEN<MSGI(1)+2) { /* not yet completed */ continue; } switch (MSGI(2)) { case EXTENDED_SDTR: { long ticks; if (MSGI(1) != 3) { scmd_printk(KERN_ERR, CURRENT_SC, "SDTR message length!=3\n"); break; } if (!HOSTDATA(shpnt)->synchronous) break; printk(INFO_LEAD, CMDINFO(CURRENT_SC)); spi_print_msg(&MSGI(0)); printk("\n"); ticks = (MSGI(3) * 4 + 49) / 50; if (syncneg) { /* negotiation in progress */ if (ticks > 9 || MSGI(4) < 1 || MSGI(4) > 8) { ADDMSGO(MESSAGE_REJECT); scmd_printk(KERN_INFO, CURRENT_SC, "received Synchronous Data Transfer Request invalid - rejected\n"); break; } SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); } else if (ticks <= 9 && MSGI(4) >= 1) { ADDMSGO(EXTENDED_MESSAGE); ADDMSGO(3); ADDMSGO(EXTENDED_SDTR); if (ticks < 4) { ticks = 4; ADDMSGO(50); } else ADDMSGO(MSGI(3)); if (MSGI(4) > 8) MSGI(4) = 8; ADDMSGO(MSGI(4)); SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); } else { /* requested SDTR is too slow, do it asynchronously */ scmd_printk(KERN_INFO, CURRENT_SC, "Synchronous Data Transfer Request too slow - Rejecting\n"); ADDMSGO(MESSAGE_REJECT); } /* negotiation completed */ SYNCNEG=2; SETRATE(SYNCRATE); } break; case BUS_DEVICE_RESET: { int i; for(i=0; i<8; i++) { HOSTDATA(shpnt)->syncrate[i]=0; 
HOSTDATA(shpnt)->syncneg[i]=0; } } break; case EXTENDED_MODIFY_DATA_POINTER: case EXTENDED_EXTENDED_IDENTIFY: case EXTENDED_WDTR: default: ADDMSGO(MESSAGE_REJECT); break; } break; } MSGILEN=0; } } static void msgi_end(struct Scsi_Host *shpnt) { if(MSGILEN>0) scmd_printk(KERN_WARNING, CURRENT_SC, "target left before message completed (%d)\n", MSGILEN); if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE)) SETPORT(SCSISIG, P_MSGI | SIG_ATNO); } /* * message out phase * */ static void msgo_init(struct Scsi_Host *shpnt) { if(MSGOLEN==0) { if ((aha152x_priv(CURRENT_SC)->phase & syncneg) && SYNCNEG == 2 && SYNCRATE == 0) { ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); } else { scmd_printk(KERN_INFO, CURRENT_SC, "unexpected MESSAGE OUT phase; rejecting\n"); ADDMSGO(MESSAGE_REJECT); } } } /* * message out phase * */ static void msgo_run(struct Scsi_Host *shpnt) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); while(MSGO_I<MSGOLEN) { if (TESTLO(SSTAT0, SPIORDY)) return; if (MSGO_I==MSGOLEN-1) { /* Leave MESSAGE OUT after transfer */ SETPORT(SSTAT1, CLRATNO); } if (MSGO(MSGO_I) & IDENTIFY_BASE) acp->phase |= identified; if (MSGO(MSGO_I)==ABORT) acp->phase |= aborted; if (MSGO(MSGO_I)==BUS_DEVICE_RESET) acp->phase |= resetted; SETPORT(SCSIDAT, MSGO(MSGO_I++)); } } static void msgo_end(struct Scsi_Host *shpnt) { if(MSGO_I<MSGOLEN) { scmd_printk(KERN_ERR, CURRENT_SC, "message sent incompletely (%d/%d)\n", MSGO_I, MSGOLEN); if(SYNCNEG==1) { scmd_printk(KERN_INFO, CURRENT_SC, "Synchronous Data Transfer Request was rejected\n"); SYNCNEG=2; } } MSGO_I = 0; MSGOLEN = 0; } /* * command phase * */ static void cmd_init(struct Scsi_Host *shpnt) { if (aha152x_priv(CURRENT_SC)->sent_command) { scmd_printk(KERN_ERR, CURRENT_SC, "command already sent\n"); done(shpnt, SAM_STAT_GOOD, DID_ERROR); return; } CMD_I=0; } /* * command phase * */ static void cmd_run(struct Scsi_Host *shpnt) { while(CMD_I<CURRENT_SC->cmd_len) { if (TESTLO(SSTAT0, SPIORDY)) return; SETPORT(SCSIDAT, CURRENT_SC->cmnd[CMD_I++]); } } static void cmd_end(struct Scsi_Host *shpnt) { if(CMD_I<CURRENT_SC->cmd_len) scmd_printk(KERN_ERR, CURRENT_SC, "command sent incompletely (%d/%d)\n", CMD_I, CURRENT_SC->cmd_len); else aha152x_priv(CURRENT_SC)->sent_command++; } /* * status phase * */ static void status_run(struct Scsi_Host *shpnt) { if (TESTLO(SSTAT0, SPIORDY)) return; aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT); } /* * data in phase * */ static void datai_init(struct Scsi_Host *shpnt) { SETPORT(DMACNTRL0, RSTFIFO); SETPORT(DMACNTRL0, RSTFIFO|ENDMA); SETPORT(SXFRCTL0, CH1|CLRSTCNT); SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN); SETPORT(SIMODE0, 0); SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE); DATA_LEN=0; } static void datai_run(struct Scsi_Host *shpnt) { struct aha152x_cmd_priv *acp; unsigned long the_time; int fifodata, data_count; /* * loop while the phase persists or the fifos are not empty * */ while(TESTLO(DMASTAT, INTSTAT) || TESTLO(DMASTAT, DFIFOEMP) || TESTLO(SSTAT2, SEMPTY)) { /* FIXME: maybe this should be done by setting up * STCNT to trigger ENSWRAP interrupt, instead of * polling for DFIFOFULL */ the_time=jiffies + 100*HZ; while(TESTLO(DMASTAT, DFIFOFULL|INTSTAT) && time_before(jiffies,the_time)) barrier(); if(TESTLO(DMASTAT, DFIFOFULL|INTSTAT)) { scmd_printk(KERN_ERR, CURRENT_SC, "datai timeout\n"); break; } if(TESTHI(DMASTAT, DFIFOFULL)) { fifodata = 128; } else { the_time=jiffies + 100*HZ; while(TESTLO(SSTAT2, SEMPTY) && time_before(jiffies,the_time)) barrier(); if(TESTLO(SSTAT2, SEMPTY)) { 
scmd_printk(KERN_ERR, CURRENT_SC, "datai sempty timeout"); break; } fifodata = GETPORT(FIFOSTAT); } acp = aha152x_priv(CURRENT_SC); if (acp->this_residual > 0) { while (fifodata > 0 && acp->this_residual > 0) { data_count = fifodata > acp->this_residual ? acp->this_residual : fifodata; fifodata -= data_count; if (data_count & 1) { SETPORT(DMACNTRL0, ENDMA|_8BIT); *acp->ptr++ = GETPORT(DATAPORT); acp->this_residual--; DATA_LEN++; SETPORT(DMACNTRL0, ENDMA); } if (data_count > 1) { data_count >>= 1; insw(DATAPORT, acp->ptr, data_count); acp->ptr += 2 * data_count; acp->this_residual -= 2 * data_count; DATA_LEN += 2 * data_count; } if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) { /* advance to next buffer */ acp->buffer = sg_next(acp->buffer); acp->ptr = SG_ADDRESS(acp->buffer); acp->this_residual = acp->buffer->length; } } } else if (fifodata > 0) { scmd_printk(KERN_ERR, CURRENT_SC, "no buffers left for %d(%d) bytes" " (data overrun!?)\n", fifodata, GETPORT(FIFOSTAT)); SETPORT(DMACNTRL0, ENDMA|_8BIT); while(fifodata>0) { GETPORT(DATAPORT); fifodata--; DATA_LEN++; } SETPORT(DMACNTRL0, ENDMA|_8BIT); } } if(TESTLO(DMASTAT, INTSTAT) || TESTLO(DMASTAT, DFIFOEMP) || TESTLO(SSTAT2, SEMPTY) || GETPORT(FIFOSTAT)>0) { /* * something went wrong, if there's something left in the fifos * or the phase didn't change */ scmd_printk(KERN_ERR, CURRENT_SC, "fifos should be empty and phase should have changed\n"); } if(DATA_LEN!=GETSTCNT()) { scmd_printk(KERN_ERR, CURRENT_SC, "manual transfer count differs from automatic " "(count=%d;stcnt=%d;diff=%d;fifostat=%d)", DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN, GETPORT(FIFOSTAT)); mdelay(10000); } } static void datai_end(struct Scsi_Host *shpnt) { CMD_INC_RESID(CURRENT_SC, -GETSTCNT()); SETPORT(SXFRCTL0, CH1|CLRSTCNT); SETPORT(DMACNTRL0, 0); } /* * data out phase * */ static void datao_init(struct Scsi_Host *shpnt) { SETPORT(DMACNTRL0, WRITE_READ | RSTFIFO); SETPORT(DMACNTRL0, WRITE_READ | ENDMA); SETPORT(SXFRCTL0, CH1|CLRSTCNT); SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN); SETPORT(SIMODE0, 0); SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE ); DATA_LEN = scsi_get_resid(CURRENT_SC); } static void datao_run(struct Scsi_Host *shpnt) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); unsigned long the_time; int data_count; /* until phase changes or all data sent */ while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) { data_count = 128; if (data_count > acp->this_residual) data_count = acp->this_residual; if(TESTLO(DMASTAT, DFIFOEMP)) { scmd_printk(KERN_ERR, CURRENT_SC, "datao fifo not empty (%d)", GETPORT(FIFOSTAT)); break; } if(data_count & 1) { SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT); SETPORT(DATAPORT, *acp->ptr++); acp->this_residual--; CMD_INC_RESID(CURRENT_SC, -1); SETPORT(DMACNTRL0,WRITE_READ|ENDMA); } if(data_count > 1) { data_count >>= 1; outsw(DATAPORT, acp->ptr, data_count); acp->ptr += 2 * data_count; acp->this_residual -= 2 * data_count; CMD_INC_RESID(CURRENT_SC, -2 * data_count); } if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) { /* advance to next buffer */ acp->buffer = sg_next(acp->buffer); acp->ptr = SG_ADDRESS(acp->buffer); acp->this_residual = acp->buffer->length; } the_time=jiffies + 100*HZ; while(TESTLO(DMASTAT, DFIFOEMP|INTSTAT) && time_before(jiffies,the_time)) barrier(); if(TESTLO(DMASTAT, DFIFOEMP|INTSTAT)) { scmd_printk(KERN_ERR, CURRENT_SC, "dataout timeout\n"); break; } } } static void datao_end(struct Scsi_Host *shpnt) { struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); if(TESTLO(DMASTAT, 
DFIFOEMP)) { u32 datao_cnt = GETSTCNT(); int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC); int done; struct scatterlist *sg = scsi_sglist(CURRENT_SC); CMD_INC_RESID(CURRENT_SC, datao_out - datao_cnt); done = scsi_bufflen(CURRENT_SC) - scsi_get_resid(CURRENT_SC); /* Locate the first SG entry not yet sent */ while (done > 0 && !sg_is_last(sg)) { if (done < sg->length) break; done -= sg->length; sg = sg_next(sg); } acp->buffer = sg; acp->ptr = SG_ADDRESS(acp->buffer) + done; acp->this_residual = acp->buffer->length - done; } SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); SETPORT(SXFRCTL0, CH1); SETPORT(DMACNTRL0, 0); } /* * figure out what state we're in * */ static int update_state(struct Scsi_Host *shpnt) { int dataphase=0; unsigned int stat0 = GETPORT(SSTAT0); unsigned int stat1 = GETPORT(SSTAT1); PREVSTATE = STATE; STATE=unknown; if(stat1 & SCSIRSTI) { STATE=rsti; SETPORT(SCSISEQ,0); SETPORT(SSTAT1,SCSIRSTI); } else if (stat0 & SELDI && PREVSTATE == busfree) { STATE=seldi; } else if (stat0 & SELDO && CURRENT_SC && (aha152x_priv(CURRENT_SC)->phase & selecting)) { STATE=seldo; } else if(stat1 & SELTO) { STATE=selto; } else if(stat1 & BUSFREE) { STATE=busfree; SETPORT(SSTAT1,BUSFREE); } else if(stat1 & SCSIPERR) { STATE=parerr; SETPORT(SSTAT1,SCSIPERR); } else if(stat1 & REQINIT) { switch(GETPORT(SCSISIG) & P_MASK) { case P_MSGI: STATE=msgi; break; case P_MSGO: STATE=msgo; break; case P_DATAO: STATE=datao; break; case P_DATAI: STATE=datai; break; case P_STATUS: STATE=status; break; case P_CMD: STATE=cmd; break; } dataphase=1; } if((stat0 & SELDI) && STATE!=seldi && !dataphase) { scmd_printk(KERN_INFO, CURRENT_SC, "reselection missed?"); } if(STATE!=PREVSTATE) { LASTSTATE=PREVSTATE; } return dataphase; } /* * handle parity error * * FIXME: in which phase? * */ static void parerr_run(struct Scsi_Host *shpnt) { scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n"); done(shpnt, SAM_STAT_GOOD, DID_PARITY); } /* * handle reset in * */ static void rsti_run(struct Scsi_Host *shpnt) { struct scsi_cmnd *ptr; shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n"); ptr=DISCONNECTED_SC; while(ptr) { struct scsi_cmnd *next = SCNEXT(ptr); if (!ptr->device->soft_reset) { remove_SC(&DISCONNECTED_SC, ptr); kfree(ptr->host_scribble); ptr->host_scribble=NULL; set_host_byte(ptr, DID_RESET); aha152x_scsi_done(ptr); } ptr = next; } if(CURRENT_SC && !CURRENT_SC->device->soft_reset) done(shpnt, SAM_STAT_GOOD, DID_RESET); } /* * bottom-half handler * */ static void is_complete(struct Scsi_Host *shpnt) { int dataphase; unsigned long flags; int pending; if(!shpnt) return; DO_LOCK(flags); if( HOSTDATA(shpnt)->service==0 ) { DO_UNLOCK(flags); return; } HOSTDATA(shpnt)->service = 0; if(HOSTDATA(shpnt)->in_intr) { DO_UNLOCK(flags); /* aha152x_error never returns.. 
*/ aha152x_error(shpnt, "bottom-half already running!?"); } HOSTDATA(shpnt)->in_intr++; /* * loop while there are interrupt conditions pending * */ do { unsigned long start = jiffies; DO_UNLOCK(flags); dataphase=update_state(shpnt); /* * end previous state * */ if(PREVSTATE!=STATE && states[PREVSTATE].end) states[PREVSTATE].end(shpnt); /* * disable SPIO mode if previous phase used it * and this one doesn't * */ if(states[PREVSTATE].spio && !states[STATE].spio) { SETPORT(SXFRCTL0, CH1); SETPORT(DMACNTRL0, 0); if(CURRENT_SC) aha152x_priv(CURRENT_SC)->phase &= ~spiordy; } /* * accept current dataphase phase * */ if(dataphase) { SETPORT(SSTAT0, REQINIT); SETPORT(SCSISIG, GETPORT(SCSISIG) & P_MASK); SETPORT(SSTAT1, PHASECHG); } /* * enable SPIO mode if previous didn't use it * and this one does * */ if(!states[PREVSTATE].spio && states[STATE].spio) { SETPORT(DMACNTRL0, 0); SETPORT(SXFRCTL0, CH1|SPIOEN); if(CURRENT_SC) aha152x_priv(CURRENT_SC)->phase |= spiordy; } /* * initialize for new state * */ if(PREVSTATE!=STATE && states[STATE].init) states[STATE].init(shpnt); /* * handle current state * */ if(states[STATE].run) states[STATE].run(shpnt); else scmd_printk(KERN_ERR, CURRENT_SC, "unexpected state (%x)\n", STATE); /* * setup controller to interrupt on * the next expected condition and * loop if it's already there * */ DO_LOCK(flags); pending=setup_expected_interrupts(shpnt); #if defined(AHA152X_STAT) HOSTDATA(shpnt)->count[STATE]++; if(PREVSTATE!=STATE) HOSTDATA(shpnt)->count_trans[STATE]++; HOSTDATA(shpnt)->time[STATE] += jiffies-start; #endif } while(pending); /* * enable interrupts and leave bottom-half * */ HOSTDATA(shpnt)->in_intr--; SETBITS(DMACNTRL0, INTEN); DO_UNLOCK(flags); } /* * Dump the current driver status and panic */ static void aha152x_error(struct Scsi_Host *shpnt, char *msg) { shost_printk(KERN_EMERG, shpnt, "%s\n", msg); show_queues(shpnt); panic("aha152x panic\n"); } /* * display enabled interrupts */ static void disp_enintr(struct Scsi_Host *shpnt) { int s0, s1; s0 = GETPORT(SIMODE0); s1 = GETPORT(SIMODE1); shost_printk(KERN_DEBUG, shpnt, "enabled interrupts (%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", (s0 & ENSELDO) ? "ENSELDO " : "", (s0 & ENSELDI) ? "ENSELDI " : "", (s0 & ENSELINGO) ? "ENSELINGO " : "", (s0 & ENSWRAP) ? "ENSWRAP " : "", (s0 & ENSDONE) ? "ENSDONE " : "", (s0 & ENSPIORDY) ? "ENSPIORDY " : "", (s0 & ENDMADONE) ? "ENDMADONE " : "", (s1 & ENSELTIMO) ? "ENSELTIMO " : "", (s1 & ENATNTARG) ? "ENATNTARG " : "", (s1 & ENPHASEMIS) ? "ENPHASEMIS " : "", (s1 & ENBUSFREE) ? "ENBUSFREE " : "", (s1 & ENSCSIPERR) ? "ENSCSIPERR " : "", (s1 & ENPHASECHG) ? "ENPHASECHG " : "", (s1 & ENREQINIT) ? "ENREQINIT " : ""); } /* * Show the command data of a command */ static void show_command(struct scsi_cmnd *ptr) { const int phase = aha152x_priv(ptr)->phase; scsi_print_command(ptr); scmd_printk(KERN_DEBUG, ptr, "request_bufflen=%d; resid=%d; " "phase |%s%s%s%s%s%s%s%s%s; next=0x%p", scsi_bufflen(ptr), scsi_get_resid(ptr), phase & not_issued ? "not issued|" : "", phase & selecting ? "selecting|" : "", phase & identified ? "identified|" : "", phase & disconnected ? "disconnected|" : "", phase & completed ? "completed|" : "", phase & spiordy ? "spiordy|" : "", phase & syncneg ? "syncneg|" : "", phase & aborted ? "aborted|" : "", phase & resetted ? "resetted|" : "", SCDATA(ptr) ? 
SCNEXT(ptr) : NULL); } /* * Dump the queued data */ static void show_queues(struct Scsi_Host *shpnt) { struct scsi_cmnd *ptr; unsigned long flags; DO_LOCK(flags); printk(KERN_DEBUG "\nqueue status:\nissue_SC:\n"); for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) show_command(ptr); DO_UNLOCK(flags); printk(KERN_DEBUG "current_SC:\n"); if (CURRENT_SC) show_command(CURRENT_SC); else printk(KERN_DEBUG "none\n"); printk(KERN_DEBUG "disconnected_SC:\n"); for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL) show_command(ptr); disp_enintr(shpnt); } static void get_command(struct seq_file *m, struct scsi_cmnd * ptr) { struct aha152x_cmd_priv *acp = aha152x_priv(ptr); const int phase = acp->phase; int i; seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ", ptr, ptr->device->id, (u8)ptr->device->lun); for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) seq_printf(m, "0x%02x ", ptr->cmnd[i]); seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |", scsi_get_resid(ptr), acp->this_residual, sg_nents(acp->buffer) - 1); if (phase & not_issued) seq_puts(m, "not issued|"); if (phase & selecting) seq_puts(m, "selecting|"); if (phase & disconnected) seq_puts(m, "disconnected|"); if (phase & aborted) seq_puts(m, "aborted|"); if (phase & identified) seq_puts(m, "identified|"); if (phase & completed) seq_puts(m, "completed|"); if (phase & spiordy) seq_puts(m, "spiordy|"); if (phase & syncneg) seq_puts(m, "syncneg|"); seq_printf(m, "; next=0x%p\n", SCNEXT(ptr)); } static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt) { int s; seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name); s = GETPORT(SCSISEQ); seq_puts(m, "SCSISEQ( "); if (s & TEMODEO) seq_puts(m, "TARGET MODE "); if (s & ENSELO) seq_puts(m, "SELO "); if (s & ENSELI) seq_puts(m, "SELI "); if (s & ENRESELI) seq_puts(m, "RESELI "); if (s & ENAUTOATNO) seq_puts(m, "AUTOATNO "); if (s & ENAUTOATNI) seq_puts(m, "AUTOATNI "); if (s & ENAUTOATNP) seq_puts(m, "AUTOATNP "); if (s & SCSIRSTO) seq_puts(m, "SCSIRSTO "); seq_puts(m, ");"); seq_puts(m, " SCSISIG("); s = GETPORT(SCSISIG); switch (s & P_MASK) { case P_DATAO: seq_puts(m, "DATA OUT"); break; case P_DATAI: seq_puts(m, "DATA IN"); break; case P_CMD: seq_puts(m, "COMMAND"); break; case P_STATUS: seq_puts(m, "STATUS"); break; case P_MSGO: seq_puts(m, "MESSAGE OUT"); break; case P_MSGI: seq_puts(m, "MESSAGE IN"); break; default: seq_puts(m, "*invalid*"); break; } seq_puts(m, "); "); seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? 
"hi" : "lo"); seq_puts(m, "SSTAT( "); s = GETPORT(SSTAT0); if (s & TARGET) seq_puts(m, "TARGET "); if (s & SELDO) seq_puts(m, "SELDO "); if (s & SELDI) seq_puts(m, "SELDI "); if (s & SELINGO) seq_puts(m, "SELINGO "); if (s & SWRAP) seq_puts(m, "SWRAP "); if (s & SDONE) seq_puts(m, "SDONE "); if (s & SPIORDY) seq_puts(m, "SPIORDY "); if (s & DMADONE) seq_puts(m, "DMADONE "); s = GETPORT(SSTAT1); if (s & SELTO) seq_puts(m, "SELTO "); if (s & ATNTARG) seq_puts(m, "ATNTARG "); if (s & SCSIRSTI) seq_puts(m, "SCSIRSTI "); if (s & PHASEMIS) seq_puts(m, "PHASEMIS "); if (s & BUSFREE) seq_puts(m, "BUSFREE "); if (s & SCSIPERR) seq_puts(m, "SCSIPERR "); if (s & PHASECHG) seq_puts(m, "PHASECHG "); if (s & REQINIT) seq_puts(m, "REQINIT "); seq_puts(m, "); "); seq_puts(m, "SSTAT( "); s = GETPORT(SSTAT0) & GETPORT(SIMODE0); if (s & TARGET) seq_puts(m, "TARGET "); if (s & SELDO) seq_puts(m, "SELDO "); if (s & SELDI) seq_puts(m, "SELDI "); if (s & SELINGO) seq_puts(m, "SELINGO "); if (s & SWRAP) seq_puts(m, "SWRAP "); if (s & SDONE) seq_puts(m, "SDONE "); if (s & SPIORDY) seq_puts(m, "SPIORDY "); if (s & DMADONE) seq_puts(m, "DMADONE "); s = GETPORT(SSTAT1) & GETPORT(SIMODE1); if (s & SELTO) seq_puts(m, "SELTO "); if (s & ATNTARG) seq_puts(m, "ATNTARG "); if (s & SCSIRSTI) seq_puts(m, "SCSIRSTI "); if (s & PHASEMIS) seq_puts(m, "PHASEMIS "); if (s & BUSFREE) seq_puts(m, "BUSFREE "); if (s & SCSIPERR) seq_puts(m, "SCSIPERR "); if (s & PHASECHG) seq_puts(m, "PHASECHG "); if (s & REQINIT) seq_puts(m, "REQINIT "); seq_puts(m, "); "); seq_puts(m, "SXFRCTL0( "); s = GETPORT(SXFRCTL0); if (s & SCSIEN) seq_puts(m, "SCSIEN "); if (s & DMAEN) seq_puts(m, "DMAEN "); if (s & CH1) seq_puts(m, "CH1 "); if (s & CLRSTCNT) seq_puts(m, "CLRSTCNT "); if (s & SPIOEN) seq_puts(m, "SPIOEN "); if (s & CLRCH1) seq_puts(m, "CLRCH1 "); seq_puts(m, "); "); seq_puts(m, "SIGNAL( "); s = GETPORT(SCSISIG); if (s & SIG_ATNI) seq_puts(m, "ATNI "); if (s & SIG_SELI) seq_puts(m, "SELI "); if (s & SIG_BSYI) seq_puts(m, "BSYI "); if (s & SIG_REQI) seq_puts(m, "REQI "); if (s & SIG_ACKI) seq_puts(m, "ACKI "); seq_puts(m, "); "); seq_printf(m, "SELID(%02x), ", GETPORT(SELID)); seq_printf(m, "STCNT(%d), ", GETSTCNT()); seq_puts(m, "SSTAT2( "); s = GETPORT(SSTAT2); if (s & SOFFSET) seq_puts(m, "SOFFSET "); if (s & SEMPTY) seq_puts(m, "SEMPTY "); if (s & SFULL) seq_puts(m, "SFULL "); seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT)); s = GETPORT(SSTAT3); seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f); seq_puts(m, "SSTAT4( "); s = GETPORT(SSTAT4); if (s & SYNCERR) seq_puts(m, "SYNCERR "); if (s & FWERR) seq_puts(m, "FWERR "); if (s & FRERR) seq_puts(m, "FRERR "); seq_puts(m, "); "); seq_puts(m, "DMACNTRL0( "); s = GETPORT(DMACNTRL0); seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT"); seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO"); seq_printf(m, "%s ", s & WRITE_READ ? 
"WRITE" : "READ"); if (s & ENDMA) seq_puts(m, "ENDMA "); if (s & INTEN) seq_puts(m, "INTEN "); if (s & RSTFIFO) seq_puts(m, "RSTFIFO "); if (s & SWINT) seq_puts(m, "SWINT "); seq_puts(m, "); "); seq_puts(m, "DMASTAT( "); s = GETPORT(DMASTAT); if (s & ATDONE) seq_puts(m, "ATDONE "); if (s & WORDRDY) seq_puts(m, "WORDRDY "); if (s & DFIFOFULL) seq_puts(m, "DFIFOFULL "); if (s & DFIFOEMP) seq_puts(m, "DFIFOEMP "); seq_puts(m, ")\n"); seq_puts(m, "enabled interrupts( "); s = GETPORT(SIMODE0); if (s & ENSELDO) seq_puts(m, "ENSELDO "); if (s & ENSELDI) seq_puts(m, "ENSELDI "); if (s & ENSELINGO) seq_puts(m, "ENSELINGO "); if (s & ENSWRAP) seq_puts(m, "ENSWRAP "); if (s & ENSDONE) seq_puts(m, "ENSDONE "); if (s & ENSPIORDY) seq_puts(m, "ENSPIORDY "); if (s & ENDMADONE) seq_puts(m, "ENDMADONE "); s = GETPORT(SIMODE1); if (s & ENSELTIMO) seq_puts(m, "ENSELTIMO "); if (s & ENATNTARG) seq_puts(m, "ENATNTARG "); if (s & ENPHASEMIS) seq_puts(m, "ENPHASEMIS "); if (s & ENBUSFREE) seq_puts(m, "ENBUSFREE "); if (s & ENSCSIPERR) seq_puts(m, "ENSCSIPERR "); if (s & ENPHASECHG) seq_puts(m, "ENPHASECHG "); if (s & ENREQINIT) seq_puts(m, "ENREQINIT "); seq_puts(m, ")\n"); } static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) { if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0) return -EINVAL; #if defined(AHA152X_STAT) if(length>13 && strncmp("reset", buffer+8, 5)==0) { int i; HOSTDATA(shpnt)->total_commands=0; HOSTDATA(shpnt)->disconnections=0; HOSTDATA(shpnt)->busfree_without_any_action=0; HOSTDATA(shpnt)->busfree_without_old_command=0; HOSTDATA(shpnt)->busfree_without_new_command=0; HOSTDATA(shpnt)->busfree_without_done_command=0; HOSTDATA(shpnt)->busfree_with_check_condition=0; for (i = idle; i<maxstate; i++) { HOSTDATA(shpnt)->count[i]=0; HOSTDATA(shpnt)->count_trans[i]=0; HOSTDATA(shpnt)->time[i]=0; } shost_printk(KERN_INFO, shpnt, "aha152x: stats reset.\n"); } else #endif { return -EINVAL; } return length; } static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) { int i; struct scsi_cmnd *ptr; unsigned long flags; seq_puts(m, AHA152X_REVID "\n"); seq_printf(m, "ioports 0x%04lx to 0x%04lx\n", shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1); seq_printf(m, "interrupt 0x%02x\n", shpnt->irq); seq_printf(m, "disconnection/reconnection %s\n", RECONNECT ? "enabled" : "disabled"); seq_printf(m, "parity checking %s\n", PARITY ? "enabled" : "disabled"); seq_printf(m, "synchronous transfers %s\n", SYNCHRONOUS ? 
"enabled" : "disabled"); seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands); if(SYNCHRONOUS) { seq_puts(m, "synchronously operating targets (tick=50 ns):\n"); for (i = 0; i < 8; i++) if (HOSTDATA(shpnt)->syncrate[i] & 0x7f) seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n", i, (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2), (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50, HOSTDATA(shpnt)->syncrate[i] & 0x0f); } seq_puts(m, "\nqueue status:\n"); DO_LOCK(flags); if (ISSUE_SC) { seq_puts(m, "not yet issued commands:\n"); for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) get_command(m, ptr); } else seq_puts(m, "no not yet issued commands\n"); DO_UNLOCK(flags); if (CURRENT_SC) { seq_puts(m, "current command:\n"); get_command(m, CURRENT_SC); } else seq_puts(m, "no current command\n"); if (DISCONNECTED_SC) { seq_puts(m, "disconnected commands:\n"); for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) get_command(m, ptr); } else seq_puts(m, "no disconnected commands\n"); get_ports(m, shpnt); #if defined(AHA152X_STAT) seq_printf(m, "statistics:\n" "total commands: %d\n" "disconnections: %d\n" "busfree with check condition: %d\n" "busfree without old command: %d\n" "busfree without new command: %d\n" "busfree without done command: %d\n" "busfree without any action: %d\n" "state " "transitions " "count " "time\n", HOSTDATA(shpnt)->total_commands, HOSTDATA(shpnt)->disconnections, HOSTDATA(shpnt)->busfree_with_check_condition, HOSTDATA(shpnt)->busfree_without_old_command, HOSTDATA(shpnt)->busfree_without_new_command, HOSTDATA(shpnt)->busfree_without_done_command, HOSTDATA(shpnt)->busfree_without_any_action); for(i=0; i<maxstate; i++) { seq_printf(m, "%-10s %-12d %-12d %-12ld\n", states[i].name, HOSTDATA(shpnt)->count_trans[i], HOSTDATA(shpnt)->count[i], HOSTDATA(shpnt)->time[i]); } #endif return 0; } static int aha152x_adjust_queue(struct scsi_device *device) { blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); return 0; } static const struct scsi_host_template aha152x_driver_template = { .module = THIS_MODULE, .name = AHA152X_REVID, .proc_name = "aha152x", .show_info = aha152x_show_info, .write_info = aha152x_set_info, .queuecommand = aha152x_queue, .eh_abort_handler = aha152x_abort, .eh_device_reset_handler = aha152x_device_reset, .eh_bus_reset_handler = aha152x_bus_reset, .bios_param = aha152x_biosparam, .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, .dma_boundary = PAGE_SIZE - 1, .slave_alloc = aha152x_adjust_queue, .cmd_size = sizeof(struct aha152x_cmd_priv), }; #if !defined(AHA152X_PCMCIA) static int setup_count; static struct aha152x_setup setup[2]; /* possible i/o addresses for the AIC-6260; default first */ static unsigned short ports[] = { 0x340, 0x140 }; #if !defined(SKIP_BIOSTEST) /* possible locations for the Adaptec BIOS; defaults first */ static unsigned int addresses[] = { 0xdc000, /* default first */ 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xe0000, 0xeb800, /* VTech Platinum SMP */ 0xf0000, }; /* signatures for various AIC-6[23]60 based controllers. The point in detecting signatures is to avoid useless and maybe harmful probes on ports. I'm not sure that all listed boards pass auto-configuration. For those which fail the BIOS signature is obsolete, because user intervention to supply the configuration is needed anyway. May be an information whether or not the BIOS supports extended translation could be also useful here. 
*/ static struct signature { unsigned char *signature; int sig_offset; int sig_length; } signatures[] = { { "Adaptec AHA-1520 BIOS", 0x102e, 21 }, /* Adaptec 152x */ { "Adaptec AHA-1520B", 0x000b, 17 }, /* Adaptec 152x rev B */ { "Adaptec AHA-1520B", 0x0026, 17 }, /* Iomega Jaz Jet ISA (AIC6370Q) */ { "Adaptec ASW-B626 BIOS", 0x1029, 21 }, /* on-board controller */ { "Adaptec BIOS: ASW-B626", 0x000f, 22 }, /* on-board controller */ { "Adaptec ASW-B626 S2", 0x2e6c, 19 }, /* on-board controller */ { "Adaptec BIOS:AIC-6360", 0x000c, 21 }, /* on-board controller */ { "ScsiPro SP-360 BIOS", 0x2873, 19 }, /* ScsiPro-Controller */ { "GA-400 LOCAL BUS SCSI BIOS", 0x102e, 26 }, /* Gigabyte Local-Bus-SCSI */ { "Adaptec BIOS:AVA-282X", 0x000c, 21 }, /* Adaptec 282x */ { "Adaptec IBM Dock II SCSI", 0x2edd, 24 }, /* IBM Thinkpad Dock II */ { "Adaptec BIOS:AHA-1532P", 0x001c, 22 }, /* IBM Thinkpad Dock II SCSI */ { "DTC3520A Host Adapter BIOS", 0x318a, 26 }, /* DTC 3520A ISA SCSI */ }; #endif /* !SKIP_BIOSTEST */ /* * Test, if port_base is valid. * */ static int aha152x_porttest(int io_port) { int i; SETPORT(io_port + O_DMACNTRL1, 0); /* reset stack pointer */ for (i = 0; i < 16; i++) SETPORT(io_port + O_STACK, i); SETPORT(io_port + O_DMACNTRL1, 0); /* reset stack pointer */ for (i = 0; i < 16 && GETPORT(io_port + O_STACK) == i; i++) ; return (i == 16); } static int tc1550_porttest(int io_port) { int i; SETPORT(io_port + O_TC_DMACNTRL1, 0); /* reset stack pointer */ for (i = 0; i < 16; i++) SETPORT(io_port + O_STACK, i); SETPORT(io_port + O_TC_DMACNTRL1, 0); /* reset stack pointer */ for (i = 0; i < 16 && GETPORT(io_port + O_TC_STACK) == i; i++) ; return (i == 16); } static int checksetup(struct aha152x_setup *setup) { int i; for (i = 0; i < ARRAY_SIZE(ports) && (setup->io_port != ports[i]); i++) ; if (i == ARRAY_SIZE(ports)) return 0; if (!request_region(setup->io_port, IO_RANGE, "aha152x")) { printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup->io_port); return 0; } if( aha152x_porttest(setup->io_port) ) { setup->tc1550=0; } else if( tc1550_porttest(setup->io_port) ) { setup->tc1550=1; } else { release_region(setup->io_port, IO_RANGE); return 0; } release_region(setup->io_port, IO_RANGE); if ((setup->irq < IRQ_MIN) || (setup->irq > IRQ_MAX)) return 0; if ((setup->scsiid < 0) || (setup->scsiid > 7)) return 0; if ((setup->reconnect < 0) || (setup->reconnect > 1)) return 0; if ((setup->parity < 0) || (setup->parity > 1)) return 0; if ((setup->synchronous < 0) || (setup->synchronous > 1)) return 0; if ((setup->ext_trans < 0) || (setup->ext_trans > 1)) return 0; return 1; } static int __init aha152x_init(void) { int i, j, ok; #if defined(AUTOCONF) aha152x_config conf; #endif #ifdef __ISAPNP__ struct pnp_dev *dev=NULL, *pnpdev[2] = {NULL, NULL}; #endif if ( setup_count ) { printk(KERN_INFO "aha152x: processing commandline: "); for (i = 0; i<setup_count; i++) { if (!checksetup(&setup[i])) { printk(KERN_ERR "\naha152x: %s\n", setup[i].conf); printk(KERN_ERR "aha152x: invalid line\n"); } } printk("ok\n"); } #if defined(SETUP0) if (setup_count < ARRAY_SIZE(setup)) { struct aha152x_setup override = SETUP0; if (setup_count == 0 || (override.io_port != setup[0].io_port)) { if (!checksetup(&override)) { printk(KERN_ERR "\naha152x: invalid override SETUP0={0x%x,%d,%d,%d,%d,%d,%d,%d}\n", override.io_port, override.irq, override.scsiid, override.reconnect, override.parity, override.synchronous, override.delay, override.ext_trans); } else setup[setup_count++] = override; } } #endif #if defined(SETUP1) if 
(setup_count < ARRAY_SIZE(setup)) { struct aha152x_setup override = SETUP1; if (setup_count == 0 || (override.io_port != setup[0].io_port)) { if (!checksetup(&override)) { printk(KERN_ERR "\naha152x: invalid override SETUP1={0x%x,%d,%d,%d,%d,%d,%d,%d}\n", override.io_port, override.irq, override.scsiid, override.reconnect, override.parity, override.synchronous, override.delay, override.ext_trans); } else setup[setup_count++] = override; } } #endif #if defined(MODULE) if (setup_count<ARRAY_SIZE(setup) && (aha152x[0]!=0 || io[0]!=0 || irq[0]!=0)) { if(aha152x[0]!=0) { setup[setup_count].conf = ""; setup[setup_count].io_port = aha152x[0]; setup[setup_count].irq = aha152x[1]; setup[setup_count].scsiid = aha152x[2]; setup[setup_count].reconnect = aha152x[3]; setup[setup_count].parity = aha152x[4]; setup[setup_count].synchronous = aha152x[5]; setup[setup_count].delay = aha152x[6]; setup[setup_count].ext_trans = aha152x[7]; } else if (io[0] != 0 || irq[0] != 0) { if(io[0]!=0) setup[setup_count].io_port = io[0]; if(irq[0]!=0) setup[setup_count].irq = irq[0]; setup[setup_count].scsiid = scsiid[0]; setup[setup_count].reconnect = reconnect[0]; setup[setup_count].parity = parity[0]; setup[setup_count].synchronous = sync[0]; setup[setup_count].delay = delay[0]; setup[setup_count].ext_trans = exttrans[0]; } if (checksetup(&setup[setup_count])) setup_count++; else printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n", setup[setup_count].io_port, setup[setup_count].irq, setup[setup_count].scsiid, setup[setup_count].reconnect, setup[setup_count].parity, setup[setup_count].synchronous, setup[setup_count].delay, setup[setup_count].ext_trans); } if (setup_count<ARRAY_SIZE(setup) && (aha152x1[0]!=0 || io[1]!=0 || irq[1]!=0)) { if(aha152x1[0]!=0) { setup[setup_count].conf = ""; setup[setup_count].io_port = aha152x1[0]; setup[setup_count].irq = aha152x1[1]; setup[setup_count].scsiid = aha152x1[2]; setup[setup_count].reconnect = aha152x1[3]; setup[setup_count].parity = aha152x1[4]; setup[setup_count].synchronous = aha152x1[5]; setup[setup_count].delay = aha152x1[6]; setup[setup_count].ext_trans = aha152x1[7]; } else if (io[1] != 0 || irq[1] != 0) { if(io[1]!=0) setup[setup_count].io_port = io[1]; if(irq[1]!=0) setup[setup_count].irq = irq[1]; setup[setup_count].scsiid = scsiid[1]; setup[setup_count].reconnect = reconnect[1]; setup[setup_count].parity = parity[1]; setup[setup_count].synchronous = sync[1]; setup[setup_count].delay = delay[1]; setup[setup_count].ext_trans = exttrans[1]; } if (checksetup(&setup[setup_count])) setup_count++; else printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n", setup[setup_count].io_port, setup[setup_count].irq, setup[setup_count].scsiid, setup[setup_count].reconnect, setup[setup_count].parity, setup[setup_count].synchronous, setup[setup_count].delay, setup[setup_count].ext_trans); } #endif #ifdef __ISAPNP__ for(i=0; setup_count<ARRAY_SIZE(setup) && id_table[i].vendor; i++) { while ( setup_count<ARRAY_SIZE(setup) && (dev=pnp_find_dev(NULL, id_table[i].vendor, id_table[i].function, dev)) ) { if (pnp_device_attach(dev) < 0) continue; if (pnp_activate_dev(dev) < 0) { pnp_device_detach(dev); continue; } if (!pnp_port_valid(dev, 0)) { pnp_device_detach(dev); continue; } if (setup_count==1 && pnp_port_start(dev, 0)==setup[0].io_port) { pnp_device_detach(dev); continue; } setup[setup_count].io_port = pnp_port_start(dev, 0); 
setup[setup_count].irq = pnp_irq(dev, 0); setup[setup_count].scsiid = 7; setup[setup_count].reconnect = 1; setup[setup_count].parity = 1; setup[setup_count].synchronous = 1; setup[setup_count].delay = DELAY_DEFAULT; setup[setup_count].ext_trans = 0; #if defined(__ISAPNP__) pnpdev[setup_count] = dev; #endif printk (KERN_INFO "aha152x: found ISAPnP adapter at io=0x%03x, irq=%d\n", setup[setup_count].io_port, setup[setup_count].irq); setup_count++; } } #endif #if defined(AUTOCONF) if (setup_count<ARRAY_SIZE(setup)) { #if !defined(SKIP_BIOSTEST) ok = 0; for (i = 0; i < ARRAY_SIZE(addresses) && !ok; i++) { void __iomem *p = ioremap(addresses[i], 0x4000); if (!p) continue; for (j = 0; j<ARRAY_SIZE(signatures) && !ok; j++) ok = check_signature(p + signatures[j].sig_offset, signatures[j].signature, signatures[j].sig_length); iounmap(p); } if (!ok && setup_count == 0) return -ENODEV; printk(KERN_INFO "aha152x: BIOS test: passed, "); #else printk(KERN_INFO "aha152x: "); #endif /* !SKIP_BIOSTEST */ ok = 0; for (i = 0; i < ARRAY_SIZE(ports) && setup_count < 2; i++) { if ((setup_count == 1) && (setup[0].io_port == ports[i])) continue; if (!request_region(ports[i], IO_RANGE, "aha152x")) { printk(KERN_ERR "aha152x: io port 0x%x busy.\n", ports[i]); continue; } if (aha152x_porttest(ports[i])) { setup[setup_count].tc1550 = 0; conf.cf_port = (GETPORT(ports[i] + O_PORTA) << 8) + GETPORT(ports[i] + O_PORTB); } else if (tc1550_porttest(ports[i])) { setup[setup_count].tc1550 = 1; conf.cf_port = (GETPORT(ports[i] + O_TC_PORTA) << 8) + GETPORT(ports[i] + O_TC_PORTB); } else { release_region(ports[i], IO_RANGE); continue; } release_region(ports[i], IO_RANGE); ok++; setup[setup_count].io_port = ports[i]; setup[setup_count].irq = IRQ_MIN + conf.cf_irq; setup[setup_count].scsiid = conf.cf_id; setup[setup_count].reconnect = conf.cf_tardisc; setup[setup_count].parity = !conf.cf_parity; setup[setup_count].synchronous = conf.cf_syncneg; setup[setup_count].delay = DELAY_DEFAULT; setup[setup_count].ext_trans = 0; setup_count++; } if (ok) printk("auto configuration: ok, "); } #endif printk("%d controller(s) configured\n", setup_count); for (i=0; i<setup_count; i++) { if ( request_region(setup[i].io_port, IO_RANGE, "aha152x") ) { struct Scsi_Host *shpnt = aha152x_probe_one(&setup[i]); if( !shpnt ) { release_region(setup[i].io_port, IO_RANGE); #if defined(__ISAPNP__) } else if( pnpdev[i] ) { HOSTDATA(shpnt)->pnpdev=pnpdev[i]; pnpdev[i]=NULL; #endif } } else { printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup[i].io_port); } #if defined(__ISAPNP__) if( pnpdev[i] ) pnp_device_detach(pnpdev[i]); #endif } return 0; } static void __exit aha152x_exit(void) { struct aha152x_hostdata *hd, *tmp; list_for_each_entry_safe(hd, tmp, &aha152x_host_list, host_list) { struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata); aha152x_release(shost); } } module_init(aha152x_init); module_exit(aha152x_exit); #if !defined(MODULE) static int __init aha152x_setup(char *str) { int ints[10]; get_options(str, ARRAY_SIZE(ints), ints); if(setup_count>=ARRAY_SIZE(setup)) { printk(KERN_ERR "aha152x: you can only configure up to two controllers\n"); return 1; } setup[setup_count].conf = str; setup[setup_count].io_port = ints[0] >= 1 ? ints[1] : 0x340; setup[setup_count].irq = ints[0] >= 2 ? ints[2] : 11; setup[setup_count].scsiid = ints[0] >= 3 ? ints[3] : 7; setup[setup_count].reconnect = ints[0] >= 4 ? ints[4] : 1; setup[setup_count].parity = ints[0] >= 5 ? ints[5] : 1; setup[setup_count].synchronous = ints[0] >= 6 ? 
					ints[6] : 1;
	setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
	setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;

	if (ints[0] > 8)
		printk(KERN_NOTICE
		       "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
		       "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
	else
		setup_count++;

	return 1;
}
__setup("aha152x=", aha152x_setup);
#endif

#endif /* !AHA152X_PCMCIA */
linux-master
drivers/scsi/aha152x.c
// SPDX-License-Identifier: GPL-2.0-only /* * SCSI Enclosure Services * * Copyright (C) 2008 James Bottomley <[email protected]> */ #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/enclosure.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_sas.h> struct ses_device { unsigned char *page1; unsigned char *page1_types; unsigned char *page2; unsigned char *page10; short page1_len; short page1_num_types; short page2_len; short page10_len; }; struct ses_component { u64 addr; }; static bool ses_page2_supported(struct enclosure_device *edev) { struct ses_device *ses_dev = edev->scratch; return (ses_dev->page2 != NULL); } static int ses_probe(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); int err = -ENODEV; if (sdev->type != TYPE_ENCLOSURE) goto out; err = 0; sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n"); out: return err; } #define SES_TIMEOUT (30 * HZ) #define SES_RETRIES 3 static void init_device_slot_control(unsigned char *dest_desc, struct enclosure_component *ecomp, unsigned char *status) { memcpy(dest_desc, status, 4); dest_desc[0] = 0; /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */ if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE) dest_desc[1] = 0; dest_desc[2] &= 0xde; dest_desc[3] &= 0x3c; } static int ses_recv_diag(struct scsi_device *sdev, int page_code, void *buf, int bufflen) { int ret; unsigned char cmd[] = { RECEIVE_DIAGNOSTIC, 1, /* Set PCV bit */ page_code, bufflen >> 8, bufflen & 0xff, 0 }; unsigned char recv_page_code; unsigned int retries = SES_RETRIES; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; do { ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen, SES_TIMEOUT, 1, &exec_args); } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) && (sshdr.sense_key == NOT_READY || (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); if (unlikely(ret)) return ret; recv_page_code = ((unsigned char *)buf)[0]; if (likely(recv_page_code == page_code)) return ret; /* successful diagnostic but wrong page code. 
This happens to some * USB devices, just print a message and pretend there was an error */ sdev_printk(KERN_ERR, sdev, "Wrong diagnostic page; asked for %d got %u\n", page_code, recv_page_code); return -EINVAL; } static int ses_send_diag(struct scsi_device *sdev, int page_code, void *buf, int bufflen) { int result; unsigned char cmd[] = { SEND_DIAGNOSTIC, 0x10, /* Set PF bit */ 0, bufflen >> 8, bufflen & 0xff, 0 }; struct scsi_sense_hdr sshdr; unsigned int retries = SES_RETRIES; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; do { result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf, bufflen, SES_TIMEOUT, 1, &exec_args); } while (result > 0 && --retries && scsi_sense_valid(&sshdr) && (sshdr.sense_key == NOT_READY || (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); if (result) sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", result); return result; } static int ses_set_page2_descriptor(struct enclosure_device *edev, struct enclosure_component *ecomp, unsigned char *desc) { int i, j, count = 0, descriptor = ecomp->number; struct scsi_device *sdev = to_scsi_device(edev->edev.parent); struct ses_device *ses_dev = edev->scratch; unsigned char *type_ptr = ses_dev->page1_types; unsigned char *desc_ptr = ses_dev->page2 + 8; /* Clear everything */ memset(desc_ptr, 0, ses_dev->page2_len - 8); for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { desc_ptr += 4; if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) continue; if (count++ == descriptor) { memcpy(desc_ptr, desc, 4); /* set select */ desc_ptr[0] |= 0x80; /* clear reserved, just in case */ desc_ptr[0] &= 0xf0; } } } return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); } static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, struct enclosure_component *ecomp) { int i, j, count = 0, descriptor = ecomp->number; struct scsi_device *sdev = to_scsi_device(edev->edev.parent); struct ses_device *ses_dev = edev->scratch; unsigned char *type_ptr = ses_dev->page1_types; unsigned char *desc_ptr = ses_dev->page2 + 8; if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0) return NULL; for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { desc_ptr += 4; if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) continue; if (count++ == descriptor) return desc_ptr; } } return NULL; } /* For device slot and array device slot elements, byte 3 bit 6 * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this * code stands these bits are shifted 4 positions right so in * sysfs they will appear as bits 2 and 1 respectively. Strange. 
*/ static void ses_get_fault(struct enclosure_device *edev, struct enclosure_component *ecomp) { unsigned char *desc; if (!ses_page2_supported(edev)) { ecomp->fault = 0; return; } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->fault = (desc[3] & 0x60) >> 4; } static int ses_set_fault(struct enclosure_device *edev, struct enclosure_component *ecomp, enum enclosure_component_setting val) { unsigned char desc[4]; unsigned char *desc_ptr; if (!ses_page2_supported(edev)) return -EINVAL; desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) return -EIO; init_device_slot_control(desc, ecomp, desc_ptr); switch (val) { case ENCLOSURE_SETTING_DISABLED: desc[3] &= 0xdf; break; case ENCLOSURE_SETTING_ENABLED: desc[3] |= 0x20; break; default: /* SES doesn't do the SGPIO blink settings */ return -EINVAL; } return ses_set_page2_descriptor(edev, ecomp, desc); } static void ses_get_status(struct enclosure_device *edev, struct enclosure_component *ecomp) { unsigned char *desc; if (!ses_page2_supported(edev)) { ecomp->status = 0; return; } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->status = (desc[0] & 0x0f); } static void ses_get_locate(struct enclosure_device *edev, struct enclosure_component *ecomp) { unsigned char *desc; if (!ses_page2_supported(edev)) { ecomp->locate = 0; return; } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->locate = (desc[2] & 0x02) ? 1 : 0; } static int ses_set_locate(struct enclosure_device *edev, struct enclosure_component *ecomp, enum enclosure_component_setting val) { unsigned char desc[4]; unsigned char *desc_ptr; if (!ses_page2_supported(edev)) return -EINVAL; desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) return -EIO; init_device_slot_control(desc, ecomp, desc_ptr); switch (val) { case ENCLOSURE_SETTING_DISABLED: desc[2] &= 0xfd; break; case ENCLOSURE_SETTING_ENABLED: desc[2] |= 0x02; break; default: /* SES doesn't do the SGPIO blink settings */ return -EINVAL; } return ses_set_page2_descriptor(edev, ecomp, desc); } static int ses_set_active(struct enclosure_device *edev, struct enclosure_component *ecomp, enum enclosure_component_setting val) { unsigned char desc[4]; unsigned char *desc_ptr; if (!ses_page2_supported(edev)) return -EINVAL; desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) return -EIO; init_device_slot_control(desc, ecomp, desc_ptr); switch (val) { case ENCLOSURE_SETTING_DISABLED: desc[2] &= 0x7f; ecomp->active = 0; break; case ENCLOSURE_SETTING_ENABLED: desc[2] |= 0x80; ecomp->active = 1; break; default: /* SES doesn't do the SGPIO blink settings */ return -EINVAL; } return ses_set_page2_descriptor(edev, ecomp, desc); } static int ses_show_id(struct enclosure_device *edev, char *buf) { struct ses_device *ses_dev = edev->scratch; unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4); return sprintf(buf, "%#llx\n", id); } static void ses_get_power_status(struct enclosure_device *edev, struct enclosure_component *ecomp) { unsigned char *desc; if (!ses_page2_supported(edev)) { ecomp->power_status = 0; return; } desc = ses_get_page2_descriptor(edev, ecomp); if (desc) ecomp->power_status = (desc[3] & 0x10) ? 
0 : 1; } static int ses_set_power_status(struct enclosure_device *edev, struct enclosure_component *ecomp, int val) { unsigned char desc[4]; unsigned char *desc_ptr; if (!ses_page2_supported(edev)) return -EINVAL; desc_ptr = ses_get_page2_descriptor(edev, ecomp); if (!desc_ptr) return -EIO; init_device_slot_control(desc, ecomp, desc_ptr); switch (val) { /* power = 1 is device_off = 0 and vice versa */ case 0: desc[3] |= 0x10; break; case 1: desc[3] &= 0xef; break; default: return -EINVAL; } ecomp->power_status = val; return ses_set_page2_descriptor(edev, ecomp, desc); } static struct enclosure_component_callbacks ses_enclosure_callbacks = { .get_fault = ses_get_fault, .set_fault = ses_set_fault, .get_status = ses_get_status, .get_locate = ses_get_locate, .set_locate = ses_set_locate, .get_power_status = ses_get_power_status, .set_power_status = ses_set_power_status, .set_active = ses_set_active, .show_id = ses_show_id, }; struct ses_host_edev { struct Scsi_Host *shost; struct enclosure_device *edev; }; #if 0 int ses_match_host(struct enclosure_device *edev, void *data) { struct ses_host_edev *sed = data; struct scsi_device *sdev; if (!scsi_is_sdev_device(edev->edev.parent)) return 0; sdev = to_scsi_device(edev->edev.parent); if (sdev->host != sed->shost) return 0; sed->edev = edev; return 1; } #endif /* 0 */ static int ses_process_descriptor(struct enclosure_component *ecomp, unsigned char *desc, int max_desc_len) { int eip = desc[0] & 0x10; int invalid = desc[0] & 0x80; enum scsi_protocol proto = desc[0] & 0x0f; u64 addr = 0; int slot = -1; struct ses_component *scomp = ecomp->scratch; unsigned char *d; if (invalid) return 0; switch (proto) { case SCSI_PROTOCOL_FCP: if (eip) { if (max_desc_len <= 7) return 1; d = desc + 4; slot = d[3]; } break; case SCSI_PROTOCOL_SAS: if (eip) { if (max_desc_len <= 27) return 1; d = desc + 4; slot = d[3]; d = desc + 8; } else { if (max_desc_len <= 23) return 1; d = desc + 4; } /* only take the phy0 addr */ addr = (u64)d[12] << 56 | (u64)d[13] << 48 | (u64)d[14] << 40 | (u64)d[15] << 32 | (u64)d[16] << 24 | (u64)d[17] << 16 | (u64)d[18] << 8 | (u64)d[19]; break; default: /* FIXME: Need to add more protocols than just SAS */ break; } ecomp->slot = slot; scomp->addr = addr; return 0; } struct efd { u64 addr; struct device *dev; }; static int ses_enclosure_find_by_addr(struct enclosure_device *edev, void *data) { struct efd *efd = data; int i; struct ses_component *scomp; for (i = 0; i < edev->components; i++) { scomp = edev->component[i].scratch; if (scomp->addr != efd->addr) continue; if (enclosure_add_device(edev, i, efd->dev) == 0) kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE); return 1; } return 0; } #define INIT_ALLOC_SIZE 32 static void ses_enclosure_data_process(struct enclosure_device *edev, struct scsi_device *sdev, int create) { u32 result; unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; int i, j, page7_len, len, components; struct ses_device *ses_dev = edev->scratch; int types = ses_dev->page1_num_types; unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); if (!hdr_buf) goto simple_populate; /* re-read page 10 */ if (ses_dev->page10) ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len); /* Page 7 for the descriptors is optional */ result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); if (result) goto simple_populate; page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; /* add 1 for trailing '\0' we'll use */ buf = kzalloc(len + 1, GFP_KERNEL); if (!buf) goto simple_populate; result = 
ses_recv_diag(sdev, 7, buf, len); if (result) { simple_populate: kfree(buf); buf = NULL; desc_ptr = NULL; len = 0; page7_len = 0; } else { desc_ptr = buf + 8; len = (desc_ptr[2] << 8) + desc_ptr[3]; /* skip past overall descriptor */ desc_ptr += len + 4; } if (ses_dev->page10 && ses_dev->page10_len > 9) addl_desc_ptr = ses_dev->page10 + 8; type_ptr = ses_dev->page1_types; components = 0; for (i = 0; i < types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { char *name = NULL; struct enclosure_component *ecomp; int max_desc_len; if (desc_ptr) { if (desc_ptr + 3 >= buf + page7_len) { desc_ptr = NULL; } else { len = (desc_ptr[2] << 8) + desc_ptr[3]; desc_ptr += 4; if (desc_ptr + len > buf + page7_len) desc_ptr = NULL; else { /* Add trailing zero - pushes into * reserved space */ desc_ptr[len] = '\0'; name = desc_ptr; } } } if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { if (create) ecomp = enclosure_component_alloc( edev, components++, type_ptr[0], name); else if (components < edev->components) ecomp = &edev->component[components++]; else ecomp = ERR_PTR(-EINVAL); if (!IS_ERR(ecomp)) { if (addl_desc_ptr) { max_desc_len = ses_dev->page10_len - (addl_desc_ptr - ses_dev->page10); if (ses_process_descriptor(ecomp, addl_desc_ptr, max_desc_len)) addl_desc_ptr = NULL; } if (create) enclosure_component_register( ecomp); } } if (desc_ptr) desc_ptr += len; if (addl_desc_ptr && /* only find additional descriptions for specific devices */ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || /* these elements are optional */ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) { addl_desc_ptr += addl_desc_ptr[1] + 2; if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len) addl_desc_ptr = NULL; } } } kfree(buf); kfree(hdr_buf); } static void ses_match_to_enclosure(struct enclosure_device *edev, struct scsi_device *sdev, int refresh) { struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent); struct efd efd = { .addr = 0, }; if (refresh) ses_enclosure_data_process(edev, edev_sdev, 0); if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) efd.addr = sas_get_address(sdev); if (efd.addr) { efd.dev = &sdev->sdev_gendev; enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); } } static int ses_intf_add(struct device *cdev) { struct scsi_device *sdev = to_scsi_device(cdev->parent); struct scsi_device *tmp_sdev; unsigned char *buf = NULL, *hdr_buf, *type_ptr, page; struct ses_device *ses_dev; u32 result; int i, types, len, components = 0; int err = -ENOMEM; int num_enclosures; struct enclosure_device *edev; struct ses_component *scomp = NULL; if (!scsi_device_enclosure(sdev)) { /* not an enclosure, but might be in one */ struct enclosure_device *prev = NULL; while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { ses_match_to_enclosure(edev, sdev, 1); prev = edev; } return -ENODEV; } /* TYPE_ENCLOSURE prints a message in probe */ if (sdev->type != TYPE_ENCLOSURE) sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n"); ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL); hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); if (!hdr_buf || !ses_dev) goto err_init_free; page = 1; result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); if (result) goto recv_failed; len = (hdr_buf[2] << 8) + hdr_buf[3] 
+ 4; buf = kzalloc(len, GFP_KERNEL); if (!buf) goto err_free; result = ses_recv_diag(sdev, page, buf, len); if (result) goto recv_failed; types = 0; /* we always have one main enclosure and the rest are referred * to as secondary subenclosures */ num_enclosures = buf[1] + 1; /* begin at the enclosure descriptor */ type_ptr = buf + 8; /* skip all the enclosure descriptors */ for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { types += type_ptr[2]; type_ptr += type_ptr[3] + 4; } ses_dev->page1_types = type_ptr; ses_dev->page1_num_types = types; for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) components += type_ptr[1]; } ses_dev->page1 = buf; ses_dev->page1_len = len; buf = NULL; page = 2; result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); if (result) goto page2_not_supported; len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; buf = kzalloc(len, GFP_KERNEL); if (!buf) goto err_free; /* make sure getting page 2 actually works */ result = ses_recv_diag(sdev, 2, buf, len); if (result) goto recv_failed; ses_dev->page2 = buf; ses_dev->page2_len = len; buf = NULL; /* The additional information page --- allows us * to match up the devices */ page = 10; result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE); if (!result) { len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; buf = kzalloc(len, GFP_KERNEL); if (!buf) goto err_free; result = ses_recv_diag(sdev, page, buf, len); if (result) goto recv_failed; ses_dev->page10 = buf; ses_dev->page10_len = len; buf = NULL; } page2_not_supported: if (components > 0) { scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); if (!scomp) goto err_free; } edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev), components, &ses_enclosure_callbacks); if (IS_ERR(edev)) { err = PTR_ERR(edev); goto err_free; } kfree(hdr_buf); edev->scratch = ses_dev; for (i = 0; i < components; i++) edev->component[i].scratch = scomp + i; ses_enclosure_data_process(edev, sdev, 1); /* see if there are any devices matching before * we found the enclosure */ shost_for_each_device(tmp_sdev, sdev->host) { if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) continue; ses_match_to_enclosure(edev, tmp_sdev, 0); } return 0; recv_failed: sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n", page); err = -ENODEV; err_free: kfree(buf); kfree(scomp); kfree(ses_dev->page10); kfree(ses_dev->page2); kfree(ses_dev->page1); err_init_free: kfree(ses_dev); kfree(hdr_buf); sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err); return err; } static int ses_remove(struct device *dev) { return 0; } static void ses_intf_remove_component(struct scsi_device *sdev) { struct enclosure_device *edev, *prev = NULL; while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { prev = edev; if (!enclosure_remove_device(edev, &sdev->sdev_gendev)) break; } if (edev) put_device(&edev->edev); } static void ses_intf_remove_enclosure(struct scsi_device *sdev) { struct enclosure_device *edev; struct ses_device *ses_dev; /* exact match to this enclosure */ edev = enclosure_find(&sdev->sdev_gendev, NULL); if (!edev) return; ses_dev = edev->scratch; edev->scratch = NULL; kfree(ses_dev->page10); kfree(ses_dev->page1); kfree(ses_dev->page2); kfree(ses_dev); if (edev->components) kfree(edev->component[0].scratch); put_device(&edev->edev); enclosure_unregister(edev); } static void ses_intf_remove(struct device *cdev) { struct 
scsi_device *sdev = to_scsi_device(cdev->parent); if (!scsi_device_enclosure(sdev)) ses_intf_remove_component(sdev); else ses_intf_remove_enclosure(sdev); } static struct class_interface ses_interface = { .add_dev = ses_intf_add, .remove_dev = ses_intf_remove, }; static struct scsi_driver ses_template = { .gendrv = { .name = "ses", .owner = THIS_MODULE, .probe = ses_probe, .remove = ses_remove, }, }; static int __init ses_init(void) { int err; err = scsi_register_interface(&ses_interface); if (err) return err; err = scsi_register_driver(&ses_template.gendrv); if (err) goto out_unreg; return 0; out_unreg: scsi_unregister_interface(&ses_interface); return err; } static void __exit ses_exit(void) { scsi_unregister_driver(&ses_template.gendrv); scsi_unregister_interface(&ses_interface); } module_init(ses_init); module_exit(ses_exit); MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE); MODULE_AUTHOR("James Bottomley"); MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/scsi/ses.c
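The ses_recv_diag()/ses_send_diag() helpers in the driver above wrap two fixed SPC CDBs: RECEIVE DIAGNOSTIC RESULTS (opcode 0x1c) with the PCV bit set and a big-endian allocation length in bytes 3-4, and SEND DIAGNOSTIC (0x1d) with the PF bit set. Purely as an illustration, the same page-2 read can be reproduced from user space with SG_IO; the sketch below is an assumption-laden example (device node, buffer size, single attempt), not code from the driver.

/*
 * Hypothetical user-space sketch: issue the same RECEIVE DIAGNOSTIC
 * RESULTS CDB that ses_recv_diag() builds, via SG_IO, to dump SES page 2.
 * The /dev/sg3 node and 4 KiB buffer are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char page2[4096];
	unsigned char sense[32];
	/* PCV = 1, page code 2, allocation length in bytes 3..4 (big endian) */
	unsigned char cdb[6] = { 0x1c, 0x01, 0x02,
				 (unsigned char)(sizeof(page2) >> 8),
				 (unsigned char)(sizeof(page2) & 0xff), 0 };
	struct sg_io_hdr io;
	int fd = open("/dev/sg3", O_RDWR);	/* assumed enclosure device */

	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmdp = cdb;
	io.cmd_len = sizeof(cdb);
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxferp = page2;
	io.dxfer_len = sizeof(page2);
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 30000;			/* ms, mirrors SES_TIMEOUT */

	if (ioctl(fd, SG_IO, &io) < 0 || io.status != 0) {
		close(fd);
		return 1;
	}

	/* Page length is in bytes 2..3; status descriptors start at byte 8 */
	printf("page %u, length %u\n", page2[0], (page2[2] << 8) + page2[3]);
	close(fd);
	return 0;
}

Unlike the in-kernel path, this sketch skips the NOT READY / UNIT ATTENTION retry loop that ses_recv_diag() performs, so a freshly reset enclosure may need a second attempt.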
// SPDX-License-Identifier: GPL-2.0 /* * ESP front-end for Amiga ZORRO SCSI systems. * * Copyright (C) 1996 Jesper Skov ([email protected]) * * Copyright (C) 2011,2018 Michael Schmitz ([email protected]) for * migration to ESP SCSI core * * Copyright (C) 2013 Tuomas Vainikka ([email protected]) for * Blizzard 1230 DMA and probe function fixes */ /* * ZORRO bus code from: */ /* * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux. * Amiga MacroSystemUS WarpEngine SCSI controller. * Amiga Technologies/DKB A4091 SCSI controller. * * Written 1997 by Alan Hourihane <[email protected]> * plus modifications of the 53c7xx.c driver to support the Amiga. * * Rewritten to use 53c700.c by Kars de Jong <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/delay.h> #include <linux/zorro.h> #include <linux/slab.h> #include <linux/pgtable.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_spi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include "esp_scsi.h" MODULE_AUTHOR("Michael Schmitz <[email protected]>"); MODULE_DESCRIPTION("Amiga Zorro NCR5C9x (ESP) driver"); MODULE_LICENSE("GPL"); /* per-board register layout definitions */ /* Blizzard 1230 DMA interface */ struct blz1230_dma_registers { unsigned char dma_addr; /* DMA address [0x0000] */ unsigned char dmapad2[0x7fff]; unsigned char dma_latch; /* DMA latch [0x8000] */ }; /* Blizzard 1230II DMA interface */ struct blz1230II_dma_registers { unsigned char dma_addr; /* DMA address [0x0000] */ unsigned char dmapad2[0xf]; unsigned char dma_latch; /* DMA latch [0x0010] */ }; /* Blizzard 2060 DMA interface */ struct blz2060_dma_registers { unsigned char dma_led_ctrl; /* DMA led control [0x000] */ unsigned char dmapad1[0x0f]; unsigned char dma_addr0; /* DMA address (MSB) [0x010] */ unsigned char dmapad2[0x03]; unsigned char dma_addr1; /* DMA address [0x014] */ unsigned char dmapad3[0x03]; unsigned char dma_addr2; /* DMA address [0x018] */ unsigned char dmapad4[0x03]; unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */ }; /* DMA control bits */ #define DMA_WRITE 0x80000000 /* Cyberstorm DMA interface */ struct cyber_dma_registers { unsigned char dma_addr0; /* DMA address (MSB) [0x000] */ unsigned char dmapad1[1]; unsigned char dma_addr1; /* DMA address [0x002] */ unsigned char dmapad2[1]; unsigned char dma_addr2; /* DMA address [0x004] */ unsigned char dmapad3[1]; unsigned char dma_addr3; /* DMA address (LSB) [0x006] */ unsigned char dmapad4[0x3fb]; unsigned char cond_reg; /* DMA cond (ro) [0x402] */ #define ctrl_reg cond_reg /* DMA control (wo) [0x402] */ }; /* DMA control bits */ #define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */ #define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */ /* DMA status bits */ #define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? 
*/ /* The CyberStorm II DMA interface */ struct cyberII_dma_registers { unsigned char cond_reg; /* DMA cond (ro) [0x000] */ #define ctrl_reg cond_reg /* DMA control (wo) [0x000] */ unsigned char dmapad4[0x3f]; unsigned char dma_addr0; /* DMA address (MSB) [0x040] */ unsigned char dmapad1[3]; unsigned char dma_addr1; /* DMA address [0x044] */ unsigned char dmapad2[3]; unsigned char dma_addr2; /* DMA address [0x048] */ unsigned char dmapad3[3]; unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */ }; /* Fastlane DMA interface */ struct fastlane_dma_registers { unsigned char cond_reg; /* DMA status (ro) [0x0000] */ #define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */ char dmapad1[0x3f]; unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */ }; /* * The controller registers can be found in the Z2 config area at these * offsets: */ #define FASTLANE_ESP_ADDR 0x1000001 /* DMA status bits */ #define FASTLANE_DMA_MINT 0x80 #define FASTLANE_DMA_IACT 0x40 #define FASTLANE_DMA_CREQ 0x20 /* DMA control bits */ #define FASTLANE_DMA_FCODE 0xa0 #define FASTLANE_DMA_MASK 0xf3 #define FASTLANE_DMA_WRITE 0x08 /* 1 = write */ #define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */ #define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */ #define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */ /* * private data used for driver */ struct zorro_esp_priv { struct esp *esp; /* our ESP instance - for Scsi_host* */ void __iomem *board_base; /* virtual address (Zorro III board) */ int zorro3; /* board is Zorro III */ unsigned char ctrl_data; /* shadow copy of ctrl_reg */ }; /* * On all implementations except for the Oktagon, padding between ESP * registers is three bytes. * On Oktagon, it is one byte - use a different accessor there. * * Oktagon needs PDMA - currently unsupported! */ static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg) { writeb(val, esp->regs + (reg * 4UL)); } static u8 zorro_esp_read8(struct esp *esp, unsigned long reg) { return readb(esp->regs + (reg * 4UL)); } static int zorro_esp_irq_pending(struct esp *esp) { /* check ESP status register; DMA has no status reg. */ if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) return 1; return 0; } static int cyber_esp_irq_pending(struct esp *esp) { struct cyber_dma_registers __iomem *dregs = esp->dma_regs; unsigned char dma_status = readb(&dregs->cond_reg); /* It's important to check the DMA IRQ bit in the correct way! */ return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) && (dma_status & CYBER_DMA_HNDL_INTR)); } static int fastlane_esp_irq_pending(struct esp *esp) { struct fastlane_dma_registers __iomem *dregs = esp->dma_regs; unsigned char dma_status; dma_status = readb(&dregs->cond_reg); if (dma_status & FASTLANE_DMA_IACT) return 0; /* not our IRQ */ /* Return non-zero if ESP requested IRQ */ return ( (dma_status & FASTLANE_DMA_CREQ) && (!(dma_status & FASTLANE_DMA_MINT)) && (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)); } static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { return dma_len > (1U << 16) ? (1U << 16) : dma_len; } static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { /* The old driver used 0xfffc as limit, so do that here too */ return dma_len > 0xfffc ? 
0xfffc : dma_len; } static void zorro_esp_reset_dma(struct esp *esp) { /* nothing to do here */ } static void zorro_esp_dma_drain(struct esp *esp) { /* nothing to do here */ } static void zorro_esp_dma_invalidate(struct esp *esp) { /* nothing to do here */ } static void fastlane_esp_dma_invalidate(struct esp *esp) { struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev); struct fastlane_dma_registers __iomem *dregs = esp->dma_regs; unsigned char *ctrl_data = &zep->ctrl_data; *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK); writeb(0, &dregs->clear_strobe); z_writel(0, zep->board_base); } /* Blizzard 1230/60 SCSI-IV DMA */ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct blz1230_dma_registers __iomem *dregs = esp->dma_regs; u8 phase = esp->sreg & ESP_STAT_PMASK; /* * Use PIO if transferring message bytes to esp->command_block_dma. * PIO requires a virtual address, so substitute esp->command_block * for addr. */ if (phase == ESP_MIP && addr == esp->command_block_dma) { esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, dma_count, write, cmd); return; } /* Clear the results of a possible prior esp->ops->send_dma_cmd() */ esp->send_cmd_error = 0; esp->send_cmd_residual = 0; if (write) /* DMA receive */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_FROM_DEVICE); else /* DMA send */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_TO_DEVICE); addr >>= 1; if (write) addr &= ~(DMA_WRITE); else addr |= DMA_WRITE; writeb((addr >> 24) & 0xff, &dregs->dma_latch); writeb((addr >> 24) & 0xff, &dregs->dma_addr); writeb((addr >> 16) & 0xff, &dregs->dma_addr); writeb((addr >> 8) & 0xff, &dregs->dma_addr); writeb(addr & 0xff, &dregs->dma_addr); scsi_esp_cmd(esp, ESP_CMD_DMA); zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); scsi_esp_cmd(esp, cmd); } /* Blizzard 1230-II DMA */ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs; u8 phase = esp->sreg & ESP_STAT_PMASK; /* Use PIO if transferring message bytes to esp->command_block_dma */ if (phase == ESP_MIP && addr == esp->command_block_dma) { esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, dma_count, write, cmd); return; } esp->send_cmd_error = 0; esp->send_cmd_residual = 0; if (write) /* DMA receive */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_FROM_DEVICE); else /* DMA send */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_TO_DEVICE); addr >>= 1; if (write) addr &= ~(DMA_WRITE); else addr |= DMA_WRITE; writeb((addr >> 24) & 0xff, &dregs->dma_latch); writeb((addr >> 16) & 0xff, &dregs->dma_addr); writeb((addr >> 8) & 0xff, &dregs->dma_addr); writeb(addr & 0xff, &dregs->dma_addr); scsi_esp_cmd(esp, ESP_CMD_DMA); zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); scsi_esp_cmd(esp, cmd); } /* Blizzard 2060 DMA */ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct blz2060_dma_registers __iomem *dregs = esp->dma_regs; u8 phase = esp->sreg & ESP_STAT_PMASK; /* Use PIO if transferring message bytes to esp->command_block_dma */ if (phase == ESP_MIP && addr == esp->command_block_dma) { esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, dma_count, write, cmd); return; } 
esp->send_cmd_error = 0; esp->send_cmd_residual = 0; if (write) /* DMA receive */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_FROM_DEVICE); else /* DMA send */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_TO_DEVICE); addr >>= 1; if (write) addr &= ~(DMA_WRITE); else addr |= DMA_WRITE; writeb(addr & 0xff, &dregs->dma_addr3); writeb((addr >> 8) & 0xff, &dregs->dma_addr2); writeb((addr >> 16) & 0xff, &dregs->dma_addr1); writeb((addr >> 24) & 0xff, &dregs->dma_addr0); scsi_esp_cmd(esp, ESP_CMD_DMA); zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); scsi_esp_cmd(esp, cmd); } /* Cyberstorm I DMA */ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev); struct cyber_dma_registers __iomem *dregs = esp->dma_regs; u8 phase = esp->sreg & ESP_STAT_PMASK; unsigned char *ctrl_data = &zep->ctrl_data; /* Use PIO if transferring message bytes to esp->command_block_dma */ if (phase == ESP_MIP && addr == esp->command_block_dma) { esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, dma_count, write, cmd); return; } esp->send_cmd_error = 0; esp->send_cmd_residual = 0; zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); if (write) { /* DMA receive */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_FROM_DEVICE); addr &= ~(1); } else { /* DMA send */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_TO_DEVICE); addr |= 1; } writeb((addr >> 24) & 0xff, &dregs->dma_addr0); writeb((addr >> 16) & 0xff, &dregs->dma_addr1); writeb((addr >> 8) & 0xff, &dregs->dma_addr2); writeb(addr & 0xff, &dregs->dma_addr3); if (write) *ctrl_data &= ~(CYBER_DMA_WRITE); else *ctrl_data |= CYBER_DMA_WRITE; *ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */ writeb(*ctrl_data, &dregs->ctrl_reg); scsi_esp_cmd(esp, cmd); } /* Cyberstorm II DMA */ static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct cyberII_dma_registers __iomem *dregs = esp->dma_regs; u8 phase = esp->sreg & ESP_STAT_PMASK; /* Use PIO if transferring message bytes to esp->command_block_dma */ if (phase == ESP_MIP && addr == esp->command_block_dma) { esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, dma_count, write, cmd); return; } esp->send_cmd_error = 0; esp->send_cmd_residual = 0; zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); if (write) { /* DMA receive */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_FROM_DEVICE); addr &= ~(1); } else { /* DMA send */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_TO_DEVICE); addr |= 1; } writeb((addr >> 24) & 0xff, &dregs->dma_addr0); writeb((addr >> 16) & 0xff, &dregs->dma_addr1); writeb((addr >> 8) & 0xff, &dregs->dma_addr2); writeb(addr & 0xff, &dregs->dma_addr3); scsi_esp_cmd(esp, cmd); } /* Fastlane DMA */ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev); struct fastlane_dma_registers __iomem *dregs = esp->dma_regs; u8 phase = esp->sreg & ESP_STAT_PMASK; unsigned char *ctrl_data = &zep->ctrl_data; /* Use PIO if transferring message bytes to esp->command_block_dma */ if (phase == ESP_MIP && addr == esp->command_block_dma) { 
esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count, dma_count, write, cmd); return; } esp->send_cmd_error = 0; esp->send_cmd_residual = 0; zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); if (write) { /* DMA receive */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_FROM_DEVICE); addr &= ~(1); } else { /* DMA send */ dma_sync_single_for_device(esp->dev, addr, esp_count, DMA_TO_DEVICE); addr |= 1; } writeb(0, &dregs->clear_strobe); z_writel(addr, ((addr & 0x00ffffff) + zep->board_base)); if (write) { *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE; } else { *ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE | FASTLANE_DMA_WRITE); } writeb(*ctrl_data, &dregs->ctrl_reg); scsi_esp_cmd(esp, cmd); } static int zorro_esp_dma_error(struct esp *esp) { return esp->send_cmd_error; } /* per-board ESP driver ops */ static const struct esp_driver_ops blz1230_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = zorro_esp_irq_pending, .dma_length_limit = zorro_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = zorro_esp_dma_invalidate, .send_dma_cmd = zorro_esp_send_blz1230_dma_cmd, .dma_error = zorro_esp_dma_error, }; static const struct esp_driver_ops blz1230II_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = zorro_esp_irq_pending, .dma_length_limit = zorro_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = zorro_esp_dma_invalidate, .send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd, .dma_error = zorro_esp_dma_error, }; static const struct esp_driver_ops blz2060_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = zorro_esp_irq_pending, .dma_length_limit = zorro_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = zorro_esp_dma_invalidate, .send_dma_cmd = zorro_esp_send_blz2060_dma_cmd, .dma_error = zorro_esp_dma_error, }; static const struct esp_driver_ops cyber_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = cyber_esp_irq_pending, .dma_length_limit = zorro_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = zorro_esp_dma_invalidate, .send_dma_cmd = zorro_esp_send_cyber_dma_cmd, .dma_error = zorro_esp_dma_error, }; static const struct esp_driver_ops cyberII_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = zorro_esp_irq_pending, .dma_length_limit = zorro_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = zorro_esp_dma_invalidate, .send_dma_cmd = zorro_esp_send_cyberII_dma_cmd, .dma_error = zorro_esp_dma_error, }; static const struct esp_driver_ops fastlane_esp_ops = { .esp_write8 = zorro_esp_write8, .esp_read8 = zorro_esp_read8, .irq_pending = fastlane_esp_irq_pending, .dma_length_limit = fastlane_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = fastlane_esp_dma_invalidate, .send_dma_cmd = zorro_esp_send_fastlane_dma_cmd, .dma_error = zorro_esp_dma_error, }; /* Zorro driver config data */ struct zorro_driver_data { const char *name; unsigned long offset; unsigned long dma_offset; int absolute; /* offset is absolute address */ int scsi_option; const struct 
esp_driver_ops *esp_ops; }; /* board types */ enum { ZORRO_BLZ1230, ZORRO_BLZ1230II, ZORRO_BLZ2060, ZORRO_CYBER, ZORRO_CYBERII, ZORRO_FASTLANE, }; /* per-board config data */ static const struct zorro_driver_data zorro_esp_boards[] = { [ZORRO_BLZ1230] = { .name = "Blizzard 1230", .offset = 0x8000, .dma_offset = 0x10000, .scsi_option = 1, .esp_ops = &blz1230_esp_ops, }, [ZORRO_BLZ1230II] = { .name = "Blizzard 1230II", .offset = 0x10000, .dma_offset = 0x10021, .scsi_option = 1, .esp_ops = &blz1230II_esp_ops, }, [ZORRO_BLZ2060] = { .name = "Blizzard 2060", .offset = 0x1ff00, .dma_offset = 0x1ffe0, .esp_ops = &blz2060_esp_ops, }, [ZORRO_CYBER] = { .name = "CyberStormI", .offset = 0xf400, .dma_offset = 0xf800, .esp_ops = &cyber_esp_ops, }, [ZORRO_CYBERII] = { .name = "CyberStormII", .offset = 0x1ff03, .dma_offset = 0x1ff43, .scsi_option = 1, .esp_ops = &cyberII_esp_ops, }, [ZORRO_FASTLANE] = { .name = "Fastlane", .offset = 0x1000001, .dma_offset = 0x1000041, .esp_ops = &fastlane_esp_ops, }, }; static const struct zorro_device_id zorro_esp_zorro_tbl[] = { { /* Blizzard 1230 IV */ .id = ZORRO_ID(PHASE5, 0x11, 0), .driver_data = ZORRO_BLZ1230, }, { /* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */ .id = ZORRO_ID(PHASE5, 0x0B, 0), .driver_data = ZORRO_BLZ1230II, }, { /* Blizzard 2060 */ .id = ZORRO_ID(PHASE5, 0x18, 0), .driver_data = ZORRO_BLZ2060, }, { /* Cyberstorm */ .id = ZORRO_ID(PHASE5, 0x0C, 0), .driver_data = ZORRO_CYBER, }, { /* Cyberstorm II */ .id = ZORRO_ID(PHASE5, 0x19, 0), .driver_data = ZORRO_CYBERII, }, { 0 } }; MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl); static int zorro_esp_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { const struct scsi_host_template *tpnt = &scsi_esp_template; struct Scsi_Host *host; struct esp *esp; const struct zorro_driver_data *zdd; struct zorro_esp_priv *zep; unsigned long board, ioaddr, dmaaddr; int err; board = zorro_resource_start(z); zdd = &zorro_esp_boards[ent->driver_data]; pr_info("%s found at address 0x%lx.\n", zdd->name, board); zep = kzalloc(sizeof(*zep), GFP_KERNEL); if (!zep) { pr_err("Can't allocate device private data!\n"); return -ENOMEM; } /* let's figure out whether we have a Zorro II or Zorro III board */ if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) { if (board > 0xffffff) zep->zorro3 = 1; } else { /* * Even though most of these boards identify as Zorro II, * they are in fact CPU expansion slot boards and have full * access to all of memory. Fix up DMA bitmask here. */ z->dev.coherent_dma_mask = DMA_BIT_MASK(32); } /* * If Zorro III and ID matches Fastlane, our device table entry * contains data for the Blizzard 1230 II board which does share the * same ID. Fix up device table entry here. * TODO: Some Cyberstom060 boards also share this ID but would need * to use the Cyberstorm I driver data ... we catch this by checking * for presence of ESP chip later, but don't try to fix up yet. 
*/ if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n", zdd->name, board); zdd = &zorro_esp_boards[ZORRO_FASTLANE]; } if (zdd->absolute) { ioaddr = zdd->offset; dmaaddr = zdd->dma_offset; } else { ioaddr = board + zdd->offset; dmaaddr = board + zdd->dma_offset; } if (!zorro_request_device(z, zdd->name)) { pr_err("cannot reserve region 0x%lx, abort\n", board); err = -EBUSY; goto fail_free_zep; } host = scsi_host_alloc(tpnt, sizeof(struct esp)); if (!host) { pr_err("No host detected; board configuration problem?\n"); err = -ENOMEM; goto fail_release_device; } host->base = ioaddr; host->this_id = 7; esp = shost_priv(host); esp->host = host; esp->dev = &z->dev; esp->scsi_id = host->this_id; esp->scsi_id_mask = (1 << esp->scsi_id); esp->cfreq = 40000000; zep->esp = esp; dev_set_drvdata(esp->dev, zep); /* additional setup required for Fastlane */ if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { /* map full address space up to ESP base for DMA */ zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1); if (!zep->board_base) { pr_err("Cannot allocate board address space\n"); err = -ENOMEM; goto fail_free_host; } /* initialize DMA control shadow register */ zep->ctrl_data = (FASTLANE_DMA_FCODE | FASTLANE_DMA_EDI | FASTLANE_DMA_ESI); } esp->ops = zdd->esp_ops; if (ioaddr > 0xffffff) esp->regs = ioremap(ioaddr, 0x20); else /* ZorroII address space remapped nocache by early startup */ esp->regs = ZTWO_VADDR(ioaddr); if (!esp->regs) { err = -ENOMEM; goto fail_unmap_fastlane; } esp->fifo_reg = esp->regs + ESP_FDATA * 4; /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */ if (zdd->scsi_option) { zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1); if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) { err = -ENODEV; goto fail_unmap_regs; } } if (zep->zorro3) { /* * Only Fastlane Z3 for now - add switch for correct struct * dma_registers size if adding any more */ esp->dma_regs = ioremap(dmaaddr, sizeof(struct fastlane_dma_registers)); } else /* ZorroII address space remapped nocache by early startup */ esp->dma_regs = ZTWO_VADDR(dmaaddr); if (!esp->dma_regs) { err = -ENOMEM; goto fail_unmap_regs; } esp->command_block = dma_alloc_coherent(esp->dev, 16, &esp->command_block_dma, GFP_KERNEL); if (!esp->command_block) { err = -ENOMEM; goto fail_unmap_dma_regs; } host->irq = IRQ_AMIGA_PORTS; err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Amiga Zorro ESP", esp); if (err < 0) { err = -ENODEV; goto fail_free_command_block; } /* register the chip */ err = scsi_esp_register(esp); if (err) { err = -ENOMEM; goto fail_free_irq; } return 0; fail_free_irq: free_irq(host->irq, esp); fail_free_command_block: dma_free_coherent(esp->dev, 16, esp->command_block, esp->command_block_dma); fail_unmap_dma_regs: if (zep->zorro3) iounmap(esp->dma_regs); fail_unmap_regs: if (ioaddr > 0xffffff) iounmap(esp->regs); fail_unmap_fastlane: if (zep->zorro3) iounmap(zep->board_base); fail_free_host: scsi_host_put(host); fail_release_device: zorro_release_device(z); fail_free_zep: kfree(zep); return err; } static void zorro_esp_remove(struct zorro_dev *z) { struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev); struct esp *esp = zep->esp; struct Scsi_Host *host = esp->host; scsi_esp_unregister(esp); free_irq(host->irq, esp); dma_free_coherent(esp->dev, 16, esp->command_block, esp->command_block_dma); if (zep->zorro3) { iounmap(zep->board_base); iounmap(esp->dma_regs); } if (host->base > 0xffffff) iounmap(esp->regs); 
scsi_host_put(host); zorro_release_device(z); kfree(zep); } static struct zorro_driver zorro_esp_driver = { .name = KBUILD_MODNAME, .id_table = zorro_esp_zorro_tbl, .probe = zorro_esp_probe, .remove = zorro_esp_remove, }; static int __init zorro_esp_scsi_init(void) { return zorro_register_driver(&zorro_esp_driver); } static void __exit zorro_esp_scsi_exit(void) { zorro_unregister_driver(&zorro_esp_driver); } module_init(zorro_esp_scsi_init); module_exit(zorro_esp_scsi_exit);
linux-master
drivers/scsi/zorro_esp.c
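A recurring pattern in the Blizzard send_dma_cmd() variants above: the bus address is shifted right by one before being handed to the board, bit 31 (DMA_WRITE) selects the transfer direction, and only the low 16 bits of the length reach the ESP through ESP_TCLOW/ESP_TCMED (hence the 64 KiB dma_length_limit). The stand-alone sketch below restates that encoding as ordinary user-space C; the helper names are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define DMA_WRITE 0x80000000u	/* direction bit, as in the driver */

/*
 * Hypothetical helper: encode a Blizzard DMA descriptor word.  "write"
 * follows the driver's convention: 1 = DMA receive (device to memory,
 * bit cleared), 0 = DMA send (memory to device, bit set).
 */
static uint32_t blz_encode_dma_addr(uint32_t phys_addr, int write)
{
	uint32_t word = phys_addr >> 1;		/* board latches address / 2 */

	if (write)
		word &= ~DMA_WRITE;
	else
		word |= DMA_WRITE;

	return word;
}

/* The ESP transfer counter is only 16 bits wide: TCLOW, then TCMED. */
static void esp_split_count(uint32_t esp_count, uint8_t *tclow, uint8_t *tcmed)
{
	*tclow = esp_count & 0xff;
	*tcmed = (esp_count >> 8) & 0xff;
}

int main(void)
{
	uint8_t lo, med;
	uint32_t word = blz_encode_dma_addr(0x00200000, 1);	/* example address */

	esp_split_count(512, &lo, &med);
	printf("descriptor 0x%08x, TCLOW 0x%02x, TCMED 0x%02x\n",
	       (unsigned int)word, (unsigned int)lo, (unsigned int)med);
	return 0;
}

The byte-at-a-time writes to dma_latch/dma_addr in the driver then push this descriptor word into the board MSB first, which is why the Blizzard 1230 path writes the top byte twice (once to the latch, once to the address register).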
// SPDX-License-Identifier: GPL-2.0-or-later /* * Parallel SCSI (SPI) transport specific attributes exported to sysfs. * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2004, 2005 James Bottomley <[email protected]> */ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/suspend.h> #include <scsi/scsi.h> #include "scsi_priv.h" #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always * on" attributes */ #define SPI_HOST_ATTRS 1 #define SPI_MAX_ECHO_BUFFER_SIZE 4096 #define DV_LOOPS 3 #define DV_TIMEOUT (10*HZ) #define DV_RETRIES 3 /* should only need at most * two cc/ua clears */ /* Our blacklist flags */ enum { SPI_BLIST_NOIUS = (__force blist_flags_t)0x1, }; /* blacklist table, modelled on scsi_devinfo.c */ static struct { char *vendor; char *model; blist_flags_t flags; } spi_static_device_list[] __initdata = { {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, {NULL, NULL, 0} }; /* Private data accessors (keep these out of the header file) */ #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) struct spi_internal { struct scsi_transport_template t; struct spi_function_template *f; }; #define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) static const int ppr_to_ps[] = { /* The PPR values 0-6 are reserved, fill them in when * the committee defines them */ -1, /* 0x00 */ -1, /* 0x01 */ -1, /* 0x02 */ -1, /* 0x03 */ -1, /* 0x04 */ -1, /* 0x05 */ -1, /* 0x06 */ 3125, /* 0x07 */ 6250, /* 0x08 */ 12500, /* 0x09 */ 25000, /* 0x0a */ 30300, /* 0x0b */ 50000, /* 0x0c */ }; /* The PPR values at which you calculate the period in ns by multiplying * by 4 */ #define SPI_STATIC_PPR 0x0c static int sprint_frac(char *dest, int value, int denom) { int frac = value % denom; int result = sprintf(dest, "%d", value / denom); if (frac == 0) return result; dest[result++] = '.'; do { denom /= 10; sprintf(dest + result, "%d", frac / denom); result++; frac %= denom; } while (frac); dest[result++] = '\0'; return result; } static int spi_execute(struct scsi_device *sdev, const void *cmd, enum req_op op, void *buffer, unsigned int bufflen, struct scsi_sense_hdr *sshdr) { int i, result; struct scsi_sense_hdr sshdr_tmp; blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .req_flags = BLK_MQ_REQ_PM, .sshdr = sshdr ? : &sshdr_tmp, }; sshdr = exec_args.sshdr; for(i = 0; i < DV_RETRIES; i++) { /* * The purpose of the RQF_PM flag below is to bypass the * SDEV_QUIESCE state. 
*/ result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, DV_TIMEOUT, 1, &exec_args); if (result < 0 || !scsi_sense_valid(sshdr) || sshdr->sense_key != UNIT_ATTENTION) break; } return result; } static struct { enum spi_signal_type value; char *name; } signal_types[] = { { SPI_SIGNAL_UNKNOWN, "unknown" }, { SPI_SIGNAL_SE, "SE" }, { SPI_SIGNAL_LVD, "LVD" }, { SPI_SIGNAL_HVD, "HVD" }, }; static inline const char *spi_signal_to_string(enum spi_signal_type type) { int i; for (i = 0; i < ARRAY_SIZE(signal_types); i++) { if (type == signal_types[i].value) return signal_types[i].name; } return NULL; } static inline enum spi_signal_type spi_signal_to_value(const char *name) { int i, len; for (i = 0; i < ARRAY_SIZE(signal_types); i++) { len = strlen(signal_types[i].name); if (strncmp(name, signal_types[i].name, len) == 0 && (name[len] == '\n' || name[len] == '\0')) return signal_types[i].value; } return SPI_SIGNAL_UNKNOWN; } static int spi_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; return 0; } static int spi_host_configure(struct transport_container *tc, struct device *dev, struct device *cdev); static DECLARE_TRANSPORT_CLASS(spi_host_class, "spi_host", spi_host_setup, NULL, spi_host_configure); static int spi_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &spi_host_class.class) return 0; return &shost->transportt->host_attrs.ac == cont; } static int spi_target_configure(struct transport_container *tc, struct device *dev, struct device *cdev); static int spi_device_configure(struct transport_container *tc, struct device *dev, struct device *cdev) { struct scsi_device *sdev = to_scsi_device(dev); struct scsi_target *starget = sdev->sdev_target; blist_flags_t bflags; bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], &sdev->inquiry[16], SCSI_DEVINFO_SPI); /* Populate the target capability fields with the values * gleaned from the device inquiry */ spi_support_sync(starget) = scsi_device_sync(sdev); spi_support_wide(starget) = scsi_device_wide(sdev); spi_support_dt(starget) = scsi_device_dt(sdev); spi_support_dt_only(starget) = scsi_device_dt_only(sdev); spi_support_ius(starget) = scsi_device_ius(sdev); if (bflags & SPI_BLIST_NOIUS) { dev_info(dev, "Information Units disabled by blacklist\n"); spi_support_ius(starget) = 0; } spi_support_qas(starget) = scsi_device_qas(sdev); return 0; } static int spi_setup_transport_attrs(struct transport_container *tc, struct device *dev, struct device *cdev) { struct scsi_target *starget = to_scsi_target(dev); spi_period(starget) = -1; /* illegal value */ spi_min_period(starget) = 0; spi_offset(starget) = 0; /* async */ spi_max_offset(starget) = 255; spi_width(starget) = 0; /* narrow */ spi_max_width(starget) = 1; spi_iu(starget) = 0; /* no IU */ spi_max_iu(starget) = 1; spi_dt(starget) = 0; /* ST */ spi_qas(starget) = 0; spi_max_qas(starget) = 1; spi_wr_flow(starget) = 0; spi_rd_strm(starget) = 0; spi_rti(starget) = 0; spi_pcomp_en(starget) = 0; spi_hold_mcs(starget) = 0; spi_dv_pending(starget) = 0; spi_dv_in_progress(starget) = 0; spi_initial_dv(starget) = 0; mutex_init(&spi_dv_mutex(starget)); return 0; } #define spi_transport_show_simple(field, format_string) \ \ static ssize_t \ show_spi_transport_##field(struct device *dev, \ struct 
device_attribute *attr, char *buf) \ { \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct spi_transport_attrs *tp; \ \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ return snprintf(buf, 20, format_string, tp->field); \ } #define spi_transport_store_simple(field, format_string) \ \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct spi_transport_attrs *tp; \ \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ val = simple_strtoul(buf, NULL, 0); \ tp->field = val; \ return count; \ } #define spi_transport_show_function(field, format_string) \ \ static ssize_t \ show_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_transport_attrs *tp; \ struct spi_internal *i = to_spi_internal(shost->transportt); \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ if (i->f->get_##field) \ i->f->get_##field(starget); \ return snprintf(buf, 20, format_string, tp->field); \ } #define spi_transport_store_function(field, format_string) \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_internal *i = to_spi_internal(shost->transportt); \ \ if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ i->f->set_##field(starget, val); \ return count; \ } #define spi_transport_store_max(field, format_string) \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_internal *i = to_spi_internal(shost->transportt); \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ val = tp->max_##field; \ i->f->set_##field(starget, val); \ return count; \ } #define spi_transport_rd_attr(field, format_string) \ spi_transport_show_function(field, format_string) \ spi_transport_store_function(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); #define spi_transport_simple_attr(field, format_string) \ spi_transport_show_simple(field, format_string) \ spi_transport_store_simple(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); #define spi_transport_max_attr(field, format_string) \ spi_transport_show_function(field, format_string) \ spi_transport_store_max(field, format_string) \ spi_transport_simple_attr(max_##field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); /* The Parallel SCSI Tranport Attributes: */ spi_transport_max_attr(offset, "%d\n"); spi_transport_max_attr(width, "%d\n"); spi_transport_max_attr(iu, "%d\n"); spi_transport_rd_attr(dt, "%d\n"); spi_transport_max_attr(qas, "%d\n"); 
spi_transport_rd_attr(wr_flow, "%d\n"); spi_transport_rd_attr(rd_strm, "%d\n"); spi_transport_rd_attr(rti, "%d\n"); spi_transport_rd_attr(pcomp_en, "%d\n"); spi_transport_rd_attr(hold_mcs, "%d\n"); /* we only care about the first child device that's a real SCSI device * so we return 1 to terminate the iteration when we find it */ static int child_iter(struct device *dev, void *data) { if (!scsi_is_sdev_device(dev)) return 0; spi_dv_device(to_scsi_device(dev)); return 1; } static ssize_t store_spi_revalidate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_target *starget = transport_class_to_starget(dev); device_for_each_child(&starget->dev, NULL, child_iter); return count; } static DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate); /* Translate the period into ns according to the current spec * for SDTR/PPR messages */ static int period_to_str(char *buf, int period) { int len, picosec; if (period < 0 || period > 0xff) { picosec = -1; } else if (period <= SPI_STATIC_PPR) { picosec = ppr_to_ps[period]; } else { picosec = period * 4000; } if (picosec == -1) { len = sprintf(buf, "reserved"); } else { len = sprint_frac(buf, picosec, 1000); } return len; } static ssize_t show_spi_transport_period_helper(char *buf, int period) { int len = period_to_str(buf, period); buf[len++] = '\n'; buf[len] = '\0'; return len; } static ssize_t store_spi_transport_period_helper(struct device *dev, const char *buf, size_t count, int *periodp) { int j, picosec, period = -1; char *endp; picosec = simple_strtoul(buf, &endp, 10) * 1000; if (*endp == '.') { int mult = 100; do { endp++; if (!isdigit(*endp)) break; picosec += (*endp - '0') * mult; mult /= 10; } while (mult > 0); } for (j = 0; j <= SPI_STATIC_PPR; j++) { if (ppr_to_ps[j] < picosec) continue; period = j; break; } if (period == -1) period = picosec / 4000; if (period > 0xff) period = 0xff; *periodp = period; return count; } static ssize_t show_spi_transport_period(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_target *starget = transport_class_to_starget(dev); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct spi_internal *i = to_spi_internal(shost->transportt); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; if (i->f->get_period) i->f->get_period(starget); return show_spi_transport_period_helper(buf, tp->period); } static ssize_t store_spi_transport_period(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_target *starget = transport_class_to_starget(cdev); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct spi_internal *i = to_spi_internal(shost->transportt); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; int period, retval; if (!i->f->set_period) return -EINVAL; retval = store_spi_transport_period_helper(cdev, buf, count, &period); if (period < tp->min_period) period = tp->min_period; i->f->set_period(starget, period); return retval; } static DEVICE_ATTR(period, S_IRUGO, show_spi_transport_period, store_spi_transport_period); static ssize_t show_spi_transport_min_period(struct device *cdev, struct device_attribute *attr, char *buf) { struct scsi_target *starget = transport_class_to_starget(cdev); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct spi_internal *i = to_spi_internal(shost->transportt); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; if 
(!i->f->set_period) return -EINVAL; return show_spi_transport_period_helper(buf, tp->min_period); } static ssize_t store_spi_transport_min_period(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_target *starget = transport_class_to_starget(cdev); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; return store_spi_transport_period_helper(cdev, buf, count, &tp->min_period); } static DEVICE_ATTR(min_period, S_IRUGO, show_spi_transport_min_period, store_spi_transport_min_period); static ssize_t show_spi_host_signalling(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(cdev); struct spi_internal *i = to_spi_internal(shost->transportt); if (i->f->get_signalling) i->f->get_signalling(shost); return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost))); } static ssize_t store_spi_host_signalling(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct spi_internal *i = to_spi_internal(shost->transportt); enum spi_signal_type type = spi_signal_to_value(buf); if (!i->f->set_signalling) return -EINVAL; if (type != SPI_SIGNAL_UNKNOWN) i->f->set_signalling(shost, type); return count; } static DEVICE_ATTR(signalling, S_IRUGO, show_spi_host_signalling, store_spi_host_signalling); static ssize_t show_spi_host_width(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(cdev); return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow"); } static DEVICE_ATTR(host_width, S_IRUGO, show_spi_host_width, NULL); static ssize_t show_spi_host_hba_id(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(cdev); return sprintf(buf, "%d\n", shost->this_id); } static DEVICE_ATTR(hba_id, S_IRUGO, show_spi_host_hba_id, NULL); #define DV_SET(x, y) \ if(i->f->set_##x) \ i->f->set_##x(sdev->sdev_target, y) enum spi_compare_returns { SPI_COMPARE_SUCCESS, SPI_COMPARE_FAILURE, SPI_COMPARE_SKIP_TEST, }; /* This is for read/write Domain Validation: If the device supports * an echo buffer, we do read/write tests to it */ static enum spi_compare_returns spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer, u8 *ptr, const int retries) { int len = ptr - buffer; int j, k, r, result; unsigned int pattern = 0x0000ffff; struct scsi_sense_hdr sshdr; const char spi_write_buffer[] = { WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 }; const char spi_read_buffer[] = { READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 }; /* set up the pattern buffer. Doesn't matter if we spill * slightly beyond since that's where the read buffer is */ for (j = 0; j < len; ) { /* fill the buffer with counting (test a) */ for ( ; j < min(len, 32); j++) buffer[j] = j; k = j; /* fill the buffer with alternating words of 0x0 and * 0xffff (test b) */ for ( ; j < min(len, k + 32); j += 2) { u16 *word = (u16 *)&buffer[j]; *word = (j & 0x02) ? 0x0000 : 0xffff; } k = j; /* fill with crosstalk (alternating 0x5555 0xaaa) * (test c) */ for ( ; j < min(len, k + 32); j += 2) { u16 *word = (u16 *)&buffer[j]; *word = (j & 0x02) ? 0x5555 : 0xaaaa; } k = j; /* fill with shifting bits (test d) */ for ( ; j < min(len, k + 32); j += 4) { u32 *word = (unsigned int *)&buffer[j]; u32 roll = (pattern & 0x80000000) ? 
1 : 0; *word = pattern; pattern = (pattern << 1) | roll; } /* don't bother with random data (test e) */ } for (r = 0; r < retries; r++) { result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT, buffer, len, &sshdr); if(result || !scsi_device_online(sdev)) { scsi_device_set_state(sdev, SDEV_QUIESCE); if (scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST /* INVALID FIELD IN CDB */ && sshdr.asc == 0x24 && sshdr.ascq == 0x00) /* This would mean that the drive lied * to us about supporting an echo * buffer (unfortunately some Western * Digital drives do precisely this) */ return SPI_COMPARE_SKIP_TEST; sdev_printk(KERN_ERR, sdev, "Write Buffer failure %x\n", result); return SPI_COMPARE_FAILURE; } memset(ptr, 0, len); spi_execute(sdev, spi_read_buffer, REQ_OP_DRV_IN, ptr, len, NULL); scsi_device_set_state(sdev, SDEV_QUIESCE); if (memcmp(buffer, ptr, len) != 0) return SPI_COMPARE_FAILURE; } return SPI_COMPARE_SUCCESS; } /* This is for the simplest form of Domain Validation: a read test * on the inquiry data from the device */ static enum spi_compare_returns spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, u8 *ptr, const int retries) { int r, result; const int len = sdev->inquiry_len; const char spi_inquiry[] = { INQUIRY, 0, 0, 0, len, 0 }; for (r = 0; r < retries; r++) { memset(ptr, 0, len); result = spi_execute(sdev, spi_inquiry, REQ_OP_DRV_IN, ptr, len, NULL); if(result || !scsi_device_online(sdev)) { scsi_device_set_state(sdev, SDEV_QUIESCE); return SPI_COMPARE_FAILURE; } /* If we don't have the inquiry data already, the * first read gets it */ if (ptr == buffer) { ptr += len; --r; continue; } if (memcmp(buffer, ptr, len) != 0) /* failure */ return SPI_COMPARE_FAILURE; } return SPI_COMPARE_SUCCESS; } static enum spi_compare_returns spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, enum spi_compare_returns (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; int period = 0, prevperiod = 0; enum spi_compare_returns retval; for (;;) { int newperiod; retval = compare_fn(sdev, buffer, ptr, DV_LOOPS); if (retval == SPI_COMPARE_SUCCESS || retval == SPI_COMPARE_SKIP_TEST) break; /* OK, retrain, fallback */ if (i->f->get_iu) i->f->get_iu(starget); if (i->f->get_qas) i->f->get_qas(starget); if (i->f->get_period) i->f->get_period(sdev->sdev_target); /* Here's the fallback sequence; first try turning off * IU, then QAS (if we can control them), then finally * fall down the periods */ if (i->f->set_iu && spi_iu(starget)) { starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n"); DV_SET(iu, 0); } else if (i->f->set_qas && spi_qas(starget)) { starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n"); DV_SET(qas, 0); } else { newperiod = spi_period(starget); period = newperiod > period ? 
newperiod : period; if (period < 0x0d) period++; else period += period >> 1; if (unlikely(period > 0xff || period == prevperiod)) { /* Total failure; set to async and return */ starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n"); DV_SET(offset, 0); return SPI_COMPARE_FAILURE; } starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n"); DV_SET(period, period); prevperiod = period; } } return retval; } static int spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer) { int l, result; /* first off do a test unit ready. This can error out * because of reservations or some other reason. If it * fails, the device won't let us write to the echo buffer * so just return failure */ static const char spi_test_unit_ready[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; static const char spi_read_buffer_descriptor[] = { READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0 }; /* We send a set of three TURs to clear any outstanding * unit attention conditions if they exist (Otherwise the * buffer tests won't be happy). If the TUR still fails * (reservation conflict, device not ready, etc) just * skip the write tests */ for (l = 0; ; l++) { result = spi_execute(sdev, spi_test_unit_ready, REQ_OP_DRV_IN, NULL, 0, NULL); if(result) { if(l >= 3) return 0; } else { /* TUR succeeded */ break; } } result = spi_execute(sdev, spi_read_buffer_descriptor, REQ_OP_DRV_IN, buffer, 4, NULL); if (result) /* Device has no echo buffer */ return 0; return buffer[3] + ((buffer[2] & 0x1f) << 8); } static void spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; struct Scsi_Host *shost = sdev->host; int len = sdev->inquiry_len; int min_period = spi_min_period(starget); int max_width = spi_max_width(starget); /* first set us up for narrow async */ DV_SET(offset, 0); DV_SET(width, 0); if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) != SPI_COMPARE_SUCCESS) { starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); /* FIXME: should probably offline the device here? */ return; } if (!spi_support_wide(starget)) { spi_max_width(starget) = 0; max_width = 0; } /* test width */ if (i->f->set_width && max_width) { i->f->set_width(starget, 1); if (spi_dv_device_compare_inquiry(sdev, buffer, buffer + len, DV_LOOPS) != SPI_COMPARE_SUCCESS) { starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); i->f->set_width(starget, 0); /* Make sure we don't force wide back on by asking * for a transfer period that requires it */ max_width = 0; if (min_period < 10) min_period = 10; } } if (!i->f->set_period) return; /* device can't handle synchronous */ if (!spi_support_sync(starget) && !spi_support_dt(starget)) return; /* len == -1 is the signal that we need to ascertain the * presence of an echo buffer before trying to use it. len == * 0 means we don't have an echo buffer */ len = -1; retry: /* now set up to the maximum */ DV_SET(offset, spi_max_offset(starget)); DV_SET(period, min_period); /* try QAS requests; this should be harmless to set if the * target supports it */ if (spi_support_qas(starget) && spi_max_qas(starget)) { DV_SET(qas, 1); } else { DV_SET(qas, 0); } if (spi_support_ius(starget) && spi_max_iu(starget) && min_period < 9) { /* This u320 (or u640). 
Set IU transfers */ DV_SET(iu, 1); /* Then set the optional parameters */ DV_SET(rd_strm, 1); DV_SET(wr_flow, 1); DV_SET(rti, 1); if (min_period == 8) DV_SET(pcomp_en, 1); } else { DV_SET(iu, 0); } /* now that we've done all this, actually check the bus * signal type (if known). Some devices are stupid on * a SE bus and still claim they can try LVD only settings */ if (i->f->get_signalling) i->f->get_signalling(shost); if (spi_signalling(shost) == SPI_SIGNAL_SE || spi_signalling(shost) == SPI_SIGNAL_HVD || !spi_support_dt(starget)) { DV_SET(dt, 0); } else { DV_SET(dt, 1); } /* set width last because it will pull all the other * parameters down to required values */ DV_SET(width, max_width); /* Do the read only INQUIRY tests */ spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, spi_dv_device_compare_inquiry); /* See if we actually managed to negotiate and sustain DT */ if (i->f->get_dt) i->f->get_dt(starget); /* see if the device has an echo buffer. If it does we can do * the SPI pattern write tests. Because of some broken * devices, we *only* try this on a device that has actually * negotiated DT */ if (len == -1 && spi_dt(starget)) len = spi_dv_device_get_echo_buffer(sdev, buffer); if (len <= 0) { starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n"); return; } if (len > SPI_MAX_ECHO_BUFFER_SIZE) { starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE); len = SPI_MAX_ECHO_BUFFER_SIZE; } if (spi_dv_retrain(sdev, buffer, buffer + len, spi_dv_device_echo_buffer) == SPI_COMPARE_SKIP_TEST) { /* OK, the stupid drive can't do a write echo buffer * test after all, fall back to the read tests */ len = 0; goto retry; } } /** spi_dv_device - Do Domain Validation on the device * @sdev: scsi device to validate * * Performs the domain validation on the given device in the * current execution thread. Since DV operations may sleep, * the current thread must have user context. Also no SCSI * related locks that would deadlock I/O issued by the DV may * be held. */ void spi_dv_device(struct scsi_device *sdev) { struct scsi_target *starget = sdev->sdev_target; const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; unsigned int sleep_flags; u8 *buffer; /* * Because this function and the power management code both call * scsi_device_quiesce(), it is not safe to perform domain validation * while suspend or resume is in progress. Hence the * lock/unlock_system_sleep() calls. 
*/ sleep_flags = lock_system_sleep(); if (scsi_autopm_get_device(sdev)) goto unlock_system_sleep; if (unlikely(spi_dv_in_progress(starget))) goto put_autopm; if (unlikely(scsi_device_get(sdev))) goto put_autopm; spi_dv_in_progress(starget) = 1; buffer = kzalloc(len, GFP_KERNEL); if (unlikely(!buffer)) goto put_sdev; /* We need to verify that the actual device will quiesce; the * later target quiesce is just a nice to have */ if (unlikely(scsi_device_quiesce(sdev))) goto free_buffer; scsi_target_quiesce(starget); spi_dv_pending(starget) = 1; mutex_lock(&spi_dv_mutex(starget)); starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n"); spi_dv_device_internal(sdev, buffer); starget_printk(KERN_INFO, starget, "Ending Domain Validation\n"); mutex_unlock(&spi_dv_mutex(starget)); spi_dv_pending(starget) = 0; scsi_target_resume(starget); spi_initial_dv(starget) = 1; free_buffer: kfree(buffer); put_sdev: spi_dv_in_progress(starget) = 0; scsi_device_put(sdev); put_autopm: scsi_autopm_put_device(sdev); unlock_system_sleep: unlock_system_sleep(sleep_flags); } EXPORT_SYMBOL(spi_dv_device); struct work_queue_wrapper { struct work_struct work; struct scsi_device *sdev; }; static void spi_dv_device_work_wrapper(struct work_struct *work) { struct work_queue_wrapper *wqw = container_of(work, struct work_queue_wrapper, work); struct scsi_device *sdev = wqw->sdev; kfree(wqw); spi_dv_device(sdev); spi_dv_pending(sdev->sdev_target) = 0; scsi_device_put(sdev); } /** * spi_schedule_dv_device - schedule domain validation to occur on the device * @sdev: The device to validate * * Identical to spi_dv_device() above, except that the DV will be * scheduled to occur in a workqueue later. All memory allocations * are atomic, so may be called from any context including those holding * SCSI locks. */ void spi_schedule_dv_device(struct scsi_device *sdev) { struct work_queue_wrapper *wqw = kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); if (unlikely(!wqw)) return; if (unlikely(spi_dv_pending(sdev->sdev_target))) { kfree(wqw); return; } /* Set pending early (dv_device doesn't check it, only sets it) */ spi_dv_pending(sdev->sdev_target) = 1; if (unlikely(scsi_device_get(sdev))) { kfree(wqw); spi_dv_pending(sdev->sdev_target) = 0; return; } INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); wqw->sdev = sdev; schedule_work(&wqw->work); } EXPORT_SYMBOL(spi_schedule_dv_device); /** * spi_display_xfer_agreement - Print the current target transfer agreement * @starget: The target for which to display the agreement * * Each SPI port is required to maintain a transfer agreement for each * other port on the bus. This function prints a one-line summary of * the current agreement; more detailed information is available in sysfs. 
*/ void spi_display_xfer_agreement(struct scsi_target *starget) { struct spi_transport_attrs *tp; tp = (struct spi_transport_attrs *)&starget->starget_data; if (tp->offset > 0 && tp->period > 0) { unsigned int picosec, kb100; char *scsi = "FAST-?"; char tmp[8]; if (tp->period <= SPI_STATIC_PPR) { picosec = ppr_to_ps[tp->period]; switch (tp->period) { case 7: scsi = "FAST-320"; break; case 8: scsi = "FAST-160"; break; case 9: scsi = "FAST-80"; break; case 10: case 11: scsi = "FAST-40"; break; case 12: scsi = "FAST-20"; break; } } else { picosec = tp->period * 4000; if (tp->period < 25) scsi = "FAST-20"; else if (tp->period < 50) scsi = "FAST-10"; else scsi = "FAST-5"; } kb100 = (10000000 + picosec / 2) / picosec; if (tp->width) kb100 *= 2; sprint_frac(tmp, picosec, 1000); dev_info(&starget->dev, "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n", scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, tp->dt ? "DT" : "ST", tp->iu ? " IU" : "", tp->qas ? " QAS" : "", tp->rd_strm ? " RDSTRM" : "", tp->rti ? " RTI" : "", tp->wr_flow ? " WRFLOW" : "", tp->pcomp_en ? " PCOMP" : "", tp->hold_mcs ? " HMCS" : "", tmp, tp->offset); } else { dev_info(&starget->dev, "%sasynchronous\n", tp->width ? "wide " : ""); } } EXPORT_SYMBOL(spi_display_xfer_agreement); int spi_populate_width_msg(unsigned char *msg, int width) { msg[0] = EXTENDED_MESSAGE; msg[1] = 2; msg[2] = EXTENDED_WDTR; msg[3] = width; return 4; } EXPORT_SYMBOL_GPL(spi_populate_width_msg); int spi_populate_sync_msg(unsigned char *msg, int period, int offset) { msg[0] = EXTENDED_MESSAGE; msg[1] = 3; msg[2] = EXTENDED_SDTR; msg[3] = period; msg[4] = offset; return 5; } EXPORT_SYMBOL_GPL(spi_populate_sync_msg); int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width, int options) { msg[0] = EXTENDED_MESSAGE; msg[1] = 6; msg[2] = EXTENDED_PPR; msg[3] = period; msg[4] = 0; msg[5] = offset; msg[6] = width; msg[7] = options; return 8; } EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); /** * spi_populate_tag_msg - place a tag message in a buffer * @msg: pointer to the area to place the tag * @cmd: pointer to the scsi command for the tag * * Notes: * designed to create the correct type of tag message for the * particular request. Returns the size of the tag message. * May return 0 if TCQ is disabled for this device. 
**/ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) { if (cmd->flags & SCMD_TAGGED) { *msg++ = SIMPLE_QUEUE_TAG; *msg++ = scsi_cmd_to_rq(cmd)->tag; return 2; } return 0; } EXPORT_SYMBOL_GPL(spi_populate_tag_msg); #ifdef CONFIG_SCSI_CONSTANTS static const char * const one_byte_msgs[] = { /* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", /* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error", /* 0x06 */ "Abort Task Set", "Message Reject", "Nop", "Message Parity Error", /* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag", /* 0x0c */ "Target Reset", "Abort Task", "Clear Task Set", /* 0x0f */ "Initiate Recovery", "Release Recovery", /* 0x11 */ "Terminate Process", "Continue Task", "Target Transfer Disable", /* 0x14 */ NULL, NULL, "Clear ACA", "LUN Reset" }; static const char * const two_byte_msgs[] = { /* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag", /* 0x23 */ "Ignore Wide Residue", "ACA" }; static const char * const extended_msgs[] = { /* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request", /* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request", /* 0x04 */ "Parallel Protocol Request", "Modify Bidirectional Data Pointer" }; static void print_nego(const unsigned char *msg, int per, int off, int width) { if (per) { char buf[20]; period_to_str(buf, msg[per]); printk("period = %s ns ", buf); } if (off) printk("offset = %d ", msg[off]); if (width) printk("width = %d ", 8 << msg[width]); } static void print_ptr(const unsigned char *msg, int msb, const char *desc) { int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) | msg[msb+3]; printk("%s = %d ", desc, ptr); } int spi_print_msg(const unsigned char *msg) { int len = 1, i; if (msg[0] == EXTENDED_MESSAGE) { len = 2 + msg[1]; if (len == 2) len += 256; if (msg[2] < ARRAY_SIZE(extended_msgs)) printk ("%s ", extended_msgs[msg[2]]); else printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]); switch (msg[2]) { case EXTENDED_MODIFY_DATA_POINTER: print_ptr(msg, 3, "pointer"); break; case EXTENDED_SDTR: print_nego(msg, 3, 4, 0); break; case EXTENDED_WDTR: print_nego(msg, 0, 0, 3); break; case EXTENDED_PPR: print_nego(msg, 3, 5, 6); break; case EXTENDED_MODIFY_BIDI_DATA_PTR: print_ptr(msg, 3, "out"); print_ptr(msg, 7, "in"); break; default: for (i = 2; i < len; ++i) printk("%02x ", msg[i]); } /* Identify */ } else if (msg[0] & 0x80) { printk("Identify disconnect %sallowed %s %d ", (msg[0] & 0x40) ? "" : "not ", (msg[0] & 0x20) ? 
"target routine" : "lun", msg[0] & 0x7); /* Normal One byte */ } else if (msg[0] < 0x1f) { if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]]) printk("%s ", one_byte_msgs[msg[0]]); else printk("reserved (%02x) ", msg[0]); } else if (msg[0] == 0x55) { printk("QAS Request "); /* Two byte */ } else if (msg[0] <= 0x2f) { if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs)) printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], msg[1]); else printk("reserved two byte (%02x %02x) ", msg[0], msg[1]); len = 2; } else printk("reserved "); return len; } EXPORT_SYMBOL(spi_print_msg); #else /* ifndef CONFIG_SCSI_CONSTANTS */ int spi_print_msg(const unsigned char *msg) { int len = 1, i; if (msg[0] == EXTENDED_MESSAGE) { len = 2 + msg[1]; if (len == 2) len += 256; for (i = 0; i < len; ++i) printk("%02x ", msg[i]); /* Identify */ } else if (msg[0] & 0x80) { printk("%02x ", msg[0]); /* Normal One byte */ } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) { printk("%02x ", msg[0]); /* Two byte */ } else if (msg[0] <= 0x2f) { printk("%02x %02x", msg[0], msg[1]); len = 2; } else printk("%02x ", msg[0]); return len; } EXPORT_SYMBOL(spi_print_msg); #endif /* ! CONFIG_SCSI_CONSTANTS */ static int spi_device_match(struct attribute_container *cont, struct device *dev) { struct scsi_device *sdev; struct Scsi_Host *shost; struct spi_internal *i; if (!scsi_is_sdev_device(dev)) return 0; sdev = to_scsi_device(dev); shost = sdev->host; if (!shost->transportt || shost->transportt->host_attrs.ac.class != &spi_host_class.class) return 0; /* Note: this class has no device attributes, so it has * no per-HBA allocation and thus we don't need to distinguish * the attribute containers for the device */ i = to_spi_internal(shost->transportt); if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) return 0; return 1; } static int spi_target_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct scsi_target *starget; struct spi_internal *i; if (!scsi_is_target_device(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &spi_host_class.class) return 0; i = to_spi_internal(shost->transportt); starget = to_scsi_target(dev); if (i->f->deny_binding && i->f->deny_binding(starget)) return 0; return &i->t.target_attrs.ac == cont; } static DECLARE_TRANSPORT_CLASS(spi_transport_class, "spi_transport", spi_setup_transport_attrs, NULL, spi_target_configure); static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, spi_device_match, spi_device_configure); static struct attribute *host_attributes[] = { &dev_attr_signalling.attr, &dev_attr_host_width.attr, &dev_attr_hba_id.attr, NULL }; static struct attribute_group host_attribute_group = { .attrs = host_attributes, }; static int spi_host_configure(struct transport_container *tc, struct device *dev, struct device *cdev) { struct kobject *kobj = &cdev->kobj; struct Scsi_Host *shost = transport_class_to_shost(cdev); struct spi_internal *si = to_spi_internal(shost->transportt); struct attribute *attr = &dev_attr_signalling.attr; int rc = 0; if (si->f->set_signalling) rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR); return rc; } /* returns true if we should be showing the variable. Also * overloads the return by setting 1<<1 if the attribute should * be writeable */ #define TARGET_ATTRIBUTE_HELPER(name) \ (si->f->show_##name ? S_IRUGO : 0) | \ (si->f->set_##name ? 
S_IWUSR : 0) static umode_t target_attribute_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct scsi_target *starget = transport_class_to_starget(cdev); struct Scsi_Host *shost = transport_class_to_shost(cdev); struct spi_internal *si = to_spi_internal(shost->transportt); if (attr == &dev_attr_period.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(period); else if (attr == &dev_attr_min_period.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(period); else if (attr == &dev_attr_offset.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(offset); else if (attr == &dev_attr_max_offset.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(offset); else if (attr == &dev_attr_width.attr && spi_support_wide(starget)) return TARGET_ATTRIBUTE_HELPER(width); else if (attr == &dev_attr_max_width.attr && spi_support_wide(starget)) return TARGET_ATTRIBUTE_HELPER(width); else if (attr == &dev_attr_iu.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(iu); else if (attr == &dev_attr_max_iu.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(iu); else if (attr == &dev_attr_dt.attr && spi_support_dt(starget)) return TARGET_ATTRIBUTE_HELPER(dt); else if (attr == &dev_attr_qas.attr && spi_support_qas(starget)) return TARGET_ATTRIBUTE_HELPER(qas); else if (attr == &dev_attr_max_qas.attr && spi_support_qas(starget)) return TARGET_ATTRIBUTE_HELPER(qas); else if (attr == &dev_attr_wr_flow.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(wr_flow); else if (attr == &dev_attr_rd_strm.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(rd_strm); else if (attr == &dev_attr_rti.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(rti); else if (attr == &dev_attr_pcomp_en.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(pcomp_en); else if (attr == &dev_attr_hold_mcs.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(hold_mcs); else if (attr == &dev_attr_revalidate.attr) return S_IWUSR; return 0; } static struct attribute *target_attributes[] = { &dev_attr_period.attr, &dev_attr_min_period.attr, &dev_attr_offset.attr, &dev_attr_max_offset.attr, &dev_attr_width.attr, &dev_attr_max_width.attr, &dev_attr_iu.attr, &dev_attr_max_iu.attr, &dev_attr_dt.attr, &dev_attr_qas.attr, &dev_attr_max_qas.attr, &dev_attr_wr_flow.attr, &dev_attr_rd_strm.attr, &dev_attr_rti.attr, &dev_attr_pcomp_en.attr, &dev_attr_hold_mcs.attr, &dev_attr_revalidate.attr, NULL }; static struct attribute_group target_attribute_group = { .attrs = target_attributes, .is_visible = target_attribute_is_visible, }; static int spi_target_configure(struct transport_container *tc, struct device *dev, struct device *cdev) { struct kobject *kobj = &cdev->kobj; /* force an update based on parameters read from the device */ sysfs_update_group(kobj, &target_attribute_group); return 0; } struct scsi_transport_template * spi_attach_transport(struct spi_function_template *ft) { struct spi_internal *i = kzalloc(sizeof(struct spi_internal), GFP_KERNEL); if (unlikely(!i)) return NULL; i->t.target_attrs.ac.class = &spi_transport_class.class; i->t.target_attrs.ac.grp = &target_attribute_group; i->t.target_attrs.ac.match = spi_target_match; transport_container_register(&i->t.target_attrs); i->t.target_size = sizeof(struct spi_transport_attrs); i->t.host_attrs.ac.class = &spi_host_class.class; i->t.host_attrs.ac.grp = &host_attribute_group; 
i->t.host_attrs.ac.match = spi_host_match; transport_container_register(&i->t.host_attrs); i->t.host_size = sizeof(struct spi_host_attrs); i->f = ft; return &i->t; } EXPORT_SYMBOL(spi_attach_transport); void spi_release_transport(struct scsi_transport_template *t) { struct spi_internal *i = to_spi_internal(t); transport_container_unregister(&i->t.target_attrs); transport_container_unregister(&i->t.host_attrs); kfree(i); } EXPORT_SYMBOL(spi_release_transport); static __init int spi_transport_init(void) { int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, "SCSI Parallel Transport Class"); if (!error) { int i; for (i = 0; spi_static_device_list[i].vendor; i++) scsi_dev_info_list_add_keyed(1, /* compatible */ spi_static_device_list[i].vendor, spi_static_device_list[i].model, NULL, spi_static_device_list[i].flags, SCSI_DEVINFO_SPI); } error = transport_class_register(&spi_transport_class); if (error) return error; error = anon_transport_class_register(&spi_device_class); return transport_class_register(&spi_host_class); } static void __exit spi_transport_exit(void) { transport_class_unregister(&spi_transport_class); anon_transport_class_unregister(&spi_device_class); transport_class_unregister(&spi_host_class); scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); } MODULE_AUTHOR("Martin Hicks"); MODULE_DESCRIPTION("SPI Transport Attributes"); MODULE_LICENSE("GPL"); module_init(spi_transport_init); module_exit(spi_transport_exit);
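/*
 * Hedged usage sketch, not part of the upstream file: shows how an HBA
 * driver's message-out path could use the negotiation helpers exported
 * above.  The function name and the negotiation values (period factor
 * 0x09 = FAST-80, offset 64, wide transfers, no protocol options) are
 * illustrative assumptions only.
 */
static void __maybe_unused example_spi_ppr_usage(void)
{
	unsigned char msg[8];
	int len;

	/* build an 8-byte Parallel Protocol Request extended message */
	len = spi_populate_ppr_msg(msg, 0x09, 64, 1, 0);

	/* decode it back to the log; returns the length it consumed */
	if (spi_print_msg(msg) != len)
		printk(KERN_DEBUG "example: unexpected PPR message length\n");
}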
linux-master
drivers/scsi/scsi_transport_spi.c
/* * Linux driver for VMware's para-virtualized SCSI HBA. * * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include "vmw_pvscsi.h" #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); MODULE_AUTHOR("VMware, Inc."); MODULE_LICENSE("GPL"); MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 #define PVSCSI_DEFAULT_QUEUE_DEPTH 254 #define SGL_SIZE PAGE_SIZE struct pvscsi_sg_list { struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; }; struct pvscsi_ctx { /* * The index of the context in cmd_map serves as the context ID for a * 1-to-1 mapping completions back to requests. */ struct scsi_cmnd *cmd; struct pvscsi_sg_list *sgl; struct list_head list; dma_addr_t dataPA; dma_addr_t sensePA; dma_addr_t sglPA; struct completion *abort_cmp; }; struct pvscsi_adapter { char *mmioBase; u8 rev; bool use_msg; bool use_req_threshold; spinlock_t hw_lock; struct workqueue_struct *workqueue; struct work_struct work; struct PVSCSIRingReqDesc *req_ring; unsigned req_pages; unsigned req_depth; dma_addr_t reqRingPA; struct PVSCSIRingCmpDesc *cmp_ring; unsigned cmp_pages; dma_addr_t cmpRingPA; struct PVSCSIRingMsgDesc *msg_ring; unsigned msg_pages; dma_addr_t msgRingPA; struct PVSCSIRingsState *rings_state; dma_addr_t ringStatePA; struct pci_dev *dev; struct Scsi_Host *host; struct list_head cmd_pool; struct pvscsi_ctx *cmd_map; }; /* Command line parameters */ static int pvscsi_ring_pages; static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; static bool pvscsi_disable_msi; static bool pvscsi_disable_msix; static bool pvscsi_use_msg = true; static bool pvscsi_use_req_threshold = true; #define PVSCSI_RW (S_IRUSR | S_IWUSR) module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) "[up to 16 targets]," __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) "[for 16+ targets])"); module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")"); 
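/*
 * Illustrative note (not upstream text): ring_pages, msg_ring_pages and
 * cmd_per_lun above are ordinary module parameters, so a hypothetical
 * load line such as
 *
 *	modprobe vmw_pvscsi ring_pages=32 cmd_per_lun=64
 *
 * overrides the defaults; the values shown are arbitrary examples, and
 * ring_pages is still clamped against PVSCSI_MAX_NUM_PAGES_REQ_RING and
 * PVSCSI_MAX_NUM_PAGES_CMP_RING when the rings are allocated.
 */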
module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); module_param_named(use_req_threshold, pvscsi_use_req_threshold, bool, PVSCSI_RW); MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)"); static const struct pci_device_id pvscsi_pci_tbl[] = { { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) }, { 0 } }; MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl); static struct device * pvscsi_dev(const struct pvscsi_adapter *adapter) { return &(adapter->dev->dev); } static struct pvscsi_ctx * pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) { struct pvscsi_ctx *ctx, *end; end = &adapter->cmd_map[adapter->req_depth]; for (ctx = adapter->cmd_map; ctx < end; ctx++) if (ctx->cmd == cmd) return ctx; return NULL; } static struct pvscsi_ctx * pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd) { struct pvscsi_ctx *ctx; if (list_empty(&adapter->cmd_pool)) return NULL; ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); ctx->cmd = cmd; list_del(&ctx->list); return ctx; } static void pvscsi_release_context(struct pvscsi_adapter *adapter, struct pvscsi_ctx *ctx) { ctx->cmd = NULL; ctx->abort_cmp = NULL; list_add(&ctx->list, &adapter->cmd_pool); } /* * Map a pvscsi_ctx struct to a context ID field value; we map to a simple * non-zero integer. ctx always points to an entry in cmd_map array, hence * the return value is always >=1. 
*/ static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter, const struct pvscsi_ctx *ctx) { return ctx - adapter->cmd_map + 1; } static struct pvscsi_ctx * pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context) { return &adapter->cmd_map[context - 1]; } static void pvscsi_reg_write(const struct pvscsi_adapter *adapter, u32 offset, u32 val) { writel(val, adapter->mmioBase + offset); } static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset) { return readl(adapter->mmioBase + offset); } static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter) { return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS); } static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter, u32 val) { pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val); } static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter) { u32 intr_bits; intr_bits = PVSCSI_INTR_CMPL_MASK; if (adapter->use_msg) intr_bits |= PVSCSI_INTR_MSG_MASK; pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits); } static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter) { pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0); } static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter, u32 cmd, const void *desc, size_t len) { const u32 *ptr = desc; size_t i; len /= sizeof(*ptr); pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd); for (i = 0; i < len; i++) pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]); } static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter, const struct pvscsi_ctx *ctx) { struct PVSCSICmdDescAbortCmd cmd = { 0 }; cmd.target = ctx->cmd->device->id; cmd.context = pvscsi_map_context(adapter, ctx); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd)); } static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter) { pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0); } static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) { pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); } static int scsi_is_rw(unsigned char op) { return op == READ_6 || op == WRITE_6 || op == READ_10 || op == WRITE_10 || op == READ_12 || op == WRITE_12 || op == READ_16 || op == WRITE_16; } static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, unsigned char op) { if (scsi_is_rw(op)) { struct PVSCSIRingsState *s = adapter->rings_state; if (!adapter->use_req_threshold || s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold) pvscsi_kick_rw_io(adapter); } else { pvscsi_process_request_ring(adapter); } } static void ll_adapter_reset(const struct pvscsi_adapter *adapter) { dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); } static void ll_bus_reset(const struct pvscsi_adapter *adapter) { dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); } static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) { struct PVSCSICmdDescResetDevice cmd = { 0 }; dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target); cmd.target = target; pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof(cmd)); } static void pvscsi_create_sg(struct pvscsi_ctx *ctx, struct scatterlist *sg, unsigned count) { unsigned i; struct PVSCSISGElement *sge; BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); sge = &ctx->sgl->sge[0]; for (i = 0; i < count; i++, sg 
= sg_next(sg)) { sge[i].addr = sg_dma_address(sg); sge[i].length = sg_dma_len(sg); sge[i].flags = 0; } } /* * Map all data buffers for a command into PCI space and * setup the scatter/gather list if needed. */ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, struct PVSCSIRingReqDesc *e) { unsigned count; unsigned bufflen = scsi_bufflen(cmd); struct scatterlist *sg; e->dataLen = bufflen; e->dataAddr = 0; if (bufflen == 0) return 0; sg = scsi_sglist(cmd); count = scsi_sg_count(cmd); if (count != 0) { int segs = scsi_dma_map(cmd); if (segs == -ENOMEM) { scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map cmd sglist for DMA.\n"); return -ENOMEM; } else if (segs > 1) { pvscsi_create_sg(ctx, sg, segs); e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; ctx->sglPA = dma_map_single(&adapter->dev->dev, ctx->sgl, SGL_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) { scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: Failed to map ctx sglist for DMA.\n"); scsi_dma_unmap(cmd); ctx->sglPA = 0; return -ENOMEM; } e->dataAddr = ctx->sglPA; } else e->dataAddr = sg_dma_address(sg); } else { /* * In case there is no S/G list, scsi_sglist points * directly to the buffer. */ ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen, cmd->sc_data_direction); if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) { scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); return -ENOMEM; } e->dataAddr = ctx->dataPA; } return 0; } /* * The device incorrectly doesn't clear the first byte of the sense * buffer in some cases. We have to do it ourselves. * Otherwise we run into trouble when SWIOTLB is forced. */ static void pvscsi_patch_sense(struct scsi_cmnd *cmd) { if (cmd->sense_buffer) cmd->sense_buffer[0] = 0; } static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, struct pvscsi_ctx *ctx) { struct scsi_cmnd *cmd; unsigned bufflen; cmd = ctx->cmd; bufflen = scsi_bufflen(cmd); if (bufflen != 0) { unsigned count = scsi_sg_count(cmd); if (count != 0) { scsi_dma_unmap(cmd); if (ctx->sglPA) { dma_unmap_single(&adapter->dev->dev, ctx->sglPA, SGL_SIZE, DMA_TO_DEVICE); ctx->sglPA = 0; } } else dma_unmap_single(&adapter->dev->dev, ctx->dataPA, bufflen, cmd->sc_data_direction); } if (cmd->sense_buffer) dma_unmap_single(&adapter->dev->dev, ctx->sensePA, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); } static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) { adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, &adapter->ringStatePA, GFP_KERNEL); if (!adapter->rings_state) return -ENOMEM; adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages); adapter->req_depth = adapter->req_pages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev, adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA, GFP_KERNEL); if (!adapter->req_ring) return -ENOMEM; adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, pvscsi_ring_pages); adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev, adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA, GFP_KERNEL); if (!adapter->cmp_ring) return -ENOMEM; BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); if (!adapter->use_msg) return 0; adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, pvscsi_msg_ring_pages); adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev, 
adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA, GFP_KERNEL); if (!adapter->msg_ring) return -ENOMEM; BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); return 0; } static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) { struct PVSCSICmdDescSetupRings cmd = { 0 }; dma_addr_t base; unsigned i; cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; cmd.reqRingNumPages = adapter->req_pages; cmd.cmpRingNumPages = adapter->cmp_pages; base = adapter->reqRingPA; for (i = 0; i < adapter->req_pages; i++) { cmd.reqRingPPNs[i] = base >> PAGE_SHIFT; base += PAGE_SIZE; } base = adapter->cmpRingPA; for (i = 0; i < adapter->cmp_pages; i++) { cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT; base += PAGE_SIZE; } memset(adapter->rings_state, 0, PAGE_SIZE); memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd)); if (adapter->use_msg) { struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 }; cmd_msg.numPages = adapter->msg_pages; base = adapter->msgRingPA; for (i = 0; i < adapter->msg_pages; i++) { cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT; base += PAGE_SIZE; } memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING, &cmd_msg, sizeof(cmd_msg)); } } static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { if (!sdev->tagged_supported) qdepth = 1; return scsi_change_queue_depth(sdev, qdepth); } /* * Pull a completion descriptor off and pass the completion back * to the SCSI mid layer. */ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, const struct PVSCSIRingCmpDesc *e) { struct pvscsi_ctx *ctx; struct scsi_cmnd *cmd; struct completion *abort_cmp; u32 btstat = e->hostStatus; u32 sdstat = e->scsiStatus; ctx = pvscsi_get_context(adapter, e->context); cmd = ctx->cmd; abort_cmp = ctx->abort_cmp; pvscsi_unmap_buffers(adapter, ctx); if (sdstat != SAM_STAT_CHECK_CONDITION) pvscsi_patch_sense(cmd); pvscsi_release_context(adapter, ctx); if (abort_cmp) { /* * The command was requested to be aborted. Just signal that * the request completed and swallow the actual cmd completion * here. The abort handler will post a completion for this * command indicating that it got successfully aborted. */ complete(abort_cmp); return; } cmd->result = 0; if (sdstat != SAM_STAT_GOOD && (btstat == BTSTAT_SUCCESS || btstat == BTSTAT_LINKED_COMMAND_COMPLETED || btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { if (sdstat == SAM_STAT_COMMAND_TERMINATED) { cmd->result = (DID_RESET << 16); } else { cmd->result = (DID_OK << 16) | sdstat; } } else switch (btstat) { case BTSTAT_SUCCESS: case BTSTAT_LINKED_COMMAND_COMPLETED: case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: /* * Commands like INQUIRY may transfer less data than * requested by the initiator via bufflen. Set residual * count to make upper layer aware of the actual amount * of data returned. There are cases when controller * returns zero dataLen with non zero data - do not set * residual count in that case. 
*/ if (e->dataLen && (e->dataLen < scsi_bufflen(cmd))) scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); cmd->result = (DID_OK << 16); break; case BTSTAT_DATARUN: case BTSTAT_DATA_UNDERRUN: /* Report residual data in underruns */ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); cmd->result = (DID_ERROR << 16); break; case BTSTAT_SELTIMEO: /* Our emulation returns this for non-connected devs */ cmd->result = (DID_BAD_TARGET << 16); break; case BTSTAT_LUNMISMATCH: case BTSTAT_TAGREJECT: case BTSTAT_BADMSG: case BTSTAT_HAHARDWARE: case BTSTAT_INVPHASE: case BTSTAT_HATIMEOUT: case BTSTAT_NORESPONSE: case BTSTAT_DISCONNECT: case BTSTAT_HASOFTWARE: case BTSTAT_BUSFREE: case BTSTAT_SENSFAILED: cmd->result |= (DID_ERROR << 16); break; case BTSTAT_SENTRST: case BTSTAT_RECVRST: case BTSTAT_BUSRESET: cmd->result = (DID_RESET << 16); break; case BTSTAT_ABORTQUEUE: cmd->result = (DID_BUS_BUSY << 16); break; case BTSTAT_SCSIPARITY: cmd->result = (DID_PARITY << 16); break; default: cmd->result = (DID_ERROR << 16); scmd_printk(KERN_DEBUG, cmd, "Unknown completion status: 0x%x\n", btstat); } dev_dbg(&cmd->device->sdev_gendev, "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); scsi_done(cmd); } /* * barrier usage : Since the PVSCSI device is emulated, there could be cases * where we may want to serialize some accesses between the driver and the * emulation layer. We use compiler barriers instead of the more expensive * memory barriers because PVSCSI is only supported on X86 which has strong * memory access ordering. */ static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter) { struct PVSCSIRingsState *s = adapter->rings_state; struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; u32 cmp_entries = s->cmpNumEntriesLog2; while (s->cmpConsIdx != s->cmpProdIdx) { struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & MASK(cmp_entries)); /* * This barrier() ensures that *e is not dereferenced while * the device emulation still writes data into the slot. * Since the device emulation advances s->cmpProdIdx only after * updating the slot we want to check it first. */ barrier(); pvscsi_complete_request(adapter, e); /* * This barrier() ensures that compiler doesn't reorder write * to s->cmpConsIdx before the read of (*e) inside * pvscsi_complete_request. Otherwise, device emulation may * overwrite *e before we had a chance to read it. */ barrier(); s->cmpConsIdx++; } } /* * Translate a Linux SCSI request into a request ring entry. */ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd) { struct PVSCSIRingsState *s; struct PVSCSIRingReqDesc *e; struct scsi_device *sdev; u32 req_entries; s = adapter->rings_state; sdev = cmd->device; req_entries = s->reqNumEntriesLog2; /* * If this condition holds, we might have room on the request ring, but * we might not have room on the completion ring for the response. * However, we have already ruled out this possibility - we would not * have successfully allocated a context if it were true, since we only * have one context per request entry. Check for it anyway, since it * would be a serious bug. 
*/ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " "ring full: reqProdIdx=%d cmpConsIdx=%d\n", s->reqProdIdx, s->cmpConsIdx); return -1; } e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); e->bus = sdev->channel; e->target = sdev->id; memset(e->lun, 0, sizeof(e->lun)); e->lun[1] = sdev->lun; if (cmd->sense_buffer) { ctx->sensePA = dma_map_single(&adapter->dev->dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) { scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); ctx->sensePA = 0; return -ENOMEM; } e->senseAddr = ctx->sensePA; e->senseLen = SCSI_SENSE_BUFFERSIZE; } else { e->senseLen = 0; e->senseAddr = 0; } e->cdbLen = cmd->cmd_len; e->vcpuHint = smp_processor_id(); memcpy(e->cdb, cmd->cmnd, e->cdbLen); e->tag = SIMPLE_QUEUE_TAG; if (cmd->sc_data_direction == DMA_FROM_DEVICE) e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; else if (cmd->sc_data_direction == DMA_TO_DEVICE) e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; else if (cmd->sc_data_direction == DMA_NONE) e->flags = PVSCSI_FLAG_CMD_DIR_NONE; else e->flags = 0; if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) { if (cmd->sense_buffer) { dma_unmap_single(&adapter->dev->dev, ctx->sensePA, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); ctx->sensePA = 0; } return -ENOMEM; } e->context = pvscsi_map_context(adapter, ctx); barrier(); s->reqProdIdx++; return 0; } static int pvscsi_queue_lck(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct pvscsi_adapter *adapter = shost_priv(host); struct pvscsi_ctx *ctx; unsigned long flags; unsigned char op; spin_lock_irqsave(&adapter->hw_lock, flags); ctx = pvscsi_acquire_context(adapter, cmd); if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { if (ctx) pvscsi_release_context(adapter, ctx); spin_unlock_irqrestore(&adapter->hw_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } op = cmd->cmnd[0]; dev_dbg(&cmd->device->sdev_gendev, "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op); spin_unlock_irqrestore(&adapter->hw_lock, flags); pvscsi_kick_io(adapter, op); return 0; } static DEF_SCSI_QCMD(pvscsi_queue) static int pvscsi_abort(struct scsi_cmnd *cmd) { struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); struct pvscsi_ctx *ctx; unsigned long flags; int result = SUCCESS; DECLARE_COMPLETION_ONSTACK(abort_cmp); int done; scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", adapter->host->host_no, cmd); spin_lock_irqsave(&adapter->hw_lock, flags); /* * Poll the completion ring first - we might be trying to abort * a command that is waiting to be dispatched in the completion ring. */ pvscsi_process_completion_ring(adapter); /* * If there is no context for the command, it either already succeeded * or else was never properly issued. Not our problem. */ ctx = pvscsi_find_context(adapter, cmd); if (!ctx) { scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); goto out; } /* * Mark that the command has been requested to be aborted and issue * the abort. */ ctx->abort_cmp = &abort_cmp; pvscsi_abort_cmd(adapter, ctx); spin_unlock_irqrestore(&adapter->hw_lock, flags); /* Wait for 2 secs for the completion. */ done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); spin_lock_irqsave(&adapter->hw_lock, flags); if (!done) { /* * Failed to abort the command, unmark the fact that it * was requested to be aborted. 
*/ ctx->abort_cmp = NULL; result = FAILED; scmd_printk(KERN_DEBUG, cmd, "Failed to get completion for aborted cmd %p\n", cmd); goto out; } /* * Successfully aborted the command. */ cmd->result = (DID_ABORT << 16); scsi_done(cmd); out: spin_unlock_irqrestore(&adapter->hw_lock, flags); return result; } /* * Abort all outstanding requests. This is only safe to use if the completion * ring will never be walked again or the device has been reset, because it * destroys the 1-1 mapping between context field passed to emulation and our * request structure. */ static void pvscsi_reset_all(struct pvscsi_adapter *adapter) { unsigned i; for (i = 0; i < adapter->req_depth; i++) { struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; struct scsi_cmnd *cmd = ctx->cmd; if (cmd) { scmd_printk(KERN_ERR, cmd, "Forced reset on cmd %p\n", cmd); pvscsi_unmap_buffers(adapter, ctx); pvscsi_patch_sense(cmd); pvscsi_release_context(adapter, ctx); cmd->result = (DID_RESET << 16); scsi_done(cmd); } } } static int pvscsi_host_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct pvscsi_adapter *adapter = shost_priv(host); unsigned long flags; bool use_msg; scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n"); spin_lock_irqsave(&adapter->hw_lock, flags); use_msg = adapter->use_msg; if (use_msg) { adapter->use_msg = false; spin_unlock_irqrestore(&adapter->hw_lock, flags); /* * Now that we know that the ISR won't add more work on the * workqueue we can safely flush any outstanding work. */ flush_workqueue(adapter->workqueue); spin_lock_irqsave(&adapter->hw_lock, flags); } /* * We're going to tear down the entire ring structure and set it back * up, so stalling new requests until all completions are flushed and * the rings are back in place. */ pvscsi_process_request_ring(adapter); ll_adapter_reset(adapter); /* * Now process any completions. Note we do this AFTER adapter reset, * which is strange, but stops races where completions get posted * between processing the ring and issuing the reset. The backend will * not touch the ring memory after reset, so the immediately pre-reset * completion ring state is still valid. */ pvscsi_process_completion_ring(adapter); pvscsi_reset_all(adapter); adapter->use_msg = use_msg; pvscsi_setup_all_rings(adapter); pvscsi_unmask_intr(adapter); spin_unlock_irqrestore(&adapter->hw_lock, flags); return SUCCESS; } static int pvscsi_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct pvscsi_adapter *adapter = shost_priv(host); unsigned long flags; scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n"); /* * We don't want to queue new requests for this bus after * flushing all pending requests to emulation, since new * requests could then sneak in during this bus reset phase, * so take the lock now. */ spin_lock_irqsave(&adapter->hw_lock, flags); pvscsi_process_request_ring(adapter); ll_bus_reset(adapter); pvscsi_process_completion_ring(adapter); spin_unlock_irqrestore(&adapter->hw_lock, flags); return SUCCESS; } static int pvscsi_device_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct pvscsi_adapter *adapter = shost_priv(host); unsigned long flags; scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n", host->host_no, cmd->device->id); /* * We don't want to queue new requests for this device after flushing * all pending requests to emulation, since new requests could then * sneak in during this device reset phase, so take the lock now. 
*/ spin_lock_irqsave(&adapter->hw_lock, flags); pvscsi_process_request_ring(adapter); ll_device_reset(adapter, cmd->device->id); pvscsi_process_completion_ring(adapter); spin_unlock_irqrestore(&adapter->hw_lock, flags); return SUCCESS; } static struct scsi_host_template pvscsi_template; static const char *pvscsi_info(struct Scsi_Host *host) { struct pvscsi_adapter *adapter = shost_priv(host); static char buf[256]; sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: " "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, pvscsi_template.cmd_per_lun); return buf; } static struct scsi_host_template pvscsi_template = { .module = THIS_MODULE, .name = "VMware PVSCSI Host Adapter", .proc_name = "vmw_pvscsi", .info = pvscsi_info, .queuecommand = pvscsi_queue, .this_id = -1, .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, .dma_boundary = UINT_MAX, .max_sectors = 0xffff, .change_queue_depth = pvscsi_change_queue_depth, .eh_abort_handler = pvscsi_abort, .eh_device_reset_handler = pvscsi_device_reset, .eh_bus_reset_handler = pvscsi_bus_reset, .eh_host_reset_handler = pvscsi_host_reset, }; static void pvscsi_process_msg(const struct pvscsi_adapter *adapter, const struct PVSCSIRingMsgDesc *e) { struct PVSCSIRingsState *s = adapter->rings_state; struct Scsi_Host *host = adapter->host; struct scsi_device *sdev; printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n", e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2); BUILD_BUG_ON(PVSCSI_MSG_LAST != 2); if (e->type == PVSCSI_MSG_DEV_ADDED) { struct PVSCSIMsgDescDevStatusChanged *desc; desc = (struct PVSCSIMsgDescDevStatusChanged *)e; printk(KERN_INFO "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n", desc->bus, desc->target, desc->lun[1]); if (!scsi_host_get(host)) return; sdev = scsi_device_lookup(host, desc->bus, desc->target, desc->lun[1]); if (sdev) { printk(KERN_INFO "vmw_pvscsi: device already exists\n"); scsi_device_put(sdev); } else scsi_add_device(adapter->host, desc->bus, desc->target, desc->lun[1]); scsi_host_put(host); } else if (e->type == PVSCSI_MSG_DEV_REMOVED) { struct PVSCSIMsgDescDevStatusChanged *desc; desc = (struct PVSCSIMsgDescDevStatusChanged *)e; printk(KERN_INFO "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n", desc->bus, desc->target, desc->lun[1]); if (!scsi_host_get(host)) return; sdev = scsi_device_lookup(host, desc->bus, desc->target, desc->lun[1]); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } else printk(KERN_INFO "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n", desc->bus, desc->target, desc->lun[1]); scsi_host_put(host); } } static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter) { struct PVSCSIRingsState *s = adapter->rings_state; return s->msgProdIdx != s->msgConsIdx; } static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter) { struct PVSCSIRingsState *s = adapter->rings_state; struct PVSCSIRingMsgDesc *ring = adapter->msg_ring; u32 msg_entries = s->msgNumEntriesLog2; while (pvscsi_msg_pending(adapter)) { struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx & MASK(msg_entries)); barrier(); pvscsi_process_msg(adapter, e); barrier(); s->msgConsIdx++; } } static void pvscsi_msg_workqueue_handler(struct work_struct *data) { struct pvscsi_adapter *adapter; adapter = container_of(data, struct pvscsi_adapter, work); pvscsi_process_msg_ring(adapter); } static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter) { char name[32]; if (!pvscsi_use_msg) return 0; 
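	/*
	 * Probe for message-ring support: issue SETUP_MSG_RING with no
	 * payload and read back the command status.  A status of -1 means
	 * the emulation does not implement the msg ring, so run without it.
	 */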
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, PVSCSI_CMD_SETUP_MSG_RING); if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1) return 0; snprintf(name, sizeof(name), "vmw_pvscsi_wq_%u", adapter->host->host_no); adapter->workqueue = create_singlethread_workqueue(name); if (!adapter->workqueue) { printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n"); return 0; } INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); return 1; } static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter, bool enable) { u32 val; if (!pvscsi_use_req_threshold) return false; pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD); val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS); if (val == -1) { printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n"); return false; } else { struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 }; cmd_msg.enable = enable; printk(KERN_INFO "vmw_pvscsi: %sabling reqCallThreshold\n", enable ? "en" : "dis"); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD, &cmd_msg, sizeof(cmd_msg)); return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0; } } static irqreturn_t pvscsi_isr(int irq, void *devp) { struct pvscsi_adapter *adapter = devp; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); pvscsi_process_completion_ring(adapter); if (adapter->use_msg && pvscsi_msg_pending(adapter)) queue_work(adapter->workqueue, &adapter->work); spin_unlock_irqrestore(&adapter->hw_lock, flags); return IRQ_HANDLED; } static irqreturn_t pvscsi_shared_isr(int irq, void *devp) { struct pvscsi_adapter *adapter = devp; u32 val = pvscsi_read_intr_status(adapter); if (!(val & PVSCSI_INTR_ALL_SUPPORTED)) return IRQ_NONE; pvscsi_write_intr_status(devp, val); return pvscsi_isr(irq, devp); } static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) { struct pvscsi_ctx *ctx = adapter->cmd_map; unsigned i; for (i = 0; i < adapter->req_depth; ++i, ++ctx) free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); } static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) { free_irq(pci_irq_vector(adapter->dev, 0), adapter); pci_free_irq_vectors(adapter->dev); } static void pvscsi_release_resources(struct pvscsi_adapter *adapter) { if (adapter->workqueue) destroy_workqueue(adapter->workqueue); if (adapter->mmioBase) pci_iounmap(adapter->dev, adapter->mmioBase); pci_release_regions(adapter->dev); if (adapter->cmd_map) { pvscsi_free_sgls(adapter); kfree(adapter->cmd_map); } if (adapter->rings_state) dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, adapter->rings_state, adapter->ringStatePA); if (adapter->req_ring) dma_free_coherent(&adapter->dev->dev, adapter->req_pages * PAGE_SIZE, adapter->req_ring, adapter->reqRingPA); if (adapter->cmp_ring) dma_free_coherent(&adapter->dev->dev, adapter->cmp_pages * PAGE_SIZE, adapter->cmp_ring, adapter->cmpRingPA); if (adapter->msg_ring) dma_free_coherent(&adapter->dev->dev, adapter->msg_pages * PAGE_SIZE, adapter->msg_ring, adapter->msgRingPA); } /* * Allocate scatter gather lists. * * These are statically allocated. Trying to be clever was not worth it. * * Dynamic allocation can fail, and we can't go deep into the memory * allocator, since we're a SCSI driver, and trying too hard to allocate * memory might generate disk I/O. We also don't want to fail disk I/O * in that case because we can't get an allocation - the I/O could be * trying to swap out data to free memory. 
Since that is pathological, * just use a statically allocated scatter list. * */ static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter) { struct pvscsi_ctx *ctx; int i; ctx = adapter->cmd_map; BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); for (i = 0; i < adapter->req_depth; ++i, ++ctx) { ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, get_order(SGL_SIZE)); ctx->sglPA = 0; BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); if (!ctx->sgl) { for (; i >= 0; --i, --ctx) { free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); ctx->sgl = NULL; } return -ENOMEM; } } return 0; } /* * Query the device, fetch the config info and return the * maximum number of targets on the adapter. In case of * failure due to any reason return default i.e. 16. */ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter) { struct PVSCSICmdDescConfigCmd cmd; struct PVSCSIConfigPageHeader *header; struct device *dev; dma_addr_t configPagePA; void *config_page; u32 numPhys = 16; dev = pvscsi_dev(adapter); config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, &configPagePA, GFP_KERNEL); if (!config_page) { dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); goto exit; } BUG_ON(configPagePA & ~PAGE_MASK); /* Fetch config info from the device. */ cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32; cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER; cmd.cmpAddr = configPagePA; cmd._pad = 0; /* * Mark the completion page header with error values. If the device * completes the command successfully, it sets the status values to * indicate success. */ header = config_page; header->hostStatus = BTSTAT_INVPARAM; header->scsiStatus = SDSTAT_CHECK; pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd); if (header->hostStatus == BTSTAT_SUCCESS && header->scsiStatus == SDSTAT_GOOD) { struct PVSCSIConfigPageController *config; config = config_page; numPhys = config->numPhys; } else dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n", header->hostStatus, header->scsiStatus); dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page, configPagePA); exit: return numPhys; } static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY; struct pvscsi_adapter *adapter; struct pvscsi_adapter adapter_temp; struct Scsi_Host *host = NULL; unsigned int i; int error; u32 max_id; error = -ENODEV; if (pci_enable_device(pdev)) return error; if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); } else { printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); goto out_disable_device; } /* * Let's use a temp pvscsi_adapter struct until we find the number of * targets on the adapter, after that we will switch to the real * allocated struct. 
*/ adapter = &adapter_temp; memset(adapter, 0, sizeof(*adapter)); adapter->dev = pdev; adapter->rev = pdev->revision; if (pci_request_regions(pdev, "vmw_pvscsi")) { printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); goto out_disable_device; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)) continue; if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE) continue; break; } if (i == DEVICE_COUNT_RESOURCE) { printk(KERN_ERR "vmw_pvscsi: adapter has no suitable MMIO region\n"); goto out_release_resources_and_disable; } adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); if (!adapter->mmioBase) { printk(KERN_ERR "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n", i, PVSCSI_MEM_SPACE_SIZE); goto out_release_resources_and_disable; } pci_set_master(pdev); /* * Ask the device for max number of targets before deciding the * default pvscsi_ring_pages value. */ max_id = pvscsi_get_max_targets(adapter); printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id); if (pvscsi_ring_pages == 0) /* * Set the right default value. Up to 16 it is 8, above it is * max. */ pvscsi_ring_pages = (max_id > 16) ? PVSCSI_SETUP_RINGS_MAX_NUM_PAGES : PVSCSI_DEFAULT_NUM_PAGES_PER_RING; printk(KERN_INFO "vmw_pvscsi: setting ring_pages to %d\n", pvscsi_ring_pages); pvscsi_template.can_queue = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; pvscsi_template.cmd_per_lun = min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); if (!host) { printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); goto out_release_resources_and_disable; } /* * Let's use the real pvscsi_adapter struct here onwards. */ adapter = shost_priv(host); memset(adapter, 0, sizeof(*adapter)); adapter->dev = pdev; adapter->host = host; /* * Copy back what we already have to the allocated adapter struct. */ adapter->rev = adapter_temp.rev; adapter->mmioBase = adapter_temp.mmioBase; spin_lock_init(&adapter->hw_lock); host->max_channel = 0; host->max_lun = 1; host->max_cmd_len = 16; host->max_id = max_id; pci_set_drvdata(pdev, host); ll_adapter_reset(adapter); adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); error = pvscsi_allocate_rings(adapter); if (error) { printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n"); goto out_release_resources; } /* * From this point on we should reset the adapter if anything goes * wrong. */ pvscsi_setup_all_rings(adapter); adapter->cmd_map = kcalloc(adapter->req_depth, sizeof(struct pvscsi_ctx), GFP_KERNEL); if (!adapter->cmd_map) { printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); error = -ENOMEM; goto out_reset_adapter; } INIT_LIST_HEAD(&adapter->cmd_pool); for (i = 0; i < adapter->req_depth; i++) { struct pvscsi_ctx *ctx = adapter->cmd_map + i; list_add(&ctx->list, &adapter->cmd_pool); } error = pvscsi_allocate_sg(adapter); if (error) { printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n"); goto out_reset_adapter; } if (pvscsi_disable_msix) irq_flag &= ~PCI_IRQ_MSIX; if (pvscsi_disable_msi) irq_flag &= ~PCI_IRQ_MSI; error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag); if (error < 0) goto out_reset_adapter; adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n", adapter->use_req_threshold ? 
"en" : "dis"); if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) { printk(KERN_INFO "vmw_pvscsi: using MSI%s\n", adapter->dev->msix_enabled ? "-X" : ""); error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr, 0, "vmw_pvscsi", adapter); } else { printk(KERN_INFO "vmw_pvscsi: using INTx\n"); error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr, IRQF_SHARED, "vmw_pvscsi", adapter); } if (error) { printk(KERN_ERR "vmw_pvscsi: unable to request IRQ: %d\n", error); goto out_reset_adapter; } error = scsi_add_host(host, &pdev->dev); if (error) { printk(KERN_ERR "vmw_pvscsi: scsi_add_host failed: %d\n", error); goto out_reset_adapter; } dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", adapter->rev, host->host_no); pvscsi_unmask_intr(adapter); scsi_scan_host(host); return 0; out_reset_adapter: ll_adapter_reset(adapter); out_release_resources: pvscsi_shutdown_intr(adapter); pvscsi_release_resources(adapter); scsi_host_put(host); out_disable_device: pci_disable_device(pdev); return error; out_release_resources_and_disable: pvscsi_shutdown_intr(adapter); pvscsi_release_resources(adapter); goto out_disable_device; } static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) { pvscsi_mask_intr(adapter); if (adapter->workqueue) flush_workqueue(adapter->workqueue); pvscsi_shutdown_intr(adapter); pvscsi_process_request_ring(adapter); pvscsi_process_completion_ring(adapter); ll_adapter_reset(adapter); } static void pvscsi_shutdown(struct pci_dev *dev) { struct Scsi_Host *host = pci_get_drvdata(dev); struct pvscsi_adapter *adapter = shost_priv(host); __pvscsi_shutdown(adapter); } static void pvscsi_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct pvscsi_adapter *adapter = shost_priv(host); scsi_remove_host(host); __pvscsi_shutdown(adapter); pvscsi_release_resources(adapter); scsi_host_put(host); pci_disable_device(pdev); } static struct pci_driver pvscsi_pci_driver = { .name = "vmw_pvscsi", .id_table = pvscsi_pci_tbl, .probe = pvscsi_probe, .remove = pvscsi_remove, .shutdown = pvscsi_shutdown, }; static int __init pvscsi_init(void) { pr_info("%s - version %s\n", PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING); return pci_register_driver(&pvscsi_pci_driver); } static void __exit pvscsi_exit(void) { pci_unregister_driver(&pvscsi_pci_driver); } module_init(pvscsi_init); module_exit(pvscsi_exit);
linux-master
drivers/scsi/vmw_pvscsi.c
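/*
 * Illustrative sketch (editor-added, not part of the driver above): how
 * pvscsi_probe() derives the default ring size and SCSI queue depth from the
 * adapter's reported target count.  The EXAMPLE_* constants are assumptions
 * chosen only for this standalone example; the real values come from the
 * driver's header (PVSCSI_DEFAULT_NUM_PAGES_PER_RING,
 * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES, PVSCSI_MAX_NUM_PAGES_REQ_RING,
 * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE).
 */
#include <stdio.h>

#define EXAMPLE_DEFAULT_NUM_PAGES_PER_RING	8	/* assumed */
#define EXAMPLE_SETUP_RINGS_MAX_NUM_PAGES	32	/* assumed */
#define EXAMPLE_MAX_NUM_PAGES_REQ_RING		32	/* assumed */
#define EXAMPLE_MAX_NUM_REQ_ENTRIES_PER_PAGE	32	/* assumed */

static unsigned int example_min(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_id = 64;	/* as reported by the config page query */
	unsigned int ring_pages;
	unsigned int can_queue;

	/* Up to 16 targets keeps the default page count; larger setups get the maximum. */
	ring_pages = (max_id > 16) ? EXAMPLE_SETUP_RINGS_MAX_NUM_PAGES
				   : EXAMPLE_DEFAULT_NUM_PAGES_PER_RING;

	/* Mirrors the can_queue computation done before scsi_host_alloc(). */
	can_queue = example_min(EXAMPLE_MAX_NUM_PAGES_REQ_RING, ring_pages) *
		    EXAMPLE_MAX_NUM_REQ_ENTRIES_PER_PAGE;

	printf("ring_pages=%u can_queue=%u\n", ring_pages, can_queue);
	return 0;
}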
// SPDX-License-Identifier: GPL-2.0-only /* * scsi_pm.c Copyright (C) 2010 Alan Stern * * SCSI dynamic Power Management * Initial version: Alan Stern <[email protected]> */ #include <linux/pm_runtime.h> #include <linux/export.h> #include <linux/blk-pm.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_host.h> #include "scsi_priv.h" #ifdef CONFIG_PM_SLEEP static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm) { return pm && pm->suspend ? pm->suspend(dev) : 0; } static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm) { return pm && pm->freeze ? pm->freeze(dev) : 0; } static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm) { return pm && pm->poweroff ? pm->poweroff(dev) : 0; } static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm) { return pm && pm->resume ? pm->resume(dev) : 0; } static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm) { return pm && pm->thaw ? pm->thaw(dev) : 0; } static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm) { return pm && pm->restore ? pm->restore(dev) : 0; } static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *, const struct dev_pm_ops *)) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int err; err = scsi_device_quiesce(to_scsi_device(dev)); if (err == 0) { err = cb(dev, pm); if (err) scsi_device_resume(to_scsi_device(dev)); } dev_dbg(dev, "scsi suspend: %d\n", err); return err; } static int scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *, const struct dev_pm_ops *)) { if (!scsi_is_sdev_device(dev)) return 0; return scsi_dev_type_suspend(dev, cb); } static int scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *, const struct dev_pm_ops *)) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int err; if (!scsi_is_sdev_device(dev)) return 0; err = cb(dev, pm); scsi_device_resume(to_scsi_device(dev)); dev_dbg(dev, "scsi resume: %d\n", err); return err; } static int scsi_bus_prepare(struct device *dev) { if (scsi_is_host_device(dev)) { /* Wait until async scanning is finished */ scsi_complete_async_scans(); } return 0; } static int scsi_bus_suspend(struct device *dev) { return scsi_bus_suspend_common(dev, do_scsi_suspend); } static int scsi_bus_resume(struct device *dev) { return scsi_bus_resume_common(dev, do_scsi_resume); } static int scsi_bus_freeze(struct device *dev) { return scsi_bus_suspend_common(dev, do_scsi_freeze); } static int scsi_bus_thaw(struct device *dev) { return scsi_bus_resume_common(dev, do_scsi_thaw); } static int scsi_bus_poweroff(struct device *dev) { return scsi_bus_suspend_common(dev, do_scsi_poweroff); } static int scsi_bus_restore(struct device *dev) { return scsi_bus_resume_common(dev, do_scsi_restore); } #else /* CONFIG_PM_SLEEP */ #define scsi_bus_prepare NULL #define scsi_bus_suspend NULL #define scsi_bus_resume NULL #define scsi_bus_freeze NULL #define scsi_bus_thaw NULL #define scsi_bus_poweroff NULL #define scsi_bus_restore NULL #endif /* CONFIG_PM_SLEEP */ static int sdev_runtime_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; struct scsi_device *sdev = to_scsi_device(dev); int err = 0; err = blk_pre_runtime_suspend(sdev->request_queue); if (err) return err; if (pm && pm->runtime_suspend) err = pm->runtime_suspend(dev); blk_post_runtime_suspend(sdev->request_queue, err); return err; } static int scsi_runtime_suspend(struct device *dev) { int err = 0; dev_dbg(dev, "scsi_runtime_suspend\n"); if (scsi_is_sdev_device(dev)) err = sdev_runtime_suspend(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } static int sdev_runtime_resume(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int err = 0; blk_pre_runtime_resume(sdev->request_queue); if (pm && pm->runtime_resume) err = pm->runtime_resume(dev); blk_post_runtime_resume(sdev->request_queue); return err; } static int scsi_runtime_resume(struct device *dev) { int err = 0; dev_dbg(dev, "scsi_runtime_resume\n"); if (scsi_is_sdev_device(dev)) err = sdev_runtime_resume(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } static int scsi_runtime_idle(struct device *dev) { dev_dbg(dev, "scsi_runtime_idle\n"); /* Insert hooks here for targets, hosts, and transport classes */ if (scsi_is_sdev_device(dev)) { pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); return -EBUSY; } return 0; } int scsi_autopm_get_device(struct scsi_device *sdev) { int err; err = pm_runtime_get_sync(&sdev->sdev_gendev); if (err < 0 && err !=-EACCES) pm_runtime_put_sync(&sdev->sdev_gendev); else err = 0; return err; } EXPORT_SYMBOL_GPL(scsi_autopm_get_device); void scsi_autopm_put_device(struct scsi_device *sdev) { pm_runtime_put_sync(&sdev->sdev_gendev); } EXPORT_SYMBOL_GPL(scsi_autopm_put_device); void scsi_autopm_get_target(struct scsi_target *starget) { pm_runtime_get_sync(&starget->dev); } void scsi_autopm_put_target(struct scsi_target *starget) { pm_runtime_put_sync(&starget->dev); } int scsi_autopm_get_host(struct Scsi_Host *shost) { int err; err = pm_runtime_get_sync(&shost->shost_gendev); if (err < 0 && err !=-EACCES) pm_runtime_put_sync(&shost->shost_gendev); else err = 0; return err; } void scsi_autopm_put_host(struct Scsi_Host *shost) { pm_runtime_put_sync(&shost->shost_gendev); } const struct dev_pm_ops scsi_bus_pm_ops = { .prepare = scsi_bus_prepare, .suspend = scsi_bus_suspend, .resume = scsi_bus_resume, .freeze = scsi_bus_freeze, .thaw = scsi_bus_thaw, .poweroff = scsi_bus_poweroff, .restore = scsi_bus_restore, .runtime_suspend = scsi_runtime_suspend, .runtime_resume = scsi_runtime_resume, .runtime_idle = scsi_runtime_idle, };
linux-master
drivers/scsi/scsi_pm.c
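/*
 * Illustrative sketch (editor-added, not part of the kernel source above): the
 * suspend pattern used by scsi_dev_type_suspend() -- quiesce the device, invoke
 * the PM callback, and undo the quiesce if the callback fails.  All names below
 * (fake_device, fake_quiesce, ...) are made up for this standalone example;
 * cb plays the role of the dev_pm_ops callback, and a NULL cb is treated the
 * same way a missing pm op is treated in the real code.
 */
#include <stdio.h>

struct fake_device {
	const char *name;
	int quiesced;
};

static int fake_quiesce(struct fake_device *dev)
{
	dev->quiesced = 1;
	printf("%s: quiesced\n", dev->name);
	return 0;
}

static void fake_resume(struct fake_device *dev)
{
	dev->quiesced = 0;
	printf("%s: resumed\n", dev->name);
}

static int fake_suspend(struct fake_device *dev, int (*cb)(struct fake_device *))
{
	int err = fake_quiesce(dev);

	if (err)
		return err;

	err = cb ? cb(dev) : 0;
	if (err)
		fake_resume(dev);	/* a failed suspend must not leave the device quiesced */

	return err;
}

static int failing_cb(struct fake_device *dev)
{
	printf("%s: driver suspend callback failed\n", dev->name);
	return -5;	/* stand-in error code */
}

int main(void)
{
	struct fake_device dev = { .name = "sda", .quiesced = 0 };
	int err = fake_suspend(&dev, failing_cb);

	printf("suspend returned %d, quiesced=%d\n", err, dev.quiesced);
	return 0;
}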
// SPDX-License-Identifier: GPL-2.0-only /* Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters Copyright 1995-1998 by Leonard N. Zubkoff <[email protected]> The author respectfully requests that any modifications to this software be sent directly to him for evaluation and testing. Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose advice has been invaluable, to David Gentzel, for writing the original Linux BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site. Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB Manager available as freely redistributable source code. */ #define blogic_drvr_version "2.1.17" #define blogic_drvr_date "12 September 2013" #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/stat.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/msdos_partition.h> #include <scsi/scsicam.h> #include <asm/dma.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "BusLogic.h" #include "FlashPoint.c" #ifndef FAILURE #define FAILURE (-1) #endif static const struct scsi_host_template blogic_template; /* blogic_drvr_options_count is a count of the number of BusLogic Driver Options specifications provided via the Linux Kernel Command Line or via the Loadable Kernel Module Installation Facility. */ static int blogic_drvr_options_count; /* blogic_drvr_options is an array of Driver Options structures representing BusLogic Driver Options specifications provided via the Linux Kernel Command Line or via the Loadable Kernel Module Installation Facility. */ static struct blogic_drvr_options blogic_drvr_options[BLOGIC_MAX_ADAPTERS]; /* BusLogic can be assigned a string by insmod. */ MODULE_LICENSE("GPL"); #ifdef MODULE static char *BusLogic; module_param(BusLogic, charp, 0); #endif /* blogic_probe_options is a set of Probe Options to be applied across all BusLogic Host Adapters. */ static struct blogic_probe_options blogic_probe_options; /* blogic_global_options is a set of Global Options to be applied across all BusLogic Host Adapters. */ static struct blogic_global_options blogic_global_options; static LIST_HEAD(blogic_host_list); /* blogic_probeinfo_count is the number of entries in blogic_probeinfo_list. */ static int blogic_probeinfo_count; /* blogic_probeinfo_list is the list of I/O Addresses and Bus Probe Information to be checked for potential BusLogic Host Adapters. It is initialized by interrogating the PCI Configuration Space on PCI machines as well as from the list of standard BusLogic I/O Addresses. */ static struct blogic_probeinfo *blogic_probeinfo_list; /* blogic_cmd_failure_reason holds a string identifying the reason why a call to blogic_cmd failed. It is only non-NULL when blogic_cmd returns a failure code. */ static char *blogic_cmd_failure_reason; /* blogic_announce_drvr announces the Driver Version and Date, Author's Name, Copyright Notice, and Electronic Mail Address. */ static void blogic_announce_drvr(struct blogic_adapter *adapter) { blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter); blogic_announce("Copyright 1995-1998 by Leonard N. 
Zubkoff <[email protected]>\n", adapter); } /* blogic_drvr_info returns the Host Adapter Name to identify this SCSI Driver and Host Adapter. */ static const char *blogic_drvr_info(struct Scsi_Host *host) { struct blogic_adapter *adapter = (struct blogic_adapter *) host->hostdata; return adapter->full_model; } /* blogic_init_ccbs initializes a group of Command Control Blocks (CCBs) for Host Adapter from the blk_size bytes located at blk_pointer. The newly created CCBs are added to Host Adapter's free list. */ static void blogic_init_ccbs(struct blogic_adapter *adapter, void *blk_pointer, int blk_size, dma_addr_t blkp) { struct blogic_ccb *ccb = (struct blogic_ccb *) blk_pointer; unsigned int offset = 0; memset(blk_pointer, 0, blk_size); ccb->allocgrp_head = blkp; ccb->allocgrp_size = blk_size; while ((blk_size -= sizeof(struct blogic_ccb)) >= 0) { ccb->status = BLOGIC_CCB_FREE; ccb->adapter = adapter; ccb->dma_handle = (u32) blkp + offset; if (blogic_flashpoint_type(adapter)) { ccb->callback = blogic_qcompleted_ccb; ccb->base_addr = adapter->fpinfo.base_addr; } ccb->next = adapter->free_ccbs; ccb->next_all = adapter->all_ccbs; adapter->free_ccbs = ccb; adapter->all_ccbs = ccb; adapter->alloc_ccbs++; ccb++; offset += sizeof(struct blogic_ccb); } } /* blogic_create_initccbs allocates the initial CCBs for Host Adapter. */ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter) { int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb); void *blk_pointer; dma_addr_t blkp; while (adapter->alloc_ccbs < adapter->initccbs) { blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev, blk_size, &blkp, GFP_KERNEL); if (blk_pointer == NULL) { blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n", adapter); return false; } blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp); } return true; } /* blogic_destroy_ccbs deallocates the CCBs for Host Adapter. */ static void blogic_destroy_ccbs(struct blogic_adapter *adapter) { struct blogic_ccb *next_ccb = adapter->all_ccbs, *ccb, *lastccb = NULL; adapter->all_ccbs = NULL; adapter->free_ccbs = NULL; while ((ccb = next_ccb) != NULL) { next_ccb = ccb->next_all; if (ccb->allocgrp_head) { if (lastccb) dma_free_coherent(&adapter->pci_device->dev, lastccb->allocgrp_size, lastccb, lastccb->allocgrp_head); lastccb = ccb; } } if (lastccb) dma_free_coherent(&adapter->pci_device->dev, lastccb->allocgrp_size, lastccb, lastccb->allocgrp_head); } /* blogic_create_addlccbs allocates Additional CCBs for Host Adapter. If allocation fails and there are no remaining CCBs available, the Driver Queue Depth is decreased to a known safe value to avoid potential deadlocks when multiple host adapters share the same IRQ Channel. 
*/ static void blogic_create_addlccbs(struct blogic_adapter *adapter, int addl_ccbs, bool print_success) { int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb); int prev_alloc = adapter->alloc_ccbs; void *blk_pointer; dma_addr_t blkp; if (addl_ccbs <= 0) return; while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) { blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev, blk_size, &blkp, GFP_KERNEL); if (blk_pointer == NULL) break; blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp); } if (adapter->alloc_ccbs > prev_alloc) { if (print_success) blogic_notice("Allocated %d additional CCBs (total now %d)\n", adapter, adapter->alloc_ccbs - prev_alloc, adapter->alloc_ccbs); return; } blogic_notice("Failed to allocate additional CCBs\n", adapter); if (adapter->drvr_qdepth > adapter->alloc_ccbs - adapter->tgt_count) { adapter->drvr_qdepth = adapter->alloc_ccbs - adapter->tgt_count; adapter->scsi_host->can_queue = adapter->drvr_qdepth; } } /* blogic_alloc_ccb allocates a CCB from Host Adapter's free list, allocating more memory from the Kernel if necessary. The Host Adapter's Lock should already have been acquired by the caller. */ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter) { static unsigned long serial; struct blogic_ccb *ccb; ccb = adapter->free_ccbs; if (ccb != NULL) { ccb->serial = ++serial; adapter->free_ccbs = ccb->next; ccb->next = NULL; if (adapter->free_ccbs == NULL) blogic_create_addlccbs(adapter, adapter->inc_ccbs, true); return ccb; } blogic_create_addlccbs(adapter, adapter->inc_ccbs, true); ccb = adapter->free_ccbs; if (ccb == NULL) return NULL; ccb->serial = ++serial; adapter->free_ccbs = ccb->next; ccb->next = NULL; return ccb; } /* blogic_dealloc_ccb deallocates a CCB, returning it to the Host Adapter's free list. The Host Adapter's Lock should already have been acquired by the caller. */ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap) { struct blogic_adapter *adapter = ccb->adapter; if (ccb->command != NULL) scsi_dma_unmap(ccb->command); if (dma_unmap) dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata, ccb->sense_datalen, DMA_FROM_DEVICE); ccb->command = NULL; ccb->status = BLOGIC_CCB_FREE; ccb->next = adapter->free_ccbs; adapter->free_ccbs = ccb; } /* blogic_cmd sends the command opcode to adapter, optionally providing paramlen bytes of param and receiving at most replylen bytes of reply; any excess reply data is received but discarded. On success, this function returns the number of reply bytes read from the Host Adapter (including any discarded data); on failure, it returns -1 if the command was invalid, or -2 if a timeout occurred. blogic_cmd is called exclusively during host adapter detection and initialization, so performance and latency are not critical, and exclusive access to the Host Adapter hardware is assumed. Once the host adapter and driver are initialized, the only Host Adapter command that is issued is the single byte Execute Mailbox Command operation code, which does not require waiting for the Host Adapter Ready bit to be set in the Status Register. */ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode, void *param, int paramlen, void *reply, int replylen) { unsigned char *param_p = (unsigned char *) param; unsigned char *reply_p = (unsigned char *) reply; union blogic_stat_reg statusreg; union blogic_int_reg intreg; unsigned long processor_flag = 0; int reply_b = 0, result; long timeout; /* Clear out the Reply Data if provided. 
*/ if (replylen > 0) memset(reply, 0, replylen); /* If the IRQ Channel has not yet been acquired, then interrupts must be disabled while issuing host adapter commands since a Command Complete interrupt could occur if the IRQ Channel was previously enabled by another BusLogic Host Adapter or another driver sharing the same IRQ Channel. */ if (!adapter->irq_acquired) local_irq_save(processor_flag); /* Wait for the Host Adapter Ready bit to be set and the Command/Parameter Register Busy bit to be reset in the Status Register. */ timeout = 10000; while (--timeout >= 0) { statusreg.all = blogic_rdstatus(adapter); if (statusreg.sr.adapter_ready && !statusreg.sr.cmd_param_busy) break; udelay(100); } if (timeout < 0) { blogic_cmd_failure_reason = "Timeout waiting for Host Adapter Ready"; result = -2; goto done; } /* Write the opcode to the Command/Parameter Register. */ adapter->adapter_cmd_complete = false; blogic_setcmdparam(adapter, opcode); /* Write any additional Parameter Bytes. */ timeout = 10000; while (paramlen > 0 && --timeout >= 0) { /* Wait 100 microseconds to give the Host Adapter enough time to determine whether the last value written to the Command/Parameter Register was valid or not. If the Command Complete bit is set in the Interrupt Register, then the Command Invalid bit in the Status Register will be reset if the Operation Code or Parameter was valid and the command has completed, or set if the Operation Code or Parameter was invalid. If the Data In Register Ready bit is set in the Status Register, then the Operation Code was valid, and data is waiting to be read back from the Host Adapter. Otherwise, wait for the Command/Parameter Register Busy bit in the Status Register to be reset. */ udelay(100); intreg.all = blogic_rdint(adapter); statusreg.all = blogic_rdstatus(adapter); if (intreg.ir.cmd_complete) break; if (adapter->adapter_cmd_complete) break; if (statusreg.sr.datain_ready) break; if (statusreg.sr.cmd_param_busy) continue; blogic_setcmdparam(adapter, *param_p++); paramlen--; } if (timeout < 0) { blogic_cmd_failure_reason = "Timeout waiting for Parameter Acceptance"; result = -2; goto done; } /* The Modify I/O Address command does not cause a Command Complete Interrupt. */ if (opcode == BLOGIC_MOD_IOADDR) { statusreg.all = blogic_rdstatus(adapter); if (statusreg.sr.cmd_invalid) { blogic_cmd_failure_reason = "Modify I/O Address Invalid"; result = -1; goto done; } if (blogic_global_options.trace_config) blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all); result = 0; goto done; } /* Select an appropriate timeout value for awaiting command completion. */ switch (opcode) { case BLOGIC_INQ_DEV0TO7: case BLOGIC_INQ_DEV8TO15: case BLOGIC_INQ_DEV: /* Approximately 60 seconds. */ timeout = 60 * 10000; break; default: /* Approximately 1 second. */ timeout = 10000; break; } /* Receive any Reply Bytes, waiting for either the Command Complete bit to be set in the Interrupt Register, or for the Interrupt Handler to set the Host Adapter Command Completed bit in the Host Adapter structure. 
*/ while (--timeout >= 0) { intreg.all = blogic_rdint(adapter); statusreg.all = blogic_rdstatus(adapter); if (intreg.ir.cmd_complete) break; if (adapter->adapter_cmd_complete) break; if (statusreg.sr.datain_ready) { if (++reply_b <= replylen) *reply_p++ = blogic_rddatain(adapter); else blogic_rddatain(adapter); } if (opcode == BLOGIC_FETCH_LOCALRAM && statusreg.sr.adapter_ready) break; udelay(100); } if (timeout < 0) { blogic_cmd_failure_reason = "Timeout waiting for Command Complete"; result = -2; goto done; } /* Clear any pending Command Complete Interrupt. */ blogic_intreset(adapter); /* Provide tracing information if requested. */ if (blogic_global_options.trace_config) { int i; blogic_notice("blogic_cmd(%02X) Status = %02X: %2d ==> %2d:", adapter, opcode, statusreg.all, replylen, reply_b); if (replylen > reply_b) replylen = reply_b; for (i = 0; i < replylen; i++) blogic_notice(" %02X", adapter, ((unsigned char *) reply)[i]); blogic_notice("\n", adapter); } /* Process Command Invalid conditions. */ if (statusreg.sr.cmd_invalid) { /* Some early BusLogic Host Adapters may not recover properly from a Command Invalid condition, so if this appears to be the case, a Soft Reset is issued to the Host Adapter. Potentially invalid commands are never attempted after Mailbox Initialization is performed, so there should be no Host Adapter state lost by a Soft Reset in response to a Command Invalid condition. */ udelay(1000); statusreg.all = blogic_rdstatus(adapter); if (statusreg.sr.cmd_invalid || statusreg.sr.rsvd || statusreg.sr.datain_ready || statusreg.sr.cmd_param_busy || !statusreg.sr.adapter_ready || !statusreg.sr.init_reqd || statusreg.sr.diag_active || statusreg.sr.diag_failed) { blogic_softreset(adapter); udelay(1000); } blogic_cmd_failure_reason = "Command Invalid"; result = -1; goto done; } /* Handle Excess Parameters Supplied conditions. */ if (paramlen > 0) { blogic_cmd_failure_reason = "Excess Parameters Supplied"; result = -1; goto done; } /* Indicate the command completed successfully. */ blogic_cmd_failure_reason = NULL; result = reply_b; /* Restore the interrupt status if necessary and return. */ done: if (!adapter->irq_acquired) local_irq_restore(processor_flag); return result; } /* blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order of increasing PCI Bus and Device Number. */ static void __init blogic_sort_probeinfo(struct blogic_probeinfo *probeinfo_list, int probeinfo_cnt) { int last_exchange = probeinfo_cnt - 1, bound, j; while (last_exchange > 0) { bound = last_exchange; last_exchange = 0; for (j = 0; j < bound; j++) { struct blogic_probeinfo *probeinfo1 = &probeinfo_list[j]; struct blogic_probeinfo *probeinfo2 = &probeinfo_list[j + 1]; if (probeinfo1->bus > probeinfo2->bus || (probeinfo1->bus == probeinfo2->bus && (probeinfo1->dev > probeinfo2->dev))) { struct blogic_probeinfo tmp_probeinfo; memcpy(&tmp_probeinfo, probeinfo1, sizeof(struct blogic_probeinfo)); memcpy(probeinfo1, probeinfo2, sizeof(struct blogic_probeinfo)); memcpy(probeinfo2, &tmp_probeinfo, sizeof(struct blogic_probeinfo)); last_exchange = j; } } } } /* blogic_init_mm_probeinfo initializes the list of I/O Address and Bus Probe Information to be checked for potential BusLogic MultiMaster SCSI Host Adapters by interrogating the PCI Configuration Space on PCI machines as well as from the list of standard BusLogic MultiMaster ISA I/O Addresses. It returns the number of PCI MultiMaster Host Adapters found. 
*/ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter) { struct blogic_probeinfo *pr_probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count]; int nonpr_mmindex = blogic_probeinfo_count + 1; int nonpr_mmcount = 0, mmcount = 0; bool force_scan_order = false; bool force_scan_order_checked = false; struct pci_dev *pci_device = NULL; int i; if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS) return 0; blogic_probeinfo_count++; /* Iterate over the MultiMaster PCI Host Adapters. For each enumerated host adapter, determine whether its ISA Compatible I/O Port is enabled and if so, whether it is assigned the Primary I/O Address. A host adapter that is assigned the Primary I/O Address will always be the preferred boot device. The MultiMaster BIOS will first recognize a host adapter at the Primary I/O Address, then any other PCI host adapters, and finally any host adapters located at the remaining standard ISA I/O Addresses. When a PCI host adapter is found with its ISA Compatible I/O Port enabled, a command is issued to disable the ISA Compatible I/O Port, and it is noted that the particular standard ISA I/O Address need not be probed. */ pr_probeinfo->io_addr = 0; while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, pci_device)) != NULL) { struct blogic_adapter *host_adapter = adapter; struct blogic_adapter_info adapter_info; enum blogic_isa_ioport mod_ioaddr_req; unsigned char bus; unsigned char device; unsigned int irq_ch; unsigned long base_addr0; unsigned long base_addr1; unsigned long io_addr; unsigned long pci_addr; if (pci_enable_device(pci_device)) continue; if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32))) continue; bus = pci_device->bus->number; device = pci_device->devfn >> 3; irq_ch = pci_device->irq; io_addr = base_addr0 = pci_resource_start(pci_device, 0); pci_addr = base_addr1 = pci_resource_start(pci_device, 1); if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) { blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0); blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1); blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr); continue; } if (irq_ch == 0) { blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch); blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (blogic_global_options.trace_probe) { blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL); blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr); } /* Issue the Inquire PCI Host Adapter Information command to determine the ISA Compatible I/O Port. If the ISA Compatible I/O Port is known and enabled, note that the particular Standard ISA I/O Address should not be probed. */ host_adapter->io_addr = io_addr; blogic_intreset(host_adapter); if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, &adapter_info, sizeof(adapter_info)) != sizeof(adapter_info)) adapter_info.isa_port = BLOGIC_IO_DISABLE; /* Issue the Modify I/O Address command to disable the ISA Compatible I/O Port. 
On PCI Host Adapters, the Modify I/O Address command allows modification of the ISA compatible I/O Address that the Host Adapter responds to; it does not affect the PCI compliant I/O Address assigned at system initialization. */ mod_ioaddr_req = BLOGIC_IO_DISABLE; blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, sizeof(mod_ioaddr_req), NULL, 0); /* For the first MultiMaster Host Adapter enumerated, issue the Fetch Host Adapter Local RAM command to read byte 45 of the AutoSCSI area, for the setting of the "Use Bus And Device # For PCI Scanning Seq." option. Issue the Inquire Board ID command since this option is only valid for the BT-948/958/958D. */ if (!force_scan_order_checked) { struct blogic_fetch_localram fetch_localram; struct blogic_autoscsi_byte45 autoscsi_byte45; struct blogic_board_id id; fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; fetch_localram.count = sizeof(autoscsi_byte45); blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram, sizeof(fetch_localram), &autoscsi_byte45, sizeof(autoscsi_byte45)); blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, sizeof(id)); if (id.fw_ver_digit1 == '5') force_scan_order = autoscsi_byte45.force_scan_order; force_scan_order_checked = true; } /* Determine whether this MultiMaster Host Adapter has its ISA Compatible I/O Port enabled and is assigned the Primary I/O Address. If it does, then it is the Primary MultiMaster Host Adapter and must be recognized first. If it does not, then it is added to the list for probing after any Primary MultiMaster Host Adapter is probed. */ if (adapter_info.isa_port == BLOGIC_IO_330) { pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER; pr_probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; pr_probeinfo->io_addr = io_addr; pr_probeinfo->pci_addr = pci_addr; pr_probeinfo->bus = bus; pr_probeinfo->dev = device; pr_probeinfo->irq_ch = irq_ch; pr_probeinfo->pci_device = pci_dev_get(pci_device); mmcount++; } else if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) { struct blogic_probeinfo *probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++]; probeinfo->adapter_type = BLOGIC_MULTIMASTER; probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; probeinfo->io_addr = io_addr; probeinfo->pci_addr = pci_addr; probeinfo->bus = bus; probeinfo->dev = device; probeinfo->irq_ch = irq_ch; probeinfo->pci_device = pci_dev_get(pci_device); nonpr_mmcount++; mmcount++; } else blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL); } /* If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." option is ON for the first enumerated MultiMaster Host Adapter, and if that host adapter is a BT-948/958/958D, then the MultiMaster BIOS will recognize MultiMaster Host Adapters in the order of increasing PCI Bus and Device Number. In that case, sort the probe information into the same order the BIOS uses. If this option is OFF, then the MultiMaster BIOS will recognize MultiMaster Host Adapters in the order they are enumerated by the PCI BIOS, and hence no sorting is necessary. */ if (force_scan_order) blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex], nonpr_mmcount); /* Iterate over the older non-compliant MultiMaster PCI Host Adapters, noting the PCI bus location and assigned IRQ Channel. 
*/ pci_device = NULL; while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, pci_device)) != NULL) { unsigned char bus; unsigned char device; unsigned int irq_ch; unsigned long io_addr; if (pci_enable_device(pci_device)) continue; if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32))) continue; bus = pci_device->bus->number; device = pci_device->devfn >> 3; irq_ch = pci_device->irq; io_addr = pci_resource_start(pci_device, 0); if (io_addr == 0 || irq_ch == 0) continue; for (i = 0; i < blogic_probeinfo_count; i++) { struct blogic_probeinfo *probeinfo = &blogic_probeinfo_list[i]; if (probeinfo->io_addr == io_addr && probeinfo->adapter_type == BLOGIC_MULTIMASTER) { probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; probeinfo->pci_addr = 0; probeinfo->bus = bus; probeinfo->dev = device; probeinfo->irq_ch = irq_ch; probeinfo->pci_device = pci_dev_get(pci_device); break; } } } return mmcount; } /* blogic_init_fp_probeinfo initializes the list of I/O Address and Bus Probe Information to be checked for potential BusLogic FlashPoint Host Adapters by interrogating the PCI Configuration Space. It returns the number of FlashPoint Host Adapters found. */ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter) { int fpindex = blogic_probeinfo_count, fpcount = 0; struct pci_dev *pci_device = NULL; /* Interrogate PCI Configuration Space for any FlashPoint Host Adapters. */ while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, pci_device)) != NULL) { unsigned char bus; unsigned char device; unsigned int irq_ch; unsigned long base_addr0; unsigned long base_addr1; unsigned long io_addr; unsigned long pci_addr; if (pci_enable_device(pci_device)) continue; if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32))) continue; bus = pci_device->bus->number; device = pci_device->devfn >> 3; irq_ch = pci_device->irq; io_addr = base_addr0 = pci_resource_start(pci_device, 0); pci_addr = base_addr1 = pci_resource_start(pci_device, 1); #ifdef CONFIG_SCSI_FLASHPOINT if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) { blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0); blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1); blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr); continue; } if (irq_ch == 0) { blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch); blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr); continue; } if (blogic_global_options.trace_probe) { blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL); blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr); } if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) { struct blogic_probeinfo *probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++]; probeinfo->adapter_type = BLOGIC_FLASHPOINT; probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; probeinfo->io_addr = io_addr; probeinfo->pci_addr = pci_addr; probeinfo->bus = bus; probeinfo->dev = device; probeinfo->irq_ch = irq_ch; probeinfo->pci_device = pci_dev_get(pci_device); fpcount++; } else blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL); #else 
blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device); blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch); blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL); #endif } /* The FlashPoint BIOS will scan for FlashPoint Host Adapters in the order of increasing PCI Bus and Device Number, so sort the probe information into the same order the BIOS uses. */ blogic_sort_probeinfo(&blogic_probeinfo_list[fpindex], fpcount); return fpcount; } /* blogic_init_probeinfo_list initializes the list of I/O Address and Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters by interrogating the PCI Configuration Space on PCI machines as well as from the list of standard BusLogic MultiMaster ISA I/O Addresses. By default, if both FlashPoint and PCI MultiMaster Host Adapters are present, this driver will probe for FlashPoint Host Adapters first unless the BIOS primary disk is controlled by the first PCI MultiMaster Host Adapter, in which case MultiMaster Host Adapters will be probed first. The BusLogic Driver Options specifications "MultiMasterFirst" and "FlashPointFirst" can be used to force a particular probe order. */ static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter) { /* If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint Host Adapters; otherwise, default to the standard ISA MultiMaster probe. */ if (!blogic_probe_options.noprobe_pci) { if (blogic_probe_options.multimaster_first) { blogic_init_mm_probeinfo(adapter); blogic_init_fp_probeinfo(adapter); } else if (blogic_probe_options.flashpoint_first) { blogic_init_fp_probeinfo(adapter); blogic_init_mm_probeinfo(adapter); } else { int fpcount = blogic_init_fp_probeinfo(adapter); int mmcount = blogic_init_mm_probeinfo(adapter); if (fpcount > 0 && mmcount > 0) { struct blogic_probeinfo *probeinfo = &blogic_probeinfo_list[fpcount]; struct blogic_adapter *myadapter = adapter; struct blogic_fetch_localram fetch_localram; struct blogic_bios_drvmap d0_mapbyte; while (probeinfo->adapter_bus_type != BLOGIC_PCI_BUS) probeinfo++; myadapter->io_addr = probeinfo->io_addr; fetch_localram.offset = BLOGIC_BIOS_BASE + BLOGIC_BIOS_DRVMAP; fetch_localram.count = sizeof(d0_mapbyte); blogic_cmd(myadapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram, sizeof(fetch_localram), &d0_mapbyte, sizeof(d0_mapbyte)); /* If the Map Byte for BIOS Drive 0 indicates that BIOS Drive 0 is controlled by this PCI MultiMaster Host Adapter, then reverse the probe order so that MultiMaster Host Adapters are probed before FlashPoint Host Adapters. */ if (d0_mapbyte.diskgeom != BLOGIC_BIOS_NODISK) { struct blogic_probeinfo saved_probeinfo[BLOGIC_MAX_ADAPTERS]; int mmcount = blogic_probeinfo_count - fpcount; memcpy(saved_probeinfo, blogic_probeinfo_list, blogic_probeinfo_count * sizeof(struct blogic_probeinfo)); memcpy(&blogic_probeinfo_list[0], &saved_probeinfo[fpcount], mmcount * sizeof(struct blogic_probeinfo)); memcpy(&blogic_probeinfo_list[mmcount], &saved_probeinfo[0], fpcount * sizeof(struct blogic_probeinfo)); } } } } } /* blogic_failure prints a standardized error message, and then returns false. 
*/ static bool blogic_failure(struct blogic_adapter *adapter, char *msg) { blogic_announce_drvr(adapter); if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) { blogic_err("While configuring BusLogic PCI Host Adapter at\n", adapter); blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr); } else blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr); blogic_err("%s FAILED - DETACHING\n", adapter, msg); if (blogic_cmd_failure_reason != NULL) blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter, blogic_cmd_failure_reason); return false; } /* blogic_probe probes for a BusLogic Host Adapter. */ static bool __init blogic_probe(struct blogic_adapter *adapter) { union blogic_stat_reg statusreg; union blogic_int_reg intreg; union blogic_geo_reg georeg; /* FlashPoint Host Adapters are Probed by the FlashPoint SCCB Manager. */ if (blogic_flashpoint_type(adapter)) { struct fpoint_info *fpinfo = &adapter->fpinfo; fpinfo->base_addr = (u32) adapter->io_addr; fpinfo->irq_ch = adapter->irq_ch; fpinfo->present = false; if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 && fpinfo->present)) { blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev); blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr); blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter); return false; } if (blogic_global_options.trace_probe) blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr); /* Indicate the Host Adapter Probe completed successfully. */ return true; } /* Read the Status, Interrupt, and Geometry Registers to test if there are I/O ports that respond, and to check the values to determine if they are from a BusLogic Host Adapter. A nonexistent I/O port will return 0xFF, in which case there is definitely no BusLogic Host Adapter at this base I/O Address. The test here is a subset of that used by the BusLogic Host Adapter BIOS. */ statusreg.all = blogic_rdstatus(adapter); intreg.all = blogic_rdint(adapter); georeg.all = blogic_rdgeom(adapter); if (blogic_global_options.trace_probe) blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all); if (statusreg.all == 0 || statusreg.sr.diag_active || statusreg.sr.cmd_param_busy || statusreg.sr.rsvd || statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0) return false; /* Check the undocumented Geometry Register to test if there is an I/O port that responded. Adaptec Host Adapters do not implement the Geometry Register, so this test helps serve to avoid incorrectly recognizing an Adaptec 1542A or 1542B as a BusLogic. Unfortunately, the Adaptec 1542C series does respond to the Geometry Register I/O port, but it will be rejected later when the Inquire Extended Setup Information command is issued in blogic_checkadapter. The AMI FastDisk Host Adapter is a BusLogic clone that implements the same interface as earlier BusLogic Host Adapters, including the undocumented commands, and is therefore supported by this driver. However, the AMI FastDisk always returns 0x00 upon reading the Geometry Register, so the extended translation option should always be left disabled on the AMI FastDisk. */ if (georeg.all == 0xFF) return false; /* Indicate the Host Adapter Probe completed successfully. 
*/ return true; } /* blogic_hwreset issues a Hardware Reset to the Host Adapter and waits for Host Adapter Diagnostics to complete. If hard_reset is true, a Hard Reset is performed which also initiates a SCSI Bus Reset. Otherwise, a Soft Reset is performed which only resets the Host Adapter without forcing a SCSI Bus Reset. */ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset) { union blogic_stat_reg statusreg; int timeout; /* FlashPoint Host Adapters are Hard Reset by the FlashPoint SCCB Manager. */ if (blogic_flashpoint_type(adapter)) { struct fpoint_info *fpinfo = &adapter->fpinfo; fpinfo->softreset = !hard_reset; fpinfo->report_underrun = true; adapter->cardhandle = FlashPoint_HardwareResetHostAdapter(fpinfo); if (adapter->cardhandle == (void *)FPOINT_BADCARD_HANDLE) return false; /* Indicate the Host Adapter Hard Reset completed successfully. */ return true; } /* Issue a Hard Reset or Soft Reset Command to the Host Adapter. The Host Adapter should respond by setting Diagnostic Active in the Status Register. */ if (hard_reset) blogic_hardreset(adapter); else blogic_softreset(adapter); /* Wait until Diagnostic Active is set in the Status Register. */ timeout = 5 * 10000; while (--timeout >= 0) { statusreg.all = blogic_rdstatus(adapter); if (statusreg.sr.diag_active) break; udelay(100); } if (blogic_global_options.trace_hw_reset) blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); if (timeout < 0) return false; /* Wait 100 microseconds to allow completion of any initial diagnostic activity which might leave the contents of the Status Register unpredictable. */ udelay(100); /* Wait until Diagnostic Active is reset in the Status Register. */ timeout = 10 * 10000; while (--timeout >= 0) { statusreg.all = blogic_rdstatus(adapter); if (!statusreg.sr.diag_active) break; udelay(100); } if (blogic_global_options.trace_hw_reset) blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); if (timeout < 0) return false; /* Wait until at least one of the Diagnostic Failure, Host Adapter Ready, or Data In Register Ready bits is set in the Status Register. */ timeout = 10000; while (--timeout >= 0) { statusreg.all = blogic_rdstatus(adapter); if (statusreg.sr.diag_failed || statusreg.sr.adapter_ready || statusreg.sr.datain_ready) break; udelay(100); } if (blogic_global_options.trace_hw_reset) blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); if (timeout < 0) return false; /* If Diagnostic Failure is set or Host Adapter Ready is reset, then an error occurred during the Host Adapter diagnostics. If Data In Register Ready is set, then there is an Error Code available. */ if (statusreg.sr.diag_failed || !statusreg.sr.adapter_ready) { blogic_cmd_failure_reason = NULL; blogic_failure(adapter, "HARD RESET DIAGNOSTICS"); blogic_err("HOST ADAPTER STATUS REGISTER = %02X\n", adapter, statusreg.all); if (statusreg.sr.datain_ready) blogic_err("HOST ADAPTER ERROR CODE = %d\n", adapter, blogic_rddatain(adapter)); return false; } /* Indicate the Host Adapter Hard Reset completed successfully. */ return true; } /* blogic_checkadapter checks to be sure this really is a BusLogic Host Adapter. 
*/ static bool __init blogic_checkadapter(struct blogic_adapter *adapter) { struct blogic_ext_setup ext_setupinfo; unsigned char req_replylen; bool result = true; /* FlashPoint Host Adapters do not require this protection. */ if (blogic_flashpoint_type(adapter)) return true; /* Issue the Inquire Extended Setup Information command. Only genuine BusLogic Host Adapters and true clones support this command. Adaptec 1542C series Host Adapters that respond to the Geometry Register I/O port will fail this command. */ req_replylen = sizeof(ext_setupinfo); if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen, sizeof(req_replylen), &ext_setupinfo, sizeof(ext_setupinfo)) != sizeof(ext_setupinfo)) result = false; /* Provide tracing information if requested and return. */ if (blogic_global_options.trace_probe) blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter, adapter->io_addr, (result ? "Found" : "Not Found")); return result; } /* blogic_rdconfig reads the Configuration Information from Host Adapter and initializes the Host Adapter structure. */ static bool __init blogic_rdconfig(struct blogic_adapter *adapter) { struct blogic_board_id id; struct blogic_config config; struct blogic_setup_info setupinfo; struct blogic_ext_setup ext_setupinfo; unsigned char model[5]; unsigned char fw_ver_digit3; unsigned char fw_ver_letter; struct blogic_adapter_info adapter_info; struct blogic_fetch_localram fetch_localram; struct blogic_autoscsi autoscsi; union blogic_geo_reg georeg; unsigned char req_replylen; unsigned char *tgt, ch; int tgt_id, i; /* Configuration Information for FlashPoint Host Adapters is provided in the fpoint_info structure by the FlashPoint SCCB Manager's Probe Function. Initialize fields in the Host Adapter structure from the fpoint_info structure. */ if (blogic_flashpoint_type(adapter)) { struct fpoint_info *fpinfo = &adapter->fpinfo; tgt = adapter->model; *tgt++ = 'B'; *tgt++ = 'T'; *tgt++ = '-'; for (i = 0; i < sizeof(fpinfo->model); i++) *tgt++ = fpinfo->model[i]; *tgt++ = '\0'; strcpy(adapter->fw_ver, FLASHPOINT_FW_VER); adapter->scsi_id = fpinfo->scsi_id; adapter->ext_trans_enable = fpinfo->ext_trans_enable; adapter->parity = fpinfo->parity; adapter->reset_enabled = !fpinfo->softreset; adapter->level_int = true; adapter->wide = fpinfo->wide; adapter->differential = false; adapter->scam = true; adapter->ultra = true; adapter->ext_lun = true; adapter->terminfo_valid = true; adapter->low_term = fpinfo->low_term; adapter->high_term = fpinfo->high_term; adapter->scam_enabled = fpinfo->scam_enabled; adapter->scam_lev2 = fpinfo->scam_lev2; adapter->drvr_sglimit = BLOGIC_SG_LIMIT; adapter->maxdev = (adapter->wide ? 16 : 8); adapter->maxlun = 32; adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE; adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE; adapter->drvr_qdepth = 255; adapter->adapter_qdepth = adapter->drvr_qdepth; adapter->sync_ok = fpinfo->sync_ok; adapter->fast_ok = fpinfo->fast_ok; adapter->ultra_ok = fpinfo->ultra_ok; adapter->wide_ok = fpinfo->wide_ok; adapter->discon_ok = fpinfo->discon_ok; adapter->tagq_ok = 0xFFFF; goto common; } /* Issue the Inquire Board ID command. */ if (blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, sizeof(id)) != sizeof(id)) return blogic_failure(adapter, "INQUIRE BOARD ID"); /* Issue the Inquire Configuration command. */ if (blogic_cmd(adapter, BLOGIC_INQ_CONFIG, NULL, 0, &config, sizeof(config)) != sizeof(config)) return blogic_failure(adapter, "INQUIRE CONFIGURATION"); /* Issue the Inquire Setup Information command. 
*/ req_replylen = sizeof(setupinfo); if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen, sizeof(req_replylen), &setupinfo, sizeof(setupinfo)) != sizeof(setupinfo)) return blogic_failure(adapter, "INQUIRE SETUP INFORMATION"); /* Issue the Inquire Extended Setup Information command. */ req_replylen = sizeof(ext_setupinfo); if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen, sizeof(req_replylen), &ext_setupinfo, sizeof(ext_setupinfo)) != sizeof(ext_setupinfo)) return blogic_failure(adapter, "INQUIRE EXTENDED SETUP INFORMATION"); /* Issue the Inquire Firmware Version 3rd Digit command. */ fw_ver_digit3 = '\0'; if (id.fw_ver_digit1 > '0') if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_D3, NULL, 0, &fw_ver_digit3, sizeof(fw_ver_digit3)) != sizeof(fw_ver_digit3)) return blogic_failure(adapter, "INQUIRE FIRMWARE 3RD DIGIT"); /* Issue the Inquire Host Adapter Model Number command. */ if (ext_setupinfo.bus_type == 'A' && id.fw_ver_digit1 == '2') /* BusLogic BT-542B ISA 2.xx */ strcpy(model, "542B"); else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '2' && (id.fw_ver_digit2 <= '1' || (id.fw_ver_digit2 == '2' && fw_ver_digit3 == '0'))) /* BusLogic BT-742A EISA 2.1x or 2.20 */ strcpy(model, "742A"); else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '0') /* AMI FastDisk EISA Series 441 0.x */ strcpy(model, "747A"); else { req_replylen = sizeof(model); if (blogic_cmd(adapter, BLOGIC_INQ_MODELNO, &req_replylen, sizeof(req_replylen), &model, sizeof(model)) != sizeof(model)) return blogic_failure(adapter, "INQUIRE HOST ADAPTER MODEL NUMBER"); } /* BusLogic MultiMaster Host Adapters can be identified by their model number and the major version number of their firmware as follows: 5.xx BusLogic "W" Series Host Adapters: BT-948/958/958D 4.xx BusLogic "C" Series Host Adapters: BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF 3.xx BusLogic "S" Series Host Adapters: BT-747S/747D/757S/757D/445S/545S/542D BT-542B/742A (revision H) 2.xx BusLogic "A" Series Host Adapters: BT-542B/742A (revision G and below) 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter */ /* Save the Model Name and Host Adapter Name in the Host Adapter structure. */ tgt = adapter->model; *tgt++ = 'B'; *tgt++ = 'T'; *tgt++ = '-'; for (i = 0; i < sizeof(model); i++) { ch = model[i]; if (ch == ' ' || ch == '\0') break; *tgt++ = ch; } *tgt++ = '\0'; /* Save the Firmware Version in the Host Adapter structure. */ tgt = adapter->fw_ver; *tgt++ = id.fw_ver_digit1; *tgt++ = '.'; *tgt++ = id.fw_ver_digit2; if (fw_ver_digit3 != ' ' && fw_ver_digit3 != '\0') *tgt++ = fw_ver_digit3; *tgt = '\0'; /* Issue the Inquire Firmware Version Letter command. */ if (strcmp(adapter->fw_ver, "3.3") >= 0) { if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_LETTER, NULL, 0, &fw_ver_letter, sizeof(fw_ver_letter)) != sizeof(fw_ver_letter)) return blogic_failure(adapter, "INQUIRE FIRMWARE VERSION LETTER"); if (fw_ver_letter != ' ' && fw_ver_letter != '\0') *tgt++ = fw_ver_letter; *tgt = '\0'; } /* Save the Host Adapter SCSI ID in the Host Adapter structure. */ adapter->scsi_id = config.id; /* Determine the Bus Type and save it in the Host Adapter structure, determine and save the IRQ Channel if necessary, and determine and save the DMA Channel for ISA Host Adapters. 
*/ adapter->adapter_bus_type = blogic_adater_bus_types[adapter->model[3] - '4']; if (adapter->irq_ch == 0) { if (config.irq_ch9) adapter->irq_ch = 9; else if (config.irq_ch10) adapter->irq_ch = 10; else if (config.irq_ch11) adapter->irq_ch = 11; else if (config.irq_ch12) adapter->irq_ch = 12; else if (config.irq_ch14) adapter->irq_ch = 14; else if (config.irq_ch15) adapter->irq_ch = 15; } /* Determine whether Extended Translation is enabled and save it in the Host Adapter structure. */ georeg.all = blogic_rdgeom(adapter); adapter->ext_trans_enable = georeg.gr.ext_trans_enable; /* Save the Scatter Gather Limits, Level Sensitive Interrupt flag, Wide SCSI flag, Differential SCSI flag, SCAM Supported flag, and Ultra SCSI flag in the Host Adapter structure. */ adapter->adapter_sglimit = ext_setupinfo.sg_limit; adapter->drvr_sglimit = adapter->adapter_sglimit; if (adapter->adapter_sglimit > BLOGIC_SG_LIMIT) adapter->drvr_sglimit = BLOGIC_SG_LIMIT; if (ext_setupinfo.misc.level_int) adapter->level_int = true; adapter->wide = ext_setupinfo.wide; adapter->differential = ext_setupinfo.differential; adapter->scam = ext_setupinfo.scam; adapter->ultra = ext_setupinfo.ultra; /* Determine whether Extended LUN Format CCBs are supported and save the information in the Host Adapter structure. */ if (adapter->fw_ver[0] == '5' || (adapter->fw_ver[0] == '4' && adapter->wide)) adapter->ext_lun = true; /* Issue the Inquire PCI Host Adapter Information command to read the Termination Information from "W" series MultiMaster Host Adapters. */ if (adapter->fw_ver[0] == '5') { if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, &adapter_info, sizeof(adapter_info)) != sizeof(adapter_info)) return blogic_failure(adapter, "INQUIRE PCI HOST ADAPTER INFORMATION"); /* Save the Termination Information in the Host Adapter structure. */ if (adapter_info.genericinfo_valid) { adapter->terminfo_valid = true; adapter->low_term = adapter_info.low_term; adapter->high_term = adapter_info.high_term; } } /* Issue the Fetch Host Adapter Local RAM command to read the AutoSCSI data from "W" and "C" series MultiMaster Host Adapters. */ if (adapter->fw_ver[0] >= '4') { fetch_localram.offset = BLOGIC_AUTOSCSI_BASE; fetch_localram.count = sizeof(autoscsi); if (blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram, sizeof(fetch_localram), &autoscsi, sizeof(autoscsi)) != sizeof(autoscsi)) return blogic_failure(adapter, "FETCH HOST ADAPTER LOCAL RAM"); /* Save the Parity Checking Enabled, Bus Reset Enabled, and Termination Information in the Host Adapter structure. */ adapter->parity = autoscsi.parity; adapter->reset_enabled = autoscsi.reset_enabled; if (adapter->fw_ver[0] == '4') { adapter->terminfo_valid = true; adapter->low_term = autoscsi.low_term; adapter->high_term = autoscsi.high_term; } /* Save the Wide Permitted, Fast Permitted, Synchronous Permitted, Disconnect Permitted, Ultra Permitted, and SCAM Information in the Host Adapter structure. */ adapter->wide_ok = autoscsi.wide_ok; adapter->fast_ok = autoscsi.fast_ok; adapter->sync_ok = autoscsi.sync_ok; adapter->discon_ok = autoscsi.discon_ok; if (adapter->ultra) adapter->ultra_ok = autoscsi.ultra_ok; if (adapter->scam) { adapter->scam_enabled = autoscsi.scam_enabled; adapter->scam_lev2 = autoscsi.scam_lev2; } } /* Initialize fields in the Host Adapter structure for "S" and "A" series MultiMaster Host Adapters. 
*/ if (adapter->fw_ver[0] < '4') { if (setupinfo.sync) { adapter->sync_ok = 0xFF; if (adapter->adapter_bus_type == BLOGIC_EISA_BUS) { if (ext_setupinfo.misc.fast_on_eisa) adapter->fast_ok = 0xFF; if (strcmp(adapter->model, "BT-757") == 0) adapter->wide_ok = 0xFF; } } adapter->discon_ok = 0xFF; adapter->parity = setupinfo.parity; adapter->reset_enabled = true; } /* Determine the maximum number of Target IDs and Logical Units supported by this driver for Wide and Narrow Host Adapters. */ adapter->maxdev = (adapter->wide ? 16 : 8); adapter->maxlun = (adapter->ext_lun ? 32 : 8); /* Select appropriate values for the Mailbox Count, Driver Queue Depth, Initial CCBs, and Incremental CCBs variables based on whether or not Strict Round Robin Mode is supported. If Strict Round Robin Mode is supported, then there is no performance degradation in using the maximum possible number of Outgoing and Incoming Mailboxes and allowing the Tagged and Untagged Queue Depths to determine the actual utilization. If Strict Round Robin Mode is not supported, then the Host Adapter must scan all the Outgoing Mailboxes whenever an Outgoing Mailbox entry is made, which can cause a substantial performance penalty. The host adapters actually have room to store the following number of CCBs internally; that is, they can internally queue and manage this many active commands on the SCSI bus simultaneously. Performance measurements demonstrate that the Driver Queue Depth should be set to the Mailbox Count, rather than the Host Adapter Queue Depth (internal CCB capacity), as it is more efficient to have the queued commands waiting in Outgoing Mailboxes if necessary than to block the process in the higher levels of the SCSI Subsystem. 192 BT-948/958/958D 100 BT-946C/956C/956CD/747C/757C/757CD/445C 50 BT-545C/540CF 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A */ if (adapter->fw_ver[0] == '5') adapter->adapter_qdepth = 192; else if (adapter->fw_ver[0] == '4') adapter->adapter_qdepth = 100; else adapter->adapter_qdepth = 30; if (strcmp(adapter->fw_ver, "3.31") >= 0) { adapter->strict_rr = true; adapter->mbox_count = BLOGIC_MAX_MAILBOX; } else { adapter->strict_rr = false; adapter->mbox_count = 32; } adapter->drvr_qdepth = adapter->mbox_count; adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE; adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE; /* Tagged Queuing support is available and operates properly on all "W" series MultiMaster Host Adapters, on "C" series MultiMaster Host Adapters with firmware version 4.22 and above, and on "S" series MultiMaster Host Adapters with firmware version 3.35 and above. */ adapter->tagq_ok = 0; switch (adapter->fw_ver[0]) { case '5': adapter->tagq_ok = 0xFFFF; break; case '4': if (strcmp(adapter->fw_ver, "4.22") >= 0) adapter->tagq_ok = 0xFFFF; break; case '3': if (strcmp(adapter->fw_ver, "3.35") >= 0) adapter->tagq_ok = 0xFFFF; break; } /* Determine the Host Adapter BIOS Address if the BIOS is enabled and save it in the Host Adapter structure. The BIOS is disabled if the bios_addr is 0. */ adapter->bios_addr = ext_setupinfo.bios_addr << 12; /* BusLogic BT-445S Host Adapters prior to board revision E have a hardware bug whereby when the BIOS is enabled, transfers to/from the same address range the BIOS occupies modulo 16MB are handled incorrectly. Only properly functioning BT-445S Host Adapters have firmware version 3.37. 
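For example, with the BIOS at 0xD8000, a transfer buffer whose physical address is 16 MB above it (0x10D8000) would fall into the affected range; rather than attempting to work around the aliasing, the check below simply refuses to attach a BT-445S with an enabled BIOS and firmware older than 3.37.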
*/ if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 && strcmp(adapter->fw_ver, "3.37") < 0) return blogic_failure(adapter, "Too old firmware"); /* Initialize parameters common to MultiMaster and FlashPoint Host Adapters. */ common: /* Initialize the Host Adapter Full Model Name from the Model Name. */ strcpy(adapter->full_model, "BusLogic "); strcat(adapter->full_model, adapter->model); /* Select an appropriate value for the Tagged Queue Depth either from a BusLogic Driver Options specification, or based on whether this Host Adapter requires that ISA Bounce Buffers be used. The Tagged Queue Depth is left at 0 for automatic determination in BusLogic_SelectQueueDepths. Initialize the Untagged Queue Depth. */ for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { unsigned char qdepth = 0; if (adapter->drvr_opts != NULL && adapter->drvr_opts->qdepth[tgt_id] > 0) qdepth = adapter->drvr_opts->qdepth[tgt_id]; adapter->qdepth[tgt_id] = qdepth; } adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH; if (adapter->drvr_opts != NULL) adapter->common_qdepth = adapter->drvr_opts->common_qdepth; if (adapter->common_qdepth > 0 && adapter->common_qdepth < adapter->untag_qdepth) adapter->untag_qdepth = adapter->common_qdepth; /* Tagged Queuing is only allowed if Disconnect/Reconnect is permitted. Therefore, mask the Tagged Queuing Permitted Default bits with the Disconnect/Reconnect Permitted bits. */ adapter->tagq_ok &= adapter->discon_ok; /* Combine the default Tagged Queuing Permitted bits with any BusLogic Driver Options Tagged Queuing specification. */ if (adapter->drvr_opts != NULL) adapter->tagq_ok = (adapter->drvr_opts->tagq_ok & adapter->drvr_opts->tagq_ok_mask) | (adapter->tagq_ok & ~adapter->drvr_opts->tagq_ok_mask); /* Select an appropriate value for Bus Settle Time either from a BusLogic Driver Options specification, or from BLOGIC_BUS_SETTLE_TIME. */ if (adapter->drvr_opts != NULL && adapter->drvr_opts->bus_settle_time > 0) adapter->bus_settle_time = adapter->drvr_opts->bus_settle_time; else adapter->bus_settle_time = BLOGIC_BUS_SETTLE_TIME; /* Indicate reading the Host Adapter Configuration completed successfully. */ return true; } /* blogic_reportconfig reports the configuration of Host Adapter. */ static bool __init blogic_reportconfig(struct blogic_adapter *adapter) { unsigned short alltgt_mask = (1 << adapter->maxdev) - 1; unsigned short sync_ok, fast_ok; unsigned short ultra_ok, wide_ok; unsigned short discon_ok, tagq_ok; bool common_syncneg, common_tagq_depth; char syncstr[BLOGIC_MAXDEV + 1]; char widestr[BLOGIC_MAXDEV + 1]; char discon_str[BLOGIC_MAXDEV + 1]; char tagq_str[BLOGIC_MAXDEV + 1]; char *syncmsg = syncstr; char *widemsg = widestr; char *discon_msg = discon_str; char *tagq_msg = tagq_str; int tgt_id; blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : "")); blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? 
"Level" : "Edge")); if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) { blogic_info(" DMA Channel: None, ", adapter); if (adapter->bios_addr > 0) blogic_info("BIOS Address: 0x%X, ", adapter, adapter->bios_addr); else blogic_info("BIOS Address: None, ", adapter); } else { blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter, adapter->bus, adapter->dev); if (adapter->pci_addr > 0) blogic_info("0x%lX, ", adapter, adapter->pci_addr); else blogic_info("Unassigned, ", adapter); } blogic_info("Host Adapter SCSI ID: %d\n", adapter, adapter->scsi_id); blogic_info(" Parity Checking: %s, Extended Translation: %s\n", adapter, (adapter->parity ? "Enabled" : "Disabled"), (adapter->ext_trans_enable ? "Enabled" : "Disabled")); alltgt_mask &= ~(1 << adapter->scsi_id); sync_ok = adapter->sync_ok & alltgt_mask; fast_ok = adapter->fast_ok & alltgt_mask; ultra_ok = adapter->ultra_ok & alltgt_mask; if ((blogic_multimaster_type(adapter) && (adapter->fw_ver[0] >= '4' || adapter->adapter_bus_type == BLOGIC_EISA_BUS)) || blogic_flashpoint_type(adapter)) { common_syncneg = false; if (sync_ok == 0) { syncmsg = "Disabled"; common_syncneg = true; } else if (sync_ok == alltgt_mask) { if (fast_ok == 0) { syncmsg = "Slow"; common_syncneg = true; } else if (fast_ok == alltgt_mask) { if (ultra_ok == 0) { syncmsg = "Fast"; common_syncneg = true; } else if (ultra_ok == alltgt_mask) { syncmsg = "Ultra"; common_syncneg = true; } } } if (!common_syncneg) { for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) syncstr[tgt_id] = ((!(sync_ok & (1 << tgt_id))) ? 'N' : (!(fast_ok & (1 << tgt_id)) ? 'S' : (!(ultra_ok & (1 << tgt_id)) ? 'F' : 'U'))); syncstr[adapter->scsi_id] = '#'; syncstr[adapter->maxdev] = '\0'; } } else syncmsg = (sync_ok == 0 ? "Disabled" : "Enabled"); wide_ok = adapter->wide_ok & alltgt_mask; if (wide_ok == 0) widemsg = "Disabled"; else if (wide_ok == alltgt_mask) widemsg = "Enabled"; else { for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) widestr[tgt_id] = ((wide_ok & (1 << tgt_id)) ? 'Y' : 'N'); widestr[adapter->scsi_id] = '#'; widestr[adapter->maxdev] = '\0'; } discon_ok = adapter->discon_ok & alltgt_mask; if (discon_ok == 0) discon_msg = "Disabled"; else if (discon_ok == alltgt_mask) discon_msg = "Enabled"; else { for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) discon_str[tgt_id] = ((discon_ok & (1 << tgt_id)) ? 'Y' : 'N'); discon_str[adapter->scsi_id] = '#'; discon_str[adapter->maxdev] = '\0'; } tagq_ok = adapter->tagq_ok & alltgt_mask; if (tagq_ok == 0) tagq_msg = "Disabled"; else if (tagq_ok == alltgt_mask) tagq_msg = "Enabled"; else { for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) tagq_str[tgt_id] = ((tagq_ok & (1 << tgt_id)) ? 
'Y' : 'N'); tagq_str[adapter->scsi_id] = '#'; tagq_str[adapter->maxdev] = '\0'; } blogic_info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n", adapter, syncmsg, widemsg); blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter, discon_msg, tagq_msg); if (blogic_multimaster_type(adapter)) { blogic_info(" Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count); blogic_info(" Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth); } else blogic_info(" Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit); blogic_info(" Tagged Queue Depth: ", adapter); common_tagq_depth = true; for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++) if (adapter->qdepth[tgt_id] != adapter->qdepth[0]) { common_tagq_depth = false; break; } if (common_tagq_depth) { if (adapter->qdepth[0] > 0) blogic_info("%d", adapter, adapter->qdepth[0]); else blogic_info("Automatic", adapter); } else blogic_info("Individual", adapter); blogic_info(", Untagged Queue Depth: %d\n", adapter, adapter->untag_qdepth); if (adapter->terminfo_valid) { if (adapter->wide) blogic_info(" SCSI Bus Termination: %s", adapter, (adapter->low_term ? (adapter->high_term ? "Both Enabled" : "Low Enabled") : (adapter->high_term ? "High Enabled" : "Both Disabled"))); else blogic_info(" SCSI Bus Termination: %s", adapter, (adapter->low_term ? "Enabled" : "Disabled")); if (adapter->scam) blogic_info(", SCAM: %s", adapter, (adapter->scam_enabled ? (adapter->scam_lev2 ? "Enabled, Level 2" : "Enabled, Level 1") : "Disabled")); blogic_info("\n", adapter); } /* Indicate reporting the Host Adapter configuration completed successfully. */ return true; } /* blogic_getres acquires the system resources necessary to use Host Adapter. */ static bool __init blogic_getres(struct blogic_adapter *adapter) { if (adapter->irq_ch == 0) { blogic_err("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n", adapter); return false; } /* Acquire shared access to the IRQ Channel. */ if (request_irq(adapter->irq_ch, blogic_inthandler, IRQF_SHARED, adapter->full_model, adapter) < 0) { blogic_err("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n", adapter, adapter->irq_ch); return false; } adapter->irq_acquired = true; /* Indicate the System Resource Acquisition completed successfully, */ return true; } /* blogic_relres releases any system resources previously acquired by blogic_getres. */ static void blogic_relres(struct blogic_adapter *adapter) { /* Release shared access to the IRQ Channel. */ if (adapter->irq_acquired) free_irq(adapter->irq_ch, adapter); /* Release any allocated memory structs not released elsewhere */ if (adapter->mbox_space) dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz, adapter->mbox_space, adapter->mbox_space_handle); pci_dev_put(adapter->pci_device); adapter->mbox_space = NULL; adapter->mbox_space_handle = 0; adapter->mbox_sz = 0; } /* blogic_initadapter initializes Host Adapter. This is the only function called during SCSI Host Adapter detection which modifies the state of the Host Adapter from its initial power on or hard reset state. */ static bool blogic_initadapter(struct blogic_adapter *adapter) { struct blogic_extmbox_req extmbox_req; enum blogic_rr_req rr_req; enum blogic_setccb_fmt setccb_fmt; int tgt_id; /* Initialize the pointers to the first and last CCBs that are queued for completion processing. 
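Note that the Outgoing and Incoming Mailboxes set up further below share a single coherent DMA allocation, with the mbox_count Outgoing Mailboxes followed immediately by the mbox_count Incoming Mailboxes, which is why first_inbox is computed as last_outbox + 1.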
*/ adapter->firstccb = NULL; adapter->lastccb = NULL; /* Initialize the Bus Device Reset Pending CCB, Tagged Queuing Active, Command Successful Flag, Active Commands, and Commands Since Reset for each Target Device. */ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) { adapter->bdr_pend[tgt_id] = NULL; adapter->tgt_flags[tgt_id].tagq_active = false; adapter->tgt_flags[tgt_id].cmd_good = false; adapter->active_cmds[tgt_id] = 0; adapter->cmds_since_rst[tgt_id] = 0; } /* FlashPoint Host Adapters do not use Outgoing and Incoming Mailboxes. */ if (blogic_flashpoint_type(adapter)) goto done; /* Initialize the Outgoing and Incoming Mailbox pointers. */ adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox)); adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev, adapter->mbox_sz, &adapter->mbox_space_handle, GFP_KERNEL); if (adapter->mbox_space == NULL) return blogic_failure(adapter, "MAILBOX ALLOCATION"); adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space; adapter->last_outbox = adapter->first_outbox + adapter->mbox_count - 1; adapter->next_outbox = adapter->first_outbox; adapter->first_inbox = (struct blogic_inbox *) (adapter->last_outbox + 1); adapter->last_inbox = adapter->first_inbox + adapter->mbox_count - 1; adapter->next_inbox = adapter->first_inbox; /* Initialize the Outgoing and Incoming Mailbox structures. */ memset(adapter->first_outbox, 0, adapter->mbox_count * sizeof(struct blogic_outbox)); memset(adapter->first_inbox, 0, adapter->mbox_count * sizeof(struct blogic_inbox)); /* Initialize the Host Adapter's Pointer to the Outgoing/Incoming Mailboxes. */ extmbox_req.mbox_count = adapter->mbox_count; extmbox_req.base_mbox_addr = (u32) adapter->mbox_space_handle; if (blogic_cmd(adapter, BLOGIC_INIT_EXT_MBOX, &extmbox_req, sizeof(extmbox_req), NULL, 0) < 0) return blogic_failure(adapter, "MAILBOX INITIALIZATION"); /* Enable Strict Round Robin Mode if supported by the Host Adapter. In Strict Round Robin Mode, the Host Adapter only looks at the next Outgoing Mailbox for each new command, rather than scanning through all the Outgoing Mailboxes to find any that have new commands in them. Strict Round Robin Mode is significantly more efficient. */ if (adapter->strict_rr) { rr_req = BLOGIC_STRICT_RR_MODE; if (blogic_cmd(adapter, BLOGIC_STRICT_RR, &rr_req, sizeof(rr_req), NULL, 0) < 0) return blogic_failure(adapter, "ENABLE STRICT ROUND ROBIN MODE"); } /* For Host Adapters that support Extended LUN Format CCBs, issue the Set CCB Format command to allow 32 Logical Units per Target Device. */ if (adapter->ext_lun) { setccb_fmt = BLOGIC_EXT_LUN_CCB; if (blogic_cmd(adapter, BLOGIC_SETCCB_FMT, &setccb_fmt, sizeof(setccb_fmt), NULL, 0) < 0) return blogic_failure(adapter, "SET CCB FORMAT"); } /* Announce Successful Initialization. */ done: if (!adapter->adapter_initd) { blogic_info("*** %s Initialized Successfully ***\n", adapter, adapter->full_model); blogic_info("\n", adapter); } else blogic_warn("*** %s Initialized Successfully ***\n", adapter, adapter->full_model); adapter->adapter_initd = true; /* Indicate the Host Adapter Initialization completed successfully. */ return true; } /* blogic_inquiry inquires about the Target Devices accessible through Host Adapter. 
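Before issuing any inquiry commands the function delays for the Bus Settle Time so that devices confused by the preceding SCSI Bus Reset have time to recover, and the inquiry can be suppressed entirely with the InhibitTargetInquiry driver option (stop_tgt_inquiry).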
*/ static bool __init blogic_inquiry(struct blogic_adapter *adapter) { u16 installed_devs; u8 installed_devs0to7[8]; struct blogic_setup_info setupinfo; u8 sync_period[BLOGIC_MAXDEV]; unsigned char req_replylen; int tgt_id; /* Wait a few seconds between the Host Adapter Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI Commands. Some SCSI devices get confused if they receive SCSI Commands too soon after a SCSI Bus Reset. */ blogic_delay(adapter->bus_settle_time); /* FlashPoint Host Adapters do not provide for Target Device Inquiry. */ if (blogic_flashpoint_type(adapter)) return true; /* Inhibit the Target Device Inquiry if requested. */ if (adapter->drvr_opts != NULL && adapter->drvr_opts->stop_tgt_inquiry) return true; /* Issue the Inquire Target Devices command for host adapters with firmware version 4.25 or later, or the Inquire Installed Devices ID 0 to 7 command for older host adapters. This is necessary to force Synchronous Transfer Negotiation so that the Inquire Setup Information and Inquire Synchronous Period commands will return valid data. The Inquire Target Devices command is preferable to Inquire Installed Devices ID 0 to 7 since it only probes Logical Unit 0 of each Target Device. */ if (strcmp(adapter->fw_ver, "4.25") >= 0) { /* Issue a Inquire Target Devices command. Inquire Target Devices only tests Logical Unit 0 of each Target Device unlike the Inquire Installed Devices commands which test Logical Units 0 - 7. Two bytes are returned, where byte 0 bit 0 set indicates that Target Device 0 exists, and so on. */ if (blogic_cmd(adapter, BLOGIC_INQ_DEV, NULL, 0, &installed_devs, sizeof(installed_devs)) != sizeof(installed_devs)) return blogic_failure(adapter, "INQUIRE TARGET DEVICES"); for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) adapter->tgt_flags[tgt_id].tgt_exists = (installed_devs & (1 << tgt_id) ? true : false); } else { /* Issue an Inquire Installed Devices command. For each Target Device, a byte is returned where bit 0 set indicates that Logical Unit 0 * exists, bit 1 set indicates that Logical Unit 1 exists, and so on. */ if (blogic_cmd(adapter, BLOGIC_INQ_DEV0TO7, NULL, 0, &installed_devs0to7, sizeof(installed_devs0to7)) != sizeof(installed_devs0to7)) return blogic_failure(adapter, "INQUIRE INSTALLED DEVICES ID 0 TO 7"); for (tgt_id = 0; tgt_id < 8; tgt_id++) adapter->tgt_flags[tgt_id].tgt_exists = installed_devs0to7[tgt_id] != 0; } /* Issue the Inquire Setup Information command. */ req_replylen = sizeof(setupinfo); if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen, sizeof(req_replylen), &setupinfo, sizeof(setupinfo)) != sizeof(setupinfo)) return blogic_failure(adapter, "INQUIRE SETUP INFORMATION"); for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) adapter->sync_offset[tgt_id] = (tgt_id < 8 ? setupinfo.sync0to7[tgt_id].offset : setupinfo.sync8to15[tgt_id - 8].offset); if (strcmp(adapter->fw_ver, "5.06L") >= 0) for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) adapter->tgt_flags[tgt_id].wide_active = (tgt_id < 8 ? (setupinfo.wide_tx_active0to7 & (1 << tgt_id) ? true : false) : (setupinfo.wide_tx_active8to15 & (1 << (tgt_id - 8)) ? true : false)); /* Issue the Inquire Synchronous Period command. */ if (adapter->fw_ver[0] >= '3') { /* Issue a Inquire Synchronous Period command. For each Target Device, a byte is returned which represents the Synchronous Transfer Period in units of 10 nanoseconds. 
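For example, a returned value of 25 corresponds to a 250 ns Synchronous Transfer Period. The fallback below for pre-3.xx firmware, 20 + 5 * tx_period, presumably expresses the same quantity in the same 10 ns units (a tx_period of 1 likewise giving 250 ns).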
*/ req_replylen = sizeof(sync_period); if (blogic_cmd(adapter, BLOGIC_INQ_SYNC_PERIOD, &req_replylen, sizeof(req_replylen), &sync_period, sizeof(sync_period)) != sizeof(sync_period)) return blogic_failure(adapter, "INQUIRE SYNCHRONOUS PERIOD"); for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) adapter->sync_period[tgt_id] = sync_period[tgt_id]; } else for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) if (setupinfo.sync0to7[tgt_id].offset > 0) adapter->sync_period[tgt_id] = 20 + 5 * setupinfo.sync0to7[tgt_id].tx_period; /* Indicate the Target Device Inquiry completed successfully. */ return true; } /* blogic_inithoststruct initializes the fields in the SCSI Host structure. The base, io_port, n_io_ports, irq, and dma_channel fields in the SCSI Host structure are intentionally left uninitialized, as this driver handles acquisition and release of these resources explicitly, as well as ensuring exclusive access to the Host Adapter hardware and data structures through explicit acquisition and release of the Host Adapter's Lock. */ static void __init blogic_inithoststruct(struct blogic_adapter *adapter, struct Scsi_Host *host) { host->max_id = adapter->maxdev; host->max_lun = adapter->maxlun; host->max_channel = 0; host->unique_id = adapter->io_addr; host->this_id = adapter->scsi_id; host->can_queue = adapter->drvr_qdepth; host->sg_tablesize = adapter->drvr_sglimit; host->cmd_per_lun = adapter->untag_qdepth; } /* blogic_slaveconfig will actually set the queue depth on individual scsi devices as they are permanently added to the device chain. We shamelessly rip off the SelectQueueDepths code to make this work mostly like it used to. Since we don't get called once at the end of the scan but instead get called for each device, we have to do things a bit differently. */ static int blogic_slaveconfig(struct scsi_device *dev) { struct blogic_adapter *adapter = (struct blogic_adapter *) dev->host->hostdata; int tgt_id = dev->id; int qdepth = adapter->qdepth[tgt_id]; if (adapter->tgt_flags[tgt_id].tagq_ok && (adapter->tagq_ok & (1 << tgt_id))) { if (qdepth == 0) qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH; adapter->qdepth[tgt_id] = qdepth; scsi_change_queue_depth(dev, qdepth); } else { adapter->tagq_ok &= ~(1 << tgt_id); qdepth = adapter->untag_qdepth; adapter->qdepth[tgt_id] = qdepth; scsi_change_queue_depth(dev, qdepth); } qdepth = 0; for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) if (adapter->tgt_flags[tgt_id].tgt_exists) qdepth += adapter->qdepth[tgt_id]; if (qdepth > adapter->alloc_ccbs) blogic_create_addlccbs(adapter, qdepth - adapter->alloc_ccbs, false); return 0; } /* blogic_init probes for BusLogic Host Adapters at the standard I/O Addresses where they may be located, initializing, registering, and reporting the configuration of each BusLogic Host Adapter it finds. It returns the number of BusLogic Host Adapters successfully initialized and registered. 
*/ static int __init blogic_init(void) { int drvr_optindex = 0, probeindex; struct blogic_adapter *adapter; int ret = 0; #ifdef MODULE if (BusLogic) blogic_setup(BusLogic); #endif if (blogic_probe_options.noprobe) return -ENODEV; blogic_probeinfo_list = kcalloc(BLOGIC_MAX_ADAPTERS, sizeof(struct blogic_probeinfo), GFP_KERNEL); if (blogic_probeinfo_list == NULL) { blogic_err("BusLogic: Unable to allocate Probe Info List\n", NULL); return -ENOMEM; } adapter = kzalloc(sizeof(struct blogic_adapter), GFP_KERNEL); if (adapter == NULL) { kfree(blogic_probeinfo_list); blogic_err("BusLogic: Unable to allocate Prototype Host Adapter\n", NULL); return -ENOMEM; } #ifdef MODULE if (BusLogic != NULL) blogic_setup(BusLogic); #endif blogic_init_probeinfo_list(adapter); for (probeindex = 0; probeindex < blogic_probeinfo_count; probeindex++) { struct blogic_probeinfo *probeinfo = &blogic_probeinfo_list[probeindex]; struct blogic_adapter *myadapter = adapter; struct Scsi_Host *host; if (probeinfo->io_addr == 0) continue; memset(myadapter, 0, sizeof(struct blogic_adapter)); myadapter->adapter_type = probeinfo->adapter_type; myadapter->adapter_bus_type = probeinfo->adapter_bus_type; myadapter->io_addr = probeinfo->io_addr; myadapter->pci_addr = probeinfo->pci_addr; myadapter->bus = probeinfo->bus; myadapter->dev = probeinfo->dev; myadapter->pci_device = probeinfo->pci_device; myadapter->irq_ch = probeinfo->irq_ch; myadapter->addr_count = blogic_adapter_addr_count[myadapter->adapter_type]; /* Make sure region is free prior to probing. */ if (!request_region(myadapter->io_addr, myadapter->addr_count, "BusLogic")) continue; /* Probe the Host Adapter. If unsuccessful, abort further initialization. */ if (!blogic_probe(myadapter)) { release_region(myadapter->io_addr, myadapter->addr_count); continue; } /* Hard Reset the Host Adapter. If unsuccessful, abort further initialization. */ if (!blogic_hwreset(myadapter, true)) { release_region(myadapter->io_addr, myadapter->addr_count); continue; } /* Check the Host Adapter. If unsuccessful, abort further initialization. */ if (!blogic_checkadapter(myadapter)) { release_region(myadapter->io_addr, myadapter->addr_count); continue; } /* Initialize the Driver Options field if provided. */ if (drvr_optindex < blogic_drvr_options_count) myadapter->drvr_opts = &blogic_drvr_options[drvr_optindex++]; /* Announce the Driver Version and Date, Author's Name, Copyright Notice, and Electronic Mail Address. */ blogic_announce_drvr(myadapter); /* Register the SCSI Host structure. */ host = scsi_host_alloc(&blogic_template, sizeof(struct blogic_adapter)); if (host == NULL) { release_region(myadapter->io_addr, myadapter->addr_count); continue; } myadapter = (struct blogic_adapter *) host->hostdata; memcpy(myadapter, adapter, sizeof(struct blogic_adapter)); myadapter->scsi_host = host; myadapter->host_no = host->host_no; /* Add Host Adapter to the end of the list of registered BusLogic Host Adapters. */ list_add_tail(&myadapter->host_list, &blogic_host_list); /* Read the Host Adapter Configuration, Configure the Host Adapter, Acquire the System Resources necessary to use the Host Adapter, then Create the Initial CCBs, Initialize the Host Adapter, and finally perform Target Device Inquiry. From this point onward, any failure will be assumed to be due to a problem with the Host Adapter, rather than due to having mistakenly identified this port as belonging to a BusLogic Host Adapter. 
The I/O Address range will not be released, thereby preventing it from being incorrectly identified as any other type of Host Adapter. */ if (blogic_rdconfig(myadapter) && blogic_reportconfig(myadapter) && blogic_getres(myadapter) && blogic_create_initccbs(myadapter) && blogic_initadapter(myadapter) && blogic_inquiry(myadapter)) { /* Initialization has been completed successfully. Release and re-register usage of the I/O Address range so that the Model Name of the Host Adapter will appear, and initialize the SCSI Host structure. */ release_region(myadapter->io_addr, myadapter->addr_count); if (!request_region(myadapter->io_addr, myadapter->addr_count, myadapter->full_model)) { printk(KERN_WARNING "BusLogic: Release and re-register of " "port 0x%04lx failed \n", (unsigned long)myadapter->io_addr); blogic_destroy_ccbs(myadapter); blogic_relres(myadapter); list_del(&myadapter->host_list); scsi_host_put(host); ret = -ENOMEM; } else { blogic_inithoststruct(myadapter, host); if (scsi_add_host(host, myadapter->pci_device ? &myadapter->pci_device->dev : NULL)) { printk(KERN_WARNING "BusLogic: scsi_add_host()" "failed!\n"); blogic_destroy_ccbs(myadapter); blogic_relres(myadapter); list_del(&myadapter->host_list); scsi_host_put(host); ret = -ENODEV; } else scsi_scan_host(host); } } else { /* An error occurred during Host Adapter Configuration Querying, Host Adapter Configuration, Resource Acquisition, CCB Creation, Host Adapter Initialization, or Target Device Inquiry, so remove Host Adapter from the list of registered BusLogic Host Adapters, destroy the CCBs, Release the System Resources, and Unregister the SCSI Host. */ blogic_destroy_ccbs(myadapter); blogic_relres(myadapter); list_del(&myadapter->host_list); scsi_host_put(host); ret = -ENODEV; } } kfree(adapter); kfree(blogic_probeinfo_list); blogic_probeinfo_list = NULL; return ret; } /* blogic_deladapter releases all resources previously acquired to support a specific Host Adapter, including the I/O Address range, and unregisters the BusLogic Host Adapter. */ static int __exit blogic_deladapter(struct blogic_adapter *adapter) { struct Scsi_Host *host = adapter->scsi_host; scsi_remove_host(host); /* FlashPoint Host Adapters must first be released by the FlashPoint SCCB Manager. */ if (blogic_flashpoint_type(adapter)) FlashPoint_ReleaseHostAdapter(adapter->cardhandle); /* Destroy the CCBs and release any system resources acquired to support Host Adapter. */ blogic_destroy_ccbs(adapter); blogic_relres(adapter); /* Release usage of the I/O Address range. */ release_region(adapter->io_addr, adapter->addr_count); /* Remove Host Adapter from the list of registered BusLogic Host Adapters. */ list_del(&adapter->host_list); scsi_host_put(host); return 0; } /* blogic_qcompleted_ccb queues CCB for completion processing. */ static void blogic_qcompleted_ccb(struct blogic_ccb *ccb) { struct blogic_adapter *adapter = ccb->adapter; ccb->status = BLOGIC_CCB_COMPLETE; ccb->next = NULL; if (adapter->firstccb == NULL) { adapter->firstccb = ccb; adapter->lastccb = ccb; } else { adapter->lastccb->next = ccb; adapter->lastccb = ccb; } adapter->active_cmds[ccb->tgt_id]--; } /* blogic_resultcode computes a SCSI Subsystem Result Code from the Host Adapter Status and Target Device Status. 
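The Host Adapter Status is translated into a SCSI midlayer host byte (DID_OK, DID_TIME_OUT, DID_RESET, or DID_ERROR) which is packed into the upper 16 bits of the result, with the Target Device Status in the low bits; for example, a Selection Timeout yields DID_TIME_OUT << 16.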
*/ static int blogic_resultcode(struct blogic_adapter *adapter, enum blogic_adapter_status adapter_status, enum blogic_tgt_status tgt_status) { int hoststatus; switch (adapter_status) { case BLOGIC_CMD_CMPLT_NORMAL: case BLOGIC_LINK_CMD_CMPLT: case BLOGIC_LINK_CMD_CMPLT_FLAG: hoststatus = DID_OK; break; case BLOGIC_SELECT_TIMEOUT: hoststatus = DID_TIME_OUT; break; case BLOGIC_INVALID_OUTBOX_CODE: case BLOGIC_INVALID_CMD_CODE: case BLOGIC_BAD_CMD_PARAM: blogic_warn("BusLogic Driver Protocol Error 0x%02X\n", adapter, adapter_status); fallthrough; case BLOGIC_DATA_UNDERRUN: case BLOGIC_DATA_OVERRUN: case BLOGIC_NOEXPECT_BUSFREE: case BLOGIC_LINKCCB_BADLUN: case BLOGIC_AUTOREQSENSE_FAIL: case BLOGIC_TAGQUEUE_REJECT: case BLOGIC_BAD_MSG_RCVD: case BLOGIC_HW_FAIL: case BLOGIC_BAD_RECONNECT: case BLOGIC_ABRT_QUEUE: case BLOGIC_ADAPTER_SW_ERROR: case BLOGIC_HW_TIMEOUT: case BLOGIC_PARITY_ERR: hoststatus = DID_ERROR; break; case BLOGIC_INVALID_BUSPHASE: case BLOGIC_NORESPONSE_TO_ATN: case BLOGIC_HW_RESET: case BLOGIC_RST_FROM_OTHERDEV: case BLOGIC_HW_BDR: hoststatus = DID_RESET; break; default: blogic_warn("Unknown Host Adapter Status 0x%02X\n", adapter, adapter_status); hoststatus = DID_ERROR; break; } return (hoststatus << 16) | tgt_status; } /* * turn the dma address from an inbox into a ccb pointer * This is rather inefficient. */ static struct blogic_ccb * blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox) { struct blogic_ccb *ccb; for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all) if (inbox->ccb == ccb->dma_handle) break; return ccb; } /* blogic_scan_inbox scans the Incoming Mailboxes saving any Incoming Mailbox entries for completion processing. */ static void blogic_scan_inbox(struct blogic_adapter *adapter) { /* Scan through the Incoming Mailboxes in Strict Round Robin fashion, saving any completed CCBs for further processing. It is essential that for each CCB and SCSI Command issued, command completion processing is performed exactly once. Therefore, only Incoming Mailboxes with completion code Command Completed Without Error, Command Completed With Error, or Command Aborted At Host Request are saved for completion processing. When an Incoming Mailbox has a completion code of Aborted Command Not Found, the CCB had already completed or been aborted before the current Abort request was processed, and so completion processing has already occurred and no further action should be taken. */ struct blogic_inbox *next_inbox = adapter->next_inbox; enum blogic_cmplt_code comp_code; while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) { struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox); if (!ccb) { /* * This should never happen, unless the CCB list is * corrupted in memory. */ blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb); } else if (comp_code != BLOGIC_CMD_NOTFOUND) { if (ccb->status == BLOGIC_CCB_ACTIVE || ccb->status == BLOGIC_CCB_RESET) { /* Save the Completion Code for this CCB and queue the CCB for completion processing. */ ccb->comp_code = comp_code; blogic_qcompleted_ccb(ccb); } else { /* If a CCB ever appears in an Incoming Mailbox and is not marked as status Active or Reset, then there is most likely a bug in the Host Adapter firmware. 
*/ blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status); } } next_inbox->comp_code = BLOGIC_INBOX_FREE; if (++next_inbox > adapter->last_inbox) next_inbox = adapter->first_inbox; } adapter->next_inbox = next_inbox; } /* blogic_process_ccbs iterates over the completed CCBs for Host Adapter setting the SCSI Command Result Codes, deallocating the CCBs, and calling the SCSI Subsystem Completion Routines. The Host Adapter's Lock should already have been acquired by the caller. */ static void blogic_process_ccbs(struct blogic_adapter *adapter) { if (adapter->processing_ccbs) return; adapter->processing_ccbs = true; while (adapter->firstccb != NULL) { struct blogic_ccb *ccb = adapter->firstccb; struct scsi_cmnd *command = ccb->command; adapter->firstccb = ccb->next; if (adapter->firstccb == NULL) adapter->lastccb = NULL; /* Process the Completed CCB. */ if (ccb->opcode == BLOGIC_BDR) { int tgt_id = ccb->tgt_id; blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id); blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done); adapter->tgt_flags[tgt_id].tagq_active = false; adapter->cmds_since_rst[tgt_id] = 0; adapter->last_resetdone[tgt_id] = jiffies; /* Place CCB back on the Host Adapter's free list. */ blogic_dealloc_ccb(ccb, 1); #if 0 /* this needs to be redone different for new EH */ /* Bus Device Reset CCBs have the command field non-NULL only when a Bus Device Reset was requested for a command that did not have a currently active CCB in the Host Adapter (i.e., a Synchronous Bus Device Reset), and hence would not have its Completion Routine called otherwise. */ while (command != NULL) { struct scsi_cmnd *nxt_cmd = command->reset_chain; command->reset_chain = NULL; command->result = DID_RESET << 16; scsi_done(command); command = nxt_cmd; } #endif /* Iterate over the CCBs for this Host Adapter performing completion processing for any CCBs marked as Reset for this Target. */ for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) if (ccb->status == BLOGIC_CCB_RESET && ccb->tgt_id == tgt_id) { command = ccb->command; blogic_dealloc_ccb(ccb, 1); adapter->active_cmds[tgt_id]--; command->result = DID_RESET << 16; scsi_done(command); } adapter->bdr_pend[tgt_id] = NULL; } else { /* Translate the Completion Code, Host Adapter Status, and Target Device Status into a SCSI Subsystem Result Code. 
*/ switch (ccb->comp_code) { case BLOGIC_INBOX_FREE: case BLOGIC_CMD_NOTFOUND: case BLOGIC_INVALID_CCB: blogic_warn("CCB #%ld to Target %d Impossible State\n", adapter, ccb->serial, ccb->tgt_id); break; case BLOGIC_CMD_COMPLETE_GOOD: adapter->tgt_stats[ccb->tgt_id] .cmds_complete++; adapter->tgt_flags[ccb->tgt_id] .cmd_good = true; command->result = DID_OK << 16; break; case BLOGIC_CMD_ABORT_BY_HOST: blogic_warn("CCB #%ld to Target %d Aborted\n", adapter, ccb->serial, ccb->tgt_id); blogic_inc_count(&adapter->tgt_stats[ccb->tgt_id].aborts_done); command->result = DID_ABORT << 16; break; case BLOGIC_CMD_COMPLETE_ERROR: command->result = blogic_resultcode(adapter, ccb->adapter_status, ccb->tgt_status); if (ccb->adapter_status != BLOGIC_SELECT_TIMEOUT) { adapter->tgt_stats[ccb->tgt_id] .cmds_complete++; if (blogic_global_options.trace_err) { int i; blogic_notice("CCB #%ld Target %d: Result %X Host " "Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status); blogic_notice("CDB ", adapter); for (i = 0; i < ccb->cdblen; i++) blogic_notice(" %02X", adapter, ccb->cdb[i]); blogic_notice("\n", adapter); blogic_notice("Sense ", adapter); for (i = 0; i < ccb->sense_datalen; i++) blogic_notice(" %02X", adapter, command->sense_buffer[i]); blogic_notice("\n", adapter); } } break; } /* When an INQUIRY command completes normally, save the CmdQue (Tagged Queuing Supported) and WBus16 (16 Bit Wide Data Transfers Supported) bits. */ if (ccb->cdb[0] == INQUIRY && ccb->cdb[1] == 0 && ccb->adapter_status == BLOGIC_CMD_CMPLT_NORMAL) { struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[ccb->tgt_id]; struct scsi_inquiry *inquiry = (struct scsi_inquiry *) scsi_sglist(command); tgt_flags->tgt_exists = true; tgt_flags->tagq_ok = inquiry->CmdQue; tgt_flags->wide_ok = inquiry->WBus16; } /* Place CCB back on the Host Adapter's free list. */ blogic_dealloc_ccb(ccb, 1); /* Call the SCSI Command Completion Routine. */ scsi_done(command); } } adapter->processing_ccbs = false; } /* blogic_inthandler handles hardware interrupts from BusLogic Host Adapters. */ static irqreturn_t blogic_inthandler(int irq_ch, void *devid) { struct blogic_adapter *adapter = (struct blogic_adapter *) devid; unsigned long processor_flag; /* Acquire exclusive access to Host Adapter. */ spin_lock_irqsave(adapter->scsi_host->host_lock, processor_flag); /* Handle Interrupts appropriately for each Host Adapter type. */ if (blogic_multimaster_type(adapter)) { union blogic_int_reg intreg; /* Read the Host Adapter Interrupt Register. */ intreg.all = blogic_rdint(adapter); if (intreg.ir.int_valid) { /* Acknowledge the interrupt and reset the Host Adapter Interrupt Register. */ blogic_intreset(adapter); /* Process valid External SCSI Bus Reset and Incoming Mailbox Loaded Interrupts. Command Complete Interrupts are noted, and Outgoing Mailbox Available Interrupts are ignored, as they are never enabled. */ if (intreg.ir.ext_busreset) adapter->adapter_extreset = true; else if (intreg.ir.mailin_loaded) blogic_scan_inbox(adapter); else if (intreg.ir.cmd_complete) adapter->adapter_cmd_complete = true; } } else { /* Check if there is a pending interrupt for this Host Adapter. 
*/ if (FlashPoint_InterruptPending(adapter->cardhandle)) switch (FlashPoint_HandleInterrupt(adapter->cardhandle)) { case FPOINT_NORMAL_INT: break; case FPOINT_EXT_RESET: adapter->adapter_extreset = true; break; case FPOINT_INTERN_ERR: blogic_warn("Internal FlashPoint Error detected - Resetting Host Adapter\n", adapter); adapter->adapter_intern_err = true; break; } } /* Process any completed CCBs. */ if (adapter->firstccb != NULL) blogic_process_ccbs(adapter); /* Reset the Host Adapter if requested. */ if (adapter->adapter_extreset) { blogic_warn("Resetting %s due to External SCSI Bus Reset\n", adapter, adapter->full_model); blogic_inc_count(&adapter->ext_resets); blogic_resetadapter(adapter, false); adapter->adapter_extreset = false; } else if (adapter->adapter_intern_err) { blogic_warn("Resetting %s due to Host Adapter Internal Error\n", adapter, adapter->full_model); blogic_inc_count(&adapter->adapter_intern_errors); blogic_resetadapter(adapter, true); adapter->adapter_intern_err = false; } /* Release exclusive access to Host Adapter. */ spin_unlock_irqrestore(adapter->scsi_host->host_lock, processor_flag); return IRQ_HANDLED; } /* blogic_write_outbox places CCB and Action Code into an Outgoing Mailbox for execution by Host Adapter. The Host Adapter's Lock should already have been acquired by the caller. */ static bool blogic_write_outbox(struct blogic_adapter *adapter, enum blogic_action action, struct blogic_ccb *ccb) { struct blogic_outbox *next_outbox; next_outbox = adapter->next_outbox; if (next_outbox->action == BLOGIC_OUTBOX_FREE) { ccb->status = BLOGIC_CCB_ACTIVE; /* The CCB field must be written before the Action Code field since the Host Adapter is operating asynchronously and the locking code does not protect against simultaneous access by the Host Adapter. */ next_outbox->ccb = ccb->dma_handle; next_outbox->action = action; blogic_execmbox(adapter); if (++next_outbox > adapter->last_outbox) next_outbox = adapter->first_outbox; adapter->next_outbox = next_outbox; if (action == BLOGIC_MBOX_START) { adapter->active_cmds[ccb->tgt_id]++; if (ccb->opcode != BLOGIC_BDR) adapter->tgt_stats[ccb->tgt_id].cmds_tried++; } return true; } return false; } /* Error Handling (EH) support */ static int blogic_hostreset(struct scsi_cmnd *SCpnt) { struct blogic_adapter *adapter = (struct blogic_adapter *) SCpnt->device->host->hostdata; unsigned int id = SCpnt->device->id; struct blogic_tgt_stats *stats = &adapter->tgt_stats[id]; int rc; spin_lock_irq(SCpnt->device->host->host_lock); blogic_inc_count(&stats->adapter_reset_req); rc = blogic_resetadapter(adapter, false); spin_unlock_irq(SCpnt->device->host->host_lock); return rc; } /* blogic_qcmd creates a CCB for Command and places it into an Outgoing Mailbox for execution by the associated Host Adapter. 
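For MultiMaster Host Adapters the CCB is handed over through the next free Outgoing Mailbox, falling back to a one second delay and a single retry if none is free, whereas FlashPoint Host Adapters bypass the mailbox scheme entirely and the CCB is passed to the FlashPoint SCCB Manager via FlashPoint_StartCCB.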
*/ static int blogic_qcmd_lck(struct scsi_cmnd *command) { void (*comp_cb)(struct scsi_cmnd *) = scsi_done; struct blogic_adapter *adapter = (struct blogic_adapter *) command->device->host->hostdata; struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[command->device->id]; struct blogic_tgt_stats *tgt_stats = adapter->tgt_stats; unsigned char *cdb = command->cmnd; int cdblen = command->cmd_len; int tgt_id = command->device->id; int lun = command->device->lun; int buflen = scsi_bufflen(command); int count; struct blogic_ccb *ccb; dma_addr_t sense_buf; /* SCSI REQUEST_SENSE commands will be executed automatically by the Host Adapter for any errors, so they should not be executed explicitly unless the Sense Data is zero indicating that no error occurred. */ if (cdb[0] == REQUEST_SENSE && command->sense_buffer[0] != 0) { command->result = DID_OK << 16; comp_cb(command); return 0; } /* Allocate a CCB from the Host Adapter's free list. In the unlikely event that there are none available and memory allocation fails, wait 1 second and try again. If that fails, the Host Adapter is probably hung so signal an error as a Host Adapter Hard Reset should be initiated soon. */ ccb = blogic_alloc_ccb(adapter); if (ccb == NULL) { spin_unlock_irq(adapter->scsi_host->host_lock); blogic_delay(1); spin_lock_irq(adapter->scsi_host->host_lock); ccb = blogic_alloc_ccb(adapter); if (ccb == NULL) { command->result = DID_ERROR << 16; comp_cb(command); return 0; } } /* Initialize the fields in the BusLogic Command Control Block (CCB). */ count = scsi_dma_map(command); BUG_ON(count < 0); if (count) { struct scatterlist *sg; int i; ccb->opcode = BLOGIC_INITIATOR_CCB_SG; ccb->datalen = count * sizeof(struct blogic_sg_seg); if (blogic_multimaster_type(adapter)) ccb->data = (unsigned int) ccb->dma_handle + ((unsigned long) &ccb->sglist - (unsigned long) ccb); else ccb->data = virt_to_32bit_virt(ccb->sglist); scsi_for_each_sg(command, sg, count, i) { ccb->sglist[i].segbytes = sg_dma_len(sg); ccb->sglist[i].segdata = sg_dma_address(sg); } } else if (!count) { ccb->opcode = BLOGIC_INITIATOR_CCB; ccb->datalen = buflen; ccb->data = 0; } switch (cdb[0]) { case READ_6: case READ_10: ccb->datadir = BLOGIC_DATAIN_CHECKED; tgt_stats[tgt_id].read_cmds++; blogic_addcount(&tgt_stats[tgt_id].bytesread, buflen); blogic_incszbucket(tgt_stats[tgt_id].read_sz_buckets, buflen); break; case WRITE_6: case WRITE_10: ccb->datadir = BLOGIC_DATAOUT_CHECKED; tgt_stats[tgt_id].write_cmds++; blogic_addcount(&tgt_stats[tgt_id].byteswritten, buflen); blogic_incszbucket(tgt_stats[tgt_id].write_sz_buckets, buflen); break; default: ccb->datadir = BLOGIC_UNCHECKED_TX; break; } ccb->cdblen = cdblen; ccb->adapter_status = 0; ccb->tgt_status = 0; ccb->tgt_id = tgt_id; ccb->lun = lun; ccb->tag_enable = false; ccb->legacytag_enable = false; /* BusLogic recommends that after a Reset the first couple of commands that are sent to a Target Device be sent in a non Tagged Queue fashion so that the Host Adapter and Target Device can establish Synchronous and Wide Transfer before Queue Tag messages can interfere with the Synchronous and Wide Negotiation messages. By waiting to enable Tagged Queuing until after the first BLOGIC_MAX_TAG_DEPTH commands have been queued, it is assured that after a Reset any pending commands are requeued before Tagged Queuing is enabled and that the Tagged Queuing message will not occur while the partition table is being printed. 
In addition, some devices do not properly handle the transition from non-tagged to tagged commands, so it is necessary to wait until there are no pending commands for a target device before queuing tagged commands. */ if (adapter->cmds_since_rst[tgt_id]++ >= BLOGIC_MAX_TAG_DEPTH && !tgt_flags->tagq_active && adapter->active_cmds[tgt_id] == 0 && tgt_flags->tagq_ok && (adapter->tagq_ok & (1 << tgt_id))) { tgt_flags->tagq_active = true; blogic_notice("Tagged Queuing now active for Target %d\n", adapter, tgt_id); } if (tgt_flags->tagq_active) { enum blogic_queuetag queuetag = BLOGIC_SIMPLETAG; /* When using Tagged Queuing with Simple Queue Tags, it appears that disk drive controllers do not guarantee that a queued command will not remain in a disconnected state indefinitely if commands that read or write nearer the head position continue to arrive without interruption. Therefore, for each Target Device this driver keeps track of the last time either the queue was empty or an Ordered Queue Tag was issued. If more than 4 seconds (one fifth of the 20 second disk timeout) have elapsed since this last sequence point, this command will be issued with an Ordered Queue Tag rather than a Simple Queue Tag, which forces the Target Device to complete all previously queued commands before this command may be executed. */ if (adapter->active_cmds[tgt_id] == 0) adapter->last_seqpoint[tgt_id] = jiffies; else if (time_after(jiffies, adapter->last_seqpoint[tgt_id] + 4 * HZ)) { adapter->last_seqpoint[tgt_id] = jiffies; queuetag = BLOGIC_ORDEREDTAG; } if (adapter->ext_lun) { ccb->tag_enable = true; ccb->queuetag = queuetag; } else { ccb->legacytag_enable = true; ccb->legacy_tag = queuetag; } } memcpy(ccb->cdb, cdb, cdblen); ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE; ccb->command = command; sense_buf = dma_map_single(&adapter->pci_device->dev, command->sense_buffer, ccb->sense_datalen, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) { blogic_err("DMA mapping for sense data buffer failed\n", adapter); blogic_dealloc_ccb(ccb, 0); return SCSI_MLQUEUE_HOST_BUSY; } ccb->sensedata = sense_buf; if (blogic_multimaster_type(adapter)) { /* Place the CCB in an Outgoing Mailbox. The higher levels of the SCSI Subsystem should not attempt to queue more commands than can be placed in Outgoing Mailboxes, so there should always be one free. In the unlikely event that there are none available, wait 1 second and try again. If that fails, the Host Adapter is probably hung so signal an error as a Host Adapter Hard Reset should be initiated soon. */ if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) { spin_unlock_irq(adapter->scsi_host->host_lock); blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter); blogic_delay(1); spin_lock_irq(adapter->scsi_host->host_lock); if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) { blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter); blogic_dealloc_ccb(ccb, 1); command->result = DID_ERROR << 16; scsi_done(command); } } } else { /* Call the FlashPoint SCCB Manager to start execution of the CCB. */ ccb->status = BLOGIC_CCB_ACTIVE; adapter->active_cmds[tgt_id]++; tgt_stats[tgt_id].cmds_tried++; FlashPoint_StartCCB(adapter->cardhandle, ccb); /* The Command may have already completed and blogic_qcompleted_ccb been called, or it may still be pending. 
*/ if (ccb->status == BLOGIC_CCB_COMPLETE) blogic_process_ccbs(adapter); } return 0; } static DEF_SCSI_QCMD(blogic_qcmd) #if 0 /* blogic_abort aborts Command if possible. */ static int blogic_abort(struct scsi_cmnd *command) { struct blogic_adapter *adapter = (struct blogic_adapter *) command->device->host->hostdata; int tgt_id = command->device->id; struct blogic_ccb *ccb; blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_request); /* Attempt to find an Active CCB for this Command. If no Active CCB for this Command is found, then no Abort is necessary. */ for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) if (ccb->command == command) break; if (ccb == NULL) { blogic_warn("Unable to Abort Command to Target %d - No CCB Found\n", adapter, tgt_id); return SUCCESS; } else if (ccb->status == BLOGIC_CCB_COMPLETE) { blogic_warn("Unable to Abort Command to Target %d - CCB Completed\n", adapter, tgt_id); return SUCCESS; } else if (ccb->status == BLOGIC_CCB_RESET) { blogic_warn("Unable to Abort Command to Target %d - CCB Reset\n", adapter, tgt_id); return SUCCESS; } if (blogic_multimaster_type(adapter)) { /* Attempt to Abort this CCB. MultiMaster Firmware versions prior to 5.xx do not generate Abort Tag messages, but only generate the non-tagged Abort message. Since non-tagged commands are not sent by the Host Adapter until the queue of outstanding tagged commands has completed, and the Abort message is treated as a non-tagged command, it is effectively impossible to abort commands when Tagged Queuing is active. Firmware version 5.xx does generate Abort Tag messages, so it is possible to abort commands when Tagged Queuing is active. */ if (adapter->tgt_flags[tgt_id].tagq_active && adapter->fw_ver[0] < '5') { blogic_warn("Unable to Abort CCB #%ld to Target %d - Abort Tag Not Supported\n", adapter, ccb->serial, tgt_id); return FAILURE; } else if (blogic_write_outbox(adapter, BLOGIC_MBOX_ABORT, ccb)) { blogic_warn("Aborting CCB #%ld to Target %d\n", adapter, ccb->serial, tgt_id); blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried); return SUCCESS; } else { blogic_warn("Unable to Abort CCB #%ld to Target %d - No Outgoing Mailboxes\n", adapter, ccb->serial, tgt_id); return FAILURE; } } else { /* Call the FlashPoint SCCB Manager to abort execution of the CCB. */ blogic_warn("Aborting CCB #%ld to Target %d\n", adapter, ccb->serial, tgt_id); blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried); FlashPoint_AbortCCB(adapter->cardhandle, ccb); /* The Abort may have already been completed and blogic_qcompleted_ccb been called, or it may still be pending. */ if (ccb->status == BLOGIC_CCB_COMPLETE) blogic_process_ccbs(adapter); return SUCCESS; } return SUCCESS; } #endif /* blogic_resetadapter resets Host Adapter if possible, marking all currently executing SCSI Commands as having been Reset. */ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset) { struct blogic_ccb *ccb; int tgt_id; /* * Attempt to Reset and Reinitialize the Host Adapter. */ if (!(blogic_hwreset(adapter, hard_reset) && blogic_initadapter(adapter))) { blogic_err("Resetting %s Failed\n", adapter, adapter->full_model); return FAILURE; } /* * Deallocate all currently executing CCBs. */ for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) if (ccb->status == BLOGIC_CCB_ACTIVE) blogic_dealloc_ccb(ccb, 1); /* * Wait a few seconds between the Host Adapter Hard Reset which * initiates a SCSI Bus Reset and issuing any SCSI Commands. 
Some * SCSI devices get confused if they receive SCSI Commands too soon * after a SCSI Bus Reset. */ if (hard_reset) { spin_unlock_irq(adapter->scsi_host->host_lock); blogic_delay(adapter->bus_settle_time); spin_lock_irq(adapter->scsi_host->host_lock); } for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) { adapter->last_resettried[tgt_id] = jiffies; adapter->last_resetdone[tgt_id] = jiffies; } return SUCCESS; } /* blogic_diskparam returns the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and the appropriate number of cylinders so as not to exceed drive capacity. In order for disks equal to or larger than 1 GB to be addressable by the BIOS without exceeding the BIOS limitation of 1024 cylinders, Extended Translation may be enabled in AutoSCSI on FlashPoint Host Adapters and on "W" and "C" series MultiMaster Host Adapters, or by a dip switch setting on "S" and "A" series MultiMaster Host Adapters. With Extended Translation enabled, drives between 1 GB inclusive and 2 GB exclusive are given a disk geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive are given a disk geometry of 255 heads and 63 sectors. However, if the BIOS detects that the Extended Translation setting does not match the geometry in the partition table, then the translation inferred from the partition table will be used by the BIOS, and a warning may be displayed. */ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int *params) { struct blogic_adapter *adapter = (struct blogic_adapter *) sdev->host->hostdata; struct bios_diskparam *diskparam = (struct bios_diskparam *) params; unsigned char *buf; if (adapter->ext_trans_enable && capacity >= 2 * 1024 * 1024 /* 1 GB in 512 byte sectors */) { if (capacity >= 4 * 1024 * 1024 /* 2 GB in 512 byte sectors */) { diskparam->heads = 255; diskparam->sectors = 63; } else { diskparam->heads = 128; diskparam->sectors = 32; } } else { diskparam->heads = 64; diskparam->sectors = 32; } diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); buf = scsi_bios_ptable(dev); if (buf == NULL) return 0; /* If the boot sector partition table flag is valid, search for a partition table entry whose end_head matches one of the standard BusLogic geometry translations (64/32, 128/32, or 255/63). 
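For example, with Extended Translation enabled a drive of 8,388,608 512-byte sectors (4 GB) is assigned 255 heads and 63 sectors, giving 8388608 / (255 * 63) = 522 cylinders; the partition table scan below only overrides that geometry when it finds an entry whose end_head is 63, 127, or 254.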
*/ if (*(unsigned short *) (buf + 64) == MSDOS_LABEL_MAGIC) { struct msdos_partition *part1_entry = (struct msdos_partition *)buf; struct msdos_partition *part_entry = part1_entry; int saved_cyl = diskparam->cylinders, part_no; unsigned char part_end_head = 0, part_end_sector = 0; for (part_no = 0; part_no < 4; part_no++) { part_end_head = part_entry->end_head; part_end_sector = part_entry->end_sector & 0x3F; if (part_end_head == 64 - 1) { diskparam->heads = 64; diskparam->sectors = 32; break; } else if (part_end_head == 128 - 1) { diskparam->heads = 128; diskparam->sectors = 32; break; } else if (part_end_head == 255 - 1) { diskparam->heads = 255; diskparam->sectors = 63; break; } part_entry++; } if (part_no == 4) { part_end_head = part1_entry->end_head; part_end_sector = part1_entry->end_sector & 0x3F; } diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); if (part_no < 4 && part_end_sector == diskparam->sectors) { if (diskparam->cylinders != saved_cyl) blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors); } else if (part_end_head > 0 || part_end_sector > 0) { blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector); blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors); } } kfree(buf); return 0; } /* BugLogic_ProcDirectoryInfo implements /proc/scsi/BusLogic/<N>. */ static int blogic_write_info(struct Scsi_Host *shost, char *procbuf, int bytes_avail) { struct blogic_adapter *adapter = (struct blogic_adapter *) shost->hostdata; struct blogic_tgt_stats *tgt_stats; tgt_stats = adapter->tgt_stats; adapter->ext_resets = 0; adapter->adapter_intern_errors = 0; memset(tgt_stats, 0, BLOGIC_MAXDEV * sizeof(struct blogic_tgt_stats)); return 0; } static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct blogic_adapter *adapter = (struct blogic_adapter *) shost->hostdata; struct blogic_tgt_stats *tgt_stats; int tgt; tgt_stats = adapter->tgt_stats; seq_write(m, adapter->msgbuf, adapter->msgbuflen); seq_printf(m, "\n\ Current Driver Queue Depth: %d\n\ Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs); seq_puts(m, "\n\n\ DATA TRANSFER STATISTICS\n\ \n\ Target Tagged Queuing Queue Depth Active Attempted Completed\n\ ====== ============== =========== ====== ========= =========\n"); for (tgt = 0; tgt < adapter->maxdev; tgt++) { struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; if (!tgt_flags->tgt_exists) continue; seq_printf(m, " %2d %s", tgt, (tgt_flags->tagq_ok ? (tgt_flags->tagq_active ? " Active" : (adapter->tagq_ok & (1 << tgt) ? 
" Permitted" : " Disabled")) : "Not Supported")); seq_printf(m, " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete); } seq_puts(m, "\n\ Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\ ====== ============= ============== =================== ===================\n"); for (tgt = 0; tgt < adapter->maxdev; tgt++) { struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; if (!tgt_flags->tgt_exists) continue; seq_printf(m, " %2d %9u %9u", tgt, tgt_stats[tgt].read_cmds, tgt_stats[tgt].write_cmds); if (tgt_stats[tgt].bytesread.billions > 0) seq_printf(m, " %9u%09u", tgt_stats[tgt].bytesread.billions, tgt_stats[tgt].bytesread.units); else seq_printf(m, " %9u", tgt_stats[tgt].bytesread.units); if (tgt_stats[tgt].byteswritten.billions > 0) seq_printf(m, " %9u%09u\n", tgt_stats[tgt].byteswritten.billions, tgt_stats[tgt].byteswritten.units); else seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units); } seq_puts(m, "\n\ Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\ ====== ======= ========= ========= ========= ========= =========\n"); for (tgt = 0; tgt < adapter->maxdev; tgt++) { struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; if (!tgt_flags->tgt_exists) continue; seq_printf(m, " %2d Read %9u %9u %9u %9u %9u\n", tgt, tgt_stats[tgt].read_sz_buckets[0], tgt_stats[tgt].read_sz_buckets[1], tgt_stats[tgt].read_sz_buckets[2], tgt_stats[tgt].read_sz_buckets[3], tgt_stats[tgt].read_sz_buckets[4]); seq_printf(m, " %2d Write %9u %9u %9u %9u %9u\n", tgt, tgt_stats[tgt].write_sz_buckets[0], tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]); } seq_puts(m, "\n\ Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\ ====== ======= ========= ========= ========= ========= =========\n"); for (tgt = 0; tgt < adapter->maxdev; tgt++) { struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; if (!tgt_flags->tgt_exists) continue; seq_printf(m, " %2d Read %9u %9u %9u %9u %9u\n", tgt, tgt_stats[tgt].read_sz_buckets[5], tgt_stats[tgt].read_sz_buckets[6], tgt_stats[tgt].read_sz_buckets[7], tgt_stats[tgt].read_sz_buckets[8], tgt_stats[tgt].read_sz_buckets[9]); seq_printf(m, " %2d Write %9u %9u %9u %9u %9u\n", tgt, tgt_stats[tgt].write_sz_buckets[5], tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]); } seq_puts(m, "\n\n\ ERROR RECOVERY STATISTICS\n\ \n\ Command Aborts Bus Device Resets Host Adapter Resets\n\ Target Requested Completed Requested Completed Requested Completed\n\ ID \\\\\\\\ Attempted //// \\\\\\\\ Attempted //// \\\\\\\\ Attempted ////\n\ ====== ===== ===== ===== ===== ===== ===== ===== ===== =====\n"); for (tgt = 0; tgt < adapter->maxdev; tgt++) { struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; if (!tgt_flags->tgt_exists) continue; seq_printf(m, " %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", tgt, tgt_stats[tgt].aborts_request, tgt_stats[tgt].aborts_tried, tgt_stats[tgt].aborts_done, tgt_stats[tgt].bdr_request, tgt_stats[tgt].bdr_tried, tgt_stats[tgt].bdr_done, tgt_stats[tgt].adapter_reset_req, tgt_stats[tgt].adapter_reset_attempt, tgt_stats[tgt].adapter_reset_done); } seq_printf(m, "\nExternal Host Adapter Resets: %d\n", adapter->ext_resets); seq_printf(m, "Host Adapter Internal Errors: %d\n", adapter->adapter_intern_errors); return 0; } /* blogic_msg prints Driver Messages. 
*/ __printf(2, 4) static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, struct blogic_adapter *adapter, ...) { static char buf[BLOGIC_LINEBUF_SIZE]; static bool begin = true; va_list args; int len = 0; va_start(args, adapter); len = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (msglevel == BLOGIC_ANNOUNCE_LEVEL) { static int msglines = 0; strcpy(&adapter->msgbuf[adapter->msgbuflen], buf); adapter->msgbuflen += len; if (++msglines <= 2) printk("%sscsi: %s", blogic_msglevelmap[msglevel], buf); } else if (msglevel == BLOGIC_INFO_LEVEL) { strcpy(&adapter->msgbuf[adapter->msgbuflen], buf); adapter->msgbuflen += len; if (begin) { if (buf[0] != '\n' || len > 1) printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); } else pr_cont("%s", buf); } else { if (begin) { if (adapter != NULL && adapter->adapter_initd) printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); else printk("%s%s", blogic_msglevelmap[msglevel], buf); } else pr_cont("%s", buf); } begin = (buf[len - 1] == '\n'); } /* blogic_parse parses an individual option keyword. It returns true and updates the pointer if the keyword is recognized and false otherwise. */ static bool __init blogic_parse(char **str, char *keyword) { char *pointer = *str; while (*keyword != '\0') { char strch = *pointer++; char keywordch = *keyword++; if (strch >= 'A' && strch <= 'Z') strch += 'a' - 'Z'; if (keywordch >= 'A' && keywordch <= 'Z') keywordch += 'a' - 'Z'; if (strch != keywordch) return false; } *str = pointer; return true; } /* blogic_parseopts handles processing of BusLogic Driver Options specifications. BusLogic Driver Options may be specified either via the Linux Kernel Command Line or via the Loadable Kernel Module Installation Facility. Driver Options for multiple host adapters may be specified either by separating the option strings by a semicolon, or by specifying multiple "BusLogic=" strings on the command line. Individual option specifications for a single host adapter are separated by commas. The Probing and Debugging Options apply to all host adapters whereas the remaining options apply individually only to the selected host adapter. The BusLogic Driver Probing Options are described in <file:Documentation/scsi/BusLogic.rst>. */ static int __init blogic_parseopts(char *options) { while (true) { struct blogic_drvr_options *drvr_opts = &blogic_drvr_options[blogic_drvr_options_count++]; int tgt_id; memset(drvr_opts, 0, sizeof(struct blogic_drvr_options)); while (*options != '\0' && *options != ';') { if (blogic_parse(&options, "NoProbePCI")) blogic_probe_options.noprobe_pci = true; else if (blogic_parse(&options, "NoProbe")) blogic_probe_options.noprobe = true; else if (blogic_parse(&options, "NoSortPCI")) blogic_probe_options.nosort_pci = true; else if (blogic_parse(&options, "MultiMasterFirst")) blogic_probe_options.multimaster_first = true; else if (blogic_parse(&options, "FlashPointFirst")) blogic_probe_options.flashpoint_first = true; /* Tagged Queuing Options. 
*/ else if (blogic_parse(&options, "QueueDepth:[") || blogic_parse(&options, "QD:[")) { for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { unsigned short qdepth = simple_strtoul(options, &options, 0); if (qdepth > BLOGIC_MAX_TAG_DEPTH) { blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth); return 0; } drvr_opts->qdepth[tgt_id] = qdepth; if (*options == ',') options++; else if (*options == ']') break; else { blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options); return 0; } } if (*options != ']') { blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options); return 0; } else options++; } else if (blogic_parse(&options, "QueueDepth:") || blogic_parse(&options, "QD:")) { unsigned short qdepth = simple_strtoul(options, &options, 0); if (qdepth == 0 || qdepth > BLOGIC_MAX_TAG_DEPTH) { blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth); return 0; } drvr_opts->common_qdepth = qdepth; for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) drvr_opts->qdepth[tgt_id] = qdepth; } else if (blogic_parse(&options, "TaggedQueuing:") || blogic_parse(&options, "TQ:")) { if (blogic_parse(&options, "Default")) { drvr_opts->tagq_ok = 0x0000; drvr_opts->tagq_ok_mask = 0x0000; } else if (blogic_parse(&options, "Enable")) { drvr_opts->tagq_ok = 0xFFFF; drvr_opts->tagq_ok_mask = 0xFFFF; } else if (blogic_parse(&options, "Disable")) { drvr_opts->tagq_ok = 0x0000; drvr_opts->tagq_ok_mask = 0xFFFF; } else { unsigned short tgt_bit; for (tgt_id = 0, tgt_bit = 1; tgt_id < BLOGIC_MAXDEV; tgt_id++, tgt_bit <<= 1) switch (*options++) { case 'Y': drvr_opts->tagq_ok |= tgt_bit; drvr_opts->tagq_ok_mask |= tgt_bit; break; case 'N': drvr_opts->tagq_ok &= ~tgt_bit; drvr_opts->tagq_ok_mask |= tgt_bit; break; case 'X': break; default: options--; tgt_id = BLOGIC_MAXDEV; break; } } } /* Miscellaneous Options. */ else if (blogic_parse(&options, "BusSettleTime:") || blogic_parse(&options, "BST:")) { unsigned short bus_settle_time = simple_strtoul(options, &options, 0); if (bus_settle_time > 5 * 60) { blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time); return 0; } drvr_opts->bus_settle_time = bus_settle_time; } else if (blogic_parse(&options, "InhibitTargetInquiry")) drvr_opts->stop_tgt_inquiry = true; /* Debugging Options. */ else if (blogic_parse(&options, "TraceProbe")) blogic_global_options.trace_probe = true; else if (blogic_parse(&options, "TraceHardwareReset")) blogic_global_options.trace_hw_reset = true; else if (blogic_parse(&options, "TraceConfiguration")) blogic_global_options.trace_config = true; else if (blogic_parse(&options, "TraceErrors")) blogic_global_options.trace_err = true; else if (blogic_parse(&options, "Debug")) { blogic_global_options.trace_probe = true; blogic_global_options.trace_hw_reset = true; blogic_global_options.trace_config = true; blogic_global_options.trace_err = true; } if (*options == ',') options++; else if (*options != ';' && *options != '\0') { blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options); *options = '\0'; } } if (!(blogic_drvr_options_count == 0 || blogic_probeinfo_count == 0 || blogic_drvr_options_count == blogic_probeinfo_count)) { blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL); return 0; } /* Tagged Queuing is disabled when the Queue Depth is 1 since queuing multiple commands is not possible. 
*/ for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) if (drvr_opts->qdepth[tgt_id] == 1) { unsigned short tgt_bit = 1 << tgt_id; drvr_opts->tagq_ok &= ~tgt_bit; drvr_opts->tagq_ok_mask |= tgt_bit; } if (*options == ';') options++; if (*options == '\0') return 0; } return 1; } /* Get it all started */ static const struct scsi_host_template blogic_template = { .module = THIS_MODULE, .proc_name = "BusLogic", .write_info = blogic_write_info, .show_info = blogic_show_info, .name = "BusLogic", .info = blogic_drvr_info, .queuecommand = blogic_qcmd, .slave_configure = blogic_slaveconfig, .bios_param = blogic_diskparam, .eh_host_reset_handler = blogic_hostreset, #if 0 .eh_abort_handler = blogic_abort, #endif .max_sectors = 128, }; /* blogic_setup handles processing of Kernel Command Line Arguments. */ static int __init blogic_setup(char *str) { int ints[3]; (void) get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] != 0) { blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL); return 0; } if (str == NULL || *str == '\0') return 0; return blogic_parseopts(str); } /* * Exit function. Deletes all hosts associated with this driver. */ static void __exit blogic_exit(void) { struct blogic_adapter *ha, *next; list_for_each_entry_safe(ha, next, &blogic_host_list, host_list) blogic_deladapter(ha); } __setup("BusLogic=", blogic_setup); #ifdef MODULE /*static struct pci_device_id blogic_pci_tbl[] = { { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } };*/ static const struct pci_device_id blogic_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)}, {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)}, {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)}, {0, }, }; #endif MODULE_DEVICE_TABLE(pci, blogic_pci_tbl); module_init(blogic_init); module_exit(blogic_exit);
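The option scanner above (blogic_parse() driving blogic_parseopts()) walks the option string one keyword at a time and advances the cursor only on a full match. The following is a minimal user-space sketch of that matching idea, not the driver's code: parse_keyword() and the sample option string are hypothetical names, and tolower() is used here for the case folding.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-alone keyword matcher: compare the option cursor
 * against a keyword, case-insensitively, and advance the cursor past the
 * keyword only when every character matches. */
static bool parse_keyword(const char **str, const char *keyword)
{
	const char *p = *str;

	while (*keyword != '\0') {
		if (tolower((unsigned char)*p++) !=
		    tolower((unsigned char)*keyword++))
			return false;
	}
	*str = p;
	return true;
}

int main(void)
{
	const char *options = "QueueDepth:16,TraceProbe";

	if (parse_keyword(&options, "QueueDepth:"))
		printf("queue depth option, remainder: \"%s\"\n", options);
	return 0;
}

Because the cursor is only updated on success, unmatched keywords leave the string untouched and the caller can simply try the next keyword, which is how the long else-if chain above stays simple.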
linux-master
drivers/scsi/BusLogic.c
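blogic_diskparam() in the file above adopts a larger translated geometry when the DOS partition table was written for one: a partition ending on head 63, 127 or 254 implies 64/32, 128/32 or 255/63 heads/sectors, and cylinders are recomputed from the capacity. A small stand-alone sketch of that mapping follows; guess_geometry() and the example capacity are illustrative assumptions, not part of the driver.

#include <stdio.h>

/* Hypothetical illustration of the translated-geometry heuristic: map a
 * partition's ending head number to a heads/sectors pair and derive
 * cylinders from the capacity (in 512-byte sectors). */
static void guess_geometry(unsigned char end_head, unsigned long capacity,
			   int *heads, int *sectors, unsigned long *cylinders)
{
	if (end_head == 64 - 1) {
		*heads = 64;
		*sectors = 32;
	} else if (end_head == 128 - 1) {
		*heads = 128;
		*sectors = 32;
	} else {			/* 255 - 1 and the fallback case */
		*heads = 255;
		*sectors = 63;
	}
	*cylinders = capacity / (unsigned long)(*heads * *sectors);
}

int main(void)
{
	int heads, sectors;
	unsigned long cylinders;

	/* 4 GiB disk: 8388608 sectors of 512 bytes */
	guess_geometry(127, 8388608UL, &heads, &sectors, &cylinders);
	printf("%d/%d/%lu\n", heads, sectors, cylinders);
	return 0;
}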
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996 David S. Miller ([email protected]) * Copyright (C) 1999 Andrew R. Baker ([email protected]) * Copyright (C) 2001 Florian Lohoff ([email protected]) * Copyright (C) 2003, 07 Ralf Baechle ([email protected]) * * (In all truth, Jed Schimmel wrote all this code.) */ #undef DEBUG #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> #include <asm/sgi/wd.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include "wd33c93.h" struct ip22_hostdata { struct WD33C93_hostdata wh; dma_addr_t dma; void *cpu; struct device *dev; }; #define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata)) struct hpc_chunk { struct hpc_dma_desc desc; u32 _padding; /* align to quadword boundary */ }; /* space for hpc dma descriptors */ #define HPC_DMA_SIZE PAGE_SIZE #define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) static irqreturn_t sgiwd93_intr(int irq, void *dev_id) { struct Scsi_Host * host = dev_id; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); wd33c93_intr(host); spin_unlock_irqrestore(host->host_lock, flags); return IRQ_HANDLED; } static inline void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); unsigned long len = scsi_pointer->this_residual; void *addr = scsi_pointer->ptr; dma_addr_t physaddr; unsigned long count; struct hpc_chunk *hcp; physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din)); scsi_pointer->dma_handle = physaddr; hcp = hd->cpu; while (len) { /* * even cntinfo could be up to 16383, without * magic only 8192 works correctly */ count = len > 8192 ? 8192 : len; hcp->desc.pbuf = physaddr; hcp->desc.cntinfo = count; hcp++; len -= count; physaddr += count; } /* * To make sure, if we trip an HPC bug, that we transfer every single * byte, we tag on an extra zero length dma descriptor at the end of * the chain. */ hcp->desc.pbuf = 0; hcp->desc.cntinfo = HPCDMA_EOX; dma_sync_single_for_device(hd->dev, hd->dma, (unsigned long)(hcp + 1) - (unsigned long)hd->cpu, DMA_TO_DEVICE); } static int dma_setup(struct scsi_cmnd *cmd, int datainp) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host); struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) cmd->device->host->base; pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu); hdata->wh.dma_dir = datainp; /* * wd33c93 shouldn't pass us bogus dma_setups, but it does:-( The * other wd33c93 drivers deal with it the same way (which isn't that * obvious). IMHO a better fix would be, not to do these dma setups * in the first place. */ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0) return 1; fill_hpc_entries(hdata, cmd, datainp); pr_debug(" HPCGO\n"); /* Start up the HPC. 
*/ hregs->ndptr = hdata->dma; if (datainp) hregs->ctrl = HPC3_SCTRL_ACTIVE; else hregs->ctrl = HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR; return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); struct ip22_hostdata *hdata = host_to_hostdata(instance); struct hpc3_scsiregs *hregs; if (!SCpnt) return; if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0) return; hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base; pr_debug("dma_stop: status<%d> ", status); /* First stop the HPC and flush it's FIFO. */ if (hdata->wh.dma_dir) { hregs->ctrl |= HPC3_SCTRL_FLUSH; while (hregs->ctrl & HPC3_SCTRL_ACTIVE) barrier(); } hregs->ctrl = 0; dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, scsi_pointer->this_residual, DMA_DIR(hdata->wh.dma_dir)); pr_debug("\n"); } void sgiwd93_reset(unsigned long base) { struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) base; hregs->ctrl = HPC3_SCTRL_CRESET; udelay(50); hregs->ctrl = 0; } EXPORT_SYMBOL_GPL(sgiwd93_reset); static inline void init_hpc_chain(struct ip22_hostdata *hdata) { struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu; dma_addr_t dma = hdata->dma; unsigned long start, end; start = (unsigned long) hcp; end = start + HPC_DMA_SIZE; while (start < end) { hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk)); hcp->desc.cntinfo = HPCDMA_EOX; hcp++; dma += sizeof(struct hpc_chunk); start += sizeof(struct hpc_chunk); } hcp--; hcp->desc.pnext = hdata->dma; } /* * Kludge alert - the SCSI code calls the abort and reset method with int * arguments not with pointers. So this is going to blow up beautyfully * on 64-bit systems with memory outside the compat address spaces. */ static const struct scsi_host_template sgiwd93_template = { .module = THIS_MODULE, .proc_name = "SGIWD93", .name = "SGI WD93", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 8, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct scsi_pointer), }; static int sgiwd93_probe(struct platform_device *pdev) { struct sgiwd93_platform_data *pd = pdev->dev.platform_data; unsigned char *wdregs = pd->wdregs; struct hpc3_scsiregs *hregs = pd->hregs; struct ip22_hostdata *hdata; struct Scsi_Host *host; wd33c93_regs regs; unsigned int unit = pd->unit; unsigned int irq = pd->irq; int err; host = scsi_host_alloc(&sgiwd93_template, sizeof(struct ip22_hostdata)); if (!host) { err = -ENOMEM; goto out; } host->base = (unsigned long) hregs; host->irq = irq; hdata = host_to_hostdata(host); hdata->dev = &pdev->dev; hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE, &hdata->dma, DMA_TO_DEVICE, GFP_KERNEL); if (!hdata->cpu) { printk(KERN_WARNING "sgiwd93: Could not allocate memory for " "host %d buffer.\n", unit); err = -ENOMEM; goto out_put; } init_hpc_chain(hdata); regs.SASR = wdregs + 3; regs.SCMD = wdregs + 7; hdata->wh.no_sync = 0; hdata->wh.fast = 1; hdata->wh.dma_mode = CTRL_BURST; wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20)); err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host); if (err) { printk(KERN_WARNING "sgiwd93: Could not register irq %d " "for host %d.\n", irq, unit); goto out_free; } platform_set_drvdata(pdev, host); err = scsi_add_host(host, NULL); if (err) goto out_irq; scsi_scan_host(host); return 0; out_irq: free_irq(irq, host); out_free: dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, 
hdata->cpu, hdata->dma, DMA_TO_DEVICE); out_put: scsi_host_put(host); out: return err; } static int sgiwd93_remove(struct platform_device *pdev) { struct Scsi_Host *host = platform_get_drvdata(pdev); struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata; struct sgiwd93_platform_data *pd = pdev->dev.platform_data; scsi_remove_host(host); free_irq(pd->irq, host); dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma, DMA_TO_DEVICE); scsi_host_put(host); return 0; } static struct platform_driver sgiwd93_driver = { .probe = sgiwd93_probe, .remove = sgiwd93_remove, .driver = { .name = "sgiwd93", } }; static int __init sgiwd93_module_init(void) { return platform_driver_register(&sgiwd93_driver); } static void __exit sgiwd93_module_exit(void) { return platform_driver_unregister(&sgiwd93_driver); } module_init(sgiwd93_module_init); module_exit(sgiwd93_module_exit); MODULE_DESCRIPTION("SGI WD33C93 driver"); MODULE_AUTHOR("Ralf Baechle <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sgiwd93");
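fill_hpc_entries() above splits a single DMA mapping into HPC3 descriptors of at most 8192 bytes each and terminates the chain with an extra zero-length end-of-transfer entry. The loop below is a stand-alone sketch of that chunking under assumed names: struct desc, the EOX flag value and fill_chain() are illustrative, only the 8192-byte limit and the zero-length terminator mirror the driver, and nothing here touches real hardware.

#include <stdint.h>
#include <stdio.h>

#define CHUNK_MAX	8192u		/* per-descriptor limit used above */
#define EOX		0x80000000u	/* assumed end-of-transfer marker */

struct desc {
	uint32_t pbuf;		/* bus address of this chunk */
	uint32_t cntinfo;	/* byte count, or EOX terminator */
};

/* Fill descriptors for a buffer of 'len' bytes starting at bus address
 * 'addr'; return how many descriptors were written, terminator included. */
static unsigned int fill_chain(struct desc *d, uint32_t addr, uint32_t len)
{
	unsigned int n = 0;

	while (len) {
		uint32_t count = len > CHUNK_MAX ? CHUNK_MAX : len;

		d[n].pbuf = addr;
		d[n].cntinfo = count;
		addr += count;
		len -= count;
		n++;
	}
	d[n].pbuf = 0;		/* zero-length descriptor ends the chain */
	d[n].cntinfo = EOX;
	return n + 1;
}

int main(void)
{
	struct desc chain[8];
	unsigned int n = fill_chain(chain, 0x10000, 20000);

	printf("%u descriptors (last one is the terminator)\n", n);
	return 0;
}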
linux-master
drivers/scsi/sgiwd93.c
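init_hpc_chain() in the file above pre-links the descriptor page into a ring: each entry's pnext holds the bus address of the next entry and the last entry points back to the first. A minimal sketch of building such a ring over an array follows; bus addresses are simulated as plain offsets, and struct ring_desc, RING_EOX and init_ring() are assumed names for illustration only.

#include <stdint.h>
#include <stdio.h>

struct ring_desc {
	uint32_t pnext;		/* bus address of the next descriptor */
	uint32_t cntinfo;	/* parked as an end-of-transfer placeholder */
};

#define RING_EOX 0x80000000u	/* assumed terminator value */

/* Link 'count' contiguous descriptors into a ring, assuming their DMA
 * buffer starts at bus address 'dma_base'. */
static void init_ring(struct ring_desc *d, uint32_t dma_base,
		      unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		d[i].pnext = dma_base + (i + 1) * sizeof(struct ring_desc);
		d[i].cntinfo = RING_EOX;
	}
	d[count - 1].pnext = dma_base;	/* close the ring */
}

int main(void)
{
	struct ring_desc ring[4];

	init_ring(ring, 0x2000, 4);
	printf("last entry points back to 0x%x\n", (unsigned)ring[3].pnext);
	return 0;
}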
// SPDX-License-Identifier: GPL-2.0-or-later /* * SuperTrak EX Series Storage Controller driver for Linux * * Copyright (C) 2005-2015 Promise Technology Inc. * * Written By: * Ed Lin <[email protected]> */ #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/ktime.h> #include <linux/reboot.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #define DRV_NAME "stex" #define ST_DRIVER_VERSION "6.02.0000.01" #define ST_VER_MAJOR 6 #define ST_VER_MINOR 02 #define ST_OEM 0000 #define ST_BUILD_VER 01 enum { /* MU register offset */ IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */ IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */ OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */ OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */ IDBL = 0x20, /* MU_INBOUND_DOORBELL */ IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */ IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */ ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */ OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ YIOA_STATUS = 0x00, YH2I_INT = 0x20, YINT_EN = 0x34, YI2H_INT = 0x9c, YI2H_INT_C = 0xa0, YH2I_REQ = 0xc0, YH2I_REQ_HI = 0xc4, PSCRATCH0 = 0xb0, PSCRATCH1 = 0xb4, PSCRATCH2 = 0xb8, PSCRATCH3 = 0xbc, PSCRATCH4 = 0xc8, MAILBOX_BASE = 0x1000, MAILBOX_HNDSHK_STS = 0x0, /* MU register value */ MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1), MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2), MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3), MU_INBOUND_DOORBELL_RESET = (1 << 4), MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0), MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1), MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2), MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3), MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4), MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27), /* MU status code */ MU_STATE_STARTING = 1, MU_STATE_STARTED = 2, MU_STATE_RESETTING = 3, MU_STATE_FAILED = 4, MU_STATE_STOP = 5, MU_STATE_NOCONNECT = 6, MU_MAX_DELAY = 50, MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000, MU_HARD_RESET_WAIT = 30000, HMU_PARTNER_TYPE = 2, /* firmware returned values */ SRB_STATUS_SUCCESS = 0x01, SRB_STATUS_ERROR = 0x04, SRB_STATUS_BUSY = 0x05, SRB_STATUS_INVALID_REQUEST = 0x06, SRB_STATUS_SELECTION_TIMEOUT = 0x0A, SRB_SEE_SENSE = 0x80, /* task attribute */ TASK_ATTRIBUTE_SIMPLE = 0x0, TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, TASK_ATTRIBUTE_ORDERED = 0x2, TASK_ATTRIBUTE_ACA = 0x4, }; enum { SS_STS_NORMAL = 0x80000000, SS_STS_DONE = 0x40000000, SS_STS_HANDSHAKE = 0x20000000, SS_HEAD_HANDSHAKE = 0x80, SS_H2I_INT_RESET = 0x100, SS_I2H_REQUEST_RESET = 0x2000, SS_MU_OPERATIONAL = 0x80000000, }; enum { STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, /* sg flags */ SG_CF_EOT = 0x80, /* end of table */ SG_CF_64B = 0x40, /* 64 bit item */ SG_CF_HOST = 0x20, /* sg in host memory */ MSG_DATA_DIR_ND = 0, MSG_DATA_DIR_IN = 1, MSG_DATA_DIR_OUT = 2, st_shasta = 0, st_vsc = 1, st_yosemite = 2, st_seq = 3, st_yel = 4, st_P3 = 5, PASSTHRU_REQ_TYPE = 0x00000001, PASSTHRU_REQ_NO_WAKEUP = 0x00000100, ST_INTERNAL_TIMEOUT = 180, ST_TO_CMD = 0, ST_FROM_CMD = 1, /* vendor 
specific commands of Promise */ MGT_CMD = 0xd8, SINBAND_MGT_CMD = 0xd9, ARRAY_CMD = 0xe0, CONTROLLER_CMD = 0xe1, DEBUGGING_CMD = 0xe2, PASSTHRU_CMD = 0xe3, PASSTHRU_GET_ADAPTER = 0x05, PASSTHRU_GET_DRVVER = 0x10, CTLR_CONFIG_CMD = 0x03, CTLR_SHUTDOWN = 0x0d, CTLR_POWER_STATE_CHANGE = 0x0e, CTLR_POWER_SAVING = 0x01, PASSTHRU_SIGNATURE = 0x4e415041, MGT_CMD_SIGNATURE = 0xba, INQUIRY_EVPD = 0x01, ST_ADDITIONAL_MEM = 0x200000, ST_ADDITIONAL_MEM_MIN = 0x80000, PMIC_SHUTDOWN = 0x0D, PMIC_REUMSE = 0x10, ST_IGNORED = -1, ST_NOTHANDLED = 7, ST_S3 = 3, ST_S4 = 4, ST_S5 = 5, ST_S6 = 6, }; struct st_sgitem { u8 ctrl; /* SG_CF_xxx */ u8 reserved[3]; __le32 count; __le64 addr; }; struct st_ss_sgitem { __le32 addr; __le32 addr_hi; __le32 count; }; struct st_sgtable { __le16 sg_count; __le16 max_sg_count; __le32 sz_in_byte; }; struct st_msg_header { __le64 handle; u8 flag; u8 channel; __le16 timeout; u32 reserved; }; struct handshake_frame { __le64 rb_phy; /* request payload queue physical address */ __le16 req_sz; /* size of each request payload */ __le16 req_cnt; /* count of reqs the buffer can hold */ __le16 status_sz; /* size of each status payload */ __le16 status_cnt; /* count of status the buffer can hold */ __le64 hosttime; /* seconds from Jan 1, 1970 (GMT) */ u8 partner_type; /* who sends this frame */ u8 reserved0[7]; __le32 partner_ver_major; __le32 partner_ver_minor; __le32 partner_ver_oem; __le32 partner_ver_build; __le32 extra_offset; /* NEW */ __le32 extra_size; /* NEW */ __le32 scratch_size; u32 reserved1; }; struct req_msg { __le16 tag; u8 lun; u8 target; u8 task_attr; u8 task_manage; u8 data_dir; u8 payload_sz; /* payload size in 4-byte, not used */ u8 cdb[STEX_CDB_LENGTH]; u32 variable[]; }; struct status_msg { __le16 tag; u8 lun; u8 target; u8 srb_status; u8 scsi_status; u8 reserved; u8 payload_sz; /* payload size in 4-byte */ u8 variable[STATUS_VAR_LEN]; }; struct ver_info { u32 major; u32 minor; u32 oem; u32 build; u32 reserved[2]; }; struct st_frame { u32 base[6]; u32 rom_addr; struct ver_info drv_ver; struct ver_info bios_ver; u32 bus; u32 slot; u32 irq_level; u32 irq_vec; u32 id; u32 subid; u32 dimm_size; u8 dimm_type; u8 reserved[3]; u32 channel; u32 reserved1; }; struct st_drvver { u32 major; u32 minor; u32 oem; u32 build; u32 signature[2]; u8 console_id; u8 host_no; u8 reserved0[2]; u32 reserved[3]; }; struct st_ccb { struct req_msg *req; struct scsi_cmnd *cmd; void *sense_buffer; unsigned int sense_bufflen; int sg_count; u32 req_type; u8 srb_status; u8 scsi_status; u8 reserved[2]; }; struct st_hba { void __iomem *mmio_base; /* iomapped PCI memory space */ void *dma_mem; dma_addr_t dma_handle; size_t dma_size; struct Scsi_Host *host; struct pci_dev *pdev; struct req_msg * (*alloc_rq) (struct st_hba *); int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); void (*send) (struct st_hba *, struct req_msg *, u16); u32 req_head; u32 req_tail; u32 status_head; u32 status_tail; struct status_msg *status_buffer; void *copy_buffer; /* temp buffer for driver-handled commands */ struct st_ccb *ccb; struct st_ccb *wait_ccb; __le32 *scratch; char work_q_name[20]; struct workqueue_struct *work_q; struct work_struct reset_work; wait_queue_head_t reset_waitq; unsigned int mu_status; unsigned int cardtype; int msi_enabled; int out_req_cnt; u32 extra_offset; u16 rq_count; u16 rq_size; u16 sts_count; u8 supports_pm; int msi_lock; }; struct st_card_info { struct req_msg * (*alloc_rq) (struct st_hba *); int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); void (*send) 
(struct st_hba *, struct req_msg *, u16); unsigned int max_id; unsigned int max_lun; unsigned int max_channel; u16 rq_count; u16 rq_size; u16 sts_count; }; static int S6flag; static int stex_halt(struct notifier_block *nb, ulong event, void *buf); static struct notifier_block stex_notifier = { stex_halt, NULL, 0 }; static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)"); static const char console_inq_page[] = { 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30, 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */ 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */ 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */ 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */ 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */ 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */ 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20 }; MODULE_AUTHOR("Ed Lin"); MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers"); MODULE_LICENSE("GPL"); MODULE_VERSION(ST_DRIVER_VERSION); static struct status_msg *stex_get_status(struct st_hba *hba) { struct status_msg *status = hba->status_buffer + hba->status_tail; ++hba->status_tail; hba->status_tail %= hba->sts_count+1; return status; } static void stex_invalid_field(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { /* "Invalid field in cdb" */ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0); done(cmd); } static struct req_msg *stex_alloc_req(struct st_hba *hba) { struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; ++hba->req_head; hba->req_head %= hba->rq_count+1; return req; } static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) { return (struct req_msg *)(hba->dma_mem + hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); } static int stex_map_sg(struct st_hba *hba, struct req_msg *req, struct st_ccb *ccb) { struct scsi_cmnd *cmd; struct scatterlist *sg; struct st_sgtable *dst; struct st_sgitem *table; int i, nseg; cmd = ccb->cmd; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { dst = (struct st_sgtable *)req->variable; ccb->sg_count = nseg; dst->sg_count = cpu_to_le16((u16)nseg); dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); table = (struct st_sgitem *)(dst + 1); scsi_for_each_sg(cmd, sg, nseg, i) { table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table[i].addr = cpu_to_le64(sg_dma_address(sg)); table[i].ctrl = SG_CF_64B | SG_CF_HOST; } table[--i].ctrl |= SG_CF_EOT; } return nseg; } static int stex_ss_map_sg(struct st_hba *hba, struct req_msg *req, struct st_ccb *ccb) { struct scsi_cmnd *cmd; struct scatterlist *sg; struct st_sgtable *dst; struct st_ss_sgitem *table; int i, nseg; cmd = ccb->cmd; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { dst = (struct st_sgtable *)req->variable; ccb->sg_count = nseg; dst->sg_count = cpu_to_le16((u16)nseg); dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); table = (struct st_ss_sgitem *)(dst + 1); scsi_for_each_sg(cmd, sg, nseg, i) { table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table[i].addr = cpu_to_le32(sg_dma_address(sg) & 0xffffffff); table[i].addr_hi = cpu_to_le32((sg_dma_address(sg) >> 16) >> 16); } } return nseg; } static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) { struct st_frame *p; size_t count = sizeof(struct st_frame); p = hba->copy_buffer; scsi_sg_copy_to_buffer(ccb->cmd, p, count); memset(p->base, 0, sizeof(u32)*6); *(unsigned 
long *)(p->base) = pci_resource_start(hba->pdev, 0); p->rom_addr = 0; p->drv_ver.major = ST_VER_MAJOR; p->drv_ver.minor = ST_VER_MINOR; p->drv_ver.oem = ST_OEM; p->drv_ver.build = ST_BUILD_VER; p->bus = hba->pdev->bus->number; p->slot = hba->pdev->devfn; p->irq_level = 0; p->irq_vec = hba->pdev->irq; p->id = hba->pdev->vendor << 16 | hba->pdev->device; p->subid = hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; scsi_sg_copy_from_buffer(ccb->cmd, p, count); } static void stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) { req->tag = cpu_to_le16(tag); hba->ccb[tag].req = req; hba->out_req_cnt++; writel(hba->req_head, hba->mmio_base + IMR0); writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); readl(hba->mmio_base + IDBL); /* flush */ } static void stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) { struct scsi_cmnd *cmd; struct st_msg_header *msg_h; dma_addr_t addr; req->tag = cpu_to_le16(tag); hba->ccb[tag].req = req; hba->out_req_cnt++; cmd = hba->ccb[tag].cmd; msg_h = (struct st_msg_header *)req - 1; if (likely(cmd)) { msg_h->channel = (u8)cmd->device->channel; msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); } addr = hba->dma_handle + hba->req_head * hba->rq_size; addr += (hba->ccb[tag].sg_count+4)/11; msg_h->handle = cpu_to_le64(addr); ++hba->req_head; hba->req_head %= hba->rq_count+1; if (hba->cardtype == st_P3) { writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); writel(addr, hba->mmio_base + YH2I_REQ); } else { writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ writel(addr, hba->mmio_base + YH2I_REQ); readl(hba->mmio_base + YH2I_REQ); /* flush */ } } static void return_abnormal_state(struct st_hba *hba, int status) { struct st_ccb *ccb; unsigned long flags; u16 tag; spin_lock_irqsave(hba->host->host_lock, flags); for (tag = 0; tag < hba->host->can_queue; tag++) { ccb = &hba->ccb[tag]; if (ccb->req == NULL) continue; ccb->req = NULL; if (ccb->cmd) { scsi_dma_unmap(ccb->cmd); ccb->cmd->result = status << 16; scsi_done(ccb->cmd); ccb->cmd = NULL; } } spin_unlock_irqrestore(hba->host->host_lock, flags); } static int stex_slave_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); return 0; } static int stex_queuecommand_lck(struct scsi_cmnd *cmd) { void (*done)(struct scsi_cmnd *) = scsi_done; struct st_hba *hba; struct Scsi_Host *host; unsigned int id, lun; struct req_msg *req; u16 tag; host = cmd->device->host; id = cmd->device->id; lun = cmd->device->lun; hba = (struct st_hba *) &host->hostdata[0]; if (hba->mu_status == MU_STATE_NOCONNECT) { cmd->result = DID_NO_CONNECT; done(cmd); return 0; } if (unlikely(hba->mu_status != MU_STATE_STARTED)) return SCSI_MLQUEUE_HOST_BUSY; switch (cmd->cmnd[0]) { case MODE_SENSE_10: { static char ms10_caching_page[12] = { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; unsigned char page; page = cmd->cmnd[2] & 0x3f; if (page == 0x8 || page == 0x3f) { scsi_sg_copy_from_buffer(cmd, ms10_caching_page, sizeof(ms10_caching_page)); cmd->result = DID_OK << 16; done(cmd); } else stex_invalid_field(cmd, done); return 0; } case REPORT_LUNS: /* * The shasta firmware does not report actual luns in the * target, so fail the command to force sequential lun scan. * Also, the console device does not support this command. 
*/ if (hba->cardtype == st_shasta || id == host->max_id - 1) { stex_invalid_field(cmd, done); return 0; } break; case TEST_UNIT_READY: if (id == host->max_id - 1) { cmd->result = DID_OK << 16; done(cmd); return 0; } break; case INQUIRY: if (lun >= host->max_lun) { cmd->result = DID_NO_CONNECT << 16; done(cmd); return 0; } if (id != host->max_id - 1) break; if (!lun && !cmd->device->channel && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page, sizeof(console_inq_page)); cmd->result = DID_OK << 16; done(cmd); } else stex_invalid_field(cmd, done); return 0; case PASSTHRU_CMD: if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { const struct st_drvver ver = { .major = ST_VER_MAJOR, .minor = ST_VER_MINOR, .oem = ST_OEM, .build = ST_BUILD_VER, .signature[0] = PASSTHRU_SIGNATURE, .console_id = host->max_id - 1, .host_no = hba->host->host_no, }; size_t cp_len = sizeof(ver); cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); if (sizeof(ver) == cp_len) cmd->result = DID_OK << 16; else cmd->result = DID_ERROR << 16; done(cmd); return 0; } break; default: break; } tag = scsi_cmd_to_rq(cmd)->tag; if (unlikely(tag >= host->can_queue)) return SCSI_MLQUEUE_HOST_BUSY; req = hba->alloc_rq(hba); req->lun = lun; req->target = id; /* cdb */ memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH); if (cmd->sc_data_direction == DMA_FROM_DEVICE) req->data_dir = MSG_DATA_DIR_IN; else if (cmd->sc_data_direction == DMA_TO_DEVICE) req->data_dir = MSG_DATA_DIR_OUT; else req->data_dir = MSG_DATA_DIR_ND; hba->ccb[tag].cmd = cmd; hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; hba->ccb[tag].sense_buffer = cmd->sense_buffer; if (!hba->map_sg(hba, req, &hba->ccb[tag])) { hba->ccb[tag].sg_count = 0; memset(&req->variable[0], 0, 8); } hba->send(hba, req, tag); return 0; } static DEF_SCSI_QCMD(stex_queuecommand) static void stex_scsi_done(struct st_ccb *ccb) { struct scsi_cmnd *cmd = ccb->cmd; int result; if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) { result = ccb->scsi_status; switch (ccb->scsi_status) { case SAM_STAT_GOOD: result |= DID_OK << 16; break; case SAM_STAT_CHECK_CONDITION: result |= DID_OK << 16; break; case SAM_STAT_BUSY: result |= DID_BUS_BUSY << 16; break; default: result |= DID_ERROR << 16; break; } } else if (ccb->srb_status & SRB_SEE_SENSE) result = SAM_STAT_CHECK_CONDITION; else switch (ccb->srb_status) { case SRB_STATUS_SELECTION_TIMEOUT: result = DID_NO_CONNECT << 16; break; case SRB_STATUS_BUSY: result = DID_BUS_BUSY << 16; break; case SRB_STATUS_INVALID_REQUEST: case SRB_STATUS_ERROR: default: result = DID_ERROR << 16; break; } cmd->result = result; scsi_done(cmd); } static void stex_copy_data(struct st_ccb *ccb, struct status_msg *resp, unsigned int variable) { if (resp->scsi_status != SAM_STAT_GOOD) { if (ccb->sense_buffer != NULL) memcpy(ccb->sense_buffer, resp->variable, min(variable, ccb->sense_bufflen)); return; } if (ccb->cmd == NULL) return; scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable); } static void stex_check_cmd(struct st_hba *hba, struct st_ccb *ccb, struct status_msg *resp) { if (ccb->cmd->cmnd[0] == MGT_CMD && resp->scsi_status != SAM_STAT_CHECK_CONDITION) scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - le32_to_cpu(*(__le32 *)&resp->variable[0])); } static void stex_mu_intr(struct st_hba *hba, u32 doorbell) { void __iomem *base = hba->mmio_base; struct status_msg *resp; struct st_ccb *ccb; unsigned int size; u16 tag; if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))) return; /* status payloads */ 
hba->status_head = readl(base + OMR1); if (unlikely(hba->status_head > hba->sts_count)) { printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n", pci_name(hba->pdev)); return; } /* * it's not a valid status payload if: * 1. there are no pending requests(e.g. during init stage) * 2. there are some pending requests, but the controller is in * reset status, and its type is not st_yosemite * firmware of st_yosemite in reset status will return pending requests * to driver, so we allow it to pass */ if (unlikely(hba->out_req_cnt <= 0 || (hba->mu_status == MU_STATE_RESETTING && hba->cardtype != st_yosemite))) { hba->status_tail = hba->status_head; goto update_status; } while (hba->status_tail != hba->status_head) { resp = stex_get_status(hba); tag = le16_to_cpu(resp->tag); if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (unlikely(hba->wait_ccb == ccb)) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); continue; } size = resp->payload_sz * sizeof(u32); /* payload size */ if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || size > sizeof(*resp))) { printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", pci_name(hba->pdev)); } else { size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */ if (size) stex_copy_data(ccb, resp, size); } ccb->req = NULL; ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; if (likely(ccb->cmd != NULL)) { if (hba->cardtype == st_yosemite) stex_check_cmd(hba, ccb, resp); if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD && ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) stex_controller_info(hba, ccb); scsi_dma_unmap(ccb->cmd); stex_scsi_done(ccb); } else ccb->req_type = 0; } update_status: writel(hba->status_head, base + IMR1); readl(base + IMR1); /* flush */ } static irqreturn_t stex_intr(int irq, void *__hba) { struct st_hba *hba = __hba; void __iomem *base = hba->mmio_base; u32 data; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); data = readl(base + ODBL); if (data && data != 0xffffffff) { /* clear the interrupt */ writel(data, base + ODBL); readl(base + ODBL); /* flush */ stex_mu_intr(hba, data); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET && hba->cardtype == st_shasta)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); return IRQ_NONE; } static void stex_ss_mu_intr(struct st_hba *hba) { struct status_msg *resp; struct st_ccb *ccb; __le32 *scratch; unsigned int size; int count = 0; u32 value; u16 tag; if (unlikely(hba->out_req_cnt <= 0 || hba->mu_status == MU_STATE_RESETTING)) return; while (count < hba->sts_count) { scratch = hba->scratch + hba->status_tail; value = le32_to_cpu(*scratch); if (unlikely(!(value & SS_STS_NORMAL))) return; resp = hba->status_buffer + hba->status_tail; *scratch = 0; ++count; ++hba->status_tail; hba->status_tail %= hba->sts_count+1; tag = (u16)value; if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (unlikely(hba->wait_ccb == ccb)) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); continue; } ccb->req = NULL; if (likely(value & SS_STS_DONE)) { /* normal case */ 
ccb->srb_status = SRB_STATUS_SUCCESS; ccb->scsi_status = SAM_STAT_GOOD; } else { ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; size = resp->payload_sz * sizeof(u32); if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || size > sizeof(*resp))) { printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", pci_name(hba->pdev)); } else { size -= sizeof(*resp) - STATUS_VAR_LEN; if (size) stex_copy_data(ccb, resp, size); } if (likely(ccb->cmd != NULL)) stex_check_cmd(hba, ccb, resp); } if (likely(ccb->cmd != NULL)) { scsi_dma_unmap(ccb->cmd); stex_scsi_done(ccb); } else ccb->req_type = 0; } } static irqreturn_t stex_ss_intr(int irq, void *__hba) { struct st_hba *hba = __hba; void __iomem *base = hba->mmio_base; u32 data; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); if (hba->cardtype == st_yel) { data = readl(base + YI2H_INT); if (data && data != 0xffffffff) { /* clear the interrupt */ writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & SS_I2H_REQUEST_RESET)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } } else { data = readl(base + PSCRATCH4); if (data != 0xffffffff) { if (data != 0) { /* clear the interrupt */ writel(data, base + PSCRATCH1); writel((1 << 22), base + YH2I_INT); } stex_ss_mu_intr(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & SS_I2H_REQUEST_RESET)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } } spin_unlock_irqrestore(hba->host->host_lock, flags); return IRQ_NONE; } static int stex_common_handshake(struct st_hba *hba) { void __iomem *base = hba->mmio_base; struct handshake_frame *h; dma_addr_t status_phys; u32 data; unsigned long before; if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); readl(base + IDBL); before = jiffies; while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no handshake signature\n", pci_name(hba->pdev)); return -1; } rmb(); msleep(1); } } udelay(10); data = readl(base + OMR1); if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) { data &= 0x0000ffff; if (hba->host->can_queue > data) { hba->host->can_queue = data; hba->host->cmd_per_lun = data; } } h = (struct handshake_frame *)hba->status_buffer; h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h->status_cnt = cpu_to_le16(hba->sts_count+1); h->hosttime = cpu_to_le64(ktime_get_real_seconds()); h->partner_type = HMU_PARTNER_TYPE; if (hba->extra_offset) { h->extra_offset = cpu_to_le32(hba->extra_offset); h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); } else h->extra_offset = h->extra_size = 0; status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; writel(status_phys, base + IMR0); readl(base + IMR0); writel((status_phys >> 16) >> 16, base + IMR1); readl(base + IMR1); writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */ readl(base + OMR0); writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); readl(base + IDBL); /* flush */ udelay(10); before = jiffies; while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); return -1; } rmb(); msleep(1); } writel(0, base + IMR0); 
readl(base + IMR0); writel(0, base + OMR0); readl(base + OMR0); writel(0, base + IMR1); readl(base + IMR1); writel(0, base + OMR1); readl(base + OMR1); /* flush */ return 0; } static int stex_ss_handshake(struct st_hba *hba) { void __iomem *base = hba->mmio_base; struct st_msg_header *msg_h; struct handshake_frame *h; __le32 *scratch; u32 data, scratch_size, mailboxdata, operationaldata; unsigned long before; int ret = 0; before = jiffies; if (hba->cardtype == st_yel) { operationaldata = readl(base + YIOA_STATUS); while (operationaldata != SS_MU_OPERATIONAL) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): firmware not operational\n", pci_name(hba->pdev)); return -1; } msleep(1); operationaldata = readl(base + YIOA_STATUS); } } else { operationaldata = readl(base + PSCRATCH3); while (operationaldata != SS_MU_OPERATIONAL) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): firmware not operational\n", pci_name(hba->pdev)); return -1; } msleep(1); operationaldata = readl(base + PSCRATCH3); } } msg_h = (struct st_msg_header *)hba->dma_mem; msg_h->handle = cpu_to_le64(hba->dma_handle); msg_h->flag = SS_HEAD_HANDSHAKE; h = (struct handshake_frame *)(msg_h + 1); h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h->status_cnt = cpu_to_le16(hba->sts_count+1); h->hosttime = cpu_to_le64(ktime_get_real_seconds()); h->partner_type = HMU_PARTNER_TYPE; h->extra_offset = h->extra_size = 0; scratch_size = (hba->sts_count+1)*sizeof(u32); h->scratch_size = cpu_to_le32(scratch_size); if (hba->cardtype == st_yel) { data = readl(base + YINT_EN); data &= ~4; writel(data, base + YINT_EN); writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); readl(base + YH2I_REQ_HI); writel(hba->dma_handle, base + YH2I_REQ); readl(base + YH2I_REQ); /* flush */ } else { data = readl(base + YINT_EN); data &= ~(1 << 0); data &= ~(1 << 2); writel(data, base + YINT_EN); if (hba->msi_lock == 0) { /* P3 MSI Register cannot access twice */ writel((1 << 6), base + YH2I_INT); hba->msi_lock = 1; } writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); writel(hba->dma_handle, base + YH2I_REQ); } before = jiffies; scratch = hba->scratch; if (hba->cardtype == st_yel) { while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); ret = -1; break; } rmb(); msleep(1); } } else { mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); while (mailboxdata != SS_STS_HANDSHAKE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); ret = -1; break; } rmb(); msleep(1); mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); } } memset(scratch, 0, scratch_size); msg_h->flag = 0; return ret; } static int stex_handshake(struct st_hba *hba) { int err; unsigned long flags; unsigned int mu_status; if (hba->cardtype == st_yel || hba->cardtype == st_P3) err = stex_ss_handshake(hba); else err = stex_common_handshake(hba); spin_lock_irqsave(hba->host->host_lock, flags); mu_status = hba->mu_status; if (err == 0) { hba->req_head = 0; hba->req_tail = 0; hba->status_head = 0; hba->status_tail = 0; hba->out_req_cnt = 0; hba->mu_status = MU_STATE_STARTED; } else hba->mu_status = MU_STATE_FAILED; if (mu_status 
== MU_STATE_RESETTING) wake_up_all(&hba->reset_waitq); spin_unlock_irqrestore(hba->host->host_lock, flags); return err; } static int stex_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct st_hba *hba = (struct st_hba *)host->hostdata; u16 tag = scsi_cmd_to_rq(cmd)->tag; void __iomem *base; u32 data; int result = SUCCESS; unsigned long flags; scmd_printk(KERN_INFO, cmd, "aborting command\n"); base = hba->mmio_base; spin_lock_irqsave(host->host_lock, flags); if (tag < host->can_queue && hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) hba->wait_ccb = &hba->ccb[tag]; else goto out; if (hba->cardtype == st_yel) { data = readl(base + YI2H_INT); if (data == 0 || data == 0xffffffff) goto fail_out; writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); } else if (hba->cardtype == st_P3) { data = readl(base + PSCRATCH4); if (data == 0xffffffff) goto fail_out; if (data != 0) { writel(data, base + PSCRATCH1); writel((1 << 22), base + YH2I_INT); } stex_ss_mu_intr(hba); } else { data = readl(base + ODBL); if (data == 0 || data == 0xffffffff) goto fail_out; writel(data, base + ODBL); readl(base + ODBL); /* flush */ stex_mu_intr(hba, data); } if (hba->wait_ccb == NULL) { printk(KERN_WARNING DRV_NAME "(%s): lost interrupt\n", pci_name(hba->pdev)); goto out; } fail_out: scsi_dma_unmap(cmd); hba->wait_ccb->req = NULL; /* nullify the req's future return */ hba->wait_ccb = NULL; result = FAILED; out: spin_unlock_irqrestore(host->host_lock, flags); return result; } static void stex_hard_reset(struct st_hba *hba) { struct pci_bus *bus; int i; u16 pci_cmd; u8 pci_bctl; for (i = 0; i < 16; i++) pci_read_config_dword(hba->pdev, i * 4, &hba->pdev->saved_config_space[i]); /* Reset secondary bus. Our controller(MU/ATU) is the only device on secondary bus. Consult Intel 80331/3 developer's manual for detail */ bus = hba->pdev->bus; pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl); pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); /* * 1 ms may be enough for 8-port controllers. But 16-port controllers * require more time to finish bus reset. 
Use 100 ms here for safety */ msleep(100); pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); for (i = 0; i < MU_HARD_RESET_WAIT; i++) { pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER)) break; msleep(1); } ssleep(5); for (i = 0; i < 16; i++) pci_write_config_dword(hba->pdev, i * 4, hba->pdev->saved_config_space[i]); } static int stex_yos_reset(struct st_hba *hba) { void __iomem *base; unsigned long flags, before; int ret = 0; base = hba->mmio_base; writel(MU_INBOUND_DOORBELL_RESET, base + IDBL); readl(base + IDBL); /* flush */ before = jiffies; while (hba->out_req_cnt > 0) { if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { printk(KERN_WARNING DRV_NAME "(%s): reset timeout\n", pci_name(hba->pdev)); ret = -1; break; } msleep(1); } spin_lock_irqsave(hba->host->host_lock, flags); if (ret == -1) hba->mu_status = MU_STATE_FAILED; else hba->mu_status = MU_STATE_STARTED; wake_up_all(&hba->reset_waitq); spin_unlock_irqrestore(hba->host->host_lock, flags); return ret; } static void stex_ss_reset(struct st_hba *hba) { writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); readl(hba->mmio_base + YH2I_INT); ssleep(5); } static void stex_p3_reset(struct st_hba *hba) { writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); ssleep(5); } static int stex_do_reset(struct st_hba *hba) { unsigned long flags; unsigned int mu_status = MU_STATE_RESETTING; spin_lock_irqsave(hba->host->host_lock, flags); if (hba->mu_status == MU_STATE_STARTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); printk(KERN_INFO DRV_NAME "(%s): request reset during init\n", pci_name(hba->pdev)); return 0; } while (hba->mu_status == MU_STATE_RESETTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); wait_event_timeout(hba->reset_waitq, hba->mu_status != MU_STATE_RESETTING, MU_MAX_DELAY * HZ); spin_lock_irqsave(hba->host->host_lock, flags); mu_status = hba->mu_status; } if (mu_status != MU_STATE_RESETTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); return (mu_status == MU_STATE_STARTED) ? 0 : -1; } hba->mu_status = MU_STATE_RESETTING; spin_unlock_irqrestore(hba->host->host_lock, flags); if (hba->cardtype == st_yosemite) return stex_yos_reset(hba); if (hba->cardtype == st_shasta) stex_hard_reset(hba); else if (hba->cardtype == st_yel) stex_ss_reset(hba); else if (hba->cardtype == st_P3) stex_p3_reset(hba); return_abnormal_state(hba, DID_RESET); if (stex_handshake(hba) == 0) return 0; printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n", pci_name(hba->pdev)); return -1; } static int stex_reset(struct scsi_cmnd *cmd) { struct st_hba *hba; hba = (struct st_hba *) &cmd->device->host->hostdata[0]; shost_printk(KERN_INFO, cmd->device->host, "resetting host\n"); return stex_do_reset(hba) ? 
FAILED : SUCCESS; } static void stex_reset_work(struct work_struct *work) { struct st_hba *hba = container_of(work, struct st_hba, reset_work); stex_do_reset(hba); } static int stex_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads = 255, sectors = 63; if (capacity < 0x200000) { heads = 64; sectors = 32; } sector_div(capacity, heads * sectors); geom[0] = heads; geom[1] = sectors; geom[2] = capacity; return 0; } static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = DRV_NAME, .proc_name = DRV_NAME, .bios_param = stex_biosparam, .queuecommand = stex_queuecommand, .slave_configure = stex_slave_config, .eh_abort_handler = stex_abort, .eh_host_reset_handler = stex_reset, .this_id = -1, .dma_boundary = PAGE_SIZE - 1, }; static struct pci_device_id stex_pci_tbl[] = { /* st_shasta */ { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */ { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX12350 */ { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX4350 */ { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX24350 */ /* st_vsc */ { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, /* st_yosemite */ { 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite }, /* st_seq */ { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq }, /* st_yel */ { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel }, { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel }, /* st_P3, pluto */ { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE, 0x8870, 0, 0, st_P3 }, /* st_P3, p3 */ { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE, 0x4300, 0, 0, st_P3 }, /* st_P3, SymplyStor4E */ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 0x4311, 0, 0, st_P3 }, /* st_P3, SymplyStor8E */ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 0x4312, 0, 0, st_P3 }, /* st_P3, SymplyStor4 */ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 0x4321, 0, 0, st_P3 }, /* st_P3, SymplyStor8 */ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 0x4322, 0, 0, st_P3 }, { } /* terminate list */ }; static struct st_card_info stex_card_info[] = { /* st_shasta */ { .max_id = 17, .max_lun = 8, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_vsc */ { .max_id = 129, .max_lun = 1, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_yosemite */ { .max_id = 2, .max_lun = 256, .max_channel = 0, .rq_count = 256, .rq_size = 1048, .sts_count = 256, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_seq */ { .max_id = 129, .max_lun = 1, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_yel */ { .max_id = 129, .max_lun = 256, .max_channel = 3, .rq_count = 801, .rq_size = 512, .sts_count = 801, .alloc_rq = stex_ss_alloc_req, .map_sg = stex_ss_map_sg, .send = stex_ss_send_cmd, }, /* st_P3 */ { .max_id = 129, .max_lun = 256, .max_channel = 0, .rq_count = 801, .rq_size = 512, .sts_count = 801, .alloc_rq = stex_ss_alloc_req, .map_sg = stex_ss_map_sg, .send = stex_ss_send_cmd, }, }; static int stex_request_irq(struct st_hba *hba) { struct pci_dev *pdev = hba->pdev; int status; if (msi || hba->cardtype 
== st_P3) { status = pci_enable_msi(pdev); if (status != 0) printk(KERN_ERR DRV_NAME "(%s): error %d setting up MSI\n", pci_name(pdev), status); else hba->msi_enabled = 1; } else hba->msi_enabled = 0; status = request_irq(pdev->irq, (hba->cardtype == st_yel || hba->cardtype == st_P3) ? stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); if (status != 0) { if (hba->msi_enabled) pci_disable_msi(pdev); } return status; } static void stex_free_irq(struct st_hba *hba) { struct pci_dev *pdev = hba->pdev; free_irq(pdev->irq, hba); if (hba->msi_enabled) pci_disable_msi(pdev); } static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct st_hba *hba; struct Scsi_Host *host; const struct st_card_info *ci = NULL; u32 sts_offset, cp_offset, scratch_offset; int err; err = pci_enable_device(pdev); if (err) return err; pci_set_master(pdev); S6flag = 0; register_reboot_notifier(&stex_notifier); host = scsi_host_alloc(&driver_template, sizeof(struct st_hba)); if (!host) { printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto out_disable; } hba = (struct st_hba *)host->hostdata; memset(hba, 0, sizeof(struct st_hba)); err = pci_request_regions(pdev, DRV_NAME); if (err < 0) { printk(KERN_ERR DRV_NAME "(%s): request regions failed\n", pci_name(pdev)); goto out_scsi_host_put; } hba->mmio_base = pci_ioremap_bar(pdev, 0); if ( !hba->mmio_base) { printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", pci_name(pdev)); err = -ENOMEM; goto out_release_regions; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", pci_name(pdev)); goto out_iounmap; } hba->cardtype = (unsigned int) id->driver_data; ci = &stex_card_info[hba->cardtype]; switch (id->subdevice) { case 0x4221: case 0x4222: case 0x4223: case 0x4224: case 0x4225: case 0x4226: case 0x4227: case 0x4261: case 0x4262: case 0x4263: case 0x4264: case 0x4265: break; default: if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba->supports_pm = 1; } sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size; if (hba->cardtype == st_yel || hba->cardtype == st_P3) sts_offset += (ci->sts_count+1) * sizeof(u32); cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg); hba->dma_size = cp_offset + sizeof(struct st_frame); if (hba->cardtype == st_seq || (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { hba->extra_offset = hba->dma_size; hba->dma_size += ST_ADDITIONAL_MEM; } hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); if (!hba->dma_mem) { /* Retry minimum coherent mapping for st_seq and st_vsc */ if (hba->cardtype == st_seq || (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { printk(KERN_WARNING DRV_NAME "(%s): allocating min buffer for controller\n", pci_name(pdev)); hba->dma_size = hba->extra_offset + ST_ADDITIONAL_MEM_MIN; hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); } if (!hba->dma_mem) { err = -ENOMEM; printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", pci_name(pdev)); goto out_iounmap; } } hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); if (!hba->ccb) { err = -ENOMEM; printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n", pci_name(pdev)); goto out_pci_free; } if (hba->cardtype == st_yel || hba->cardtype == st_P3) hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); hba->status_buffer = 
(struct status_msg *)(hba->dma_mem + sts_offset); hba->copy_buffer = hba->dma_mem + cp_offset; hba->rq_count = ci->rq_count; hba->rq_size = ci->rq_size; hba->sts_count = ci->sts_count; hba->alloc_rq = ci->alloc_rq; hba->map_sg = ci->map_sg; hba->send = ci->send; hba->mu_status = MU_STATE_STARTING; hba->msi_lock = 0; if (hba->cardtype == st_yel || hba->cardtype == st_P3) host->sg_tablesize = 38; else host->sg_tablesize = 32; host->can_queue = ci->rq_count; host->cmd_per_lun = ci->rq_count; host->max_id = ci->max_id; host->max_lun = ci->max_lun; host->max_channel = ci->max_channel; host->unique_id = host->host_no; host->max_cmd_len = STEX_CDB_LENGTH; hba->host = host; hba->pdev = pdev; init_waitqueue_head(&hba->reset_waitq); snprintf(hba->work_q_name, sizeof(hba->work_q_name), "stex_wq_%d", host->host_no); hba->work_q = create_singlethread_workqueue(hba->work_q_name); if (!hba->work_q) { printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", pci_name(pdev)); err = -ENOMEM; goto out_ccb_free; } INIT_WORK(&hba->reset_work, stex_reset_work); err = stex_request_irq(hba); if (err) { printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", pci_name(pdev)); goto out_free_wq; } err = stex_handshake(hba); if (err) goto out_free_irq; pci_set_drvdata(pdev, hba); err = scsi_add_host(host, &pdev->dev); if (err) { printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n", pci_name(pdev)); goto out_free_irq; } scsi_scan_host(host); return 0; out_free_irq: stex_free_irq(hba); out_free_wq: destroy_workqueue(hba->work_q); out_ccb_free: kfree(hba->ccb); out_pci_free: dma_free_coherent(&pdev->dev, hba->dma_size, hba->dma_mem, hba->dma_handle); out_iounmap: iounmap(hba->mmio_base); out_release_regions: pci_release_regions(pdev); out_scsi_host_put: scsi_host_put(host); out_disable: pci_disable_device(pdev); return err; } static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic) { struct req_msg *req; struct st_msg_header *msg_h; unsigned long flags; unsigned long before; u16 tag = 0; spin_lock_irqsave(hba->host->host_lock, flags); if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && hba->supports_pm == 1) { if (st_sleep_mic == ST_NOTHANDLED) { spin_unlock_irqrestore(hba->host->host_lock, flags); return; } } req = hba->alloc_rq(hba); if (hba->cardtype == st_yel || hba->cardtype == st_P3) { msg_h = (struct st_msg_header *)req - 1; memset(msg_h, 0, hba->rq_size); } else memset(req, 0, hba->rq_size); if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel || hba->cardtype == st_P3) && st_sleep_mic == ST_IGNORED) { req->cdb[0] = MGT_CMD; req->cdb[1] = MGT_CMD_SIGNATURE; req->cdb[2] = CTLR_CONFIG_CMD; req->cdb[3] = CTLR_SHUTDOWN; } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && st_sleep_mic != ST_IGNORED) { req->cdb[0] = MGT_CMD; req->cdb[1] = MGT_CMD_SIGNATURE; req->cdb[2] = CTLR_CONFIG_CMD; req->cdb[3] = PMIC_SHUTDOWN; req->cdb[4] = st_sleep_mic; } else { req->cdb[0] = CONTROLLER_CMD; req->cdb[1] = CTLR_POWER_STATE_CHANGE; req->cdb[2] = CTLR_POWER_SAVING; } hba->ccb[tag].cmd = NULL; hba->ccb[tag].sg_count = 0; hba->ccb[tag].sense_bufflen = 0; hba->ccb[tag].sense_buffer = NULL; hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; hba->send(hba, req, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); before = jiffies; while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { hba->ccb[tag].req_type = 0; hba->mu_status = MU_STATE_STOP; return; } msleep(1); } hba->mu_status = MU_STATE_STOP; } static void stex_hba_free(struct st_hba 
*hba) { stex_free_irq(hba); destroy_workqueue(hba->work_q); iounmap(hba->mmio_base); pci_release_regions(hba->pdev); kfree(hba->ccb); dma_free_coherent(&hba->pdev->dev, hba->dma_size, hba->dma_mem, hba->dma_handle); } static void stex_remove(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); hba->mu_status = MU_STATE_NOCONNECT; return_abnormal_state(hba, DID_NO_CONNECT); scsi_remove_host(hba->host); scsi_block_requests(hba->host); stex_hba_free(hba); scsi_host_put(hba->host); pci_disable_device(pdev); unregister_reboot_notifier(&stex_notifier); } static void stex_shutdown(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); if (hba->supports_pm == 0) { stex_hba_stop(hba, ST_IGNORED); } else if (hba->supports_pm == 1 && S6flag) { unregister_reboot_notifier(&stex_notifier); stex_hba_stop(hba, ST_S6); } else stex_hba_stop(hba, ST_S5); } static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state) { switch (state.event) { case PM_EVENT_SUSPEND: return ST_S3; case PM_EVENT_HIBERNATE: hba->msi_lock = 0; return ST_S4; default: return ST_NOTHANDLED; } } static int stex_suspend(struct pci_dev *pdev, pm_message_t state) { struct st_hba *hba = pci_get_drvdata(pdev); if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && hba->supports_pm == 1) stex_hba_stop(hba, stex_choice_sleep_mic(hba, state)); else stex_hba_stop(hba, ST_IGNORED); return 0; } static int stex_resume(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); hba->mu_status = MU_STATE_STARTING; stex_handshake(hba); return 0; } static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf) { S6flag = 1; return NOTIFY_OK; } MODULE_DEVICE_TABLE(pci, stex_pci_tbl); static struct pci_driver stex_pci_driver = { .name = DRV_NAME, .id_table = stex_pci_tbl, .probe = stex_probe, .remove = stex_remove, .shutdown = stex_shutdown, .suspend = stex_suspend, .resume = stex_resume, }; static int __init stex_init(void) { printk(KERN_INFO DRV_NAME ": Promise SuperTrak EX Driver version: %s\n", ST_DRIVER_VERSION); return pci_register_driver(&stex_pci_driver); } static void __exit stex_exit(void) { pci_unregister_driver(&stex_pci_driver); } module_init(stex_init); module_exit(stex_exit);
linux-master
drivers/scsi/stex.c
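A hypothetical, self-contained userspace sketch of the disk-geometry heuristic that stex_biosparam() above applies: capacities below 0x200000 sectors get a 64-head/32-sector translation, larger disks get 255/63, and the cylinder count is the capacity divided by heads*sectors. The capacity values and the main() harness here are illustrative assumptions, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the stex_biosparam() geometry heuristic, in plain C. */
static void stex_like_geometry(uint64_t capacity, int geom[3])
{
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {	/* small disk: use 64/32 translation */
		heads = 64;
		sectors = 32;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = (int)(capacity / (heads * sectors));	/* cylinders */
}

int main(void)
{
	/* made-up capacities, chosen only to exercise both branches */
	uint64_t capacities[] = { 0x100000, 0x4000000 };
	int geom[3];

	for (int i = 0; i < 2; i++) {
		stex_like_geometry(capacities[i], geom);
		printf("capacity=%llu -> C/H/S = %d/%d/%d\n",
		       (unsigned long long)capacities[i],
		       geom[2], geom[0], geom[1]);
	}
	return 0;
}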
// SPDX-License-Identifier: GPL-2.0-only /* * Generic Generic NCR5380 driver * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * [email protected] * +1 (303) 440-4894 * * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin * [email protected] * * NCR53C400A extensions (c) 1996, Ingmar Baumgart * [email protected] * * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg * [email protected] or [email protected] * * Added ISAPNP support for DTC436 adapters, * Thomas Sailer, [email protected] * * See Documentation/scsi/g_NCR5380.rst for more info. */ #include <asm/io.h> #include <linux/blkdev.h> #include <linux/module.h> #include <scsi/scsi_host.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/interrupt.h> /* Definitions for the core NCR5380 driver. */ #define NCR5380_read(reg) \ ioread8(hostdata->io + hostdata->offset + (reg)) #define NCR5380_write(reg, value) \ iowrite8(value, hostdata->io + hostdata->offset + (reg)) #define NCR5380_implementation_fields \ int offset; \ int c400_ctl_status; \ int c400_blk_cnt; \ int c400_host_buf; \ int io_width; \ int pdma_residual; \ int board #define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len #define NCR5380_dma_recv_setup generic_NCR5380_precv #define NCR5380_dma_send_setup generic_NCR5380_psend #define NCR5380_dma_residual generic_NCR5380_dma_residual #define NCR5380_intr generic_NCR5380_intr #define NCR5380_queue_command generic_NCR5380_queue_command #define NCR5380_abort generic_NCR5380_abort #define NCR5380_host_reset generic_NCR5380_host_reset #define NCR5380_info generic_NCR5380_info #define NCR5380_io_delay(x) udelay(x) #include "NCR5380.h" #define DRV_MODULE_NAME "g_NCR5380" #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 #define NCR53C400_region_size 0x3a00 #define BOARD_NCR5380 0 #define BOARD_NCR53C400 1 #define BOARD_NCR53C400A 2 #define BOARD_DTC3181E 3 #define BOARD_HP_C2502 4 #define IRQ_AUTO 254 #define MAX_CARDS 8 #define DMA_MAX_SIZE 32768 /* old-style parameters for compatibility */ static int ncr_irq = -1; static int ncr_addr; static int ncr_5380; static int ncr_53c400; static int ncr_53c400a; static int dtc_3181e; static int hp_c2502; module_param_hw(ncr_irq, int, irq, 0); module_param_hw(ncr_addr, int, ioport, 0); module_param(ncr_5380, int, 0); module_param(ncr_53c400, int, 0); module_param(ncr_53c400a, int, 0); module_param(dtc_3181e, int, 0); module_param(hp_c2502, int, 0); static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])"); static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; module_param_hw_array(base, int, ioport, NULL, 0); MODULE_PARM_DESC(base, "base address(es)"); static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_array(card, int, NULL, 0); MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); MODULE_ALIAS("g_NCR5380_mmio"); MODULE_LICENSE("GPL"); static void g_NCR5380_trigger_irq(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); /* * An interrupt is triggered whenever BSY = false, SEL = true * and a bit set in the SELECT_ENABLE_REG is asserted on the * SCSI bus. * * Note that the bus is only driven when the phase control signals * (I/O, C/D, and MSG) match those in the TCR. 
*/ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); msleep(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(SELECT_ENABLE_REG, 0); NCR5380_write(TARGET_COMMAND_REG, 0); } /** * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent * @instance: SCSI host instance * * Autoprobe for the IRQ line used by the card by triggering an IRQ * and then looking to see what interrupt actually turned up. */ static int g_NCR5380_probe_irq(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int irq_mask, irq; NCR5380_read(RESET_PARITY_INTERRUPT_REG); irq_mask = probe_irq_on(); g_NCR5380_trigger_irq(instance); irq = probe_irq_off(irq_mask); NCR5380_read(RESET_PARITY_INTERRUPT_REG); if (irq <= 0) return NO_IRQ; return irq; } /* * Configure I/O address of 53C400A or DTC436 by writing magic numbers * to ports 0x779 and 0x379. */ static void magic_configure(int idx, u8 irq, u8 magic[]) { u8 cfg = 0; outb(magic[0], 0x779); outb(magic[1], 0x379); outb(magic[2], 0x379); outb(magic[3], 0x379); outb(magic[4], 0x379); if (irq == 9) irq = 2; if (idx >= 0 && idx <= 7) cfg = 0x80 | idx | (irq << 4); outb(cfg, 0x379); } static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id) { return IRQ_HANDLED; } static int legacy_find_free_irq(int *irq_table) { while (*irq_table != -1) { if (!request_irq(*irq_table, legacy_empty_irq_handler, IRQF_PROBE_SHARED, "Test IRQ", (void *)irq_table)) { free_irq(*irq_table, (void *) irq_table); return *irq_table; } irq_table++; } return -1; } static unsigned int ncr_53c400a_ports[] = { 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 }; static unsigned int dtc_3181e_ports[] = { 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 }; static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ 0x59, 0xb9, 0xc5, 0xae, 0xa6 }; static u8 hp_c2502_magic[] = { /* HP C2502 */ 0x0f, 0x22, 0xf0, 0x20, 0x80 }; static int hp_c2502_irqs[] = { 9, 5, 7, 3, 4, -1 }; static int generic_NCR5380_init_one(const struct scsi_host_template *tpnt, struct device *pdev, int base, int irq, int board) { bool is_pmio = base <= 0xffff; int ret; int flags = 0; unsigned int *ports = NULL; u8 *magic = NULL; int i; int port_idx = -1; unsigned long region_size; struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; u8 __iomem *iomem; switch (board) { case BOARD_NCR5380: flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; break; case BOARD_NCR53C400A: ports = ncr_53c400a_ports; magic = ncr_53c400a_magic; break; case BOARD_HP_C2502: ports = ncr_53c400a_ports; magic = hp_c2502_magic; break; case BOARD_DTC3181E: ports = dtc_3181e_ports; magic = ncr_53c400a_magic; break; } if (is_pmio && ports && magic) { /* wakeup sequence for the NCR53C400A and DTC3181E */ /* Disable the adapter and look for a free io port */ magic_configure(-1, 0, magic); region_size = 16; if (base) for (i = 0; ports[i]; i++) { if (base == ports[i]) { /* index found */ if (!request_region(ports[i], region_size, "ncr53c80")) return -EBUSY; break; } } else for (i = 0; ports[i]; i++) { if (!request_region(ports[i], region_size, "ncr53c80")) continue; if (inb(ports[i]) == 0xff) break; release_region(ports[i], region_size); } if (ports[i]) { /* At this point we have our region reserved */ magic_configure(i, 0, magic); /* no IRQ yet */ base = ports[i]; outb(0xc0, 
base + 9); if (inb(base + 9) != 0x80) { ret = -ENODEV; goto out_release; } port_idx = i; } else return -EINVAL; } else if (is_pmio) { /* NCR5380 - no configuration, just grab */ region_size = 8; if (!base || !request_region(base, region_size, "ncr5380")) return -EBUSY; } else { /* MMIO */ region_size = NCR53C400_region_size; if (!request_mem_region(base, region_size, "ncr5380")) return -EBUSY; } if (is_pmio) iomem = ioport_map(base, region_size); else iomem = ioremap(base, region_size); if (!iomem) { ret = -ENOMEM; goto out_release; } instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); if (instance == NULL) { ret = -ENOMEM; goto out_unmap; } hostdata = shost_priv(instance); hostdata->board = board; hostdata->io = iomem; hostdata->region_size = region_size; if (is_pmio) { hostdata->io_port = base; hostdata->io_width = 1; /* 8-bit PDMA by default */ hostdata->offset = 0; /* * On NCR53C400 boards, NCR5380 registers are mapped 8 past * the base address. */ switch (board) { case BOARD_NCR53C400: hostdata->io_port += 8; hostdata->c400_ctl_status = 0; hostdata->c400_blk_cnt = 1; hostdata->c400_host_buf = 4; break; case BOARD_DTC3181E: hostdata->io_width = 2; /* 16-bit PDMA */ fallthrough; case BOARD_NCR53C400A: case BOARD_HP_C2502: hostdata->c400_ctl_status = 9; hostdata->c400_blk_cnt = 10; hostdata->c400_host_buf = 8; break; } } else { hostdata->base = base; hostdata->offset = NCR53C400_mem_base; switch (board) { case BOARD_NCR53C400: hostdata->c400_ctl_status = 0x100; hostdata->c400_blk_cnt = 0x101; hostdata->c400_host_buf = 0x104; break; case BOARD_DTC3181E: case BOARD_NCR53C400A: case BOARD_HP_C2502: pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); ret = -EINVAL; goto out_unregister; } } /* Check for vacant slot */ NCR5380_write(MODE_REG, 0); if (NCR5380_read(MODE_REG) != 0) { ret = -ENODEV; goto out_unregister; } ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); if (ret) goto out_unregister; switch (board) { case BOARD_NCR53C400: case BOARD_DTC3181E: case BOARD_NCR53C400A: case BOARD_HP_C2502: NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } NCR5380_maybe_reset_bus(instance); /* Compatibility with documented NCR5380 kernel parameters */ if (irq == 255 || irq == 0) irq = NO_IRQ; else if (irq == -1) irq = IRQ_AUTO; if (board == BOARD_HP_C2502) { int *irq_table = hp_c2502_irqs; int board_irq = -1; switch (irq) { case NO_IRQ: board_irq = 0; break; case IRQ_AUTO: board_irq = legacy_find_free_irq(irq_table); break; default: while (*irq_table != -1) if (*irq_table++ == irq) board_irq = irq; } if (board_irq <= 0) { board_irq = 0; irq = NO_IRQ; } magic_configure(port_idx, board_irq, magic); } if (irq == IRQ_AUTO) { instance->irq = g_NCR5380_probe_irq(instance); if (instance->irq == NO_IRQ) shost_printk(KERN_INFO, instance, "no irq detected\n"); } else { instance->irq = irq; if (instance->irq == NO_IRQ) shost_printk(KERN_INFO, instance, "no irq provided\n"); } if (instance->irq != NO_IRQ) { if (request_irq(instance->irq, generic_NCR5380_intr, 0, "NCR5380", instance)) { instance->irq = NO_IRQ; shost_printk(KERN_INFO, instance, "irq %d denied\n", instance->irq); } else { shost_printk(KERN_INFO, instance, "irq %d acquired\n", instance->irq); } } ret = scsi_add_host(instance, pdev); if (ret) goto out_free_irq; scsi_scan_host(instance); dev_set_drvdata(pdev, instance); return 0; out_free_irq: if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); out_unregister: scsi_host_put(instance); out_unmap: iounmap(iomem); out_release: if (is_pmio) 
release_region(base, region_size); else release_mem_region(base, region_size); return ret; } static void generic_NCR5380_release_resources(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); void __iomem *iomem = hostdata->io; unsigned long io_port = hostdata->io_port; unsigned long base = hostdata->base; unsigned long region_size = hostdata->region_size; scsi_remove_host(instance); if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); scsi_host_put(instance); iounmap(iomem); if (io_port) release_region(io_port, region_size); else release_mem_region(base, region_size); } /* wait_for_53c80_access - wait for 53C80 registers to become accessible * @hostdata: scsi host private data * * The registers within the 53C80 logic block are inaccessible until * bit 7 in the 53C400 control status register gets asserted. */ static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata) { int count = 10000; do { if (hostdata->board == BOARD_DTC3181E) udelay(4); /* DTC436 chip hangs without this */ if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG) return; } while (--count > 0); scmd_printk(KERN_ERR, hostdata->connected, "53c80 registers not accessible, device will be reset\n"); NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } /** * generic_NCR5380_precv - pseudo DMA receive * @hostdata: scsi host private data * @dst: buffer to write into * @len: transfer size * * Perform a pseudo DMA mode receive from a 53C400 or equivalent device. */ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { int residual; int start = 0; NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR); NCR5380_write(hostdata->c400_blk_cnt, len / 128); do { if (start == len - 128) { /* Ignore End of DMA interrupt for the final buffer */ if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status, CSR_HOST_BUF_NOT_RDY, 0, 0) < 0) break; } else { if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, CSR_HOST_BUF_NOT_RDY, 0, hostdata->c400_ctl_status, CSR_GATED_53C80_IRQ, CSR_GATED_53C80_IRQ, 0) < 0 || NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) break; } if (hostdata->io_port && hostdata->io_width == 2) insw(hostdata->io_port + hostdata->c400_host_buf, dst + start, 64); else if (hostdata->io_port) insb(hostdata->io_port + hostdata->c400_host_buf, dst + start, 128); else memcpy_fromio(dst + start, hostdata->io + NCR53C400_host_buffer, 128); start += 128; } while (start < len); residual = len - start; if (residual != 0) { /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */ NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } wait_for_53c80_access(hostdata); if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER, 0) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", __func__); hostdata->pdma_residual = residual; return 0; } /** * generic_NCR5380_psend - pseudo DMA send * @hostdata: scsi host private data * @src: buffer to read from * @len: transfer size * * Perform a pseudo DMA mode send to a 53C400 or equivalent device. 
*/ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata, unsigned char *src, int len) { int residual; int start = 0; NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); NCR5380_write(hostdata->c400_blk_cnt, len / 128); do { if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, CSR_HOST_BUF_NOT_RDY, 0, hostdata->c400_ctl_status, CSR_GATED_53C80_IRQ, CSR_GATED_53C80_IRQ, 0) < 0 || NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) { /* Both 128 B buffers are in use */ if (start >= 128) start -= 128; if (start >= 128) start -= 128; break; } if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0) break; if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { /* Host buffer is empty, other one is in use */ if (start >= 128) start -= 128; break; } if (start >= len) continue; if (hostdata->io_port && hostdata->io_width == 2) outsw(hostdata->io_port + hostdata->c400_host_buf, src + start, 64); else if (hostdata->io_port) outsb(hostdata->io_port + hostdata->c400_host_buf, src + start, 128); else memcpy_toio(hostdata->io + NCR53C400_host_buffer, src + start, 128); start += 128; } while (1); residual = len - start; if (residual != 0) { /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */ NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } wait_for_53c80_access(hostdata); if (residual == 0) { if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, 0) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: Last Byte Sent timeout\n", __func__); if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER, 0) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", __func__); } hostdata->pdma_residual = residual; return 0; } static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { int transfersize = NCR5380_to_ncmd(cmd)->this_residual; if (hostdata->flags & FLAG_NO_PSEUDO_DMA) return 0; /* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */ if (transfersize % 128) return 0; /* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */ if (hostdata->board == BOARD_DTC3181E && cmd->sc_data_direction == DMA_TO_DEVICE) transfersize = min(transfersize, 512); return min(transfersize, DMA_MAX_SIZE); } static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata) { return hostdata->pdma_residual; } /* Include the core driver code. 
*/ #include "NCR5380.c" static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = "Generic NCR5380/NCR53C400 SCSI", .info = generic_NCR5380_info, .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, .eh_host_reset_handler = generic_NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), .max_sectors = 128, }; static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) { int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev], irq[ndev], card[ndev]); if (ret) { if (base[ndev]) printk(KERN_WARNING "Card not found at address 0x%03x\n", base[ndev]); return 0; } return 1; } static void generic_NCR5380_isa_remove(struct device *pdev, unsigned int ndev) { generic_NCR5380_release_resources(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); } static struct isa_driver generic_NCR5380_isa_driver = { .match = generic_NCR5380_isa_match, .remove = generic_NCR5380_isa_remove, .driver = { .name = DRV_MODULE_NAME }, }; #ifdef CONFIG_PNP static const struct pnp_device_id generic_NCR5380_pnp_ids[] = { { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids); static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) { int base, irq; if (pnp_activate_dev(pdev) < 0) return -EBUSY; base = pnp_port_start(pdev, 0); irq = pnp_irq(pdev, 0); return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq, id->driver_data); } static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev) { generic_NCR5380_release_resources(pnp_get_drvdata(pdev)); pnp_set_drvdata(pdev, NULL); } static struct pnp_driver generic_NCR5380_pnp_driver = { .name = DRV_MODULE_NAME, .id_table = generic_NCR5380_pnp_ids, .probe = generic_NCR5380_pnp_probe, .remove = generic_NCR5380_pnp_remove, }; #endif /* defined(CONFIG_PNP) */ static int pnp_registered, isa_registered; static int __init generic_NCR5380_init(void) { int ret = 0; /* compatibility with old-style parameters */ if (irq[0] == -1 && base[0] == 0 && card[0] == -1) { irq[0] = ncr_irq; base[0] = ncr_addr; if (ncr_5380) card[0] = BOARD_NCR5380; if (ncr_53c400) card[0] = BOARD_NCR53C400; if (ncr_53c400a) card[0] = BOARD_NCR53C400A; if (dtc_3181e) card[0] = BOARD_DTC3181E; if (hp_c2502) card[0] = BOARD_HP_C2502; } #ifdef CONFIG_PNP if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) pnp_registered = 1; #endif ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS); if (!ret) isa_registered = 1; return (pnp_registered || isa_registered) ? 0 : ret; } static void __exit generic_NCR5380_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_driver(&generic_NCR5380_pnp_driver); #endif if (isa_registered) isa_unregister_driver(&generic_NCR5380_isa_driver); } module_init(generic_NCR5380_init); module_exit(generic_NCR5380_exit);
linux-master
drivers/scsi/g_NCR5380.c
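A hypothetical userspace restatement of the transfer-length gating done by generic_NCR5380_dma_xfer_len() above: pseudo-DMA is declined (length 0) when the controller has no pseudo-DMA or the residual is not a multiple of the 128-byte host buffer, DTC3181E writes are capped at 512 bytes to avoid corruption, and the result is bounded by DMA_MAX_SIZE. The boolean parameters and the main() harness are assumptions made for illustration.

#include <stdio.h>
#include <stdbool.h>

#define DMA_MAX_SIZE 32768

/* Simplified stand-in for the decision made by generic_NCR5380_dma_xfer_len(). */
static int pdma_xfer_len(int residual, bool no_pseudo_dma,
			 bool is_dtc3181e, bool is_write)
{
	int transfersize = residual;

	if (no_pseudo_dma)
		return 0;			/* fall back to PIO */

	if (transfersize % 128)
		return 0;			/* non-modulo-128 transfers use PIO */

	if (is_dtc3181e && is_write && transfersize > 512)
		transfersize = 512;		/* avoid DTC3181E write corruption */

	return transfersize < DMA_MAX_SIZE ? transfersize : DMA_MAX_SIZE;
}

int main(void)
{
	printf("%d\n", pdma_xfer_len(4096, false, false, false));	/* 4096 */
	printf("%d\n", pdma_xfer_len(4100, false, false, false));	/* 0: not modulo 128 */
	printf("%d\n", pdma_xfer_len(4096, false, true, true));	/* capped at 512 */
	printf("%d\n", pdma_xfer_len(65536, false, false, false));	/* DMA_MAX_SIZE */
	return 0;
}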
/* * atari_scsi.c -- Device dependent functions for the Atari generic SCSI port * * Copyright 1994 Roman Hodek <[email protected]> * * Loosely based on the work of Robert De Vries' team and added: * - working real DMA * - Falcon support (untested yet!) ++bjoern fixed and now it works * - lots of extensions and bug fixes. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * */ /* * Notes for Falcon SCSI DMA * * The 5380 device is one of several that all share the DMA chip. Hence * "locking" and "unlocking" access to this chip is required. * * Two possible schemes for ST DMA acquisition by atari_scsi are: * 1) The lock is taken for each command separately (i.e. can_queue == 1). * 2) The lock is taken when the first command arrives and released * when the last command is finished (i.e. can_queue > 1). * * The first alternative limits SCSI bus utilization, since interleaving * commands is not possible. The second gives better performance but is * unfair to other drivers needing to use the ST DMA chip. In order to * allow the IDE and floppy drivers equal access to the ST DMA chip * the default is can_queue == 1. */ #include <linux/module.h> #include <linux/types.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/bitops.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/atari_stram.h> #include <asm/io.h> #include <scsi/scsi_host.h> #define DMA_MIN_SIZE 32 /* Definitions for the core NCR5380 driver. */ #define NCR5380_implementation_fields /* none */ static u8 (*atari_scsi_reg_read)(unsigned int); static void (*atari_scsi_reg_write)(unsigned int, u8); #define NCR5380_read(reg) atari_scsi_reg_read(reg) #define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value) #define NCR5380_queue_command atari_scsi_queue_command #define NCR5380_abort atari_scsi_abort #define NCR5380_info atari_scsi_info #define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len #define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup #define NCR5380_dma_send_setup atari_scsi_dma_send_setup #define NCR5380_dma_residual atari_scsi_dma_residual #define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance) #define NCR5380_release_dma_irq(instance) falcon_release_lock() #include "NCR5380.h" #define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) #define SCSI_DMA_WRITE_P(elt,val) \ do { \ unsigned long v = val; \ tt_scsi_dma.elt##_lo = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_lmd = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_hmd = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_hi = v & 0xff; \ } while(0) #define SCSI_DMA_READ_P(elt) \ (((((((unsigned long)tt_scsi_dma.elt##_hi << 8) | \ (unsigned long)tt_scsi_dma.elt##_hmd) << 8) | \ (unsigned long)tt_scsi_dma.elt##_lmd) << 8) | \ (unsigned long)tt_scsi_dma.elt##_lo) static inline void SCSI_DMA_SETADR(unsigned long adr) { st_dma.dma_lo = (unsigned char)adr; MFPDELAY(); adr >>= 8; st_dma.dma_md = (unsigned char)adr; MFPDELAY(); adr >>= 8; st_dma.dma_hi = (unsigned char)adr; MFPDELAY(); } static inline unsigned long SCSI_DMA_GETADR(void) { unsigned long adr; adr = st_dma.dma_lo; MFPDELAY(); adr |= (st_dma.dma_md & 0xff) << 8; MFPDELAY(); adr |= (st_dma.dma_hi & 0xff) << 16; MFPDELAY(); return adr; } static void atari_scsi_fetch_restbytes(void); static unsigned long atari_dma_residual, 
atari_dma_startaddr; static short atari_dma_active; /* pointer to the dribble buffer */ static char *atari_dma_buffer; /* precalculated physical address of the dribble buffer */ static unsigned long atari_dma_phys_buffer; /* != 0 tells the Falcon int handler to copy data from the dribble buffer */ static char *atari_dma_orig_addr; /* size of the dribble buffer; 4k seems enough, since the Falcon cannot use * scatter-gather anyway, so most transfers are 1024 byte only. In the rare * cases where requests to physical contiguous buffers have been merged, this * request is <= 4k (one page). So I don't think we have to split transfers * just due to this buffer size... */ #define STRAM_BUFFER_SIZE (4096) /* mask for address bits that can't be used with the ST-DMA */ static unsigned long atari_dma_stram_mask; #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); static int setup_toshiba_delay = -1; module_param(setup_toshiba_delay, int, 0); static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) { int i; unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr; if (dma_stat & 0x01) { /* A bus error happens when DMA-ing from the last page of a * physical memory chunk (DMA prefetch!), but that doesn't hurt. * Check for this case: */ for (i = 0; i < m68k_num_memory; ++i) { end_addr = m68k_memory[i].addr + m68k_memory[i].size; if (end_addr <= addr && addr <= end_addr + 4) return 1; } } return 0; } static irqreturn_t scsi_tt_intr(int irq, void *dev) { struct Scsi_Host *instance = dev; struct NCR5380_hostdata *hostdata = shost_priv(instance); int dma_stat; dma_stat = tt_scsi_dma.dma_ctrl; dsprintk(NDEBUG_INTR, instance, "NCR5380 interrupt, DMA status = %02x\n", dma_stat & 0xff); /* Look if it was the DMA that has interrupted: First possibility * is that a bus error occurred... */ if (dma_stat & 0x80) { if (!scsi_dma_is_ignored_buserr(dma_stat)) { printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", SCSI_DMA_READ_P(dma_addr)); printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); } } /* If the DMA is active but not finished, we have the case * that some other 5380 interrupt occurred within the DMA transfer. * This means we have residual bytes, if the desired end address * is not yet reached. Maybe we have to fetch some bytes from the * rest data register, too. The residual must be calculated from * the address pointer, not the counter register, because only the * addr reg counts bytes not yet written and pending in the rest * data reg! */ if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { atari_dma_residual = hostdata->dma_len - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); if ((signed int)atari_dma_residual < 0) atari_dma_residual = 0; if ((dma_stat & 1) == 0) { /* * After read operations, we maybe have to * transport some rest bytes */ atari_scsi_fetch_restbytes(); } else { /* * There seems to be a nasty bug in some SCSI-DMA/NCR * combinations: If a target disconnects while a write * operation is going on, the address register of the * DMA may be a few bytes farer than it actually read. * This is probably due to DMA prefetching and a delay * between DMA and NCR. 
Experiments showed that the * dma_addr is 9 bytes to high, but this could vary. * The problem is, that the residual is thus calculated * wrong and the next transfer will start behind where * it should. So we round up the residual to the next * multiple of a sector size, if it isn't already a * multiple and the originally expected transfer size * was. The latter condition is there to ensure that * the correction is taken only for "real" data * transfers and not for, e.g., the parameters of some * other command. These shouldn't disconnect anyway. */ if (atari_dma_residual & 0x1ff) { dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, " "difference %ld bytes\n", 512 - (atari_dma_residual & 0x1ff)); atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; } } tt_scsi_dma.dma_ctrl = 0; } /* If the DMA is finished, fetch the rest bytes and turn it off */ if (dma_stat & 0x40) { atari_dma_residual = 0; if ((dma_stat & 1) == 0) atari_scsi_fetch_restbytes(); tt_scsi_dma.dma_ctrl = 0; } NCR5380_intr(irq, dev); return IRQ_HANDLED; } static irqreturn_t scsi_falcon_intr(int irq, void *dev) { struct Scsi_Host *instance = dev; struct NCR5380_hostdata *hostdata = shost_priv(instance); int dma_stat; /* Turn off DMA and select sector counter register before * accessing the status register (Atari recommendation!) */ st_dma.dma_mode_status = 0x90; dma_stat = st_dma.dma_mode_status; /* Bit 0 indicates some error in the DMA process... don't know * what happened exactly (no further docu). */ if (!(dma_stat & 0x01)) { /* DMA error */ printk(KERN_CRIT "SCSI DMA error near 0x%08lx!\n", SCSI_DMA_GETADR()); } /* If the DMA was active, but now bit 1 is not clear, it is some * other 5380 interrupt that finishes the DMA transfer. We have to * calculate the number of residual bytes and give a warning if * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) */ if (atari_dma_active && (dma_stat & 0x02)) { unsigned long transferred; transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; /* The ST-DMA address is incremented in 2-byte steps, but the * data are written only in 16-byte chunks. If the number of * transferred bytes is not divisible by 16, the remainder is * lost somewhere in outer space. */ if (transferred & 15) printk(KERN_ERR "SCSI DMA error: %ld bytes lost in " "ST-DMA fifo\n", transferred & 15); atari_dma_residual = hostdata->dma_len - transferred; dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); } else atari_dma_residual = 0; atari_dma_active = 0; if (atari_dma_orig_addr) { /* If the dribble buffer was used on a read operation, copy the DMA-ed * data to the original destination address. */ memcpy(atari_dma_orig_addr, phys_to_virt(atari_dma_startaddr), hostdata->dma_len - atari_dma_residual); atari_dma_orig_addr = NULL; } NCR5380_intr(irq, dev); return IRQ_HANDLED; } static void atari_scsi_fetch_restbytes(void) { int nr; char *src, *dst; unsigned long phys_dst; /* fetch rest bytes in the DMA register */ phys_dst = SCSI_DMA_READ_P(dma_addr); nr = phys_dst & 3; if (nr) { /* there are 'nr' bytes left for the last long address before the DMA pointer */ phys_dst ^= nr; dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", nr, phys_dst); /* The content of the DMA pointer is a physical address! 
*/ dst = phys_to_virt(phys_dst); dprintk(NDEBUG_DMA, " = virt addr %p\n", dst); for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) *dst++ = *src++; } } /* This function releases the lock on the DMA chip if there is no * connected command and the disconnected queue is empty. */ static void falcon_release_lock(void) { if (IS_A_TT()) return; if (stdma_is_locked_by(scsi_falcon_intr)) stdma_release(); } /* This function manages the locking of the ST-DMA. * If the DMA isn't locked already for SCSI, it tries to lock it by * calling stdma_lock(). But if the DMA is locked by the SCSI code and * there are other drivers waiting for the chip, we do not issue the * command immediately but tell the SCSI mid-layer to defer. */ static int falcon_get_lock(struct Scsi_Host *instance) { if (IS_A_TT()) return 1; if (stdma_is_locked_by(scsi_falcon_intr)) return 1; /* stdma_lock() may sleep which means it can't be used here */ return stdma_try_lock(scsi_falcon_intr, instance); } #ifndef MODULE static int __init atari_scsi_setup(char *str) { /* Format of atascsi parameter is: * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> * Defaults depend on TT or Falcon, determined at run time. * Negative values mean don't change. */ int ints[8]; get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] < 1) { printk("atari_scsi_setup: no arguments!\n"); return 0; } if (ints[0] >= 1) setup_can_queue = ints[1]; if (ints[0] >= 2) setup_cmd_per_lun = ints[2]; if (ints[0] >= 3) setup_sg_tablesize = ints[3]; if (ints[0] >= 4) setup_hostid = ints[4]; /* ints[5] (use_tagged_queuing) is ignored */ /* ints[6] (use_pdma) is ignored */ if (ints[0] >= 7) setup_toshiba_delay = ints[7]; return 1; } __setup("atascsi=", atari_scsi_setup); #endif /* !MODULE */ static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata, void *data, unsigned long count, int dir) { unsigned long addr = virt_to_phys(data); dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n", hostdata->host->host_no, data, addr, count, dir); if (!IS_A_TT() && !STRAM_ADDR(addr)) { /* If we have a non-DMAable address on a Falcon, use the dribble * buffer; 'orig_addr' != 0 in the read case tells the interrupt * handler to copy data from the dribble buffer to the originally * wanted address. */ if (dir) memcpy(atari_dma_buffer, data, count); else atari_dma_orig_addr = data; addr = atari_dma_phys_buffer; } atari_dma_startaddr = addr; /* Needed for calculating residual later. */ /* Cache cleanup stuff: On writes, push any dirty cache out before sending * it to the peripheral. (Must be done before DMA setup, since at least * the ST-DMA begins to fill internal buffers right after setup. For * reads, invalidate any cache, may be altered after DMA without CPU * knowledge. * * ++roman: For the Medusa, there's no need at all for that cache stuff, * because the hardware does bus snooping (fine!). */ dma_cache_maintenance(addr, count, dir); if (IS_A_TT()) { tt_scsi_dma.dma_ctrl = dir; SCSI_DMA_WRITE_P(dma_addr, addr); SCSI_DMA_WRITE_P(dma_cnt, count); tt_scsi_dma.dma_ctrl = dir | 2; } else { /* ! IS_A_TT */ /* set address */ SCSI_DMA_SETADR(addr); /* toggle direction bit to clear FIFO and set DMA direction */ dir <<= 8; st_dma.dma_mode_status = 0x90 | dir; st_dma.dma_mode_status = 0x90 | (dir ^ 0x100); st_dma.dma_mode_status = 0x90 | dir; udelay(40); /* On writes, round up the transfer length to the next multiple of 512 * (see also comment at atari_dma_xfer_len()). 
*/ st_dma.fdc_acces_seccount = (count + (dir ? 511 : 0)) >> 9; udelay(40); st_dma.dma_mode_status = 0x10 | dir; udelay(40); /* need not restore value of dir, only boolean value is tested */ atari_dma_active = 1; } return count; } static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata, unsigned char *data, int count) { return atari_scsi_dma_setup(hostdata, data, count, 0); } static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata, unsigned char *data, int count) { return atari_scsi_dma_setup(hostdata, data, count, 1); } static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata) { return atari_dma_residual; } #define CMD_SURELY_BLOCK_MODE 0 #define CMD_SURELY_BYTE_MODE 1 #define CMD_MODE_UNKNOWN 2 static int falcon_classify_cmd(struct scsi_cmnd *cmd) { unsigned char opcode = cmd->cmnd[0]; if (opcode == READ_DEFECT_DATA || opcode == READ_LONG || opcode == READ_BUFFER) return CMD_SURELY_BYTE_MODE; else if (opcode == READ_6 || opcode == READ_10 || opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE || opcode == RECOVER_BUFFERED_DATA) { /* In case of a sequential-access target (tape), special care is * needed here: The transfer is block-mode only if the 'fixed' bit is * set! */ if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1)) return CMD_SURELY_BYTE_MODE; else return CMD_SURELY_BLOCK_MODE; } else return CMD_MODE_UNKNOWN; } /* This function calculates the number of bytes that can be transferred via * DMA. On the TT, this is arbitrary, but on the Falcon we have to use the * ST-DMA chip. There are only multiples of 512 bytes possible and max. * 255*512 bytes :-( This means also, that defining READ_OVERRUNS is not * possible on the Falcon, since that would require to program the DMA for * n*512 - atari_read_overrun bytes. But it seems that the Falcon doesn't have * the overrun problem, so this question is academic :-) */ static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { int wanted_len = NCR5380_to_ncmd(cmd)->this_residual; int possible_len, limit; if (wanted_len < DMA_MIN_SIZE) return 0; if (IS_A_TT()) /* TT SCSI DMA can transfer arbitrary #bytes */ return wanted_len; /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max. * 255*512 bytes, but this should be enough) * * ++roman: Aaargl! Another Falcon-SCSI problem... There are some commands * that return a number of bytes which cannot be known beforehand. In this * case, the given transfer length is an "allocation length". Now it * can happen that this allocation length is a multiple of 512 bytes and * the DMA is used. But if not n*512 bytes really arrive, some input data * will be lost in the ST-DMA's FIFO :-( Thus, we have to distinguish * between commands that do block transfers and those that do byte * transfers. But this isn't easy... there are lots of vendor specific * commands, and the user can issue any command via the * SCSI_IOCTL_SEND_COMMAND. * * The solution: We classify SCSI commands in 1) surely block-mode cmd.s, * 2) surely byte-mode cmd.s and 3) cmd.s with unknown mode. In case 1) * and 3), the thing to do is obvious: allow any number of blocks via DMA * or none. In case 2), we apply some heuristic: Byte mode is assumed if * the transfer (allocation) length is < 1024, hoping that no cmd. not * explicitly known as byte mode have such big allocation lengths... * BTW, all the discussion above applies only to reads. 
DMA writes are * unproblematic anyways, since the targets aborts the transfer after * receiving a sufficient number of bytes. * * Another point: If the transfer is from/to an non-ST-RAM address, we * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes. */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { /* Write operation can always use the DMA, but the transfer size must * be rounded up to the next multiple of 512 (atari_dma_setup() does * this). */ possible_len = wanted_len; } else { /* Read operations: if the wanted transfer length is not a multiple of * 512, we cannot use DMA, since the ST-DMA cannot split transfers * (no interrupt on DMA finished!) */ if (wanted_len & 0x1ff) possible_len = 0; else { /* Now classify the command (see above) and decide whether it is * allowed to do DMA at all */ switch (falcon_classify_cmd(cmd)) { case CMD_SURELY_BLOCK_MODE: possible_len = wanted_len; break; case CMD_SURELY_BYTE_MODE: possible_len = 0; /* DMA prohibited */ break; case CMD_MODE_UNKNOWN: default: /* For unknown commands assume block transfers if the transfer * size/allocation length is >= 1024 */ possible_len = (wanted_len < 1024) ? 0 : wanted_len; break; } } } /* Last step: apply the hard limit on DMA transfers */ limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(NCR5380_to_ncmd(cmd)->ptr))) ? STRAM_BUFFER_SIZE : 255*512; if (possible_len > limit) possible_len = limit; if (possible_len != wanted_len) dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n", possible_len, wanted_len); return possible_len; } /* NCR5380 register access functions * * There are separate functions for TT and Falcon, because the access * methods are quite different. The calling macros NCR5380_read and * NCR5380_write call these functions via function pointers. */ static u8 atari_scsi_tt_reg_read(unsigned int reg) { return tt_scsi_regp[reg * 2]; } static void atari_scsi_tt_reg_write(unsigned int reg, u8 value) { tt_scsi_regp[reg * 2] = value; } static u8 atari_scsi_falcon_reg_read(unsigned int reg) { unsigned long flags; u8 result; reg += 0x88; local_irq_save(flags); dma_wd.dma_mode_status = (u_short)reg; result = (u8)dma_wd.fdc_acces_seccount; local_irq_restore(flags); return result; } static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value) { unsigned long flags; reg += 0x88; local_irq_save(flags); dma_wd.dma_mode_status = (u_short)reg; dma_wd.fdc_acces_seccount = (u_short)value; local_irq_restore(flags); } #include "NCR5380.c" static int atari_scsi_host_reset(struct scsi_cmnd *cmd) { int rv; unsigned long flags; local_irq_save(flags); /* Abort a maybe active DMA transfer */ if (IS_A_TT()) { tt_scsi_dma.dma_ctrl = 0; } else { if (stdma_is_locked_by(scsi_falcon_intr)) st_dma.dma_mode_status = 0x90; atari_dma_active = 0; atari_dma_orig_addr = NULL; } rv = NCR5380_host_reset(cmd); /* The 5380 raises its IRQ line while _RST is active but the ST DMA * "lock" has been released so this interrupt may end up handled by * floppy or IDE driver (if one of them holds the lock). The NCR5380 * interrupt flag has been cleared already. 
*/ local_irq_restore(flags); return rv; } #define DRV_MODULE_NAME "atari_scsi" #define PFX DRV_MODULE_NAME ": " static struct scsi_host_template atari_scsi_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = "Atari native SCSI", .info = atari_scsi_info, .queuecommand = atari_scsi_queue_command, .eh_abort_handler = atari_scsi_abort, .eh_host_reset_handler = atari_scsi_host_reset, .this_id = 7, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), }; static int __init atari_scsi_probe(struct platform_device *pdev) { struct Scsi_Host *instance; int error; struct resource *irq; int host_flags = 0; irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) return -ENODEV; if (ATARIHW_PRESENT(TT_SCSI)) { atari_scsi_reg_read = atari_scsi_tt_reg_read; atari_scsi_reg_write = atari_scsi_tt_reg_write; } else { atari_scsi_reg_read = atari_scsi_falcon_reg_read; atari_scsi_reg_write = atari_scsi_falcon_reg_write; } if (ATARIHW_PRESENT(TT_SCSI)) { atari_scsi_template.can_queue = 16; atari_scsi_template.sg_tablesize = SG_ALL; } else { atari_scsi_template.can_queue = 1; atari_scsi_template.sg_tablesize = 1; } if (setup_can_queue > 0) atari_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) atari_scsi_template.cmd_per_lun = setup_cmd_per_lun; /* Don't increase sg_tablesize on Falcon! */ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0) atari_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) { atari_scsi_template.this_id = setup_hostid & 7; } else if (IS_REACHABLE(CONFIG_NVRAM)) { /* Test if a host id is set in the NVRam */ if (ATARIHW_PRESENT(TT_CLK)) { unsigned char b; loff_t offset = 16; ssize_t count = nvram_read(&b, 1, &offset); /* Arbitration enabled? (for TOS) * If yes, use configured host ID */ if ((count == 1) && (b & 0x80)) atari_scsi_template.this_id = b & 7; } } /* If running on a Falcon and if there's TT-Ram (i.e., more than one * memory block, since there's always ST-Ram in a Falcon), then * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers * from/to alternative Ram. */ if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) && m68k_realnum_memory > 1) { atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); if (!atari_dma_buffer) { pr_err(PFX "can't allocate ST-RAM double buffer\n"); return -ENOMEM; } atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); atari_dma_orig_addr = NULL; } instance = scsi_host_alloc(&atari_scsi_template, sizeof(struct NCR5380_hostdata)); if (!instance) { error = -ENOMEM; goto fail_alloc; } instance->irq = irq->start; host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; error = NCR5380_init(instance, host_flags); if (error) goto fail_init; if (IS_A_TT()) { error = request_irq(instance->irq, scsi_tt_intr, 0, "NCR5380", instance); if (error) { pr_err(PFX "request irq %d failed, aborting\n", instance->irq); goto fail_irq; } tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ tt_scsi_dma.dma_ctrl = 0; atari_dma_residual = 0; /* While the read overruns (described by Drew Eckhardt in * NCR5380.c) never happened on TTs, they do in fact on the * Medusa (This was the cause why SCSI didn't work right for * so long there.) Since handling the overruns slows down * a bit, I turned the #ifdef's into a runtime condition. * * In principle it should be sufficient to do max. 1 byte with * PIO, but there is another problem on the Medusa with the DMA * rest data register. 
So read_overruns is currently set * to 4 to avoid having transfers that aren't a multiple of 4. * If the rest data bug is fixed, this can be lowered to 1. */ if (MACH_IS_MEDUSA) { struct NCR5380_hostdata *hostdata = shost_priv(instance); hostdata->read_overruns = 4; } } else { /* Nothing to do for the interrupt: the ST-DMA is initialized * already. */ atari_dma_residual = 0; atari_dma_active = 0; atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000); } NCR5380_maybe_reset_bus(instance); error = scsi_add_host(instance, NULL); if (error) goto fail_host; platform_set_drvdata(pdev, instance); scsi_scan_host(instance); return 0; fail_host: if (IS_A_TT()) free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); fail_init: scsi_host_put(instance); fail_alloc: if (atari_dma_buffer) atari_stram_free(atari_dma_buffer); return error; } static int __exit atari_scsi_remove(struct platform_device *pdev) { struct Scsi_Host *instance = platform_get_drvdata(pdev); scsi_remove_host(instance); if (IS_A_TT()) free_irq(instance->irq, instance); NCR5380_exit(instance); scsi_host_put(instance); if (atari_dma_buffer) atari_stram_free(atari_dma_buffer); return 0; } static struct platform_driver atari_scsi_driver = { .remove = __exit_p(atari_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, }; module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe); MODULE_ALIAS("platform:" DRV_MODULE_NAME); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/atari_scsi.c
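The Falcon sizing policy in atari_scsi_dma_xfer_len() above can be read as a pure function: writes may always use the ST-DMA, reads only when the length is a multiple of 512 and the command is believed to be block-mode, and the result is clamped to the dribble-buffer size or the ST-DMA's 255*512-byte ceiling. The sketch below restates that policy in a simplified, hypothetical form; it folds the driver's 1024-byte heuristic for unknown commands into a single surely_block_mode flag, and all inputs are made up.

#include <stdio.h>
#include <stdbool.h>

#define STRAM_BUFFER_SIZE 4096
#define ST_DMA_HARD_LIMIT (255 * 512)

/* Illustrative restatement of the Falcon read/write DMA sizing rules. */
static int falcon_possible_len(int wanted, bool is_write,
			       bool surely_block_mode, bool needs_dribble_buf)
{
	int possible, limit;

	if (wanted < 32)			/* below DMA_MIN_SIZE: use PIO */
		return 0;

	if (is_write)
		possible = wanted;		/* setup code rounds writes up to 512 */
	else if (wanted & 0x1ff)
		possible = 0;			/* ST-DMA cannot split read transfers */
	else
		possible = surely_block_mode ? wanted : 0;

	limit = needs_dribble_buf ? STRAM_BUFFER_SIZE : ST_DMA_HARD_LIMIT;
	return possible > limit ? limit : possible;
}

int main(void)
{
	printf("%d\n", falcon_possible_len(1024, false, true, false));	/* 1024 */
	printf("%d\n", falcon_possible_len(1000, false, true, false));	/* 0 */
	printf("%d\n", falcon_possible_len(8192, false, true, true));	/* 4096 */
	printf("%d\n", falcon_possible_len(200000, true, true, false));	/* 130560 */
	return 0;
}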
#define SUN3_SCSI_VME

#include "sun3_scsi.c"
linux-master
drivers/scsi/sun3_scsi_vme.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 FUJITSU LIMITED * Copyright (C) 2010 Tomohiro Kusumi <[email protected]> */ #include <linux/kernel.h> #include <linux/trace_seq.h> #include <asm/unaligned.h> #include <trace/events/scsi.h> #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) #define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8])) static const char * scsi_trace_misc(struct trace_seq *, unsigned char *, int); static const char * scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); u32 lba, txlen; lba = get_unaligned_be24(&cdb[1]) & 0x1fffff; /* * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256 * logical blocks shall be read (READ(6)) or written (WRITE(6)). */ txlen = cdb[4] ? cdb[4] : 256; trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); u32 lba, txlen; lba = get_unaligned_be32(&cdb[2]); txlen = get_unaligned_be16(&cdb[7]); trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME) trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); u32 lba, txlen; lba = get_unaligned_be32(&cdb[2]); txlen = get_unaligned_be32(&cdb[6]); trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); u64 lba; u32 txlen; lba = get_unaligned_be64(&cdb[2]); txlen = get_unaligned_be32(&cdb[10]); trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME_16) trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; u64 lba; u32 ei_lbrt, txlen; switch (SERVICE_ACTION32(cdb)) { case READ_32: cmd = "READ"; break; case VERIFY_32: cmd = "VERIFY"; break; case WRITE_32: cmd = "WRITE"; break; case WRITE_SAME_32: cmd = "WRITE_SAME"; break; default: trace_seq_puts(p, "UNKNOWN"); goto out; } lba = get_unaligned_be64(&cdb[12]); ei_lbrt = get_unaligned_be32(&cdb[20]); txlen = get_unaligned_be32(&cdb[28]); trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u", cmd, lba, txlen, cdb[10] >> 5, ei_lbrt); if (SERVICE_ACTION32(cdb) == WRITE_SAME_32) trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); unsigned int regions = get_unaligned_be16(&cdb[7]); trace_seq_printf(p, "regions=%u", (regions - 8) / 16); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; u64 lba; u32 alloc_len; switch (SERVICE_ACTION16(cdb)) { case SAI_READ_CAPACITY_16: cmd = "READ_CAPACITY_16"; break; case SAI_GET_LBA_STATUS: cmd = "GET_LBA_STATUS"; break; default: trace_seq_puts(p, "UNKNOWN"); goto out; } lba = get_unaligned_be64(&cdb[2]); alloc_len = get_unaligned_be32(&cdb[10]); 
trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; u32 alloc_len; switch (SERVICE_ACTION16(cdb)) { case MI_REPORT_IDENTIFYING_INFORMATION: cmd = "REPORT_IDENTIFYING_INFORMATION"; break; case MI_REPORT_TARGET_PGS: cmd = "REPORT_TARGET_PORT_GROUPS"; break; case MI_REPORT_ALIASES: cmd = "REPORT_ALIASES"; break; case MI_REPORT_SUPPORTED_OPERATION_CODES: cmd = "REPORT_SUPPORTED_OPERATION_CODES"; break; case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS"; break; case MI_REPORT_PRIORITY: cmd = "REPORT_PRIORITY"; break; case MI_REPORT_TIMESTAMP: cmd = "REPORT_TIMESTAMP"; break; case MI_MANAGEMENT_PROTOCOL_IN: cmd = "MANAGEMENT_PROTOCOL_IN"; break; default: trace_seq_puts(p, "UNKNOWN"); goto out; } alloc_len = get_unaligned_be32(&cdb[6]); trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; u32 alloc_len; switch (SERVICE_ACTION16(cdb)) { case MO_SET_IDENTIFYING_INFORMATION: cmd = "SET_IDENTIFYING_INFORMATION"; break; case MO_SET_TARGET_PGS: cmd = "SET_TARGET_PORT_GROUPS"; break; case MO_CHANGE_ALIASES: cmd = "CHANGE_ALIASES"; break; case MO_SET_PRIORITY: cmd = "SET_PRIORITY"; break; case MO_SET_TIMESTAMP: cmd = "SET_TIMESTAMP"; break; case MO_MANAGEMENT_PROTOCOL_OUT: cmd = "MANAGEMENT_PROTOCOL_OUT"; break; default: trace_seq_puts(p, "UNKNOWN"); goto out; } alloc_len = get_unaligned_be32(&cdb[6]); trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; u64 zone_id; u32 alloc_len; u8 options; switch (SERVICE_ACTION16(cdb)) { case ZI_REPORT_ZONES: cmd = "REPORT_ZONES"; break; default: trace_seq_puts(p, "UNKNOWN"); goto out; } zone_id = get_unaligned_be64(&cdb[2]); alloc_len = get_unaligned_be32(&cdb[10]); options = cdb[14] & 0x3f; trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u", cmd, (unsigned long long)zone_id, alloc_len, options, (cdb[14] >> 7) & 1); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; u64 zone_id; switch (SERVICE_ACTION16(cdb)) { case ZO_CLOSE_ZONE: cmd = "CLOSE_ZONE"; break; case ZO_FINISH_ZONE: cmd = "FINISH_ZONE"; break; case ZO_OPEN_ZONE: cmd = "OPEN_ZONE"; break; case ZO_RESET_WRITE_POINTER: cmd = "RESET_WRITE_POINTER"; break; default: trace_seq_puts(p, "UNKNOWN"); goto out; } zone_id = get_unaligned_be64(&cdb[2]); trace_seq_printf(p, "%s zone=%llu all=%u", cmd, (unsigned long long)zone_id, cdb[14] & 1); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) { switch (SERVICE_ACTION32(cdb)) { case READ_32: case VERIFY_32: case WRITE_32: case WRITE_SAME_32: return scsi_trace_rw32(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } } static const char * scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); trace_seq_putc(p, '-'); trace_seq_putc(p, 0); return ret; } const char 
* scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) { switch (cdb[0]) { case READ_6: case WRITE_6: return scsi_trace_rw6(p, cdb, len); case READ_10: case VERIFY: case WRITE_10: case WRITE_SAME: return scsi_trace_rw10(p, cdb, len); case READ_12: case VERIFY_12: case WRITE_12: return scsi_trace_rw12(p, cdb, len); case READ_16: case VERIFY_16: case WRITE_16: case WRITE_SAME_16: return scsi_trace_rw16(p, cdb, len); case UNMAP: return scsi_trace_unmap(p, cdb, len); case SERVICE_ACTION_IN_16: return scsi_trace_service_action_in(p, cdb, len); case VARIABLE_LENGTH_CMD: return scsi_trace_varlen(p, cdb, len); case MAINTENANCE_IN: return scsi_trace_maintenance_in(p, cdb, len); case MAINTENANCE_OUT: return scsi_trace_maintenance_out(p, cdb, len); case ZBC_IN: return scsi_trace_zbc_in(p, cdb, len); case ZBC_OUT: return scsi_trace_zbc_out(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } }
linux-master
drivers/scsi/scsi_trace.c
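The REPORT ZONES branch of scsi_trace_zbc_in() above pulls its fields straight out of fixed CDB offsets: bytes 2-9 are the zone start LBA (big-endian 64-bit), bytes 10-13 the allocation length (big-endian 32-bit), and byte 14 carries the reporting options in bits 0-5 plus the PARTIAL bit in bit 7. The following is a minimal user-space sketch of that same decode, not part of scsi_trace.c; the get_be64/get_be32 helpers merely stand in for the kernel's get_unaligned_be64/get_unaligned_be32 and the sample CDB contents are invented for illustration.

/*
 * Hedged illustration only - mirrors the field extraction done by
 * scsi_trace_zbc_in() for a ZBC IN / REPORT ZONES CDB.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* Hypothetical 16-byte ZBC IN (REPORT ZONES) CDB. */
	unsigned char cdb[16] = { 0 };

	cdb[0] = 0x95;		/* ZBC_IN opcode */
	cdb[1] = 0x00;		/* service action: REPORT ZONES */
	cdb[9] = 0x80;		/* zone start LBA = 128 */
	cdb[13] = 0x10;		/* allocation length = 16 */
	cdb[14] = 0x81;		/* PARTIAL = 1, reporting options = 1 */

	printf("REPORT_ZONES zone=%llu alloc_len=%u options=%u partial=%u\n",
	       (unsigned long long)get_be64(&cdb[2]),
	       get_be32(&cdb[10]),
	       (unsigned)(cdb[14] & 0x3f),
	       (unsigned)((cdb[14] >> 7) & 1));
	return 0;
}

Running the sketch prints "REPORT_ZONES zone=128 alloc_len=16 options=1 partial=1", i.e. the same fields the trace callback formats into the trace buffer.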
// SPDX-License-Identifier: GPL-2.0-only /* * Generic Macintosh NCR5380 driver * * Copyright 1998, Michael Schmitz <[email protected]> * * Copyright 2019 Finn Thain * * derived in part from: */ /* * Generic Generic NCR5380 driver * * Copyright 1995, Russell King */ #include <linux/delay.h> #include <linux/types.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/hwtest.h> #include <asm/io.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/setup.h> #include <scsi/scsi_host.h> /* Definitions for the core NCR5380 driver. */ #define NCR5380_implementation_fields int pdma_residual #define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4)) #define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value) #define NCR5380_dma_xfer_len macscsi_dma_xfer_len #define NCR5380_dma_recv_setup macscsi_pread #define NCR5380_dma_send_setup macscsi_pwrite #define NCR5380_dma_residual macscsi_dma_residual #define NCR5380_intr macscsi_intr #define NCR5380_queue_command macscsi_queue_command #define NCR5380_abort macscsi_abort #define NCR5380_host_reset macscsi_host_reset #define NCR5380_info macscsi_info #include "NCR5380.h" static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); static int setup_use_pdma = 512; module_param(setup_use_pdma, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); static int setup_toshiba_delay = -1; module_param(setup_toshiba_delay, int, 0); #ifndef MODULE static int __init mac_scsi_setup(char *str) { int ints[8]; (void)get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] < 1) { pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>[,<toshiba_delay>]]]]]]\n"); return 0; } if (ints[0] >= 1) setup_can_queue = ints[1]; if (ints[0] >= 2) setup_cmd_per_lun = ints[2]; if (ints[0] >= 3) setup_sg_tablesize = ints[3]; if (ints[0] >= 4) setup_hostid = ints[4]; /* ints[5] (use_tagged_queuing) is ignored */ if (ints[0] >= 6) setup_use_pdma = ints[6]; if (ints[0] >= 7) setup_toshiba_delay = ints[7]; return 1; } __setup("mac5380=", mac_scsi_setup); #endif /* !MODULE */ /* * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to * specify the number of bytes between the delays expected from a SCSI target. * This allows the operating system to "prevent bus errors when a target fails * to deliver the next byte within the processor bus error timeout period." * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets * so bus errors are unavoidable. * * If a MOVE.B instruction faults, we assume that zero bytes were transferred * and simply retry. That assumption probably depends on target behaviour but * seems to hold up okay. The NOP provides synchronization: without it the * fault can sometimes occur after the program counter has moved past the * offending instruction. Post-increment addressing can't be used. 
*/ #define MOVE_BYTE(operands) \ asm volatile ( \ "1: moveb " operands " \n" \ "11: nop \n" \ " addq #1,%0 \n" \ " subq #1,%1 \n" \ "40: \n" \ " \n" \ ".section .fixup,\"ax\" \n" \ ".even \n" \ "90: movel #1, %2 \n" \ " jra 40b \n" \ ".previous \n" \ " \n" \ ".section __ex_table,\"a\" \n" \ ".align 4 \n" \ ".long 1b,90b \n" \ ".long 11b,90b \n" \ ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) /* * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because * the residual byte count would be uncertain. In that situation the MOVE_WORD * macro clears n in the fixup section to abort the transfer. */ #define MOVE_WORD(operands) \ asm volatile ( \ "1: movew " operands " \n" \ "11: nop \n" \ " subq #2,%1 \n" \ "40: \n" \ " \n" \ ".section .fixup,\"ax\" \n" \ ".even \n" \ "90: movel #0, %1 \n" \ " movel #2, %2 \n" \ " jra 40b \n" \ ".previous \n" \ " \n" \ ".section __ex_table,\"a\" \n" \ ".align 4 \n" \ ".long 1b,90b \n" \ ".long 11b,90b \n" \ ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) #define MOVE_16_WORDS(operands) \ asm volatile ( \ "1: movew " operands " \n" \ "2: movew " operands " \n" \ "3: movew " operands " \n" \ "4: movew " operands " \n" \ "5: movew " operands " \n" \ "6: movew " operands " \n" \ "7: movew " operands " \n" \ "8: movew " operands " \n" \ "9: movew " operands " \n" \ "10: movew " operands " \n" \ "11: movew " operands " \n" \ "12: movew " operands " \n" \ "13: movew " operands " \n" \ "14: movew " operands " \n" \ "15: movew " operands " \n" \ "16: movew " operands " \n" \ "17: nop \n" \ " subl #32,%1 \n" \ "40: \n" \ " \n" \ ".section .fixup,\"ax\" \n" \ ".even \n" \ "90: movel #0, %1 \n" \ " movel #2, %2 \n" \ " jra 40b \n" \ ".previous \n" \ " \n" \ ".section __ex_table,\"a\" \n" \ ".align 4 \n" \ ".long 1b,90b \n" \ ".long 2b,90b \n" \ ".long 3b,90b \n" \ ".long 4b,90b \n" \ ".long 5b,90b \n" \ ".long 6b,90b \n" \ ".long 7b,90b \n" \ ".long 8b,90b \n" \ ".long 9b,90b \n" \ ".long 10b,90b \n" \ ".long 11b,90b \n" \ ".long 12b,90b \n" \ ".long 13b,90b \n" \ ".long 14b,90b \n" \ ".long 15b,90b \n" \ ".long 16b,90b \n" \ ".long 17b,90b \n" \ ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) #define MAC_PDMA_DELAY 32 static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) { unsigned char *addr = start; int result = 0; if (n >= 1) { MOVE_BYTE("%3@,%0@"); if (result) goto out; } if (n >= 1 && ((unsigned long)addr & 1)) { MOVE_BYTE("%3@,%0@"); if (result) goto out; } while (n >= 32) MOVE_16_WORDS("%3@,%0@+"); while (n >= 2) MOVE_WORD("%3@,%0@+"); if (result) return start - addr; /* Negated to indicate uncertain length */ if (n == 1) MOVE_BYTE("%3@,%0@"); out: return addr - start; } static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) { unsigned char *addr = start; int result = 0; if (n >= 1) { MOVE_BYTE("%0@,%3@"); if (result) goto out; } if (n >= 1 && ((unsigned long)addr & 1)) { MOVE_BYTE("%0@,%3@"); if (result) goto out; } while (n >= 32) MOVE_16_WORDS("%0@+,%3@"); while (n >= 2) MOVE_WORD("%0@+,%3@"); if (result) return start - addr; /* Negated to indicate uncertain length */ if (n == 1) MOVE_BYTE("%0@,%3@"); out: return addr - start; } /* The "SCSI DMA" chip on the IIfx implements this register. 
*/ #define CTRL_REG 0x8 #define CTRL_INTERRUPTS_ENABLE BIT(1) #define CTRL_HANDSHAKE_MODE BIT(3) static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) { out_be32(hostdata->io + (CTRL_REG << 4), value); } static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; int result = 0; hostdata->pdma_residual = len; while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ | BASR_PHASE_MATCH, BASR_DRQ | BASR_PHASE_MATCH, 0)) { int bytes; if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE); bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); if (bytes > 0) { d += bytes; hostdata->pdma_residual -= bytes; } if (hostdata->pdma_residual == 0) goto out; if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, 0) < 0) scmd_printk(KERN_DEBUG, hostdata->connected, "%s: !REQ and !ACK\n", __func__); if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) goto out; if (bytes == 0) udelay(MAC_PDMA_DELAY); if (bytes >= 0) continue; dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, "%s: bus error (%d/%d)\n", __func__, d - dst, len); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); result = -1; goto out; } scmd_printk(KERN_ERR, hostdata->connected, "%s: phase mismatch or !DRQ\n", __func__); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); result = -1; out: if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); return result; } static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *src, int len) { unsigned char *s = src; u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); int result = 0; hostdata->pdma_residual = len; while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ | BASR_PHASE_MATCH, BASR_DRQ | BASR_PHASE_MATCH, 0)) { int bytes; if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE); bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); if (bytes > 0) { s += bytes; hostdata->pdma_residual -= bytes; } if (hostdata->pdma_residual == 0) { if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, 0) < 0) { scmd_printk(KERN_ERR, hostdata->connected, "%s: Last Byte Sent timeout\n", __func__); result = -1; } goto out; } if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, 0) < 0) scmd_printk(KERN_DEBUG, hostdata->connected, "%s: !REQ and !ACK\n", __func__); if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) goto out; if (bytes == 0) udelay(MAC_PDMA_DELAY); if (bytes >= 0) continue; dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, "%s: bus error (%d/%d)\n", __func__, s - src, len); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); result = -1; goto out; } scmd_printk(KERN_ERR, hostdata->connected, "%s: phase mismatch or !DRQ\n", __func__); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); result = -1; out: if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); return result; } static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { int resid = NCR5380_to_ncmd(cmd)->this_residual; if (hostdata->flags & FLAG_NO_PSEUDO_DMA || resid < setup_use_pdma) return 0; return resid; } static int 
macscsi_dma_residual(struct NCR5380_hostdata *hostdata) { return hostdata->pdma_residual; } #include "NCR5380.c" #define DRV_MODULE_NAME "mac_scsi" #define PFX DRV_MODULE_NAME ": " static struct scsi_host_template mac_scsi_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = "Macintosh NCR5380 SCSI", .info = macscsi_info, .queuecommand = macscsi_queue_command, .eh_abort_handler = macscsi_abort, .eh_host_reset_handler = macscsi_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = 1, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), .max_sectors = 128, }; static int __init mac_scsi_probe(struct platform_device *pdev) { struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; int error; int host_flags = 0; struct resource *irq, *pio_mem, *pdma_mem = NULL; pio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!pio_mem) return -ENODEV; pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!hwreg_present((unsigned char *)pio_mem->start + (STATUS_REG << 4))) { pr_info(PFX "no device detected at %pap\n", &pio_mem->start); return -ENODEV; } if (setup_can_queue > 0) mac_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) mac_scsi_template.cmd_per_lun = setup_cmd_per_lun; if (setup_sg_tablesize > 0) mac_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; instance = scsi_host_alloc(&mac_scsi_template, sizeof(struct NCR5380_hostdata)); if (!instance) return -ENOMEM; if (irq) instance->irq = irq->start; else instance->irq = NO_IRQ; hostdata = shost_priv(instance); hostdata->base = pio_mem->start; hostdata->io = (u8 __iomem *)pio_mem->start; if (pdma_mem && setup_use_pdma) hostdata->pdma_io = (u8 __iomem *)pdma_mem->start; else host_flags |= FLAG_NO_PSEUDO_DMA; host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); if (error) goto fail_init; if (instance->irq != NO_IRQ) { error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED, "NCR5380", instance); if (error) goto fail_irq; } NCR5380_maybe_reset_bus(instance); error = scsi_add_host(instance, NULL); if (error) goto fail_host; platform_set_drvdata(pdev, instance); scsi_scan_host(instance); return 0; fail_host: if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); fail_init: scsi_host_put(instance); return error; } static int __exit mac_scsi_remove(struct platform_device *pdev) { struct Scsi_Host *instance = platform_get_drvdata(pdev); scsi_remove_host(instance); if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); scsi_host_put(instance); return 0; } static struct platform_driver mac_scsi_driver = { .remove = __exit_p(mac_scsi_remove), .driver = { .name = DRV_MODULE_NAME, }, }; module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe); MODULE_ALIAS("platform:" DRV_MODULE_NAME); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/mac_scsi.c
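macscsi_pread() and macscsi_pwrite() above move data in chunks of at most 512 bytes, shrink hostdata->pdma_residual by however many bytes were actually transferred, and treat a negative return from the low-level copy as a bus fault whose exact byte count is uncertain. The stand-alone sketch below models only that residual bookkeeping; it is not part of mac_scsi.c, and the copy_chunk() helper is hypothetical, standing in for mac_pdma_recv() with an arbitrary per-call transfer limit.

/*
 * Hedged illustration only - models the residual accounting used by
 * macscsi_pread(): request at most 512 bytes per iteration, subtract what
 * was actually moved, and abort when the copy helper reports a fault.
 */
#include <stdio.h>

#define CHUNK_MAX 512

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

/*
 * Hypothetical stand-in for mac_pdma_recv(): pretend the hardware only
 * hands over 100 bytes per call. A real fault would return a negative
 * value, meaning the exact count is unknown.
 */
static int copy_chunk(int want)
{
	return min_int(want, 100);
}

int main(void)
{
	int residual = 1230;	/* bytes still expected from the target */

	while (residual > 0) {
		int bytes = copy_chunk(min_int(residual, CHUNK_MAX));

		if (bytes < 0) {
			printf("bus fault, residual uncertain (%d left)\n",
			       residual);
			return 1;
		}
		residual -= bytes;
		printf("moved %d bytes, residual now %d\n", bytes, residual);
	}
	return 0;
}

The loop terminates with residual == 0 when every chunk succeeds; on a fault the caller is left knowing only that the remaining count is no longer trustworthy, which is why the driver reports the transfer length back through macscsi_dma_residual().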
// SPDX-License-Identifier: GPL-2.0-or-later /* * iSCSI lib functions * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2004 - 2006 Mike Christie * Copyright (C) 2004 - 2005 Dmitry Yusupov * Copyright (C) 2004 - 2005 Alex Aizman * maintained by [email protected] */ #include <linux/types.h> #include <linux/kfifo.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/module.h> #include <asm/unaligned.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/iscsi_proto.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/libiscsi.h> #include <trace/events/iscsi.h> static int iscsi_dbg_lib_conn; module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_conn, "Turn on debugging for connections in libiscsi module. " "Set to 1 to turn on, and zero to turn off. Default is off."); static int iscsi_dbg_lib_session; module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_session, "Turn on debugging for sessions in libiscsi module. " "Set to 1 to turn on, and zero to turn off. Default is off."); static int iscsi_dbg_lib_eh; module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_eh, "Turn on debugging for error handling in libiscsi module. " "Set to 1 to turn on, and zero to turn off. Default is off."); #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ do { \ if (iscsi_dbg_lib_conn) \ iscsi_conn_printk(KERN_INFO, _conn, \ "%s " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_conn, \ &(_conn)->cls_conn->dev, \ "%s " dbg_fmt, __func__, ##arg);\ } while (0); #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ do { \ if (iscsi_dbg_lib_session) \ iscsi_session_printk(KERN_INFO, _session, \ "%s " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_session, \ &(_session)->cls_session->dev, \ "%s " dbg_fmt, __func__, ##arg); \ } while (0); #define ISCSI_DBG_EH(_session, dbg_fmt, arg...) 
\ do { \ if (iscsi_dbg_lib_eh) \ iscsi_session_printk(KERN_INFO, _session, \ "%s " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_eh, \ &(_session)->cls_session->dev, \ "%s " dbg_fmt, __func__, ##arg); \ } while (0); #define ISCSI_CMD_COMPL_WAIT 5 inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); if (ihost->workq) queue_work(ihost->workq, &conn->xmitwork); } EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit); inline void iscsi_conn_queue_recv(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags)) queue_work(ihost->workq, &conn->recvwork); } EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv); static void __iscsi_update_cmdsn(struct iscsi_session *session, uint32_t exp_cmdsn, uint32_t max_cmdsn) { /* * standard specifies this check for when to update expected and * max sequence numbers */ if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1)) return; if (exp_cmdsn != session->exp_cmdsn && !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn)) session->exp_cmdsn = exp_cmdsn; if (max_cmdsn != session->max_cmdsn && !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) session->max_cmdsn = max_cmdsn; } void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) { __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn), be32_to_cpu(hdr->max_cmdsn)); } EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); /** * iscsi_prep_data_out_pdu - initialize Data-Out * @task: scsi command task * @r2t: R2T info * @hdr: iscsi data in pdu * * Notes: * Initialize Data-Out within this R2T sequence and finds * proper data_offset within this SCSI command. * * This function is called with connection lock taken. 
**/ void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t, struct iscsi_data *hdr) { struct iscsi_conn *conn = task->conn; unsigned int left = r2t->data_length - r2t->sent; task->hdr_len = sizeof(struct iscsi_data); memset(hdr, 0, sizeof(struct iscsi_data)); hdr->ttt = r2t->ttt; hdr->datasn = cpu_to_be32(r2t->datasn); r2t->datasn++; hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; hdr->lun = task->lun; hdr->itt = task->hdr_itt; hdr->exp_statsn = r2t->exp_statsn; hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent); if (left > conn->max_xmit_dlength) { hton24(hdr->dlength, conn->max_xmit_dlength); r2t->data_count = conn->max_xmit_dlength; hdr->flags = 0; } else { hton24(hdr->dlength, left); r2t->data_count = left; hdr->flags = ISCSI_FLAG_CMD_FINAL; } conn->dataout_pdus_cnt++; } EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu); static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) { unsigned exp_len = task->hdr_len + len; if (exp_len > task->hdr_max) { WARN_ON(1); return -EINVAL; } WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ task->hdr_len = exp_len; return 0; } /* * make an extended cdb AHS */ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) { struct scsi_cmnd *cmd = task->sc; unsigned rlen, pad_len; unsigned short ahslength; struct iscsi_ecdb_ahdr *ecdb_ahdr; int rc; ecdb_ahdr = iscsi_next_hdr(task); rlen = cmd->cmd_len - ISCSI_CDB_SIZE; BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); ahslength = rlen + sizeof(ecdb_ahdr->reserved); pad_len = iscsi_padding(rlen); rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); if (rc) return rc; if (pad_len) memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len); ecdb_ahdr->ahslength = cpu_to_be16(ahslength); ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB; ecdb_ahdr->reserved = 0; memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen); ISCSI_DBG_SESSION(task->conn->session, "iscsi_prep_ecdb_ahs: varlen_cdb_len %d " "rlen %d pad_len %d ahs_length %d iscsi_headers_size " "%u\n", cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len); return 0; } /** * iscsi_check_tmf_restrictions - check if a task is affected by TMF * @task: iscsi task * @opcode: opcode to check for * * During TMF a task has to be checked if it's affected. * All unrelated I/O can be passed through, but I/O to the * affected LUN should be restricted. * If 'fast_abort' is set we won't be sending any I/O to the * affected LUN. * Otherwise the target is waiting for all TTTs to be completed, * so we have to send all outstanding Data-Out PDUs to the target. */ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) { struct iscsi_session *session = task->conn->session; struct iscsi_tm *tmf = &session->tmhdr; u64 hdr_lun; if (session->tmf_state == TMF_INITIAL) return 0; if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC) return 0; switch (ISCSI_TM_FUNC_VALUE(tmf)) { case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: /* * Allow PDUs for unrelated LUNs */ hdr_lun = scsilun_to_int(&tmf->lun); if (hdr_lun != task->sc->device->lun) return 0; fallthrough; case ISCSI_TM_FUNC_TARGET_WARM_RESET: /* * Fail all SCSI cmd PDUs */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { iscsi_session_printk(KERN_INFO, session, "task [op %x itt 0x%x/0x%x] rejected.\n", opcode, task->itt, task->hdr_itt); return -EACCES; } /* * And also all data-out PDUs in response to R2T * if fast_abort is set. 
*/ if (session->fast_abort) { iscsi_session_printk(KERN_INFO, session, "task [op %x itt 0x%x/0x%x] fast abort.\n", opcode, task->itt, task->hdr_itt); return -EACCES; } break; case ISCSI_TM_FUNC_ABORT_TASK: /* * the caller has already checked if the task * they want to abort was in the pending queue so if * we are here the cmd pdu has gone out already, and * we will only hit this for data-outs */ if (opcode == ISCSI_OP_SCSI_DATA_OUT && task->hdr_itt == tmf->rtt) { ISCSI_DBG_SESSION(session, "Preventing task %x/%x from sending " "data-out due to abort task in " "progress\n", task->itt, task->hdr_itt); return -EACCES; } break; } return 0; } /** * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu * @task: iscsi task * * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set * fields like dlength or final based on how much data it sends */ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; struct iscsi_scsi_req *hdr; unsigned hdrlength, cmd_len, transfer_length; itt_t itt; int rc; rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); if (rc) return rc; if (conn->session->tt->alloc_pdu) { rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); if (rc) return rc; } hdr = (struct iscsi_scsi_req *)task->hdr; itt = hdr->itt; memset(hdr, 0, sizeof(*hdr)); if (session->tt->parse_pdu_itt) hdr->itt = task->hdr_itt = itt; else hdr->itt = task->hdr_itt = build_itt(task->itt, task->conn->session->age); task->hdr_len = 0; rc = iscsi_add_hdr(task, sizeof(*hdr)); if (rc) return rc; hdr->opcode = ISCSI_OP_SCSI_CMD; hdr->flags = ISCSI_ATTR_SIMPLE; int_to_scsilun(sc->device->lun, &hdr->lun); task->lun = hdr->lun; hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); cmd_len = sc->cmd_len; if (cmd_len < ISCSI_CDB_SIZE) memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); else if (cmd_len > ISCSI_CDB_SIZE) { rc = iscsi_prep_ecdb_ahs(task); if (rc) return rc; cmd_len = ISCSI_CDB_SIZE; } memcpy(hdr->cdb, sc->cmnd, cmd_len); task->imm_count = 0; if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) task->protected = true; transfer_length = scsi_transfer_length(sc); hdr->data_length = cpu_to_be32(transfer_length); if (sc->sc_data_direction == DMA_TO_DEVICE) { struct iscsi_r2t_info *r2t = &task->unsol_r2t; hdr->flags |= ISCSI_FLAG_CMD_WRITE; /* * Write counters: * * imm_count bytes to be sent right after * SCSI PDU Header * * unsol_count bytes(as Data-Out) to be sent * without R2T ack right after * immediate data * * r2t data_length bytes to be sent via R2T ack's * * pad_count bytes to be sent as zero-padding */ memset(r2t, 0, sizeof(*r2t)); if (session->imm_data_en) { if (transfer_length >= session->first_burst) task->imm_count = min(session->first_burst, conn->max_xmit_dlength); else task->imm_count = min(transfer_length, conn->max_xmit_dlength); hton24(hdr->dlength, task->imm_count); } else zero_data(hdr->dlength); if (!session->initial_r2t_en) { r2t->data_length = min(session->first_burst, transfer_length) - task->imm_count; r2t->data_offset = task->imm_count; r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); r2t->exp_statsn = cpu_to_be32(conn->exp_statsn); } if (!task->unsol_r2t.data_length) /* No unsolicit Data-Out's */ hdr->flags |= ISCSI_FLAG_CMD_FINAL; } else { hdr->flags |= ISCSI_FLAG_CMD_FINAL; zero_data(hdr->dlength); if (sc->sc_data_direction == DMA_FROM_DEVICE) hdr->flags |= ISCSI_FLAG_CMD_READ; } /* calculate size of additional header segments (AHSs) */ hdrlength = 
task->hdr_len - sizeof(*hdr); WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); hdrlength /= ISCSI_PAD_LEN; WARN_ON(hdrlength >= 256); hdr->hlength = hdrlength & 0xFF; hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); if (session->tt->init_task && session->tt->init_task(task)) return -EIO; task->state = ISCSI_TASK_RUNNING; session->cmdsn++; conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " "itt 0x%x len %d cmdsn %d win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", conn->id, sc, sc->cmnd[0], task->itt, transfer_length, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); return 0; } /** * iscsi_free_task - free a task * @task: iscsi cmd task * * Must be called with session back_lock. * This function returns the scsi command to scsi-ml or cleans * up mgmt tasks then returns the task to the pool. */ static void iscsi_free_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; int oldstate = task->state; ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", task->itt, task->state, task->sc); session->tt->cleanup_task(task); task->state = ISCSI_TASK_FREE; task->sc = NULL; /* * login task is preallocated so do not free */ if (conn->login_task == task) return; kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); if (sc) { /* SCSI eh reuses commands to verify us */ iscsi_cmd(sc)->task = NULL; /* * queue command may call this to free the task, so * it will decide how to return sc to scsi-ml. */ if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ) scsi_done(sc); } } bool iscsi_get_task(struct iscsi_task *task) { return refcount_inc_not_zero(&task->refcount); } EXPORT_SYMBOL_GPL(iscsi_get_task); /** * __iscsi_put_task - drop the refcount on a task * @task: iscsi_task to drop the refcount on * * The back_lock must be held when calling in case it frees the task. */ void __iscsi_put_task(struct iscsi_task *task) { if (refcount_dec_and_test(&task->refcount)) iscsi_free_task(task); } EXPORT_SYMBOL_GPL(__iscsi_put_task); void iscsi_put_task(struct iscsi_task *task) { struct iscsi_session *session = task->conn->session; if (refcount_dec_and_test(&task->refcount)) { spin_lock_bh(&session->back_lock); iscsi_free_task(task); spin_unlock_bh(&session->back_lock); } } EXPORT_SYMBOL_GPL(iscsi_put_task); /** * iscsi_complete_task - finish a task * @task: iscsi cmd task * @state: state to complete task with * * Must be called with session back_lock. */ static void iscsi_complete_task(struct iscsi_task *task, int state) { struct iscsi_conn *conn = task->conn; ISCSI_DBG_SESSION(conn->session, "complete task itt 0x%x state %d sc %p\n", task->itt, task->state, task->sc); if (task->state == ISCSI_TASK_COMPLETED || task->state == ISCSI_TASK_ABRT_TMF || task->state == ISCSI_TASK_ABRT_SESS_RECOV || task->state == ISCSI_TASK_REQUEUE_SCSIQ) return; WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); task->state = state; if (READ_ONCE(conn->ping_task) == task) WRITE_ONCE(conn->ping_task, NULL); /* release get from queueing */ __iscsi_put_task(task); } /** * iscsi_complete_scsi_task - finish scsi task normally * @task: iscsi task for scsi cmd * @exp_cmdsn: expected cmd sn in cpu format * @max_cmdsn: max cmd sn in cpu format * * This is used when drivers do not need or cannot perform * lower level pdu processing. 
* * Called with session back_lock */ void iscsi_complete_scsi_task(struct iscsi_task *task, uint32_t exp_cmdsn, uint32_t max_cmdsn) { struct iscsi_conn *conn = task->conn; ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt); conn->last_recv = jiffies; __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn); iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task); /* * Must be called with back and frwd lock */ static bool cleanup_queued_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; bool early_complete = false; /* * We might have raced where we handled a R2T early and got a response * but have not yet taken the task off the requeue list, then a TMF or * recovery happened and so we can still see it here. */ if (task->state == ISCSI_TASK_COMPLETED) early_complete = true; if (!list_empty(&task->running)) { list_del_init(&task->running); /* * If it's on a list but still running this could be cleanup * from a TMF or session recovery. */ if (task->state == ISCSI_TASK_RUNNING || task->state == ISCSI_TASK_COMPLETED) __iscsi_put_task(task); } if (conn->session->running_aborted_task == task) { conn->session->running_aborted_task = NULL; __iscsi_put_task(task); } if (conn->task == task) { conn->task = NULL; __iscsi_put_task(task); } return early_complete; } /* * session back and frwd lock must be held and if not called for a task that * is still pending or from the xmit thread, then xmit thread must be suspended */ static void __fail_scsi_task(struct iscsi_task *task, int err) { struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc; int state; if (cleanup_queued_task(task)) return; if (task->state == ISCSI_TASK_PENDING) { /* * cmd never made it to the xmit thread, so we should not count * the cmd in the sequencing */ conn->session->queued_cmdsn--; /* it was never sent so just complete like normal */ state = ISCSI_TASK_COMPLETED; } else if (err == DID_TRANSPORT_DISRUPTED) state = ISCSI_TASK_ABRT_SESS_RECOV; else state = ISCSI_TASK_ABRT_TMF; sc = task->sc; sc->result = err << 16; scsi_set_resid(sc, scsi_bufflen(sc)); iscsi_complete_task(task, state); } static void fail_scsi_task(struct iscsi_task *task, int err) { struct iscsi_session *session = task->conn->session; spin_lock_bh(&session->back_lock); __fail_scsi_task(task, err); spin_unlock_bh(&session->back_lock); } static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, struct iscsi_task *task) { struct iscsi_session *session = conn->session; struct iscsi_hdr *hdr = task->hdr; struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; if (conn->session->state == ISCSI_STATE_LOGGING_OUT) return -ENOTCONN; if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT) nop->exp_statsn = cpu_to_be32(conn->exp_statsn); /* * pre-format CmdSN for outgoing PDU. */ nop->cmdsn = cpu_to_be32(session->cmdsn); if (hdr->itt != RESERVED_ITT) { /* * TODO: We always use immediate for normal session pdus. * If we start to send tmfs or nops as non-immediate then * we should start checking the cmdsn numbers for mgmt tasks. * * During discovery sessions iscsid sends TEXT as non immediate, * but we always only send one PDU at a time. 
*/ if (conn->c_stage == ISCSI_CONN_STARTED && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { session->queued_cmdsn++; session->cmdsn++; } } if (session->tt->init_task && session->tt->init_task(task)) return -EIO; if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) session->state = ISCSI_STATE_LOGGING_OUT; task->state = ISCSI_TASK_RUNNING; ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, task->data_count); return 0; } /** * iscsi_alloc_mgmt_task - allocate and setup a mgmt task. * @conn: iscsi conn that the task will be sent on. * @hdr: iscsi pdu that will be sent. * @data: buffer for data segment if needed. * @data_size: length of data in bytes. */ static struct iscsi_task * iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_session *session = conn->session; uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK; struct iscsi_task *task; itt_t itt; if (session->state == ISCSI_STATE_TERMINATE || !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags)) return NULL; if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) { /* * Login and Text are sent serially, in * request-followed-by-response sequence. * Same task can be used. Same ITT must be used. * Note that login_task is preallocated at conn_create(). */ if (conn->login_task->state != ISCSI_TASK_FREE) { iscsi_conn_printk(KERN_ERR, conn, "Login/Text in " "progress. Cannot start new task.\n"); return NULL; } if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) { iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); return NULL; } task = conn->login_task; } else { if (session->state != ISCSI_STATE_LOGGED_IN) return NULL; if (data_size != 0) { iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode); return NULL; } BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); if (!kfifo_out(&session->cmdpool.queue, (void*)&task, sizeof(void*))) return NULL; } /* * released in complete pdu for task we expect a response for, and * released by the lld when it has transmitted the task for * pdus we do not expect a response for. */ refcount_set(&task->refcount, 1); task->conn = conn; task->sc = NULL; INIT_LIST_HEAD(&task->running); task->state = ISCSI_TASK_PENDING; if (data_size) { memcpy(task->data, data, data_size); task->data_count = data_size; } else task->data_count = 0; if (conn->session->tt->alloc_pdu) { if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " "pdu for mgmt task.\n"); goto free_task; } } itt = task->hdr->itt; task->hdr_len = sizeof(struct iscsi_hdr); memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); if (hdr->itt != RESERVED_ITT) { if (session->tt->parse_pdu_itt) task->hdr->itt = itt; else task->hdr->itt = build_itt(task->itt, task->conn->session->age); } return task; free_task: iscsi_put_task(task); return NULL; } /** * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task. * @task: iscsi task to send. 
* * On failure this returns a non-zero error code, and the driver must free * the task with iscsi_put_task; */ static int iscsi_send_mgmt_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct iscsi_host *ihost = shost_priv(conn->session->host); int rc = 0; if (!ihost->workq) { rc = iscsi_prep_mgmt_task(conn, task); if (rc) return rc; rc = session->tt->xmit_task(task); if (rc) return rc; } else { list_add_tail(&task->running, &conn->mgmtqueue); iscsi_conn_queue_xmit(conn); } return 0; } static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_task *task; int rc; task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size); if (!task) return -ENOMEM; rc = iscsi_send_mgmt_task(task); if (rc) iscsi_put_task(task); return rc; } int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; int err = 0; spin_lock_bh(&session->frwd_lock); if (__iscsi_conn_send_pdu(conn, hdr, data, data_size)) err = -EPERM; spin_unlock_bh(&session->frwd_lock); return err; } EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); /** * iscsi_scsi_cmd_rsp - SCSI Command Response processing * @conn: iscsi connection * @hdr: iscsi header * @task: scsi command task * @data: cmd data buffer * @datalen: len of buffer * * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and * then completes the command and task. called under back_lock **/ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task, char *data, int datalen) { struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; sc->result = (DID_OK << 16) | rhdr->cmd_status; if (task->protected) { sector_t sector; u8 ascq; /** * Transports that didn't implement check_protection * callback but still published T10-PI support to scsi-mid * deserve this BUG_ON. 
**/ BUG_ON(!session->tt->check_protection); ascq = session->tt->check_protection(task, &sector); if (ascq) { scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq); scsi_set_sense_information(sc->sense_buffer, SCSI_SENSE_BUFFERSIZE, sector); goto out; } } if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { sc->result = DID_ERROR << 16; goto out; } if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) { uint16_t senselen; if (datalen < 2) { invalid_datalen: iscsi_conn_printk(KERN_ERR, conn, "Got CHECK_CONDITION but invalid data " "buffer size of %d\n", datalen); sc->result = DID_BAD_TARGET << 16; goto out; } senselen = get_unaligned_be16(data); if (datalen < senselen) goto invalid_datalen; memcpy(sc->sense_buffer, data + 2, min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n", min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); } if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | ISCSI_FLAG_CMD_BIDI_OVERFLOW)) { sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { int res_count = be32_to_cpu(rhdr->residual_count); if (res_count > 0 && (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || res_count <= scsi_bufflen(sc))) /* write side for bidi or uni-io set_resid */ scsi_set_resid(sc, res_count); else sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } out: ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } /** * iscsi_data_in_rsp - SCSI Data-In Response processing * @conn: iscsi connection * @hdr: iscsi pdu * @task: scsi command task * * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received * then completes the command and task. 
called under back_lock **/ static void iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task) { struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr; struct scsi_cmnd *sc = task->sc; if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) return; iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr); sc->result = (DID_OK << 16) | rhdr->cmd_status; conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | ISCSI_FLAG_DATA_OVERFLOW)) { int res_count = be32_to_cpu(rhdr->residual_count); if (res_count > 0 && (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || res_count <= sc->sdb.length)) scsi_set_resid(sc, res_count); else sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } ISCSI_DBG_SESSION(conn->session, "data in with status done " "[sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) { struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; struct iscsi_session *session = conn->session; conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; conn->tmfrsp_pdus_cnt++; if (session->tmf_state != TMF_QUEUED) return; if (tmf->response == ISCSI_TMF_RSP_COMPLETE) session->tmf_state = TMF_SUCCESS; else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) session->tmf_state = TMF_NOT_FOUND; else session->tmf_state = TMF_FAILED; wake_up(&session->ehwait); } static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) { struct iscsi_nopout hdr; struct iscsi_task *task; if (!rhdr) { if (READ_ONCE(conn->ping_task)) return -EINVAL; } memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; hdr.flags = ISCSI_FLAG_CMD_FINAL; if (rhdr) { hdr.lun = rhdr->lun; hdr.ttt = rhdr->ttt; hdr.itt = RESERVED_ITT; } else hdr.ttt = RESERVED_ITT; task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0); if (!task) return -ENOMEM; if (!rhdr) WRITE_ONCE(conn->ping_task, task); if (iscsi_send_mgmt_task(task)) { if (!rhdr) WRITE_ONCE(conn->ping_task, NULL); iscsi_put_task(task); iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); return -EIO; } else if (!rhdr) { /* only track our nops */ conn->last_ping = jiffies; } return 0; } /** * iscsi_nop_out_rsp - SCSI NOP Response processing * @task: scsi command task * @nop: the nop structure * @data: where to put the data * @datalen: length of data * * iscsi_nop_out_rsp handles nop response from use or * from user space. called under back_lock **/ static int iscsi_nop_out_rsp(struct iscsi_task *task, struct iscsi_nopin *nop, char *data, int datalen) { struct iscsi_conn *conn = task->conn; int rc = 0; if (READ_ONCE(conn->ping_task) != task) { /* * If this is not in response to one of our * nops then it must be from userspace. */ if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; } else mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); iscsi_complete_task(task, ISCSI_TASK_COMPLETED); return rc; } static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, int datalen) { struct iscsi_reject *reject = (struct iscsi_reject *)hdr; struct iscsi_hdr rejected_pdu; int opcode, rc = 0; conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; if (ntoh24(reject->dlength) > datalen || ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) { iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected " "pdu. 
Invalid data length (pdu dlength " "%u, datalen %d\n", ntoh24(reject->dlength), datalen); return ISCSI_ERR_PROTO; } memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK; switch (reject->reason) { case ISCSI_REASON_DATA_DIGEST_ERROR: iscsi_conn_printk(KERN_ERR, conn, "pdu (op 0x%x itt 0x%x) rejected " "due to DataDigest error.\n", opcode, rejected_pdu.itt); break; case ISCSI_REASON_IMM_CMD_REJECT: iscsi_conn_printk(KERN_ERR, conn, "pdu (op 0x%x itt 0x%x) rejected. Too many " "immediate commands.\n", opcode, rejected_pdu.itt); /* * We only send one TMF at a time so if the target could not * handle it, then it should get fixed (RFC mandates that * a target can handle one immediate TMF per conn). * * For nops-outs, we could have sent more than one if * the target is sending us lots of nop-ins */ if (opcode != ISCSI_OP_NOOP_OUT) return 0; if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { /* * nop-out in response to target's nop-out rejected. * Just resend. */ /* In RX path we are under back lock */ spin_unlock(&conn->session->back_lock); spin_lock(&conn->session->frwd_lock); iscsi_send_nopout(conn, (struct iscsi_nopin*)&rejected_pdu); spin_unlock(&conn->session->frwd_lock); spin_lock(&conn->session->back_lock); } else { struct iscsi_task *task; /* * Our nop as ping got dropped. We know the target * and transport are ok so just clean up */ task = iscsi_itt_to_task(conn, rejected_pdu.itt); if (!task) { iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu reject. Could " "not lookup rejected task.\n"); rc = ISCSI_ERR_BAD_ITT; } else rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)&rejected_pdu, NULL, 0); } break; default: iscsi_conn_printk(KERN_ERR, conn, "pdu (op 0x%x itt 0x%x) rejected. Reason " "code 0x%x\n", rejected_pdu.opcode, rejected_pdu.itt, reject->reason); break; } return rc; } /** * iscsi_itt_to_task - look up task by itt * @conn: iscsi connection * @itt: itt * * This should be used for mgmt tasks like login and nops, or if * the LDD's itt space does not include the session age. * * The session back_lock must be held. */ struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) { struct iscsi_session *session = conn->session; int i; if (itt == RESERVED_ITT) return NULL; if (session->tt->parse_pdu_itt) session->tt->parse_pdu_itt(conn, itt, &i, NULL); else i = get_itt(itt); if (i >= session->cmds_max) return NULL; return session->cmds[i]; } EXPORT_SYMBOL_GPL(iscsi_itt_to_task); /** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn * @hdr: iscsi header * @data: data buffer * @datalen: len of data buffer * * Completes pdu processing by freeing any resources allocated at * queuecommand or send generic. session back_lock must be held and verify * itt must have been called. 
*/ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, int datalen) { struct iscsi_session *session = conn->session; int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; struct iscsi_task *task; uint32_t itt; conn->last_recv = jiffies; rc = iscsi_verify_itt(conn, hdr->itt); if (rc) return rc; if (hdr->itt != RESERVED_ITT) itt = get_itt(hdr->itt); else itt = ~0U; ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n", opcode, conn->id, itt, datalen); if (itt == ~0U) { iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); switch(opcode) { case ISCSI_OP_NOOP_IN: if (datalen) { rc = ISCSI_ERR_PROTO; break; } if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) break; /* In RX path we are under back lock */ spin_unlock(&session->back_lock); spin_lock(&session->frwd_lock); iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr); spin_unlock(&session->frwd_lock); spin_lock(&session->back_lock); break; case ISCSI_OP_REJECT: rc = iscsi_handle_reject(conn, hdr, data, datalen); break; case ISCSI_OP_ASYNC_EVENT: conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; break; default: rc = ISCSI_ERR_BAD_OPCODE; break; } goto out; } switch(opcode) { case ISCSI_OP_SCSI_CMD_RSP: case ISCSI_OP_SCSI_DATA_IN: task = iscsi_itt_to_ctask(conn, hdr->itt); if (!task) return ISCSI_ERR_BAD_ITT; task->last_xfer = jiffies; break; case ISCSI_OP_R2T: /* * LLD handles R2Ts if they need to. */ return 0; case ISCSI_OP_LOGOUT_RSP: case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: case ISCSI_OP_SCSI_TMFUNC_RSP: case ISCSI_OP_NOOP_IN: task = iscsi_itt_to_task(conn, hdr->itt); if (!task) return ISCSI_ERR_BAD_ITT; break; default: return ISCSI_ERR_BAD_OPCODE; } switch(opcode) { case ISCSI_OP_SCSI_CMD_RSP: iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); break; case ISCSI_OP_SCSI_DATA_IN: iscsi_data_in_rsp(conn, hdr, task); break; case ISCSI_OP_LOGOUT_RSP: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); if (datalen) { rc = ISCSI_ERR_PROTO; break; } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; goto recv_pdu; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); /* * login related PDU's exp_statsn is handled in * userspace */ goto recv_pdu; case ISCSI_OP_SCSI_TMFUNC_RSP: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); if (datalen) { rc = ISCSI_ERR_PROTO; break; } iscsi_tmf_rsp(conn, hdr); iscsi_complete_task(task, ISCSI_TASK_COMPLETED); break; case ISCSI_OP_NOOP_IN: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) { rc = ISCSI_ERR_PROTO; break; } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr, data, datalen); break; default: rc = ISCSI_ERR_BAD_OPCODE; break; } out: return rc; recv_pdu: if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; iscsi_complete_task(task, ISCSI_TASK_COMPLETED); return rc; } EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, int datalen) { int rc; spin_lock(&conn->session->back_lock); rc = __iscsi_complete_pdu(conn, hdr, data, datalen); spin_unlock(&conn->session->back_lock); return rc; } EXPORT_SYMBOL_GPL(iscsi_complete_pdu); int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt) { struct iscsi_session *session = conn->session; int age = 0, i = 0; if (itt == RESERVED_ITT) return 0; if 
(session->tt->parse_pdu_itt) session->tt->parse_pdu_itt(conn, itt, &i, &age); else { i = get_itt(itt); age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK; } if (age != session->age) { iscsi_conn_printk(KERN_ERR, conn, "received itt %x expected session age (%x)\n", (__force u32)itt, session->age); return ISCSI_ERR_BAD_ITT; } if (i >= session->cmds_max) { iscsi_conn_printk(KERN_ERR, conn, "received invalid itt index %u (max cmds " "%u.\n", i, session->cmds_max); return ISCSI_ERR_BAD_ITT; } return 0; } EXPORT_SYMBOL_GPL(iscsi_verify_itt); /** * iscsi_itt_to_ctask - look up ctask by itt * @conn: iscsi connection * @itt: itt * * This should be used for cmd tasks. * * The session back_lock must be held. */ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt) { struct iscsi_task *task; if (iscsi_verify_itt(conn, itt)) return NULL; task = iscsi_itt_to_task(conn, itt); if (!task || !task->sc) return NULL; if (iscsi_cmd(task->sc)->age != conn->session->age) { iscsi_session_printk(KERN_ERR, conn->session, "task's session age %d, expected %d\n", iscsi_cmd(task->sc)->age, conn->session->age); return NULL; } return task; } EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask); void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err) { struct iscsi_conn *conn; spin_lock_bh(&session->frwd_lock); conn = session->leadconn; if (session->state == ISCSI_STATE_TERMINATE || !conn) { spin_unlock_bh(&session->frwd_lock); return; } iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&session->frwd_lock); /* * if the host is being removed bypass the connection * recovery initialization because we are going to kill * the session. */ if (err == ISCSI_ERR_INVALID_HOST) iscsi_conn_error_event(conn->cls_conn, err); else iscsi_conn_failure(conn, err); iscsi_put_conn(conn->cls_conn); } EXPORT_SYMBOL_GPL(iscsi_session_failure); static bool iscsi_set_conn_failed(struct iscsi_conn *conn) { struct iscsi_session *session = conn->session; if (session->state == ISCSI_STATE_FAILED) return false; if (conn->stop_stage == 0) session->state = ISCSI_STATE_FAILED; set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); return true; } void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) { struct iscsi_session *session = conn->session; bool needs_evt; spin_lock_bh(&session->frwd_lock); needs_evt = iscsi_set_conn_failed(conn); spin_unlock_bh(&session->frwd_lock); if (needs_evt) iscsi_conn_error_event(conn->cls_conn, err); } EXPORT_SYMBOL_GPL(iscsi_conn_failure); static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) { struct iscsi_session *session = conn->session; /* * Check for iSCSI window and take care of CmdSN wrap-around */ if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) { ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn " "%u MaxCmdSN %u CmdSN %u/%u\n", session->exp_cmdsn, session->max_cmdsn, session->cmdsn, session->queued_cmdsn); return -ENOSPC; } return 0; } static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task, bool was_requeue) { int rc; if (!conn->task) { /* * Take a ref so we can access it after xmit_task(). * * This should never fail because the failure paths will have * stopped the xmit thread. 
*/ if (!iscsi_get_task(task)) { WARN_ON_ONCE(1); return 0; } } else { /* Already have a ref from when we failed to send it last call */ conn->task = NULL; } /* * If this was a requeue for a R2T we have an extra ref on the task in * case a bad target sends a cmd rsp before we have handled the task. */ if (was_requeue) iscsi_put_task(task); /* * Do this after dropping the extra ref because if this was a requeue * it's removed from that list and cleanup_queued_task would miss it. */ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { /* * Save the task and ref in case we weren't cleaning up this * task and get woken up again. */ conn->task = task; return -ENODATA; } spin_unlock_bh(&conn->session->frwd_lock); rc = conn->session->tt->xmit_task(task); spin_lock_bh(&conn->session->frwd_lock); if (!rc) { /* done with this task */ task->last_xfer = jiffies; } else { /* * get an extra ref that is released next time we access it * as conn->task above. */ iscsi_get_task(task); conn->task = task; } iscsi_put_task(task); return rc; } /** * iscsi_requeue_task - requeue task to run from session workqueue * @task: task to requeue * * Callers must have taken a ref to the task that is going to be requeued. */ void iscsi_requeue_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; /* * this may be on the requeue list already if the xmit_task callout * is handling the r2ts while we are adding new ones */ spin_lock_bh(&conn->session->frwd_lock); if (list_empty(&task->running)) { list_add_tail(&task->running, &conn->requeue); } else { /* * Don't need the extra ref since it's already requeued and * has a ref. */ iscsi_put_task(task); } iscsi_conn_queue_xmit(conn); spin_unlock_bh(&conn->session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_requeue_task); /** * iscsi_data_xmit - xmit any command into the scheduled connection * @conn: iscsi connection * * Notes: * The function can return -EAGAIN in which case the caller must * re-schedule it again later or recover. '0' return code means * successful xmit. **/ static int iscsi_data_xmit(struct iscsi_conn *conn) { struct iscsi_task *task; int rc = 0; spin_lock_bh(&conn->session->frwd_lock); if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; } if (conn->task) { rc = iscsi_xmit_task(conn, conn->task, false); if (rc) goto done; } /* * process mgmt pdus like nops before commands since we should * only have one nop-out as a ping from us and targets should not * overflow us with nop-ins */ check_mgmt: while (!list_empty(&conn->mgmtqueue)) { task = list_entry(conn->mgmtqueue.next, struct iscsi_task, running); list_del_init(&task->running); if (iscsi_prep_mgmt_task(conn, task)) { /* regular RX path uses back_lock */ spin_lock_bh(&conn->session->back_lock); __iscsi_put_task(task); spin_unlock_bh(&conn->session->back_lock); continue; } rc = iscsi_xmit_task(conn, task, false); if (rc) goto done; } check_requeue: while (!list_empty(&conn->requeue)) { /* * we always do fastlogout - conn stop code will clean up. 
*/ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) break; task = list_entry(conn->requeue.next, struct iscsi_task, running); if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) break; list_del_init(&task->running); rc = iscsi_xmit_task(conn, task, true); if (rc) goto done; if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } /* process pending command queue */ while (!list_empty(&conn->cmdqueue)) { task = list_entry(conn->cmdqueue.next, struct iscsi_task, running); list_del_init(&task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { fail_scsi_task(task, DID_IMM_RETRY); continue; } rc = iscsi_prep_scsi_cmd_pdu(task); if (rc) { if (rc == -ENOMEM || rc == -EACCES) fail_scsi_task(task, DID_IMM_RETRY); else fail_scsi_task(task, DID_ABORT); continue; } rc = iscsi_xmit_task(conn, task, false); if (rc) goto done; /* * we could continuously get new task requests so * we need to check the mgmt queue for nops that need to * be sent to aviod starvation */ if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; if (!list_empty(&conn->requeue)) goto check_requeue; } spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; done: spin_unlock_bh(&conn->session->frwd_lock); return rc; } static void iscsi_xmitworker(struct work_struct *work) { struct iscsi_conn *conn = container_of(work, struct iscsi_conn, xmitwork); int rc; /* * serialize Xmit worker on a per-connection basis. */ do { rc = iscsi_data_xmit(conn); } while (rc >= 0 || rc == -EAGAIN); } static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, struct scsi_cmnd *sc) { struct iscsi_task *task; if (!kfifo_out(&conn->session->cmdpool.queue, (void *) &task, sizeof(void *))) return NULL; iscsi_cmd(sc)->age = conn->session->age; iscsi_cmd(sc)->task = task; refcount_set(&task->refcount, 1); task->state = ISCSI_TASK_PENDING; task->conn = conn; task->sc = sc; task->have_checked_conn = false; task->last_timeout = jiffies; task->last_xfer = jiffies; task->protected = false; INIT_LIST_HEAD(&task->running); return task; } enum { FAILURE_BAD_HOST = 1, FAILURE_SESSION_FAILED, FAILURE_SESSION_FREED, FAILURE_WINDOW_CLOSED, FAILURE_OOM, FAILURE_SESSION_TERMINATE, FAILURE_SESSION_IN_RECOVERY, FAILURE_SESSION_RECOVERY_TIMEOUT, FAILURE_SESSION_LOGGING_OUT, FAILURE_SESSION_NOT_READY, }; int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_host *ihost; int reason = 0; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_task *task = NULL; sc->result = 0; iscsi_cmd(sc)->task = NULL; ihost = shost_priv(host); cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; spin_lock_bh(&session->frwd_lock); reason = iscsi_session_chkready(cls_session); if (reason) { sc->result = reason; goto fault; } if (session->state != ISCSI_STATE_LOGGED_IN) { /* * to handle the race between when we set the recovery state * and block the session we requeue here (commands could * be entering our queuecommand while a block is starting * up because the block code is not locked) */ switch (session->state) { case ISCSI_STATE_FAILED: /* * cmds should fail during shutdown, if the session * state is bad, allowing completion to happen */ if (unlikely(system_state != SYSTEM_RUNNING)) { reason = FAILURE_SESSION_FAILED; sc->result = DID_NO_CONNECT << 16; break; } fallthrough; case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; break; case ISCSI_STATE_LOGGING_OUT: reason = 
FAILURE_SESSION_LOGGING_OUT; sc->result = DID_IMM_RETRY << 16; break; case ISCSI_STATE_RECOVERY_FAILED: reason = FAILURE_SESSION_RECOVERY_TIMEOUT; sc->result = DID_TRANSPORT_FAILFAST << 16; break; case ISCSI_STATE_TERMINATE: reason = FAILURE_SESSION_TERMINATE; sc->result = DID_NO_CONNECT << 16; break; default: reason = FAILURE_SESSION_FREED; sc->result = DID_NO_CONNECT << 16; } goto fault; } conn = session->leadconn; if (!conn) { reason = FAILURE_SESSION_FREED; sc->result = DID_NO_CONNECT << 16; goto fault; } if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_REQUEUE << 16; goto fault; } if (iscsi_check_cmdsn_window_closed(conn)) { reason = FAILURE_WINDOW_CLOSED; goto reject; } task = iscsi_alloc_task(conn, sc); if (!task) { reason = FAILURE_OOM; goto reject; } if (!ihost->workq) { reason = iscsi_prep_scsi_cmd_pdu(task); if (reason) { if (reason == -ENOMEM || reason == -EACCES) { reason = FAILURE_OOM; goto prepd_reject; } else { sc->result = DID_ABORT << 16; goto prepd_fault; } } if (session->tt->xmit_task(task)) { session->cmdsn--; reason = FAILURE_SESSION_NOT_READY; goto prepd_reject; } } else { list_add_tail(&task->running, &conn->cmdqueue); iscsi_conn_queue_xmit(conn); } session->queued_cmdsn++; spin_unlock_bh(&session->frwd_lock); return 0; prepd_reject: spin_lock_bh(&session->back_lock); iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); spin_unlock_bh(&session->back_lock); reject: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); return SCSI_MLQUEUE_TARGET_BUSY; prepd_fault: spin_lock_bh(&session->back_lock); iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); spin_unlock_bh(&session->back_lock); fault: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); scsi_set_resid(sc, scsi_bufflen(sc)); scsi_done(sc); return 0; } EXPORT_SYMBOL_GPL(iscsi_queuecommand); int iscsi_target_alloc(struct scsi_target *starget) { struct iscsi_cls_session *cls_session = starget_to_session(starget); struct iscsi_session *session = cls_session->dd_data; starget->can_queue = session->scsi_cmds_max; return 0; } EXPORT_SYMBOL_GPL(iscsi_target_alloc); static void iscsi_tmf_timedout(struct timer_list *t) { struct iscsi_session *session = from_timer(session, t, tmf_timer); spin_lock(&session->frwd_lock); if (session->tmf_state == TMF_QUEUED) { session->tmf_state = TMF_TIMEDOUT; ISCSI_DBG_EH(session, "tmf timedout\n"); /* unblock eh_abort() */ wake_up(&session->ehwait); } spin_unlock(&session->frwd_lock); } static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, struct iscsi_tm *hdr, int age, int timeout) __must_hold(&session->frwd_lock) { struct iscsi_session *session = conn->session; if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) { spin_unlock_bh(&session->frwd_lock); iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n"); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); spin_lock_bh(&session->frwd_lock); return -EPERM; } conn->tmfcmd_pdus_cnt++; session->tmf_timer.expires = timeout * HZ + jiffies; add_timer(&session->tmf_timer); ISCSI_DBG_EH(session, "tmf set timeout\n"); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); /* * block eh thread until: * * 1) tmf response * 2) tmf timeout * 3) session is terminated or restarted or userspace has * given up on recovery */ wait_event_interruptible(session->ehwait, age != session->age || session->state != 
ISCSI_STATE_LOGGED_IN || session->tmf_state != TMF_QUEUED); if (signal_pending(current)) flush_signals(current); del_timer_sync(&session->tmf_timer); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); /* if the session drops it will clean up the task */ if (age != session->age || session->state != ISCSI_STATE_LOGGED_IN) return -ENOTCONN; return 0; } /* * Fail commands. session frwd lock held and xmit thread flushed. */ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error) { struct iscsi_session *session = conn->session; struct iscsi_task *task; int i; restart_cmd_loop: spin_lock_bh(&session->back_lock); for (i = 0; i < session->cmds_max; i++) { task = session->cmds[i]; if (!task->sc || task->state == ISCSI_TASK_FREE) continue; if (lun != -1 && lun != task->sc->device->lun) continue; /* * The cmd is completing but if this is called from an eh * callout path then when we return scsi-ml owns the cmd. Wait * for the completion path to finish freeing the cmd. */ if (!iscsi_get_task(task)) { spin_unlock_bh(&session->back_lock); spin_unlock_bh(&session->frwd_lock); udelay(ISCSI_CMD_COMPL_WAIT); spin_lock_bh(&session->frwd_lock); goto restart_cmd_loop; } ISCSI_DBG_SESSION(session, "failing sc %p itt 0x%x state %d\n", task->sc, task->itt, task->state); __fail_scsi_task(task, error); __iscsi_put_task(task); } spin_unlock_bh(&session->back_lock); } /** * iscsi_suspend_queue - suspend iscsi_queuecommand * @conn: iscsi conn to stop queueing IO on * * This grabs the session frwd_lock to make sure no one is in * xmit_task/queuecommand, and then sets suspend to prevent * new commands from being queued. This only needs to be called * by offload drivers that need to sync a path like ep disconnect * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi * will call iscsi_start_tx and iscsi_unblock_session when in FFP. */ void iscsi_suspend_queue(struct iscsi_conn *conn) { spin_lock_bh(&conn->session->frwd_lock); set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); spin_unlock_bh(&conn->session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_suspend_queue); /** * iscsi_suspend_tx - suspend iscsi_data_xmit * @conn: iscsi conn to stop processing IO on. * * This function sets the suspend bit to prevent iscsi_data_xmit * from sending new IO, and if work is queued on the xmit thread * it will wait for it to be completed. */ void iscsi_suspend_tx(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); if (ihost->workq) flush_work(&conn->xmitwork); } EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); iscsi_conn_queue_xmit(conn); } /** * iscsi_suspend_rx - Prevent recvwork from running again. * @conn: iscsi conn to stop. */ void iscsi_suspend_rx(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); if (ihost->workq) flush_work(&conn->recvwork); } EXPORT_SYMBOL_GPL(iscsi_suspend_rx); /* * We want to make sure a ping is in flight. It has timed out. * And we are not busy processing a pdu that is making * progress but got started before the ping and is taking a while * to complete so the ping is just stuck behind it in a queue. 
 */
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
	if (READ_ONCE(conn->ping_task) &&
	    time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
			   (conn->ping_timeout * HZ), jiffies))
		return 1;
	else
		return 0;
}

enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
	enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED;
	struct iscsi_task *task = NULL, *running_task;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	int i;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);

	spin_lock_bh(&session->frwd_lock);
	spin_lock(&session->back_lock);
	task = iscsi_cmd(sc)->task;
	if (!task) {
		/*
		 * Raced with completion. Blk layer has taken ownership
		 * so let timeout code complete it now.
		 */
		rc = SCSI_EH_NOT_HANDLED;
		spin_unlock(&session->back_lock);
		goto done;
	}

	if (!iscsi_get_task(task)) {
		/*
		 * Racing with the completion path right now, so give it more
		 * time so that path can complete it like normal.
		 */
		rc = SCSI_EH_RESET_TIMER;
		task = NULL;
		spin_unlock(&session->back_lock);
		goto done;
	}
	spin_unlock(&session->back_lock);

	if (session->state != ISCSI_STATE_LOGGED_IN) {
		/*
		 * During shutdown, if session is prematurely disconnected,
		 * recovery won't happen and there will be hung cmds. Not
		 * handling cmds would trigger EH, also bad in this case.
		 * Instead, handle cmd, allow completion to happen and let
		 * the upper layer deal with the result.
		 */
		if (unlikely(system_state != SYSTEM_RUNNING)) {
			sc->result = DID_NO_CONNECT << 16;
			ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
			rc = SCSI_EH_NOT_HANDLED;
			goto done;
		}
		/*
		 * We are probably in the middle of iscsi recovery so let
		 * that complete and handle the error.
		 */
		rc = SCSI_EH_RESET_TIMER;
		goto done;
	}

	conn = session->leadconn;
	if (!conn) {
		/* In the middle of shutting down */
		rc = SCSI_EH_RESET_TIMER;
		goto done;
	}

	/*
	 * If we have sent (at least queued to the network layer) a pdu or
	 * recvd one for the task since the last timeout ask for
	 * more time. If on the next timeout we have not made progress
	 * we can check if it is the task or connection when we send the
	 * nop as a ping.
	 */
	if (time_after(task->last_xfer, task->last_timeout)) {
		ISCSI_DBG_EH(session, "Command making progress. Asking "
			     "scsi-ml for more time to complete. "
			     "Last data xfer at %lu. Last timeout was at "
			     "%lu.\n", task->last_xfer, task->last_timeout);
		task->have_checked_conn = false;
		rc = SCSI_EH_RESET_TIMER;
		goto done;
	}

	if (!conn->recv_timeout && !conn->ping_timeout)
		goto done;
	/*
	 * if the ping timed out then we are in the middle of cleaning up
	 * and can let the iscsi eh handle it
	 */
	if (iscsi_has_ping_timed_out(conn)) {
		rc = SCSI_EH_RESET_TIMER;
		goto done;
	}

	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		running_task = conn->session->cmds[i];
		if (!running_task->sc || running_task == task ||
		    running_task->state != ISCSI_TASK_RUNNING)
			continue;

		/*
		 * Only check if cmds started before this one have made
		 * progress, or this could never fail
		 */
		if (time_after(running_task->sc->jiffies_at_alloc,
			       task->sc->jiffies_at_alloc))
			continue;

		if (time_after(running_task->last_xfer, task->last_timeout)) {
			/*
			 * This task has not made progress, but a task
			 * started before us has transferred data since
			 * we started/last-checked. We could be queueing
			 * too many tasks or the LU is bad.
			 *
			 * If the device is bad the cmds ahead of us on
			 * other devs will complete, and this loop will
			 * eventually fail starting the scsi eh.
*/ ISCSI_DBG_EH(session, "Command has not made progress " "but commands ahead of it have. " "Asking scsi-ml for more time to " "complete. Our last xfer vs running task " "last xfer %lu/%lu. Last check %lu.\n", task->last_xfer, running_task->last_xfer, task->last_timeout); spin_unlock(&session->back_lock); rc = SCSI_EH_RESET_TIMER; goto done; } } spin_unlock(&session->back_lock); /* Assumes nop timeout is shorter than scsi cmd timeout */ if (task->have_checked_conn) goto done; /* * Checking the transport already or nop from a cmd timeout still * running */ if (READ_ONCE(conn->ping_task)) { task->have_checked_conn = true; rc = SCSI_EH_RESET_TIMER; goto done; } /* Make sure there is a transport check done */ iscsi_send_nopout(conn, NULL); task->have_checked_conn = true; rc = SCSI_EH_RESET_TIMER; done: spin_unlock_bh(&session->frwd_lock); if (task) { task->last_timeout = jiffies; iscsi_put_task(task); } ISCSI_DBG_EH(session, "return %s\n", rc == SCSI_EH_RESET_TIMER ? "timer reset" : "shutdown or nh"); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); static void iscsi_check_transport_timeouts(struct timer_list *t) { struct iscsi_conn *conn = from_timer(conn, t, transport_timer); struct iscsi_session *session = conn->session; unsigned long recv_timeout, next_timeout = 0, last_recv; spin_lock(&session->frwd_lock); if (session->state != ISCSI_STATE_LOGGED_IN) goto done; recv_timeout = conn->recv_timeout; if (!recv_timeout) goto done; recv_timeout *= HZ; last_recv = conn->last_recv; if (iscsi_has_ping_timed_out(conn)) { iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " "expired, recv timeout %d, last rx %lu, " "last ping %lu, now %lu\n", conn->ping_timeout, conn->recv_timeout, last_recv, conn->last_ping, jiffies); spin_unlock(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT); return; } if (time_before_eq(last_recv + recv_timeout, jiffies)) { /* send a ping to try to provoke some traffic */ ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); if (iscsi_send_nopout(conn, NULL)) next_timeout = jiffies + (1 * HZ); else next_timeout = conn->last_ping + (conn->ping_timeout * HZ); } else next_timeout = last_recv + recv_timeout; ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout); mod_timer(&conn->transport_timer, next_timeout); done: spin_unlock(&session->frwd_lock); } /** * iscsi_conn_unbind - prevent queueing to conn. * @cls_conn: iscsi conn ep is bound to. * @is_active: is the conn in use for boot or is this for EH/termination * * This must be called by drivers implementing the ep_disconnect callout. * It disables queueing to the connection from libiscsi in preparation for * an ep_disconnect call. */ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) { struct iscsi_session *session; struct iscsi_conn *conn; if (!cls_conn) return; conn = cls_conn->dd_data; session = conn->session; /* * Wait for iscsi_eh calls to exit. We don't wait for the tmf to * complete or timeout. The caller just wants to know what's running * is everything that needs to be cleaned up, and no cmds will be * queued. */ mutex_lock(&session->eh_mutex); iscsi_suspend_queue(conn); iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); if (!is_active) { /* * if logout timed out before userspace could even send a PDU * the state might still be in ISCSI_STATE_LOGGED_IN and * allowing new cmds and TMFs. 
*/ if (session->state == ISCSI_STATE_LOGGED_IN) iscsi_set_conn_failed(conn); } spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); } EXPORT_SYMBOL_GPL(iscsi_conn_unbind); static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, struct iscsi_tm *hdr) { memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; hdr->lun = task->lun; hdr->rtt = task->hdr_itt; hdr->refcmdsn = task->cmdsn; } int iscsi_eh_abort(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_task *task; struct iscsi_tm *hdr; int age; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; ISCSI_DBG_EH(session, "aborting sc %p\n", sc); completion_check: mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); /* * if session was ISCSI_STATE_IN_RECOVERY then we may not have * got the command. */ if (!iscsi_cmd(sc)->task) { ISCSI_DBG_EH(session, "sc never reached iscsi layer or " "it completed.\n"); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } /* * If we are not logged in or we have started a new session * then let the host reset code handle this */ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN || iscsi_cmd(sc)->age != session->age) { spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); ISCSI_DBG_EH(session, "failing abort due to dropped " "session.\n"); return FAILED; } spin_lock(&session->back_lock); task = iscsi_cmd(sc)->task; if (!task || !task->sc) { /* task completed before time out */ ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); spin_unlock(&session->back_lock); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } if (!iscsi_get_task(task)) { spin_unlock(&session->back_lock); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); /* We are just about to call iscsi_free_task so wait for it. */ udelay(ISCSI_CMD_COMPL_WAIT); goto completion_check; } ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt); conn = session->leadconn; iscsi_get_conn(conn->cls_conn); conn->eh_abort_cnt++; age = session->age; spin_unlock(&session->back_lock); if (task->state == ISCSI_TASK_PENDING) { fail_scsi_task(task, DID_ABORT); goto success; } /* only have one tmf outstanding at a time */ if (session->tmf_state != TMF_INITIAL) goto failed; session->tmf_state = TMF_QUEUED; hdr = &session->tmhdr; iscsi_prep_abort_task_pdu(task, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) goto failed; switch (session->tmf_state) { case TMF_SUCCESS: spin_unlock_bh(&session->frwd_lock); /* * stop tx side incase the target had sent a abort rsp but * the initiator was still writing out data. */ iscsi_suspend_tx(conn); /* * we do not stop the recv side because targets have been * good and have never sent us a successful tmf response * then sent more data for the cmd. 
*/ spin_lock_bh(&session->frwd_lock); fail_scsi_task(task, DID_ABORT); session->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); goto success_unlocked; case TMF_TIMEDOUT: session->running_aborted_task = task; spin_unlock_bh(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto failed_unlocked; case TMF_NOT_FOUND: if (iscsi_task_is_completed(task)) { session->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); /* task completed before tmf abort response */ ISCSI_DBG_EH(session, "sc completed while abort in " "progress\n"); goto success; } fallthrough; default: session->tmf_state = TMF_INITIAL; goto failed; } success: spin_unlock_bh(&session->frwd_lock); success_unlocked: ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", sc, task->itt); iscsi_put_task(task); iscsi_put_conn(conn->cls_conn); mutex_unlock(&session->eh_mutex); return SUCCESS; failed: spin_unlock_bh(&session->frwd_lock); failed_unlocked: ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, task ? task->itt : 0); /* * The driver might be accessing the task so hold the ref. The conn * stop cleanup will drop the ref after ep_disconnect so we know the * driver's no longer touching the task. */ if (!session->running_aborted_task) iscsi_put_task(task); iscsi_put_conn(conn->cls_conn); mutex_unlock(&session->eh_mutex); return FAILED; } EXPORT_SYMBOL_GPL(iscsi_eh_abort); static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) { memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; int_to_scsilun(sc->device->lun, &hdr->lun); hdr->rtt = RESERVED_ITT; } int iscsi_eh_device_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc, sc->device->lun); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); /* * Just check if we are not logged in. We cannot check for * the phase because the reset could come from a ioctl. */ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) goto unlock; conn = session->leadconn; /* only have one tmf outstanding at a time */ if (session->tmf_state != TMF_INITIAL) goto unlock; session->tmf_state = TMF_QUEUED; hdr = &session->tmhdr; iscsi_prep_lun_reset_pdu(sc, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, session->lu_reset_timeout)) { rc = FAILED; goto unlock; } switch (session->tmf_state) { case TMF_SUCCESS: break; case TMF_TIMEDOUT: spin_unlock_bh(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto done; default: session->tmf_state = TMF_INITIAL; goto unlock; } rc = SUCCESS; spin_unlock_bh(&session->frwd_lock); iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); session->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); goto done; unlock: spin_unlock_bh(&session->frwd_lock); done: ISCSI_DBG_EH(session, "dev reset result = %s\n", rc == SUCCESS ? 
"SUCCESS" : "FAILED"); mutex_unlock(&session->eh_mutex); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; spin_lock_bh(&session->frwd_lock); if (session->state != ISCSI_STATE_LOGGED_IN) { session->state = ISCSI_STATE_RECOVERY_FAILED; wake_up(&session->ehwait); } spin_unlock_bh(&session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); /** * iscsi_eh_session_reset - drop session and attempt relogin * @sc: scsi command * * This function will wait for a relogin, session termination from * userspace, or a recovery/replacement timeout. */ int iscsi_eh_session_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); if (session->state == ISCSI_STATE_TERMINATE) { failed: ISCSI_DBG_EH(session, "failing session reset: Could not log back into " "%s [age %d]\n", session->targetname, session->age); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return FAILED; } conn = session->leadconn; iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); iscsi_put_conn(conn->cls_conn); ISCSI_DBG_EH(session, "wait for relogin\n"); wait_event_interruptible(session->ehwait, session->state == ISCSI_STATE_TERMINATE || session->state == ISCSI_STATE_LOGGED_IN || session->state == ISCSI_STATE_RECOVERY_FAILED); if (signal_pending(current)) flush_signals(current); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); if (session->state == ISCSI_STATE_LOGGED_IN) { ISCSI_DBG_EH(session, "session reset succeeded for %s,%s\n", session->targetname, conn->persistent_address); } else goto failed; spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } EXPORT_SYMBOL_GPL(iscsi_eh_session_reset); static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) { memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; hdr->rtt = RESERVED_ITT; } /** * iscsi_eh_target_reset - reset target * @sc: scsi command * * This will attempt to send a warm target reset. */ static int iscsi_eh_target_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, session->targetname); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); /* * Just check if we are not logged in. We cannot check for * the phase because the reset could come from a ioctl. 
*/ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) goto unlock; conn = session->leadconn; /* only have one tmf outstanding at a time */ if (session->tmf_state != TMF_INITIAL) goto unlock; session->tmf_state = TMF_QUEUED; hdr = &session->tmhdr; iscsi_prep_tgt_reset_pdu(sc, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, session->tgt_reset_timeout)) { rc = FAILED; goto unlock; } switch (session->tmf_state) { case TMF_SUCCESS: break; case TMF_TIMEDOUT: spin_unlock_bh(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto done; default: session->tmf_state = TMF_INITIAL; goto unlock; } rc = SUCCESS; spin_unlock_bh(&session->frwd_lock); iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, -1, DID_ERROR); session->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); goto done; unlock: spin_unlock_bh(&session->frwd_lock); done: ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, rc == SUCCESS ? "SUCCESS" : "FAILED"); mutex_unlock(&session->eh_mutex); return rc; } /** * iscsi_eh_recover_target - reset target and possibly the session * @sc: scsi command * * This will attempt to send a warm target reset. If that fails, * we will escalate to ERL0 session recovery. */ int iscsi_eh_recover_target(struct scsi_cmnd *sc) { int rc; rc = iscsi_eh_target_reset(sc); if (rc == FAILED) rc = iscsi_eh_session_reset(sc); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_recover_target); /* * Pre-allocate a pool of @max items of @item_size. By default, the pool * should be accessed via kfifo_{get,put} on q->queue. * Optionally, the caller can obtain the array of object pointers * by passing in a non-NULL @items pointer */ int iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) { int i, num_arrays = 1; memset(q, 0, sizeof(*q)); q->max = max; /* If the user passed an items pointer, he wants a copy of * the array. */ if (items) num_arrays++; q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL); if (q->pool == NULL) return -ENOMEM; kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*)); for (i = 0; i < max; i++) { q->pool[i] = kzalloc(item_size, GFP_KERNEL); if (q->pool[i] == NULL) { q->max = i; goto enomem; } kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*)); } if (items) { *items = q->pool + max; memcpy(*items, q->pool, max * sizeof(void *)); } return 0; enomem: iscsi_pool_free(q); return -ENOMEM; } EXPORT_SYMBOL_GPL(iscsi_pool_init); void iscsi_pool_free(struct iscsi_pool *q) { int i; for (i = 0; i < q->max; i++) kfree(q->pool[i]); kvfree(q->pool); } EXPORT_SYMBOL_GPL(iscsi_pool_free); int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, uint16_t requested_cmds_max) { int scsi_cmds, total_cmds = requested_cmds_max; check: if (!total_cmds) total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; /* * The iscsi layer needs some tasks for nop handling and tmfs, * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX * + 1 command for scsi IO. */ if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n", total_cmds, ISCSI_TOTAL_CMDS_MIN); return -EINVAL; } if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. 
Using %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MAX, ISCSI_TOTAL_CMDS_MAX); total_cmds = ISCSI_TOTAL_CMDS_MAX; } if (!is_power_of_2(total_cmds)) { total_cmds = rounddown_pow_of_two(total_cmds); if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN); return -EINVAL; } printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n", requested_cmds_max, total_cmds); } scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; if (shost->can_queue && scsi_cmds > shost->can_queue) { total_cmds = shost->can_queue; printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n", requested_cmds_max, shost->can_queue); goto check; } return scsi_cmds; } EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds); /** * iscsi_host_add - add host to system * @shost: scsi host * @pdev: parent device * * This should be called by partial offload and software iscsi drivers * to add a host to the system. */ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) { if (!shost->can_queue) shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; if (!shost->cmd_per_lun) shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN; return scsi_add_host(shost, pdev); } EXPORT_SYMBOL_GPL(iscsi_host_add); /** * iscsi_host_alloc - allocate a host and driver data * @sht: scsi host template * @dd_data_size: driver host data size * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue * * This should be called by partial offload and software iscsi drivers. * To access the driver specific memory use the iscsi_host_priv() macro. */ struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht, int dd_data_size, bool xmit_can_sleep) { struct Scsi_Host *shost; struct iscsi_host *ihost; shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); if (!shost) return NULL; ihost = shost_priv(shost); if (xmit_can_sleep) { ihost->workq = alloc_workqueue("iscsi_q_%d", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, shost->host_no); if (!ihost->workq) goto free_host; } spin_lock_init(&ihost->lock); ihost->state = ISCSI_HOST_SETUP; ihost->num_sessions = 0; init_waitqueue_head(&ihost->session_removal_wq); return shost; free_host: scsi_host_put(shost); return NULL; } EXPORT_SYMBOL_GPL(iscsi_host_alloc); static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session) { iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST); } /** * iscsi_host_remove - remove host and sessions * @shost: scsi host * @is_shutdown: true if called from a driver shutdown callout * * If there are any sessions left, this will initiate the removal and wait * for the completion. 
 */
void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
{
	struct iscsi_host *ihost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(&ihost->lock, flags);
	ihost->state = ISCSI_HOST_REMOVED;
	spin_unlock_irqrestore(&ihost->lock, flags);

	if (!is_shutdown)
		iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
	else
		iscsi_host_for_each_session(shost, iscsi_force_destroy_session);

	wait_event_interruptible(ihost->session_removal_wq,
				 ihost->num_sessions == 0);
	if (signal_pending(current))
		flush_signals(current);

	scsi_remove_host(shost);
}
EXPORT_SYMBOL_GPL(iscsi_host_remove);

void iscsi_host_free(struct Scsi_Host *shost)
{
	struct iscsi_host *ihost = shost_priv(shost);

	if (ihost->workq)
		destroy_workqueue(ihost->workq);

	kfree(ihost->netdev);
	kfree(ihost->hwaddress);
	kfree(ihost->initiatorname);
	scsi_host_put(shost);
}
EXPORT_SYMBOL_GPL(iscsi_host_free);

static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
{
	struct iscsi_host *ihost = shost_priv(shost);
	unsigned long flags;

	shost = scsi_host_get(shost);
	if (!shost) {
		printk(KERN_ERR "Invalid state. Cannot notify host removal "
		       "of session teardown event because host already "
		       "removed.\n");
		return;
	}

	spin_lock_irqsave(&ihost->lock, flags);
	ihost->num_sessions--;
	if (ihost->num_sessions == 0)
		wake_up(&ihost->session_removal_wq);
	spin_unlock_irqrestore(&ihost->lock, flags);

	scsi_host_put(shost);
}

/**
 * iscsi_session_setup - create iscsi cls session and host and session
 * @iscsit: iscsi transport template
 * @shost: scsi host
 * @cmds_max: session can queue
 * @dd_size: private driver data size, added to session allocation size
 * @cmd_task_size: LLD task private data size
 * @initial_cmdsn: initial CmdSN
 * @id: target ID to add to this session
 *
 * This can be used by software iscsi_transports that allocate
 * a session per scsi host.
 *
 * Callers should set cmds_max to the largest total number (mgmt + scsi) of
 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
 * for nop handling and login/logout requests.
 */
struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
		    uint16_t cmds_max, int dd_size, int cmd_task_size,
		    uint32_t initial_cmdsn, unsigned int id)
{
	struct iscsi_host *ihost = shost_priv(shost);
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	int cmd_i, scsi_cmds;
	unsigned long flags;

	spin_lock_irqsave(&ihost->lock, flags);
	if (ihost->state == ISCSI_HOST_REMOVED) {
		spin_unlock_irqrestore(&ihost->lock, flags);
		return NULL;
	}
	ihost->num_sessions++;
	spin_unlock_irqrestore(&ihost->lock, flags);

	scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
	if (scsi_cmds < 0)
		goto dec_session_count;

	cls_session = iscsi_alloc_session(shost, iscsit,
					  sizeof(struct iscsi_session) +
					  dd_size);
	if (!cls_session)
		goto dec_session_count;
	session = cls_session->dd_data;
	session->cls_session = cls_session;
	session->host = shost;
	session->state = ISCSI_STATE_FREE;
	session->fast_abort = 1;
	session->tgt_reset_timeout = 30;
	session->lu_reset_timeout = 15;
	session->abort_timeout = 10;
	session->scsi_cmds_max = scsi_cmds;
	session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
	session->exp_cmdsn = initial_cmdsn + 1;
	session->max_cmdsn = initial_cmdsn + 1;
	session->max_r2t = 1;
	session->tt = iscsit;
	session->dd_data = cls_session->dd_data + sizeof(*session);

	session->tmf_state = TMF_INITIAL;
	timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
	mutex_init(&session->eh_mutex);

	init_waitqueue_head(&session->ehwait);

	spin_lock_init(&session->frwd_lock);
	spin_lock_init(&session->back_lock);

	/* initialize SCSI PDU commands pool */
	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
			    (void***)&session->cmds,
			    cmd_task_size + sizeof(struct iscsi_task)))
		goto cmdpool_alloc_fail;

	/* pre-format cmds pool with ITT */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_task *task = session->cmds[cmd_i];

		if (cmd_task_size)
			task->dd_data = &task[1];
		task->itt = cmd_i;
		task->state = ISCSI_TASK_FREE;
		INIT_LIST_HEAD(&task->running);
	}

	if (!try_module_get(iscsit->owner))
		goto module_get_fail;

	if (iscsi_add_session(cls_session, id))
		goto cls_session_fail;

	return cls_session;

cls_session_fail:
	module_put(iscsit->owner);
module_get_fail:
	iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
	iscsi_free_session(cls_session);
dec_session_count:
	iscsi_host_dec_session_cnt(shost);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);

/*
 * iscsi_session_remove - Remove session from iSCSI class.
 */
void iscsi_session_remove(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct Scsi_Host *shost = session->host;

	iscsi_remove_session(cls_session);
	/*
	 * host removal only has to wait for its children to be removed from
	 * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
	 * the session, so drop the session count here.
*/ iscsi_host_dec_session_cnt(shost); } EXPORT_SYMBOL_GPL(iscsi_session_remove); /** * iscsi_session_free - Free iscsi session and it's resources * @cls_session: iscsi session */ void iscsi_session_free(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct module *owner = cls_session->transport->owner; iscsi_pool_free(&session->cmdpool); kfree(session->password); kfree(session->password_in); kfree(session->username); kfree(session->username_in); kfree(session->targetname); kfree(session->targetalias); kfree(session->initiatorname); kfree(session->boot_root); kfree(session->boot_nic); kfree(session->boot_target); kfree(session->ifacename); kfree(session->portal_type); kfree(session->discovery_parent_type); iscsi_free_session(cls_session); module_put(owner); } EXPORT_SYMBOL_GPL(iscsi_session_free); /** * iscsi_session_teardown - destroy session and cls_session * @cls_session: iscsi session */ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { iscsi_session_remove(cls_session); iscsi_session_free(cls_session); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); /** * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn * @cls_session: iscsi_cls_session * @dd_size: private driver data size * @conn_idx: cid */ struct iscsi_cls_conn * iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, uint32_t conn_idx) { struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; char *data; int err; cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size, conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; conn->dd_data = cls_conn->dd_data + sizeof(*conn); conn->session = session; conn->cls_conn = cls_conn; conn->c_stage = ISCSI_CONN_INITIAL_STAGE; conn->id = conn_idx; conn->exp_statsn = 0; timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0); INIT_LIST_HEAD(&conn->mgmtqueue); INIT_LIST_HEAD(&conn->cmdqueue); INIT_LIST_HEAD(&conn->requeue); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); /* allocate login_task used for the login/text sequences */ spin_lock_bh(&session->frwd_lock); if (!kfifo_out(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*))) { spin_unlock_bh(&session->frwd_lock); goto login_task_alloc_fail; } spin_unlock_bh(&session->frwd_lock); data = (char *) __get_free_pages(GFP_KERNEL, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); if (!data) goto login_task_data_alloc_fail; conn->login_task->data = conn->data = data; err = iscsi_add_conn(cls_conn); if (err) goto login_task_add_dev_fail; return cls_conn; login_task_add_dev_fail: free_pages((unsigned long) conn->data, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); login_task_data_alloc_fail: kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); login_task_alloc_fail: iscsi_put_conn(cls_conn); return NULL; } EXPORT_SYMBOL_GPL(iscsi_conn_setup); /** * iscsi_conn_teardown - teardown iscsi connection * @cls_conn: iscsi class connection * * TODO: we may need to make this into a two step process * like scsi-mls remove + put host */ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; iscsi_remove_conn(cls_conn); del_timer_sync(&conn->transport_timer); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; if (session->leadconn == conn) { /* * leading connection? then give up on recovery. 
		 */
		session->state = ISCSI_STATE_TERMINATE;
		wake_up(&session->ehwait);
	}
	spin_unlock_bh(&session->frwd_lock);

	/* flush queued up work because we free the connection below */
	iscsi_suspend_tx(conn);

	spin_lock_bh(&session->frwd_lock);
	free_pages((unsigned long) conn->data,
		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
	kfree(conn->persistent_address);
	kfree(conn->local_ipaddr);
	/* regular RX path uses back_lock */
	spin_lock_bh(&session->back_lock);
	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
		 sizeof(void*));
	spin_unlock_bh(&session->back_lock);
	if (session->leadconn == conn)
		session->leadconn = NULL;
	spin_unlock_bh(&session->frwd_lock);
	mutex_unlock(&session->eh_mutex);

	iscsi_put_conn(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);

int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;

	if (!session) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "can't start unbound connection\n");
		return -EPERM;
	}

	if ((session->imm_data_en || !session->initial_r2t_en) &&
	    session->first_burst > session->max_burst) {
		iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
				  "first_burst %d max_burst %d\n",
				  session->first_burst, session->max_burst);
		return -EINVAL;
	}

	if (conn->ping_timeout && !conn->recv_timeout) {
		iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
				  "zero. Using 5 seconds.\n");
		conn->recv_timeout = 5;
	}

	if (conn->recv_timeout && !conn->ping_timeout) {
		iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
				  "zero. Using 5 seconds.\n");
		conn->ping_timeout = 5;
	}

	spin_lock_bh(&session->frwd_lock);
	conn->c_stage = ISCSI_CONN_STARTED;
	session->state = ISCSI_STATE_LOGGED_IN;
	session->queued_cmdsn = session->cmdsn;

	conn->last_recv = jiffies;
	conn->last_ping = jiffies;
	if (conn->recv_timeout && conn->ping_timeout)
		mod_timer(&conn->transport_timer,
			  jiffies + (conn->recv_timeout * HZ));

	switch(conn->stop_stage) {
	case STOP_CONN_RECOVER:
		/*
		 * unblock eh_abort() if it is blocked. re-try all
		 * commands after successful recovery
		 */
		conn->stop_stage = 0;
		session->tmf_state = TMF_INITIAL;
		session->age++;
		if (session->age == 16)
			session->age = 0;
		break;
	case STOP_CONN_TERM:
		conn->stop_stage = 0;
		break;
	default:
		break;
	}

	spin_unlock_bh(&session->frwd_lock);
	iscsi_unblock_session(session->cls_session);
	wake_up(&session->ehwait);
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_start);

static void fail_mgmt_tasks(struct iscsi_session *session,
			    struct iscsi_conn *conn)
{
	struct iscsi_task *task;
	int i, state;

	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (task->sc)
			continue;

		if (task->state == ISCSI_TASK_FREE)
			continue;

		ISCSI_DBG_SESSION(conn->session,
				  "failing mgmt itt 0x%x state %d\n",
				  task->itt, task->state);

		spin_lock_bh(&session->back_lock);
		if (cleanup_queued_task(task)) {
			spin_unlock_bh(&session->back_lock);
			continue;
		}

		state = ISCSI_TASK_ABRT_SESS_RECOV;
		if (task->state == ISCSI_TASK_PENDING)
			state = ISCSI_TASK_COMPLETED;
		iscsi_complete_task(task, state);
		spin_unlock_bh(&session->back_lock);
	}
}

void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	int old_stop_stage;

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->frwd_lock);
	if (conn->stop_stage == STOP_CONN_TERM) {
		spin_unlock_bh(&session->frwd_lock);
		mutex_unlock(&session->eh_mutex);
		return;
	}

	/*
	 * When this is called for the in_login state, we only want to clean
	 * up the login task and connection.
We do not need to block and set * the recovery state again */ if (flag == STOP_CONN_TERM) session->state = ISCSI_STATE_TERMINATE; else if (conn->stop_stage != STOP_CONN_RECOVER) session->state = ISCSI_STATE_IN_RECOVERY; old_stop_stage = conn->stop_stage; conn->stop_stage = flag; spin_unlock_bh(&session->frwd_lock); del_timer_sync(&conn->transport_timer); iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_STOPPED; spin_unlock_bh(&session->frwd_lock); /* * for connection level recovery we should not calculate * header digest. conn->hdr_size used for optimization * in hdr_extract() and will be re-negotiated at * set_param() time. */ if (flag == STOP_CONN_RECOVER) { conn->hdrdgst_en = 0; conn->datadgst_en = 0; if (session->state == ISCSI_STATE_IN_RECOVERY && old_stop_stage != STOP_CONN_RECOVER) { ISCSI_DBG_SESSION(session, "blocking session\n"); iscsi_block_session(session->cls_session); } } /* * flush queues. */ spin_lock_bh(&session->frwd_lock); fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); fail_mgmt_tasks(session, conn); memset(&session->tmhdr, 0, sizeof(session->tmhdr)); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); } EXPORT_SYMBOL_GPL(iscsi_conn_stop); int iscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, int is_leading) { struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; spin_lock_bh(&session->frwd_lock); if (is_leading) session->leadconn = conn; set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); spin_unlock_bh(&session->frwd_lock); /* * The target could have reduced it's window size between logins, so * we have to reset max/exp cmdsn so we can see the new values. */ spin_lock_bh(&session->back_lock); session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1; spin_unlock_bh(&session->back_lock); /* * Unblock xmitworker(), Login Phase will pass through. 
*/ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); return 0; } EXPORT_SYMBOL_GPL(iscsi_conn_bind); int iscsi_switch_str_param(char **param, char *new_val_buf) { char *new_val; if (*param) { if (!strcmp(*param, new_val_buf)) return 0; } new_val = kstrdup(new_val_buf, GFP_NOIO); if (!new_val) return -ENOMEM; kfree(*param); *param = new_val; return 0; } EXPORT_SYMBOL_GPL(iscsi_switch_str_param); int iscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; int val; switch(param) { case ISCSI_PARAM_FAST_ABORT: sscanf(buf, "%d", &session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: sscanf(buf, "%d", &session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: sscanf(buf, "%d", &session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: sscanf(buf, "%d", &session->tgt_reset_timeout); break; case ISCSI_PARAM_PING_TMO: sscanf(buf, "%d", &conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: sscanf(buf, "%d", &conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: sscanf(buf, "%d", &conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: sscanf(buf, "%d", &conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: sscanf(buf, "%d", &conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: sscanf(buf, "%d", &conn->datadgst_en); break; case ISCSI_PARAM_INITIAL_R2T_EN: sscanf(buf, "%d", &session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: sscanf(buf, "%hu", &session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: sscanf(buf, "%d", &session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: sscanf(buf, "%d", &session->first_burst); break; case ISCSI_PARAM_MAX_BURST: sscanf(buf, "%d", &session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: sscanf(buf, "%d", &session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: sscanf(buf, "%d", &session->dataseq_inorder_en); break; case ISCSI_PARAM_ERL: sscanf(buf, "%d", &session->erl); break; case ISCSI_PARAM_EXP_STATSN: sscanf(buf, "%u", &conn->exp_statsn); break; case ISCSI_PARAM_USERNAME: return iscsi_switch_str_param(&session->username, buf); case ISCSI_PARAM_USERNAME_IN: return iscsi_switch_str_param(&session->username_in, buf); case ISCSI_PARAM_PASSWORD: return iscsi_switch_str_param(&session->password, buf); case ISCSI_PARAM_PASSWORD_IN: return iscsi_switch_str_param(&session->password_in, buf); case ISCSI_PARAM_TARGET_NAME: return iscsi_switch_str_param(&session->targetname, buf); case ISCSI_PARAM_TARGET_ALIAS: return iscsi_switch_str_param(&session->targetalias, buf); case ISCSI_PARAM_TPGT: sscanf(buf, "%d", &session->tpgt); break; case ISCSI_PARAM_PERSISTENT_PORT: sscanf(buf, "%d", &conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: return iscsi_switch_str_param(&conn->persistent_address, buf); case ISCSI_PARAM_IFACE_NAME: return iscsi_switch_str_param(&session->ifacename, buf); case ISCSI_PARAM_INITIATOR_NAME: return iscsi_switch_str_param(&session->initiatorname, buf); case ISCSI_PARAM_BOOT_ROOT: return iscsi_switch_str_param(&session->boot_root, buf); case ISCSI_PARAM_BOOT_NIC: return iscsi_switch_str_param(&session->boot_nic, buf); case ISCSI_PARAM_BOOT_TARGET: return iscsi_switch_str_param(&session->boot_target, buf); case ISCSI_PARAM_PORTAL_TYPE: return iscsi_switch_str_param(&session->portal_type, buf); case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: return 
iscsi_switch_str_param(&session->discovery_parent_type, buf); case ISCSI_PARAM_DISCOVERY_SESS: sscanf(buf, "%d", &val); session->discovery_sess = !!val; break; case ISCSI_PARAM_LOCAL_IPADDR: return iscsi_switch_str_param(&conn->local_ipaddr, buf); default: return -ENOSYS; } return 0; } EXPORT_SYMBOL_GPL(iscsi_set_param); int iscsi_session_get_param(struct iscsi_cls_session *cls_session, enum iscsi_param param, char *buf) { struct iscsi_session *session = cls_session->dd_data; int len; switch(param) { case ISCSI_PARAM_FAST_ABORT: len = sysfs_emit(buf, "%d\n", session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: len = sysfs_emit(buf, "%d\n", session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); break; case ISCSI_PARAM_INITIAL_R2T_EN: len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: len = sysfs_emit(buf, "%hu\n", session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: len = sysfs_emit(buf, "%d\n", session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: len = sysfs_emit(buf, "%u\n", session->first_burst); break; case ISCSI_PARAM_MAX_BURST: len = sysfs_emit(buf, "%u\n", session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); break; case ISCSI_PARAM_DEF_TASKMGMT_TMO: len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); break; case ISCSI_PARAM_ERL: len = sysfs_emit(buf, "%d\n", session->erl); break; case ISCSI_PARAM_TARGET_NAME: len = sysfs_emit(buf, "%s\n", session->targetname); break; case ISCSI_PARAM_TARGET_ALIAS: len = sysfs_emit(buf, "%s\n", session->targetalias); break; case ISCSI_PARAM_TPGT: len = sysfs_emit(buf, "%d\n", session->tpgt); break; case ISCSI_PARAM_USERNAME: len = sysfs_emit(buf, "%s\n", session->username); break; case ISCSI_PARAM_USERNAME_IN: len = sysfs_emit(buf, "%s\n", session->username_in); break; case ISCSI_PARAM_PASSWORD: len = sysfs_emit(buf, "%s\n", session->password); break; case ISCSI_PARAM_PASSWORD_IN: len = sysfs_emit(buf, "%s\n", session->password_in); break; case ISCSI_PARAM_IFACE_NAME: len = sysfs_emit(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: len = sysfs_emit(buf, "%s\n", session->initiatorname); break; case ISCSI_PARAM_BOOT_ROOT: len = sysfs_emit(buf, "%s\n", session->boot_root); break; case ISCSI_PARAM_BOOT_NIC: len = sysfs_emit(buf, "%s\n", session->boot_nic); break; case ISCSI_PARAM_BOOT_TARGET: len = sysfs_emit(buf, "%s\n", session->boot_target); break; case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); break; case ISCSI_PARAM_DISCOVERY_SESS: len = sysfs_emit(buf, "%u\n", session->discovery_sess); break; case ISCSI_PARAM_PORTAL_TYPE: len = sysfs_emit(buf, "%s\n", session->portal_type); break; case ISCSI_PARAM_CHAP_AUTH_EN: len = sysfs_emit(buf, "%u\n", session->chap_auth_en); break; case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); break; case ISCSI_PARAM_BIDI_CHAP_EN: len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); break; case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); break; case ISCSI_PARAM_DEF_TIME2WAIT: len = sysfs_emit(buf, "%d\n", session->time2wait); break; case 
ISCSI_PARAM_DEF_TIME2RETAIN: len = sysfs_emit(buf, "%d\n", session->time2retain); break; case ISCSI_PARAM_TSID: len = sysfs_emit(buf, "%u\n", session->tsid); break; case ISCSI_PARAM_ISID: len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", session->isid[0], session->isid[1], session->isid[2], session->isid[3], session->isid[4], session->isid[5]); break; case ISCSI_PARAM_DISCOVERY_PARENT_IDX: len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); break; case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: if (session->discovery_parent_type) len = sysfs_emit(buf, "%s\n", session->discovery_parent_type); else len = sysfs_emit(buf, "\n"); break; default: return -ENOSYS; } return len; } EXPORT_SYMBOL_GPL(iscsi_session_get_param); int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, enum iscsi_param param, char *buf) { struct sockaddr_in6 *sin6 = NULL; struct sockaddr_in *sin = NULL; int len; switch (addr->ss_family) { case AF_INET: sin = (struct sockaddr_in *)addr; break; case AF_INET6: sin6 = (struct sockaddr_in6 *)addr; break; default: return -EINVAL; } switch (param) { case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: if (sin) len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); else len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); break; case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_LOCAL_PORT: if (sin) len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); else len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin6->sin6_port)); break; default: return -EINVAL; } return len; } EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param); int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; int len; switch(param) { case ISCSI_PARAM_PING_TMO: len = sysfs_emit(buf, "%u\n", conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: len = sysfs_emit(buf, "%u\n", conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: len = sysfs_emit(buf, "%d\n", conn->datadgst_en); break; case ISCSI_PARAM_IFMARKER_EN: len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); break; case ISCSI_PARAM_OFMARKER_EN: len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); break; case ISCSI_PARAM_EXP_STATSN: len = sysfs_emit(buf, "%u\n", conn->exp_statsn); break; case ISCSI_PARAM_PERSISTENT_PORT: len = sysfs_emit(buf, "%d\n", conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: len = sysfs_emit(buf, "%s\n", conn->persistent_address); break; case ISCSI_PARAM_STATSN: len = sysfs_emit(buf, "%u\n", conn->statsn); break; case ISCSI_PARAM_MAX_SEGMENT_SIZE: len = sysfs_emit(buf, "%u\n", conn->max_segment_size); break; case ISCSI_PARAM_KEEPALIVE_TMO: len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); break; case ISCSI_PARAM_LOCAL_PORT: len = sysfs_emit(buf, "%u\n", conn->local_port); break; case ISCSI_PARAM_TCP_TIMESTAMP_STAT: len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); break; case ISCSI_PARAM_TCP_NAGLE_DISABLE: len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); break; case ISCSI_PARAM_TCP_WSF_DISABLE: len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); break; case ISCSI_PARAM_TCP_TIMER_SCALE: len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); break; case ISCSI_PARAM_TCP_TIMESTAMP_EN: len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); 
break; case ISCSI_PARAM_IP_FRAGMENT_DISABLE: len = sysfs_emit(buf, "%u\n", conn->fragment_disable); break; case ISCSI_PARAM_IPV4_TOS: len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); break; case ISCSI_PARAM_IPV6_TC: len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); break; case ISCSI_PARAM_IPV6_FLOW_LABEL: len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); break; case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); break; case ISCSI_PARAM_TCP_XMIT_WSF: len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); break; case ISCSI_PARAM_TCP_RECV_WSF: len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); break; case ISCSI_PARAM_LOCAL_IPADDR: len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); break; default: return -ENOSYS; } return len; } EXPORT_SYMBOL_GPL(iscsi_conn_get_param); int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct iscsi_host *ihost = shost_priv(shost); int len; switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: len = sysfs_emit(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_emit(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: len = sysfs_emit(buf, "%s\n", ihost->initiatorname); break; default: return -ENOSYS; } return len; } EXPORT_SYMBOL_GPL(iscsi_host_get_param); int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf, int buflen) { struct iscsi_host *ihost = shost_priv(shost); switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: return iscsi_switch_str_param(&ihost->netdev, buf); case ISCSI_HOST_PARAM_HWADDRESS: return iscsi_switch_str_param(&ihost->hwaddress, buf); case ISCSI_HOST_PARAM_INITIATOR_NAME: return iscsi_switch_str_param(&ihost->initiatorname, buf); default: return -ENOSYS; } return 0; } EXPORT_SYMBOL_GPL(iscsi_host_set_param); MODULE_AUTHOR("Mike Christie"); MODULE_DESCRIPTION("iSCSI library functions"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/libiscsi.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * iSCSI Initiator over TCP/IP Data-Path * * Copyright (C) 2004 Dmitry Yusupov * Copyright (C) 2004 Alex Aizman * Copyright (C) 2005 - 2006 Mike Christie * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * maintained by [email protected] * * See the file COPYING included with this distribution for more details. * * Credits: * Christoph Hellwig * FUJITA Tomonori * Arne Redlich * Zhenyu Wang */ #include <crypto/hash.h> #include <linux/types.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/scsi_transport_iscsi.h> #include <trace/events/iscsi.h> #include <trace/events/sock.h> #include "iscsi_tcp.h" MODULE_AUTHOR("Mike Christie <[email protected]>, " "Dmitry Yusupov <[email protected]>, " "Alex Aizman <[email protected]>"); MODULE_DESCRIPTION("iSCSI/TCP data-path"); MODULE_LICENSE("GPL"); static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport; static const struct scsi_host_template iscsi_sw_tcp_sht; static struct iscsi_transport iscsi_sw_tcp_transport; static unsigned int iscsi_max_lun = ~0; module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); static bool iscsi_recv_from_iscsi_q; module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644); MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context."); static int iscsi_sw_tcp_dbg; module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module " "Set to 1 to turn on, and zero to turn off. Default is off."); #define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...) \ do { \ if (iscsi_sw_tcp_dbg) \ iscsi_conn_printk(KERN_INFO, _conn, \ "%s " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp, \ &(_conn)->cls_conn->dev, \ "%s " dbg_fmt, __func__, ##arg);\ } while (0); /** * iscsi_sw_tcp_recv - TCP receive in sendfile fashion * @rd_desc: read descriptor * @skb: socket buffer * @offset: offset in skb * @len: skb->len - offset */ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) { struct iscsi_conn *conn = rd_desc->arg.data; unsigned int consumed, total_consumed = 0; int status; ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset); do { status = 0; consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status); offset += consumed; total_consumed += consumed; } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE); ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n", skb->len - offset, status); return total_consumed; } /** * iscsi_sw_sk_state_check - check socket state * @sk: socket * * If the socket is in CLOSE or CLOSE_WAIT we should * not close the connection if there is still some * data pending. * * Must be called with sk_callback_lock. 
*/ static inline int iscsi_sw_sk_state_check(struct sock *sk) { struct iscsi_conn *conn = sk->sk_user_data; if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && (conn->session->state != ISCSI_STATE_LOGGING_OUT) && !atomic_read(&sk->sk_rmem_alloc)) { ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); return -ECONNRESET; } return 0; } static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sock *sk = tcp_sw_conn->sock->sk; read_descriptor_t rd_desc; /* * Use rd_desc to pass 'conn' to iscsi_tcp_recv. * We set count to 1 because we want the network layer to * hand us all the skbs that are available. iscsi_tcp_recv * handled pdus that cross buffers or pdus that still need data. */ rd_desc.arg.data = conn; rd_desc.count = 1; tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); /* If we had to (atomically) map a highmem page, * unmap it now. */ iscsi_tcp_segment_unmap(&tcp_conn->in.segment); iscsi_sw_sk_state_check(sk); } static void iscsi_sw_tcp_recv_data_work(struct work_struct *work) { struct iscsi_conn *conn = container_of(work, struct iscsi_conn, recvwork); struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sock *sk = tcp_sw_conn->sock->sk; lock_sock(sk); iscsi_sw_tcp_recv_data(conn); release_sock(sk); } static void iscsi_sw_tcp_data_ready(struct sock *sk) { struct iscsi_sw_tcp_conn *tcp_sw_conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_conn *conn; trace_sk_data_ready(sk); read_lock_bh(&sk->sk_callback_lock); conn = sk->sk_user_data; if (!conn) { read_unlock_bh(&sk->sk_callback_lock); return; } tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; if (tcp_sw_conn->queue_recv) iscsi_conn_queue_recv(conn); else iscsi_sw_tcp_recv_data(conn); read_unlock_bh(&sk->sk_callback_lock); } static void iscsi_sw_tcp_state_change(struct sock *sk) { struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct iscsi_conn *conn; void (*old_state_change)(struct sock *); read_lock_bh(&sk->sk_callback_lock); conn = sk->sk_user_data; if (!conn) { read_unlock_bh(&sk->sk_callback_lock); return; } iscsi_sw_sk_state_check(sk); tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; old_state_change = tcp_sw_conn->old_state_change; read_unlock_bh(&sk->sk_callback_lock); old_state_change(sk); } /** * iscsi_sw_tcp_write_space - Called when more output buffer space is available * @sk: socket space is available for **/ static void iscsi_sw_tcp_write_space(struct sock *sk) { struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; void (*old_write_space)(struct sock *); read_lock_bh(&sk->sk_callback_lock); conn = sk->sk_user_data; if (!conn) { read_unlock_bh(&sk->sk_callback_lock); return; } tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; old_write_space = tcp_sw_conn->old_write_space; read_unlock_bh(&sk->sk_callback_lock); old_write_space(sk); ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n"); iscsi_conn_queue_xmit(conn); } static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sock *sk = tcp_sw_conn->sock->sk; /* assign new callbacks */ write_lock_bh(&sk->sk_callback_lock); sk->sk_user_data = conn; tcp_sw_conn->old_data_ready = sk->sk_data_ready; 
tcp_sw_conn->old_state_change = sk->sk_state_change; tcp_sw_conn->old_write_space = sk->sk_write_space; sk->sk_data_ready = iscsi_sw_tcp_data_ready; sk->sk_state_change = iscsi_sw_tcp_state_change; sk->sk_write_space = iscsi_sw_tcp_write_space; write_unlock_bh(&sk->sk_callback_lock); } static void iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sock *sk = tcp_sw_conn->sock->sk; /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ write_lock_bh(&sk->sk_callback_lock); sk->sk_user_data = NULL; sk->sk_data_ready = tcp_sw_conn->old_data_ready; sk->sk_state_change = tcp_sw_conn->old_state_change; sk->sk_write_space = tcp_sw_conn->old_write_space; sk->sk_no_check_tx = 0; write_unlock_bh(&sk->sk_callback_lock); } /** * iscsi_sw_tcp_xmit_segment - transmit segment * @tcp_conn: the iSCSI TCP connection * @segment: the buffer to transmnit * * This function transmits as much of the buffer as * the network layer will accept, and returns the number of * bytes transmitted. * * If CRC hashing is enabled, the function will compute the * hash as it goes. When the entire segment has been transmitted, * it will retrieve the hash value and send it as well. */ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) { struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct socket *sk = tcp_sw_conn->sock; unsigned int copied = 0; int r = 0; while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) { struct scatterlist *sg; struct msghdr msg = {}; struct bio_vec bv; unsigned int offset, copy; r = 0; offset = segment->copied; copy = segment->size - offset; if (segment->total_copied + segment->size < segment->total_size) msg.msg_flags |= MSG_MORE; if (tcp_sw_conn->queue_recv) msg.msg_flags |= MSG_DONTWAIT; if (!segment->data) { if (!tcp_conn->iscsi_conn->datadgst_en) msg.msg_flags |= MSG_SPLICE_PAGES; sg = segment->sg; offset += segment->sg_offset + sg->offset; bvec_set_page(&bv, sg_page(sg), copy, offset); } else { bvec_set_virt(&bv, segment->data + offset, copy); } iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, copy); r = sock_sendmsg(sk, &msg); if (r < 0) { iscsi_tcp_segment_unmap(segment); return r; } copied += r; } return copied; } /** * iscsi_sw_tcp_xmit - TCP transmit * @conn: iscsi connection **/ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct iscsi_segment *segment = &tcp_sw_conn->out.segment; unsigned int consumed = 0; int rc = 0; while (1) { rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); /* * We may not have been able to send data because the conn * is getting stopped. libiscsi will know so propagate err * for it to do the right thing. */ if (rc == -EAGAIN) return rc; else if (rc < 0) { rc = ISCSI_ERR_XMIT_FAILED; goto error; } else if (rc == 0) break; consumed += rc; if (segment->total_copied >= segment->total_size) { if (segment->done != NULL) { rc = segment->done(tcp_conn, segment); if (rc != 0) goto error; } } } ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed); conn->txdata_octets += consumed; return consumed; error: /* Transmit error. We could initiate error recovery * here. 
*/ ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc); iscsi_conn_failure(conn, rc); return -EIO; } /** * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit * @conn: iscsi connection */ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct iscsi_segment *segment = &tcp_sw_conn->out.segment; return segment->total_copied - segment->total_size; } static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; unsigned int noreclaim_flag; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; int rc = 0; if (!tcp_sw_conn->sock) { iscsi_conn_printk(KERN_ERR, conn, "Transport not bound to socket!\n"); return -EINVAL; } noreclaim_flag = memalloc_noreclaim_save(); while (iscsi_sw_tcp_xmit_qlen(conn)) { rc = iscsi_sw_tcp_xmit(conn); if (rc == 0) { rc = -EAGAIN; break; } if (rc < 0) break; rc = 0; } memalloc_noreclaim_restore(noreclaim_flag); return rc; } /* * This is called when we're done sending the header. * Simply copy the data_segment to the send segment, and return. */ static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) { struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment; ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn, "Header done. Next segment size %u total_size %u\n", tcp_sw_conn->out.segment.size, tcp_sw_conn->out.segment.total_size); return 0; } static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ? "digest enabled" : "digest disabled"); /* Clear the data segment - needs to be filled in by the * caller using iscsi_tcp_send_data_prep() */ memset(&tcp_sw_conn->out.data_segment, 0, sizeof(struct iscsi_segment)); /* If header digest is enabled, compute the CRC and * place the digest into the same buffer. We make * sure that both iscsi_tcp_task and mtask have * sufficient room. */ if (conn->hdrdgst_en) { iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen, hdr + hdrlen); hdrlen += ISCSI_DIGEST_SIZE; } /* Remember header pointer for later, when we need * to decide whether there's a payload to go along * with the header. */ tcp_sw_conn->out.hdr = hdr; iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen, iscsi_sw_tcp_send_hdr_done, NULL); } /* * Prepare the send buffer for the payload data. * Padding and checksumming will all be taken care * of by the iscsi_segment routines. */ static int iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, unsigned int count, unsigned int offset, unsigned int len) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct ahash_request *tx_hash = NULL; unsigned int hdr_spec_len; ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len, conn->datadgst_en ? "digest enabled" : "digest disabled"); /* Make sure the datalen matches what the caller said he would send. 
*/ hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength); WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); if (conn->datadgst_en) tx_hash = tcp_sw_conn->tx_hash; return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment, sg, count, offset, len, NULL, tx_hash); } static void iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data, size_t len) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct ahash_request *tx_hash = NULL; unsigned int hdr_spec_len; ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ? "digest enabled" : "digest disabled"); /* Make sure the datalen matches what the caller said he would send. */ hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength); WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); if (conn->datadgst_en) tx_hash = tcp_sw_conn->tx_hash; iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment, data, len, NULL, tx_hash); } static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task, unsigned int offset, unsigned int count) { struct iscsi_conn *conn = task->conn; int err = 0; iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len); if (!count) return 0; if (!task->sc) iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count); else { struct scsi_data_buffer *sdb = &task->sc->sdb; err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl, sdb->table.nents, offset, count); } if (err) { /* got invalid offset/len */ return -EIO; } return 0; } static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode) { struct iscsi_tcp_task *tcp_task = task->dd_data; task->hdr = task->dd_data + sizeof(*tcp_task); task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE; return 0; } static struct iscsi_cls_conn * iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) { struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct crypto_ahash *tfm; cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn), conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work); tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q; mutex_init(&tcp_sw_conn->sock_lock); tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto free_conn; tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!tcp_sw_conn->tx_hash) goto free_tfm; ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL); tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL); if (!tcp_sw_conn->rx_hash) goto free_tx_hash; ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL); tcp_conn->rx_hash = tcp_sw_conn->rx_hash; return cls_conn; free_tx_hash: ahash_request_free(tcp_sw_conn->tx_hash); free_tfm: crypto_free_ahash(tfm); free_conn: iscsi_conn_printk(KERN_ERR, conn, "Could not create connection due to crc32c " "loading error. Make sure the crc32c " "module is built as a module or into the " "kernel\n"); iscsi_tcp_conn_teardown(cls_conn); return NULL; } static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct socket *sock = tcp_sw_conn->sock; /* * The iscsi transport class will make sure we are not called in * parallel with start, stop, bind and destroys. 
However, this can be * called twice if userspace does a stop then a destroy. */ if (!sock) return; /* * Make sure we start socket shutdown now in case userspace is up * but delayed in releasing the socket. */ kernel_sock_shutdown(sock, SHUT_RDWR); sock_hold(sock->sk); iscsi_sw_tcp_conn_restore_callbacks(conn); sock_put(sock->sk); iscsi_suspend_rx(conn); mutex_lock(&tcp_sw_conn->sock_lock); tcp_sw_conn->sock = NULL; mutex_unlock(&tcp_sw_conn->sock_lock); sockfd_put(sock); } static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; iscsi_sw_tcp_release_conn(conn); ahash_request_free(tcp_sw_conn->rx_hash); if (tcp_sw_conn->tx_hash) { struct crypto_ahash *tfm; tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash); ahash_request_free(tcp_sw_conn->tx_hash); crypto_free_ahash(tfm); } iscsi_tcp_conn_teardown(cls_conn); } static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct socket *sock = tcp_sw_conn->sock; /* userspace may have goofed up and not bound us */ if (!sock) return; sock->sk->sk_err = EIO; wake_up_interruptible(sk_sleep(sock->sk)); /* stop xmit side */ iscsi_suspend_tx(conn); /* stop recv side and release socket */ iscsi_sw_tcp_release_conn(conn); iscsi_conn_stop(cls_conn, flag); } static int iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sock *sk; struct socket *sock; int err; /* lookup for existing socket */ sock = sockfd_lookup((int)transport_eph, &err); if (!sock) { iscsi_conn_printk(KERN_ERR, conn, "sockfd_lookup failed %d\n", err); return -EEXIST; } err = -EINVAL; if (!sk_is_tcp(sock->sk)) goto free_socket; err = iscsi_conn_bind(cls_session, cls_conn, is_leading); if (err) goto free_socket; mutex_lock(&tcp_sw_conn->sock_lock); /* bind iSCSI connection and socket */ tcp_sw_conn->sock = sock; mutex_unlock(&tcp_sw_conn->sock_lock); /* setup Socket parameters */ sk = sock->sk; sk->sk_reuse = SK_CAN_REUSE; sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ sk->sk_allocation = GFP_ATOMIC; sk->sk_use_task_frag = false; sk_set_memalloc(sk); sock_no_linger(sk); iscsi_sw_tcp_conn_set_callbacks(conn); /* * set receive state machine into initial state */ iscsi_tcp_hdr_recv_prep(tcp_conn); return 0; free_socket: sockfd_put(sock); return err; } static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; switch(param) { case ISCSI_PARAM_HDRDGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); break; case ISCSI_PARAM_DATADGST_EN: mutex_lock(&tcp_sw_conn->sock_lock); if (!tcp_sw_conn->sock) { mutex_unlock(&tcp_sw_conn->sock_lock); return -ENOTCONN; } iscsi_set_param(cls_conn, param, buf, buflen); mutex_unlock(&tcp_sw_conn->sock_lock); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); default: return iscsi_set_param(cls_conn, param, buf, buflen); } return 0; } static 
int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct iscsi_tcp_conn *tcp_conn; struct sockaddr_in6 addr; struct socket *sock; int rc; switch(param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_LOCAL_PORT: spin_lock_bh(&conn->session->frwd_lock); if (!conn->session->leadconn) { spin_unlock_bh(&conn->session->frwd_lock); return -ENOTCONN; } /* * The conn has been setup and bound, so just grab a ref * incase a destroy runs while we are in the net layer. */ iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&conn->session->frwd_lock); tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; mutex_lock(&tcp_sw_conn->sock_lock); sock = tcp_sw_conn->sock; if (!sock) { rc = -ENOTCONN; goto sock_unlock; } if (param == ISCSI_PARAM_LOCAL_PORT) rc = kernel_getsockname(sock, (struct sockaddr *)&addr); else rc = kernel_getpeername(sock, (struct sockaddr *)&addr); sock_unlock: mutex_unlock(&tcp_sw_conn->sock_lock); iscsi_put_conn(conn->cls_conn); if (rc < 0) return rc; return iscsi_conn_get_addr_param((struct sockaddr_storage *) &addr, param, buf); default: return iscsi_conn_get_param(cls_conn, param, buf); } return 0; } static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct sockaddr_in6 addr; struct socket *sock; int rc; switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: session = tcp_sw_host->session; if (!session) return -ENOTCONN; spin_lock_bh(&session->frwd_lock); conn = session->leadconn; if (!conn) { spin_unlock_bh(&session->frwd_lock); return -ENOTCONN; } tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; /* * The conn has been setup and bound, so just grab a ref * incase a destroy runs while we are in the net layer. 
*/ iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&session->frwd_lock); mutex_lock(&tcp_sw_conn->sock_lock); sock = tcp_sw_conn->sock; if (!sock) rc = -ENOTCONN; else rc = kernel_getsockname(sock, (struct sockaddr *)&addr); mutex_unlock(&tcp_sw_conn->sock_lock); iscsi_put_conn(conn->cls_conn); if (rc < 0) return rc; return iscsi_conn_get_addr_param((struct sockaddr_storage *) &addr, (enum iscsi_param)param, buf); default: return iscsi_host_get_param(shost, param, buf); } return 0; } static void iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; stats->custom_length = 3; strcpy(stats->custom[0].desc, "tx_sendpage_failures"); stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt; strcpy(stats->custom[1].desc, "rx_discontiguous_hdr"); stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt; strcpy(stats->custom[2].desc, "eh_abort_cnt"); stats->custom[2].value = conn->eh_abort_cnt; iscsi_tcp_conn_get_stats(cls_conn, stats); } static struct iscsi_cls_session * iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_sw_tcp_host *tcp_sw_host; struct Scsi_Host *shost; int rc; if (ep) { printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep); return NULL; } shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, sizeof(struct iscsi_sw_tcp_host), 1); if (!shost) return NULL; shost->transportt = iscsi_sw_tcp_scsi_transport; shost->cmd_per_lun = qdepth; shost->max_lun = iscsi_max_lun; shost->max_id = 0; shost->max_channel = 0; shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max); if (rc < 0) goto free_host; shost->can_queue = rc; if (iscsi_host_add(shost, NULL)) goto free_host; cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost, cmds_max, 0, sizeof(struct iscsi_tcp_task) + sizeof(struct iscsi_sw_tcp_hdrbuf), initial_cmdsn, 0); if (!cls_session) goto remove_host; session = cls_session->dd_data; if (iscsi_tcp_r2tpool_alloc(session)) goto remove_session; /* We are now fully setup so expose the session to sysfs. */ tcp_sw_host = iscsi_host_priv(shost); tcp_sw_host->session = session; return cls_session; remove_session: iscsi_session_teardown(cls_session); remove_host: iscsi_host_remove(shost, false); free_host: iscsi_host_free(shost); return NULL; } static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = cls_session->dd_data; if (WARN_ON_ONCE(session->leadconn)) return; iscsi_session_remove(cls_session); /* * Our get_host_param needs to access the session, so remove the * host from sysfs before freeing the session to make sure userspace * is no longer accessing the callout. 
*/ iscsi_host_remove(shost, false); iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_session_free(cls_session); iscsi_host_free(shost); } static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: case ISCSI_HOST_PARAM_INITIATOR_NAME: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_LOCAL_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_PING_TMO: case ISCSI_PARAM_RECV_TMO: case ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_ERL: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_FAST_ABORT: case ISCSI_PARAM_ABORT_TMO: case ISCSI_PARAM_LU_RESET_TMO: case ISCSI_PARAM_TGT_RESET_TMO: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: return S_IRUGO; default: return 0; } } return 0; } static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); struct iscsi_session *session = tcp_sw_host->session; struct iscsi_conn *conn = session->leadconn; if (conn->datadgst_en) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, sdev->request_queue); blk_queue_dma_alignment(sdev->request_queue, 0); return 0; } static const struct scsi_host_template iscsi_sw_tcp_sht = { .module = THIS_MODULE, .name = "iSCSI Initiator over TCP/IP", .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, .can_queue = ISCSI_TOTAL_CMDS_MAX, .sg_tablesize = 4096, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .dma_boundary = PAGE_SIZE - 1, .slave_configure = iscsi_sw_tcp_slave_configure, .proc_name = "iscsi_tcp", .this_id = -1, .track_queue_depth = 1, .cmd_size = sizeof(struct iscsi_cmd), }; static struct iscsi_transport iscsi_sw_tcp_transport = { .owner = THIS_MODULE, .name = "tcp", .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST, /* session management */ .create_session = iscsi_sw_tcp_session_create, .destroy_session = iscsi_sw_tcp_session_destroy, /* connection management */ .create_conn = iscsi_sw_tcp_conn_create, .bind_conn = iscsi_sw_tcp_conn_bind, .destroy_conn = iscsi_sw_tcp_conn_destroy, .attr_is_visible = iscsi_sw_tcp_attr_is_visible, .set_param = iscsi_sw_tcp_conn_set_param, .get_conn_param = iscsi_sw_tcp_conn_get_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_conn_start, .stop_conn = iscsi_sw_tcp_conn_stop, /* iscsi host params */ .get_host_param = iscsi_sw_tcp_host_get_param, .set_host_param = iscsi_host_set_param, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_sw_tcp_conn_get_stats, /* iscsi task/cmd helpers */ .init_task = iscsi_tcp_task_init, .xmit_task = iscsi_tcp_task_xmit, 
	.cleanup_task		= iscsi_tcp_cleanup_task,
	/* low level pdu helpers */
	.xmit_pdu		= iscsi_sw_tcp_pdu_xmit,
	.init_pdu		= iscsi_sw_tcp_pdu_init,
	.alloc_pdu		= iscsi_sw_tcp_pdu_alloc,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static int __init iscsi_sw_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}

	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
						&iscsi_sw_tcp_transport);
	if (!iscsi_sw_tcp_scsi_transport)
		return -ENODEV;

	return 0;
}

static void __exit iscsi_sw_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
}

module_init(iscsi_sw_tcp_init);
module_exit(iscsi_sw_tcp_exit);
linux-master
drivers/scsi/iscsi_tcp.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/pci.h>
#include "fdomain.h"

static int fdomain_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *d)
{
	int err;
	struct Scsi_Host *sh;

	err = pci_enable_device(pdev);
	if (err)
		goto fail;

	err = pci_request_regions(pdev, "fdomain_pci");
	if (err)
		goto disable_device;

	err = -ENODEV;
	if (pci_resource_len(pdev, 0) == 0)
		goto release_region;

	sh = fdomain_create(pci_resource_start(pdev, 0), pdev->irq, 7,
			    &pdev->dev);
	if (!sh)
		goto release_region;

	pci_set_drvdata(pdev, sh);
	return 0;

release_region:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
fail:
	return err;
}

static void fdomain_pci_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *sh = pci_get_drvdata(pdev);

	fdomain_destroy(sh);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_device_id fdomain_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) },
	{}
};
MODULE_DEVICE_TABLE(pci, fdomain_pci_table);

static struct pci_driver fdomain_pci_driver = {
	.name		= "fdomain_pci",
	.id_table	= fdomain_pci_table,
	.probe		= fdomain_pci_probe,
	.remove		= fdomain_pci_remove,
	.driver.pm	= FDOMAIN_PM_OPS,
};

module_pci_driver(fdomain_pci_driver);

MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
MODULE_DESCRIPTION("Future Domain TMC-3260 PCI SCSI driver");
MODULE_LICENSE("GPL");
linux-master
drivers/scsi/fdomain_pci.c
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell UMI driver * * Copyright 2011 Marvell. <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/device.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/ktime.h> #include <linux/blkdev.h> #include <linux/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_eh.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include "mvumi.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("Marvell UMI Driver"); static const struct pci_device_id mvumi_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) }, { 0 } }; MODULE_DEVICE_TABLE(pci, mvumi_pci_table); static void tag_init(struct mvumi_tag *st, unsigned short size) { unsigned short i; BUG_ON(size != st->size); st->top = size; for (i = 0; i < size; i++) st->stack[i] = size - 1 - i; } static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st) { BUG_ON(st->top <= 0); return st->stack[--st->top]; } static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st, unsigned short tag) { BUG_ON(st->top >= st->size); st->stack[st->top++] = tag; } static bool tag_is_empty(struct mvumi_tag *st) { if (st->top == 0) return true; else return false; } static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array) { int i; for (i = 0; i < MAX_BASE_ADDRESS; i++) if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) && addr_array[i]) pci_iounmap(dev, addr_array[i]); } static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array) { int i; for (i = 0; i < MAX_BASE_ADDRESS; i++) { if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { addr_array[i] = pci_iomap(dev, i, 0); if (!addr_array[i]) { dev_err(&dev->dev, "failed to map Bar[%d]\n", i); mvumi_unmap_pci_addr(dev, addr_array); return -ENOMEM; } } else addr_array[i] = NULL; dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]); } return 0; } static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, enum resource_type type, unsigned int size) { struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC); if (!res) { dev_err(&mhba->pdev->dev, "Failed to allocate memory for resource manager.\n"); return NULL; } switch (type) { case RESOURCE_CACHED_MEMORY: res->virt_addr = kzalloc(size, GFP_ATOMIC); if (!res->virt_addr) { dev_err(&mhba->pdev->dev, "unable to allocate memory,size = %d.\n", size); kfree(res); return NULL; } break; case RESOURCE_UNCACHED_MEMORY: size = round_up(size, 8); res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &res->bus_addr, GFP_KERNEL); if (!res->virt_addr) { dev_err(&mhba->pdev->dev, "unable to allocate consistent mem," "size = %d.\n", size); kfree(res); return NULL; } break; default: dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type); kfree(res); return NULL; } res->type = type; res->size = size; INIT_LIST_HEAD(&res->entry); list_add_tail(&res->entry, &mhba->res_list); return res; } static void mvumi_release_mem_resource(struct mvumi_hba *mhba) { struct mvumi_res *res, *tmp; list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) { switch (res->type) { case RESOURCE_UNCACHED_MEMORY: dma_free_coherent(&mhba->pdev->dev, 
res->size, res->virt_addr, res->bus_addr); break; case RESOURCE_CACHED_MEMORY: kfree(res->virt_addr); break; default: dev_err(&mhba->pdev->dev, "unknown resource type %d\n", res->type); break; } list_del(&res->entry); kfree(res); } mhba->fw_flag &= ~MVUMI_FW_ALLOC; } /** * mvumi_make_sgl - Prepares SGL * @mhba: Adapter soft state * @scmd: SCSI command from the mid-layer * @sgl_p: SGL to be filled in * @sg_count: return the number of SG elements * * If successful, this function returns 0. otherwise, it returns -1. */ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, void *sgl_p, unsigned char *sg_count) { struct scatterlist *sg; struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p; unsigned int i; unsigned int sgnum = scsi_sg_count(scmd); dma_addr_t busaddr; *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum, scmd->sc_data_direction); if (*sg_count > mhba->max_sge) { dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger than max sg[0x%x].\n", *sg_count, mhba->max_sge); dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum, scmd->sc_data_direction); return -1; } scsi_for_each_sg(scmd, sg, *sg_count, i) { busaddr = sg_dma_address(sg); m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); m_sg->flags = 0; sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg))); if ((i + 1) == *sg_count) m_sg->flags |= 1U << mhba->eot_flag; sgd_inc(mhba, m_sg); } return 0; } static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, unsigned int size) { struct mvumi_sgl *m_sg; void *virt_addr; dma_addr_t phy_addr; if (size == 0) return 0; virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, GFP_KERNEL); if (!virt_addr) return -1; m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; cmd->frame->sg_counts = 1; cmd->data_buf = virt_addr; m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); m_sg->flags = 1U << mhba->eot_flag; sgd_setsz(mhba, m_sg, cpu_to_le32(size)); return 0; } static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, unsigned int buf_size) { struct mvumi_cmd *cmd; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n"); return NULL; } INIT_LIST_HEAD(&cmd->queue_pointer); cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size, &cmd->frame_phys, GFP_KERNEL); if (!cmd->frame) { dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" " frame,size = %d.\n", mhba->ib_max_size); kfree(cmd); return NULL; } if (buf_size) { if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { dev_err(&mhba->pdev->dev, "failed to allocate memory" " for internal frame\n"); dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size, cmd->frame, cmd->frame_phys); kfree(cmd); return NULL; } } else cmd->frame->sg_counts = 0; return cmd; } static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) { struct mvumi_sgl *m_sg; unsigned int size; dma_addr_t phy_addr; if (cmd && cmd->frame) { if (cmd->frame->sg_counts) { m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; sgd_getsz(mhba, m_sg, size); phy_addr = (dma_addr_t) m_sg->baseaddr_l | (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf, phy_addr); } dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size, cmd->frame, cmd->frame_phys); kfree(cmd); } } /** * mvumi_get_cmd - Get a command from the free pool * @mhba: Adapter soft state * 
* Returns a free command from the pool */ static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba) { struct mvumi_cmd *cmd = NULL; if (likely(!list_empty(&mhba->cmd_pool))) { cmd = list_entry((&mhba->cmd_pool)->next, struct mvumi_cmd, queue_pointer); list_del_init(&cmd->queue_pointer); } else dev_warn(&mhba->pdev->dev, "command pool is empty!\n"); return cmd; } /** * mvumi_return_cmd - Return a cmd to free command pool * @mhba: Adapter soft state * @cmd: Command packet to be returned to free command pool */ static inline void mvumi_return_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) { cmd->scmd = NULL; list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); } /** * mvumi_free_cmds - Free all the cmds in the free cmd pool * @mhba: Adapter soft state */ static void mvumi_free_cmds(struct mvumi_hba *mhba) { struct mvumi_cmd *cmd; while (!list_empty(&mhba->cmd_pool)) { cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, queue_pointer); list_del(&cmd->queue_pointer); if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) kfree(cmd->frame); kfree(cmd); } } /** * mvumi_alloc_cmds - Allocates the command packets * @mhba: Adapter soft state * */ static int mvumi_alloc_cmds(struct mvumi_hba *mhba) { int i; struct mvumi_cmd *cmd; for (i = 0; i < mhba->max_io; i++) { cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) goto err_exit; INIT_LIST_HEAD(&cmd->queue_pointer); list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { cmd->frame = mhba->ib_frame + i * mhba->ib_max_size; cmd->frame_phys = mhba->ib_frame_phys + i * mhba->ib_max_size; } else cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); if (!cmd->frame) goto err_exit; } return 0; err_exit: dev_err(&mhba->pdev->dev, "failed to allocate memory for cmd[0x%x].\n", i); while (!list_empty(&mhba->cmd_pool)) { cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, queue_pointer); list_del(&cmd->queue_pointer); if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) kfree(cmd->frame); kfree(cmd); } return -ENOMEM; } static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba) { unsigned int ib_rp_reg; struct mvumi_hw_regs *regs = mhba->regs; ib_rp_reg = ioread32(mhba->regs->inb_read_pointer); if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) == (mhba->ib_cur_slot & regs->cl_slot_num_mask)) && ((ib_rp_reg & regs->cl_pointer_toggle) != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) { dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); return 0; } if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); return 0; } else { return mhba->max_io - atomic_read(&mhba->fw_outstanding); } } static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba) { unsigned int count; if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1)) return 0; count = ioread32(mhba->ib_shadow); if (count == 0xffff) return 0; return count; } static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) { unsigned int cur_ib_entry; cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask; cur_ib_entry++; if (cur_ib_entry >= mhba->list_num_io) { cur_ib_entry -= mhba->list_num_io; mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle; } mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask; mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { *ib_entry = mhba->ib_list + cur_ib_entry * sizeof(struct mvumi_dyn_list_entry); } else { 
*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size; } atomic_inc(&mhba->fw_outstanding); } static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) { iowrite32(0xffff, mhba->ib_shadow); iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer); } static char mvumi_check_ob_frame(struct mvumi_hba *mhba, unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame) { unsigned short tag, request_id; udelay(1); p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; request_id = p_outb_frame->request_id; tag = p_outb_frame->tag; if (tag > mhba->tag_pool.size) { dev_err(&mhba->pdev->dev, "ob frame data error\n"); return -1; } if (mhba->tag_cmd[tag] == NULL) { dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag); return -1; } else if (mhba->tag_cmd[tag]->request_id != request_id && mhba->request_id_enabled) { dev_err(&mhba->pdev->dev, "request ID from FW:0x%x," "cmd request ID:0x%x\n", request_id, mhba->tag_cmd[tag]->request_id); return -1; } return 0; } static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end) { unsigned int ob_write, ob_write_shadow; struct mvumi_hw_regs *regs = mhba->regs; do { ob_write = ioread32(regs->outb_copy_pointer); ob_write_shadow = ioread32(mhba->ob_shadow); } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow); *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; if ((ob_write & regs->cl_pointer_toggle) != (mhba->ob_cur_slot & regs->cl_pointer_toggle)) { *assign_obf_end += mhba->list_num_io; } return 0; } static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end) { unsigned int ob_write; struct mvumi_hw_regs *regs = mhba->regs; ob_write = ioread32(regs->outb_read_pointer); ob_write = ioread32(regs->outb_copy_pointer); *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; if (*assign_obf_end < *cur_obf) *assign_obf_end += mhba->list_num_io; else if (*assign_obf_end == *cur_obf) return -1; return 0; } static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) { unsigned int cur_obf, assign_obf_end, i; struct mvumi_ob_data *ob_data; struct mvumi_rsp_frame *p_outb_frame; struct mvumi_hw_regs *regs = mhba->regs; if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end)) return; for (i = (assign_obf_end - cur_obf); i != 0; i--) { cur_obf++; if (cur_obf >= mhba->list_num_io) { cur_obf -= mhba->list_num_io; mhba->ob_cur_slot ^= regs->cl_pointer_toggle; } p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; /* Copy pointer may point to entry in outbound list * before entry has valid data */ if (unlikely(p_outb_frame->tag > mhba->tag_pool.size || mhba->tag_cmd[p_outb_frame->tag] == NULL || p_outb_frame->request_id != mhba->tag_cmd[p_outb_frame->tag]->request_id)) if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame)) continue; if (!list_empty(&mhba->ob_data_list)) { ob_data = (struct mvumi_ob_data *) list_first_entry(&mhba->ob_data_list, struct mvumi_ob_data, list); list_del_init(&ob_data->list); } else { ob_data = NULL; if (cur_obf == 0) { cur_obf = mhba->list_num_io - 1; mhba->ob_cur_slot ^= regs->cl_pointer_toggle; } else cur_obf -= 1; break; } memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size); p_outb_frame->tag = 0xff; list_add_tail(&ob_data->list, &mhba->free_ob_list); } mhba->ob_cur_slot &= ~regs->cl_slot_num_mask; mhba->ob_cur_slot |= (cur_obf & 
regs->cl_slot_num_mask); iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer); } static void mvumi_reset(struct mvumi_hba *mhba) { struct mvumi_hw_regs *regs = mhba->regs; iowrite32(0, regs->enpointa_mask_reg); if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE) return; iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg); } static unsigned char mvumi_start(struct mvumi_hba *mhba); static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) { mhba->fw_state = FW_STATE_ABORT; mvumi_reset(mhba); if (mvumi_start(mhba)) return FAILED; else return SUCCESS; } static int mvumi_wait_for_fw(struct mvumi_hba *mhba) { struct mvumi_hw_regs *regs = mhba->regs; u32 tmp; unsigned long before; before = jiffies; iowrite32(0, regs->enpointa_mask_reg); tmp = ioread32(regs->arm_to_pciea_msg1); while (tmp != HANDSHAKE_READYSTATE) { iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg); if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { dev_err(&mhba->pdev->dev, "FW reset failed [0x%x].\n", tmp); return FAILED; } msleep(500); rmb(); tmp = ioread32(regs->arm_to_pciea_msg1); } return SUCCESS; } static void mvumi_backup_bar_addr(struct mvumi_hba *mhba) { unsigned char i; for (i = 0; i < MAX_BASE_ADDRESS; i++) { pci_read_config_dword(mhba->pdev, 0x10 + i * 4, &mhba->pci_base[i]); } } static void mvumi_restore_bar_addr(struct mvumi_hba *mhba) { unsigned char i; for (i = 0; i < MAX_BASE_ADDRESS; i++) { if (mhba->pci_base[i]) pci_write_config_dword(mhba->pdev, 0x10 + i * 4, mhba->pci_base[i]); } } static int mvumi_pci_set_master(struct pci_dev *pdev) { int ret = 0; pci_set_master(pdev); if (IS_DMA64) { if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); } else ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); return ret; } static int mvumi_reset_host_9580(struct mvumi_hba *mhba) { mhba->fw_state = FW_STATE_ABORT; iowrite32(0, mhba->regs->reset_enable); iowrite32(0xf, mhba->regs->reset_request); iowrite32(0x10, mhba->regs->reset_enable); iowrite32(0x10, mhba->regs->reset_request); msleep(100); pci_disable_device(mhba->pdev); if (pci_enable_device(mhba->pdev)) { dev_err(&mhba->pdev->dev, "enable device failed\n"); return FAILED; } if (mvumi_pci_set_master(mhba->pdev)) { dev_err(&mhba->pdev->dev, "set master failed\n"); return FAILED; } mvumi_restore_bar_addr(mhba); if (mvumi_wait_for_fw(mhba) == FAILED) return FAILED; return mvumi_wait_for_outstanding(mhba); } static int mvumi_reset_host_9143(struct mvumi_hba *mhba) { return mvumi_wait_for_outstanding(mhba); } static int mvumi_host_reset(struct scsi_cmnd *scmd) { struct mvumi_hba *mhba; mhba = (struct mvumi_hba *) scmd->device->host->hostdata; scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n", scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries); return mhba->instancet->reset_host(mhba); } static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) { unsigned long flags; cmd->cmd_status = REQ_STATUS_PENDING; if (atomic_read(&cmd->sync_cmd)) { dev_err(&mhba->pdev->dev, "last blocked cmd not finished, sync_cmd = %d\n", atomic_read(&cmd->sync_cmd)); BUG_ON(1); return -1; } atomic_inc(&cmd->sync_cmd); spin_lock_irqsave(mhba->shost->host_lock, flags); mhba->instancet->fire_cmd(mhba, cmd); spin_unlock_irqrestore(mhba->shost->host_lock, flags); wait_event_timeout(mhba->int_cmd_wait_q, (cmd->cmd_status != REQ_STATUS_PENDING), MVUMI_INTERNAL_CMD_WAIT_TIME * HZ); /* command timeout */ if (atomic_read(&cmd->sync_cmd)) { spin_lock_irqsave(mhba->shost->host_lock, flags); 
atomic_dec(&cmd->sync_cmd); if (mhba->tag_cmd[cmd->frame->tag]) { mhba->tag_cmd[cmd->frame->tag] = NULL; dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n", cmd->frame->tag); tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); } if (!list_empty(&cmd->queue_pointer)) { dev_warn(&mhba->pdev->dev, "TIMEOUT:A internal command doesn't send!\n"); list_del_init(&cmd->queue_pointer); } else atomic_dec(&mhba->fw_outstanding); spin_unlock_irqrestore(mhba->shost->host_lock, flags); } return 0; } static void mvumi_release_fw(struct mvumi_hba *mhba) { mvumi_free_cmds(mhba); mvumi_release_mem_resource(mhba); mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, mhba->handshake_page, mhba->handshake_page_phys); kfree(mhba->regs); pci_release_regions(mhba->pdev); } static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba) { struct mvumi_cmd *cmd; struct mvumi_msg_frame *frame; unsigned char device_id, retry = 0; unsigned char bitcount = sizeof(unsigned char) * 8; for (device_id = 0; device_id < mhba->max_target_id; device_id++) { if (!(mhba->target_map[device_id / bitcount] & (1 << (device_id % bitcount)))) continue; get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); if (!cmd) { if (retry++ >= 5) { dev_err(&mhba->pdev->dev, "failed to get memory" " for internal flush cache cmd for " "device %d", device_id); retry = 0; continue; } else goto get_cmd; } cmd->scmd = NULL; cmd->cmd_status = REQ_STATUS_PENDING; atomic_set(&cmd->sync_cmd, 0); frame = cmd->frame; frame->req_function = CL_FUN_SCSI_CMD; frame->device_id = device_id; frame->cmd_flag = CMD_FLAG_NON_DATA; frame->data_transfer_length = 0; frame->cdb_length = MAX_COMMAND_SIZE; memset(frame->cdb, 0, MAX_COMMAND_SIZE); frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; frame->cdb[1] = CDB_CORE_MODULE; frame->cdb[2] = CDB_CORE_SHUTDOWN; mvumi_issue_blocked_cmd(mhba, cmd); if (cmd->cmd_status != SAM_STAT_GOOD) { dev_err(&mhba->pdev->dev, "device %d flush cache failed, status=0x%x.\n", device_id, cmd->cmd_status); } mvumi_delete_internal_cmd(mhba, cmd); } return 0; } static unsigned char mvumi_calculate_checksum(struct mvumi_hs_header *p_header, unsigned short len) { unsigned char *ptr; unsigned char ret = 0, i; ptr = (unsigned char *) p_header->frame_content; for (i = 0; i < len; i++) { ret ^= *ptr; ptr++; } return ret; } static void mvumi_hs_build_page(struct mvumi_hba *mhba, struct mvumi_hs_header *hs_header) { struct mvumi_hs_page2 *hs_page2; struct mvumi_hs_page4 *hs_page4; struct mvumi_hs_page3 *hs_page3; u64 time; u64 local_time; switch (hs_header->page_code) { case HS_PAGE_HOST_INFO: hs_page2 = (struct mvumi_hs_page2 *) hs_header; hs_header->frame_length = sizeof(*hs_page2) - 4; memset(hs_header->frame_content, 0, hs_header->frame_length); hs_page2->host_type = 3; /* 3 mean linux*/ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) hs_page2->host_cap = 0x08;/* host dynamic source mode */ hs_page2->host_ver.ver_major = VER_MAJOR; hs_page2->host_ver.ver_minor = VER_MINOR; hs_page2->host_ver.ver_oem = VER_OEM; hs_page2->host_ver.ver_build = VER_BUILD; hs_page2->system_io_bus = 0; hs_page2->slot_number = 0; hs_page2->intr_level = 0; hs_page2->intr_vector = 0; time = ktime_get_real_seconds(); local_time = (time - (sys_tz.tz_minuteswest * 60)); hs_page2->seconds_since1970 = local_time; hs_header->checksum = mvumi_calculate_checksum(hs_header, hs_header->frame_length); break; case HS_PAGE_FIRM_CTL: hs_page3 = (struct mvumi_hs_page3 *) hs_header; hs_header->frame_length = sizeof(*hs_page3) - 4; 
memset(hs_header->frame_content, 0, hs_header->frame_length); hs_header->checksum = mvumi_calculate_checksum(hs_header, hs_header->frame_length); break; case HS_PAGE_CL_INFO: hs_page4 = (struct mvumi_hs_page4 *) hs_header; hs_header->frame_length = sizeof(*hs_page4) - 4; memset(hs_header->frame_content, 0, hs_header->frame_length); hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys); hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys); hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys); hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); hs_page4->ib_entry_size = mhba->ib_max_size_setting; hs_page4->ob_entry_size = mhba->ob_max_size_setting; if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) { hs_page4->ob_depth = find_first_bit((unsigned long *) &mhba->list_num_io, BITS_PER_LONG); hs_page4->ib_depth = find_first_bit((unsigned long *) &mhba->list_num_io, BITS_PER_LONG); } else { hs_page4->ob_depth = (u8) mhba->list_num_io; hs_page4->ib_depth = (u8) mhba->list_num_io; } hs_header->checksum = mvumi_calculate_checksum(hs_header, hs_header->frame_length); break; default: dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n", hs_header->page_code); break; } } /** * mvumi_init_data - Initialize requested date for FW * @mhba: Adapter soft state */ static int mvumi_init_data(struct mvumi_hba *mhba) { struct mvumi_ob_data *ob_pool; struct mvumi_res *res_mgnt; unsigned int tmp_size, offset, i; void *virmem, *v; dma_addr_t p; if (mhba->fw_flag & MVUMI_FW_ALLOC) return 0; tmp_size = mhba->ib_max_size * mhba->max_io; if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; tmp_size += 128 + mhba->ob_max_size * mhba->max_io; tmp_size += 8 + sizeof(u32)*2 + 16; res_mgnt = mvumi_alloc_mem_resource(mhba, RESOURCE_UNCACHED_MEMORY, tmp_size); if (!res_mgnt) { dev_err(&mhba->pdev->dev, "failed to allocate memory for inbound list\n"); goto fail_alloc_dma_buf; } p = res_mgnt->bus_addr; v = res_mgnt->virt_addr; /* ib_list */ offset = round_up(p, 128) - p; p += offset; v += offset; mhba->ib_list = v; mhba->ib_list_phys = p; if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; mhba->ib_frame = v; mhba->ib_frame_phys = p; } v += mhba->ib_max_size * mhba->max_io; p += mhba->ib_max_size * mhba->max_io; /* ib shadow */ offset = round_up(p, 8) - p; p += offset; v += offset; mhba->ib_shadow = v; mhba->ib_shadow_phys = p; p += sizeof(u32)*2; v += sizeof(u32)*2; /* ob shadow */ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { offset = round_up(p, 8) - p; p += offset; v += offset; mhba->ob_shadow = v; mhba->ob_shadow_phys = p; p += 8; v += 8; } else { offset = round_up(p, 4) - p; p += offset; v += offset; mhba->ob_shadow = v; mhba->ob_shadow_phys = p; p += 4; v += 4; } /* ob list */ offset = round_up(p, 128) - p; p += offset; v += offset; mhba->ob_list = v; mhba->ob_list_phys = p; /* ob data pool */ tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool)); tmp_size = round_up(tmp_size, 8); res_mgnt = mvumi_alloc_mem_resource(mhba, RESOURCE_CACHED_MEMORY, tmp_size); if (!res_mgnt) { dev_err(&mhba->pdev->dev, "failed to allocate memory for outbound data buffer\n"); goto fail_alloc_dma_buf; } virmem = res_mgnt->virt_addr; for (i = mhba->max_io; i != 0; i--) { ob_pool = (struct mvumi_ob_data *) virmem; list_add_tail(&ob_pool->list, &mhba->ob_data_list); virmem += 
mhba->ob_max_size + sizeof(*ob_pool); } tmp_size = sizeof(unsigned short) * mhba->max_io + sizeof(struct mvumi_cmd *) * mhba->max_io; tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) / (sizeof(unsigned char) * 8); res_mgnt = mvumi_alloc_mem_resource(mhba, RESOURCE_CACHED_MEMORY, tmp_size); if (!res_mgnt) { dev_err(&mhba->pdev->dev, "failed to allocate memory for tag and target map\n"); goto fail_alloc_dma_buf; } virmem = res_mgnt->virt_addr; mhba->tag_pool.stack = virmem; mhba->tag_pool.size = mhba->max_io; tag_init(&mhba->tag_pool, mhba->max_io); virmem += sizeof(unsigned short) * mhba->max_io; mhba->tag_cmd = virmem; virmem += sizeof(struct mvumi_cmd *) * mhba->max_io; mhba->target_map = virmem; mhba->fw_flag |= MVUMI_FW_ALLOC; return 0; fail_alloc_dma_buf: mvumi_release_mem_resource(mhba); return -1; } static int mvumi_hs_process_page(struct mvumi_hba *mhba, struct mvumi_hs_header *hs_header) { struct mvumi_hs_page1 *hs_page1; unsigned char page_checksum; page_checksum = mvumi_calculate_checksum(hs_header, hs_header->frame_length); if (page_checksum != hs_header->checksum) { dev_err(&mhba->pdev->dev, "checksum error\n"); return -1; } switch (hs_header->page_code) { case HS_PAGE_FIRM_CAP: hs_page1 = (struct mvumi_hs_page1 *) hs_header; mhba->max_io = hs_page1->max_io_support; mhba->list_num_io = hs_page1->cl_inout_list_depth; mhba->max_transfer_size = hs_page1->max_transfer_size; mhba->max_target_id = hs_page1->max_devices_support; mhba->hba_capability = hs_page1->capability; mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size; mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2; mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size; mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2; dev_dbg(&mhba->pdev->dev, "FW version:%d\n", hs_page1->fw_ver.ver_build); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) mhba->eot_flag = 22; else mhba->eot_flag = 27; if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth; break; default: dev_err(&mhba->pdev->dev, "handshake: page code error\n"); return -1; } return 0; } /** * mvumi_handshake - Move the FW to READY state * @mhba: Adapter soft state * * During the initialization, FW passes can potentially be in any one of * several possible states. If the FW in operational, waiting-for-handshake * states, driver must take steps to bring it to ready state. Otherwise, it * has to wait for the ready state. 
*/ static int mvumi_handshake(struct mvumi_hba *mhba) { unsigned int hs_state, tmp, hs_fun; struct mvumi_hs_header *hs_header; struct mvumi_hw_regs *regs = mhba->regs; if (mhba->fw_state == FW_STATE_STARTING) hs_state = HS_S_START; else { tmp = ioread32(regs->arm_to_pciea_msg0); hs_state = HS_GET_STATE(tmp); dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { mhba->fw_state = FW_STATE_STARTING; return -1; } } hs_fun = 0; switch (hs_state) { case HS_S_START: mhba->fw_state = FW_STATE_HANDSHAKING; HS_SET_STATUS(hs_fun, HS_STATUS_OK); HS_SET_STATE(hs_fun, HS_S_RESET); iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1); iowrite32(hs_fun, regs->pciea_to_arm_msg0); iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); break; case HS_S_RESET: iowrite32(lower_32_bits(mhba->handshake_page_phys), regs->pciea_to_arm_msg1); iowrite32(upper_32_bits(mhba->handshake_page_phys), regs->arm_to_pciea_msg1); HS_SET_STATUS(hs_fun, HS_STATUS_OK); HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); iowrite32(hs_fun, regs->pciea_to_arm_msg0); iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); break; case HS_S_PAGE_ADDR: case HS_S_QUERY_PAGE: case HS_S_SEND_PAGE: hs_header = (struct mvumi_hs_header *) mhba->handshake_page; if (hs_header->page_code == HS_PAGE_FIRM_CAP) { mhba->hba_total_pages = ((struct mvumi_hs_page1 *) hs_header)->total_pages; if (mhba->hba_total_pages == 0) mhba->hba_total_pages = HS_PAGE_TOTAL-1; } if (hs_state == HS_S_QUERY_PAGE) { if (mvumi_hs_process_page(mhba, hs_header)) { HS_SET_STATE(hs_fun, HS_S_ABORT); return -1; } if (mvumi_init_data(mhba)) { HS_SET_STATE(hs_fun, HS_S_ABORT); return -1; } } else if (hs_state == HS_S_PAGE_ADDR) { hs_header->page_code = 0; mhba->hba_total_pages = HS_PAGE_TOTAL-1; } if ((hs_header->page_code + 1) <= mhba->hba_total_pages) { hs_header->page_code++; if (hs_header->page_code != HS_PAGE_FIRM_CAP) { mvumi_hs_build_page(mhba, hs_header); HS_SET_STATE(hs_fun, HS_S_SEND_PAGE); } else HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE); } else HS_SET_STATE(hs_fun, HS_S_END); HS_SET_STATUS(hs_fun, HS_STATUS_OK); iowrite32(hs_fun, regs->pciea_to_arm_msg0); iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); break; case HS_S_END: /* Set communication list ISR */ tmp = ioread32(regs->enpointa_mask_reg); tmp |= regs->int_comaout | regs->int_comaerr; iowrite32(tmp, regs->enpointa_mask_reg); iowrite32(mhba->list_num_io, mhba->ib_shadow); /* Set InBound List Available count shadow */ iowrite32(lower_32_bits(mhba->ib_shadow_phys), regs->inb_aval_count_basel); iowrite32(upper_32_bits(mhba->ib_shadow_phys), regs->inb_aval_count_baseh); if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) { /* Set OutBound List Available count shadow */ iowrite32((mhba->list_num_io-1) | regs->cl_pointer_toggle, mhba->ob_shadow); iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs->outb_copy_basel); iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs->outb_copy_baseh); } mhba->ib_cur_slot = (mhba->list_num_io - 1) | regs->cl_pointer_toggle; mhba->ob_cur_slot = (mhba->list_num_io - 1) | regs->cl_pointer_toggle; mhba->fw_state = FW_STATE_STARTED; break; default: dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n", hs_state); return -1; } return 0; } static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) { unsigned int isr_status; unsigned long before; before = jiffies; mvumi_handshake(mhba); do { isr_status = mhba->instancet->read_fw_status_reg(mhba); if (mhba->fw_state == FW_STATE_STARTED) return 0; if (time_after(jiffies, 
before + FW_MAX_DELAY * HZ)) { dev_err(&mhba->pdev->dev, "no handshake response at state 0x%x.\n", mhba->fw_state); dev_err(&mhba->pdev->dev, "isr : global=0x%x,status=0x%x.\n", mhba->global_isr, isr_status); return -1; } rmb(); usleep_range(1000, 2000); } while (!(isr_status & DRBL_HANDSHAKE_ISR)); return 0; } static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) { unsigned int tmp; unsigned long before; before = jiffies; tmp = ioread32(mhba->regs->arm_to_pciea_msg1); while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { if (tmp != HANDSHAKE_READYSTATE) iowrite32(DRBL_MU_RESET, mhba->regs->pciea_to_arm_drbl_reg); if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { dev_err(&mhba->pdev->dev, "invalid signature [0x%x].\n", tmp); return -1; } usleep_range(1000, 2000); rmb(); tmp = ioread32(mhba->regs->arm_to_pciea_msg1); } mhba->fw_state = FW_STATE_STARTING; dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n"); do { if (mvumi_handshake_event(mhba)) { dev_err(&mhba->pdev->dev, "handshake failed at state 0x%x.\n", mhba->fw_state); return -1; } } while (mhba->fw_state != FW_STATE_STARTED); dev_dbg(&mhba->pdev->dev, "firmware handshake done\n"); return 0; } static unsigned char mvumi_start(struct mvumi_hba *mhba) { unsigned int tmp; struct mvumi_hw_regs *regs = mhba->regs; /* clear Door bell */ tmp = ioread32(regs->arm_to_pciea_drbl_reg); iowrite32(tmp, regs->arm_to_pciea_drbl_reg); iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea; iowrite32(tmp, regs->enpointa_mask_reg); msleep(100); if (mvumi_check_handshake(mhba)) return -1; return 0; } /** * mvumi_complete_cmd - Completes a command * @mhba: Adapter soft state * @cmd: Command to be completed * @ob_frame: Command response */ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, struct mvumi_rsp_frame *ob_frame) { struct scsi_cmnd *scmd = cmd->scmd; mvumi_priv(cmd->scmd)->cmd_priv = NULL; scmd->result = ob_frame->req_status; switch (ob_frame->req_status) { case SAM_STAT_GOOD: scmd->result |= DID_OK << 16; break; case SAM_STAT_BUSY: scmd->result |= DID_BUS_BUSY << 16; break; case SAM_STAT_CHECK_CONDITION: scmd->result |= (DID_OK << 16); if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) { memcpy(cmd->scmd->sense_buffer, ob_frame->payload, sizeof(struct mvumi_sense_data)); } break; default: scmd->result |= (DID_ABORT << 16); break; } if (scsi_bufflen(scmd)) dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), scsi_sg_count(scmd), scmd->sc_data_direction); scsi_done(scmd); mvumi_return_cmd(mhba, cmd); } static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, struct mvumi_rsp_frame *ob_frame) { if (atomic_read(&cmd->sync_cmd)) { cmd->cmd_status = ob_frame->req_status; if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) && (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) && cmd->data_buf) { memcpy(cmd->data_buf, ob_frame->payload, sizeof(struct mvumi_sense_data)); } atomic_dec(&cmd->sync_cmd); wake_up(&mhba->int_cmd_wait_q); } } static void mvumi_show_event(struct mvumi_hba *mhba, struct mvumi_driver_event *ptr) { unsigned int i; dev_warn(&mhba->pdev->dev, "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n", ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id); if (ptr->param_count) { printk(KERN_WARNING "Event param(len 0x%x): ", ptr->param_count); for (i = 0; i < ptr->param_count; i++) printk(KERN_WARNING "0x%x ", ptr->params[i]); printk(KERN_WARNING "\n"); } if 
(ptr->sense_data_length) { printk(KERN_WARNING "Event sense data(len 0x%x): ", ptr->sense_data_length); for (i = 0; i < ptr->sense_data_length; i++) printk(KERN_WARNING "0x%x ", ptr->sense_data[i]); printk(KERN_WARNING "\n"); } } static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status) { struct scsi_device *sdev; int ret = -1; if (status == DEVICE_OFFLINE) { sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); if (sdev) { dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0, sdev->id, 0); scsi_remove_device(sdev); scsi_device_put(sdev); ret = 0; } else dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n", devid); } else if (status == DEVICE_ONLINE) { sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); if (!sdev) { scsi_add_device(mhba->shost, 0, devid, 0); dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0, devid, 0); ret = 0; } else { dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n", 0, devid, 0); scsi_device_put(sdev); } } return ret; } static u64 mvumi_inquiry(struct mvumi_hba *mhba, unsigned int id, struct mvumi_cmd *cmd) { struct mvumi_msg_frame *frame; u64 wwid = 0; int cmd_alloc = 0; int data_buf_len = 64; if (!cmd) { cmd = mvumi_create_internal_cmd(mhba, data_buf_len); if (cmd) cmd_alloc = 1; else return 0; } else { memset(cmd->data_buf, 0, data_buf_len); } cmd->scmd = NULL; cmd->cmd_status = REQ_STATUS_PENDING; atomic_set(&cmd->sync_cmd, 0); frame = cmd->frame; frame->device_id = (u16) id; frame->cmd_flag = CMD_FLAG_DATA_IN; frame->req_function = CL_FUN_SCSI_CMD; frame->cdb_length = 6; frame->data_transfer_length = MVUMI_INQUIRY_LENGTH; memset(frame->cdb, 0, frame->cdb_length); frame->cdb[0] = INQUIRY; frame->cdb[4] = frame->data_transfer_length; mvumi_issue_blocked_cmd(mhba, cmd); if (cmd->cmd_status == SAM_STAT_GOOD) { if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) wwid = id + 1; else memcpy((void *)&wwid, (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF), MVUMI_INQUIRY_UUID_LEN); dev_dbg(&mhba->pdev->dev, "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid); } else { wwid = 0; } if (cmd_alloc) mvumi_delete_internal_cmd(mhba, cmd); return wwid; } static void mvumi_detach_devices(struct mvumi_hba *mhba) { struct mvumi_device *mv_dev = NULL , *dev_next; struct scsi_device *sdev = NULL; mutex_lock(&mhba->device_lock); /* detach Hard Disk */ list_for_each_entry_safe(mv_dev, dev_next, &mhba->shost_dev_list, list) { mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); list_del_init(&mv_dev->list); dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", mv_dev->id, mv_dev->wwid); kfree(mv_dev); } list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) { list_del_init(&mv_dev->list); dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", mv_dev->id, mv_dev->wwid); kfree(mv_dev); } /* detach virtual device */ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) sdev = scsi_device_lookup(mhba->shost, 0, mhba->max_target_id - 1, 0); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } mutex_unlock(&mhba->device_lock); } static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) { struct scsi_device *sdev; sdev = scsi_device_lookup(mhba->shost, 0, id, 0); if (sdev) { scsi_rescan_device(sdev); scsi_device_put(sdev); } } static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid) { struct mvumi_device *mv_dev = NULL; list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) { if (mv_dev->wwid == wwid) { if (mv_dev->id != id) { dev_err(&mhba->pdev->dev, "%s has same wwid[%llx] ," " but different 
id[%d %d]\n", __func__, mv_dev->wwid, mv_dev->id, id); return -1; } else { if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) mvumi_rescan_devices(mhba, id); return 1; } } } return 0; } static void mvumi_remove_devices(struct mvumi_hba *mhba, int id) { struct mvumi_device *mv_dev = NULL, *dev_next; list_for_each_entry_safe(mv_dev, dev_next, &mhba->shost_dev_list, list) { if (mv_dev->id == id) { dev_dbg(&mhba->pdev->dev, "detach device(0:%d:0) wwid(%llx) from HOST\n", mv_dev->id, mv_dev->wwid); mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); list_del_init(&mv_dev->list); kfree(mv_dev); } } } static int mvumi_probe_devices(struct mvumi_hba *mhba) { int id, maxid; u64 wwid = 0; struct mvumi_device *mv_dev = NULL; struct mvumi_cmd *cmd = NULL; int found = 0; cmd = mvumi_create_internal_cmd(mhba, 64); if (!cmd) return -1; if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) maxid = mhba->max_target_id; else maxid = mhba->max_target_id - 1; for (id = 0; id < maxid; id++) { wwid = mvumi_inquiry(mhba, id, cmd); if (!wwid) { /* device no response, remove it */ mvumi_remove_devices(mhba, id); } else { /* device response, add it */ found = mvumi_match_devices(mhba, id, wwid); if (!found) { mvumi_remove_devices(mhba, id); mv_dev = kzalloc(sizeof(struct mvumi_device), GFP_KERNEL); if (!mv_dev) { dev_err(&mhba->pdev->dev, "%s alloc mv_dev failed\n", __func__); continue; } mv_dev->id = id; mv_dev->wwid = wwid; mv_dev->sdev = NULL; INIT_LIST_HEAD(&mv_dev->list); list_add_tail(&mv_dev->list, &mhba->mhba_dev_list); dev_dbg(&mhba->pdev->dev, "probe a new device(0:%d:0)" " wwid(%llx)\n", id, mv_dev->wwid); } else if (found == -1) return -1; else continue; } } if (cmd) mvumi_delete_internal_cmd(mhba, cmd); return 0; } static int mvumi_rescan_bus(void *data) { int ret = 0; struct mvumi_hba *mhba = (struct mvumi_hba *) data; struct mvumi_device *mv_dev = NULL , *dev_next; while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); if (!atomic_read(&mhba->pnp_count)) schedule(); msleep(1000); atomic_set(&mhba->pnp_count, 0); __set_current_state(TASK_RUNNING); mutex_lock(&mhba->device_lock); ret = mvumi_probe_devices(mhba); if (!ret) { list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) { if (mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_ONLINE)) { dev_err(&mhba->pdev->dev, "%s add device(0:%d:0) failed" "wwid(%llx) has exist\n", __func__, mv_dev->id, mv_dev->wwid); list_del_init(&mv_dev->list); kfree(mv_dev); } else { list_move_tail(&mv_dev->list, &mhba->shost_dev_list); } } } mutex_unlock(&mhba->device_lock); } return 0; } static void mvumi_proc_msg(struct mvumi_hba *mhba, struct mvumi_hotplug_event *param) { u16 size = param->size; const unsigned long *ar_bitmap; const unsigned long *re_bitmap; int index; if (mhba->fw_flag & MVUMI_FW_ATTACH) { index = -1; ar_bitmap = (const unsigned long *) param->bitmap; re_bitmap = (const unsigned long *) &param->bitmap[size >> 3]; mutex_lock(&mhba->sas_discovery_mutex); do { index = find_next_zero_bit(ar_bitmap, size, index + 1); if (index >= size) break; mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); } while (1); index = -1; do { index = find_next_zero_bit(re_bitmap, size, index + 1); if (index >= size) break; mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); } while (1); mutex_unlock(&mhba->sas_discovery_mutex); } } static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) { if (msg == APICDB1_EVENT_GETEVENT) { int i, count; struct mvumi_driver_event *param = NULL; struct mvumi_event_req *er = buffer; 
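/* The firmware returns events in a batch; sanity-check the reported count against MAX_EVENTS_RETURNED before logging each entry. */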
count = er->count; if (count > MAX_EVENTS_RETURNED) { dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" " than max event count[0x%x].\n", count, MAX_EVENTS_RETURNED); return; } for (i = 0; i < count; i++) { param = &er->events[i]; mvumi_show_event(mhba, param); } } else if (msg == APICDB1_HOST_GETEVENT) { mvumi_proc_msg(mhba, buffer); } } static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) { struct mvumi_cmd *cmd; struct mvumi_msg_frame *frame; cmd = mvumi_create_internal_cmd(mhba, 512); if (!cmd) return -1; cmd->scmd = NULL; cmd->cmd_status = REQ_STATUS_PENDING; atomic_set(&cmd->sync_cmd, 0); frame = cmd->frame; frame->device_id = 0; frame->cmd_flag = CMD_FLAG_DATA_IN; frame->req_function = CL_FUN_SCSI_CMD; frame->cdb_length = MAX_COMMAND_SIZE; frame->data_transfer_length = sizeof(struct mvumi_event_req); memset(frame->cdb, 0, MAX_COMMAND_SIZE); frame->cdb[0] = APICDB0_EVENT; frame->cdb[1] = msg; mvumi_issue_blocked_cmd(mhba, cmd); if (cmd->cmd_status != SAM_STAT_GOOD) dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", cmd->cmd_status); else mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); mvumi_delete_internal_cmd(mhba, cmd); return 0; } static void mvumi_scan_events(struct work_struct *work) { struct mvumi_events_wq *mu_ev = container_of(work, struct mvumi_events_wq, work_q); mvumi_get_event(mu_ev->mhba, mu_ev->event); kfree(mu_ev); } static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) { struct mvumi_events_wq *mu_ev; while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { if (isr_status & DRBL_BUS_CHANGE) { atomic_inc(&mhba->pnp_count); wake_up_process(mhba->dm_thread); isr_status &= ~(DRBL_BUS_CHANGE); continue; } mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); if (mu_ev) { INIT_WORK(&mu_ev->work_q, mvumi_scan_events); mu_ev->mhba = mhba; mu_ev->event = APICDB1_EVENT_GETEVENT; isr_status &= ~(DRBL_EVENT_NOTIFY); mu_ev->param = NULL; schedule_work(&mu_ev->work_q); } } } static void mvumi_handle_clob(struct mvumi_hba *mhba) { struct mvumi_rsp_frame *ob_frame; struct mvumi_cmd *cmd; struct mvumi_ob_data *pool; while (!list_empty(&mhba->free_ob_list)) { pool = list_first_entry(&mhba->free_ob_list, struct mvumi_ob_data, list); list_del_init(&pool->list); list_add_tail(&pool->list, &mhba->ob_data_list); ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; cmd = mhba->tag_cmd[ob_frame->tag]; atomic_dec(&mhba->fw_outstanding); mhba->tag_cmd[ob_frame->tag] = NULL; tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); if (cmd->scmd) mvumi_complete_cmd(mhba, cmd, ob_frame); else mvumi_complete_internal_cmd(mhba, cmd, ob_frame); } mhba->instancet->fire_cmd(mhba, NULL); } static irqreturn_t mvumi_isr_handler(int irq, void *devp) { struct mvumi_hba *mhba = (struct mvumi_hba *) devp; unsigned long flags; spin_lock_irqsave(mhba->shost->host_lock, flags); if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { spin_unlock_irqrestore(mhba->shost->host_lock, flags); return IRQ_NONE; } if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) mvumi_launch_events(mhba, mhba->isr_status); if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); mvumi_handshake(mhba); } } if (mhba->global_isr & mhba->regs->int_comaout) mvumi_receive_ob_list_entry(mhba); mhba->global_isr = 0; mhba->isr_status = 0; if (mhba->fw_state == FW_STATE_STARTED) mvumi_handle_clob(mhba); spin_unlock_irqrestore(mhba->shost->host_lock, flags); return 
IRQ_HANDLED; } static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) { void *ib_entry; struct mvumi_msg_frame *ib_frame; unsigned int frame_len; ib_frame = cmd->frame; if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; } if (tag_is_empty(&mhba->tag_pool)) { dev_dbg(&mhba->pdev->dev, "no free tag.\n"); return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; } mvumi_get_ib_list_entry(mhba, &ib_entry); cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); cmd->frame->request_id = mhba->io_seq++; cmd->request_id = cmd->frame->request_id; mhba->tag_cmd[cmd->frame->tag] = cmd; frame_len = sizeof(*ib_frame) + ib_frame->sg_counts * sizeof(struct mvumi_sgl); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { struct mvumi_dyn_list_entry *dle; dle = ib_entry; dle->src_low_addr = cpu_to_le32(lower_32_bits(cmd->frame_phys)); dle->src_high_addr = cpu_to_le32(upper_32_bits(cmd->frame_phys)); dle->if_length = (frame_len >> 2) & 0xFFF; } else { memcpy(ib_entry, ib_frame, frame_len); } return MV_QUEUE_COMMAND_RESULT_SENT; } static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) { unsigned short num_of_cl_sent = 0; unsigned int count; enum mvumi_qc_result result; if (cmd) list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); count = mhba->instancet->check_ib_list(mhba); if (list_empty(&mhba->waiting_req_list) || !count) return; do { cmd = list_first_entry(&mhba->waiting_req_list, struct mvumi_cmd, queue_pointer); list_del_init(&cmd->queue_pointer); result = mvumi_send_command(mhba, cmd); switch (result) { case MV_QUEUE_COMMAND_RESULT_SENT: num_of_cl_sent++; break; case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: list_add(&cmd->queue_pointer, &mhba->waiting_req_list); if (num_of_cl_sent > 0) mvumi_send_ib_list_entry(mhba); return; } } while (!list_empty(&mhba->waiting_req_list) && count--); if (num_of_cl_sent > 0) mvumi_send_ib_list_entry(mhba); } /** * mvumi_enable_intr - Enables interrupts * @mhba: Adapter soft state */ static void mvumi_enable_intr(struct mvumi_hba *mhba) { unsigned int mask; struct mvumi_hw_regs *regs = mhba->regs; iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); mask = ioread32(regs->enpointa_mask_reg); mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; iowrite32(mask, regs->enpointa_mask_reg); } /** * mvumi_disable_intr -Disables interrupt * @mhba: Adapter soft state */ static void mvumi_disable_intr(struct mvumi_hba *mhba) { unsigned int mask; struct mvumi_hw_regs *regs = mhba->regs; iowrite32(0, regs->arm_to_pciea_mask_reg); mask = ioread32(regs->enpointa_mask_reg); mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr); iowrite32(mask, regs->enpointa_mask_reg); } static int mvumi_clear_intr(void *extend) { struct mvumi_hba *mhba = (struct mvumi_hba *) extend; unsigned int status, isr_status = 0, tmp = 0; struct mvumi_hw_regs *regs = mhba->regs; status = ioread32(regs->main_int_cause_reg); if (!(status & regs->int_mu) || status == 0xFFFFFFFF) return 1; if (unlikely(status & regs->int_comaerr)) { tmp = ioread32(regs->outb_isr_cause); if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { if (tmp & regs->clic_out_err) { iowrite32(tmp & regs->clic_out_err, regs->outb_isr_cause); } } else { if (tmp & (regs->clic_in_err | regs->clic_out_err)) iowrite32(tmp & (regs->clic_in_err | regs->clic_out_err), regs->outb_isr_cause); } status ^= mhba->regs->int_comaerr; /* inbound or outbound 
parity error, command will time out */ } if (status & regs->int_comaout) { tmp = ioread32(regs->outb_isr_cause); if (tmp & regs->clic_irq) iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); } if (status & regs->int_dl_cpu2pciea) { isr_status = ioread32(regs->arm_to_pciea_drbl_reg); if (isr_status) iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); } mhba->global_isr = status; mhba->isr_status = isr_status; return 0; } /** * mvumi_read_fw_status_reg - returns the current FW status value * @mhba: Adapter soft state */ static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) { unsigned int status; status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); if (status) iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); return status; } static struct mvumi_instance_template mvumi_instance_9143 = { .fire_cmd = mvumi_fire_cmd, .enable_intr = mvumi_enable_intr, .disable_intr = mvumi_disable_intr, .clear_intr = mvumi_clear_intr, .read_fw_status_reg = mvumi_read_fw_status_reg, .check_ib_list = mvumi_check_ib_list_9143, .check_ob_list = mvumi_check_ob_list_9143, .reset_host = mvumi_reset_host_9143, }; static struct mvumi_instance_template mvumi_instance_9580 = { .fire_cmd = mvumi_fire_cmd, .enable_intr = mvumi_enable_intr, .disable_intr = mvumi_disable_intr, .clear_intr = mvumi_clear_intr, .read_fw_status_reg = mvumi_read_fw_status_reg, .check_ib_list = mvumi_check_ib_list_9580, .check_ob_list = mvumi_check_ob_list_9580, .reset_host = mvumi_reset_host_9580, }; static int mvumi_slave_configure(struct scsi_device *sdev) { struct mvumi_hba *mhba; unsigned char bitcount = sizeof(unsigned char) * 8; mhba = (struct mvumi_hba *) sdev->host->hostdata; if (sdev->id >= mhba->max_target_id) return -EINVAL; mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); return 0; } /** * mvumi_build_frame - Prepares a direct cdb (DCDB) command * @mhba: Adapter soft state * @scmd: SCSI command * @cmd: Command to be prepared in * * This function prepares CDB commands. These are typically pass-through * commands to the devices. 
*/ static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) { struct mvumi_msg_frame *pframe; cmd->scmd = scmd; cmd->cmd_status = REQ_STATUS_PENDING; pframe = cmd->frame; pframe->device_id = ((unsigned short) scmd->device->id) | (((unsigned short) scmd->device->lun) << 8); pframe->cmd_flag = 0; switch (scmd->sc_data_direction) { case DMA_NONE: pframe->cmd_flag |= CMD_FLAG_NON_DATA; break; case DMA_FROM_DEVICE: pframe->cmd_flag |= CMD_FLAG_DATA_IN; break; case DMA_TO_DEVICE: pframe->cmd_flag |= CMD_FLAG_DATA_OUT; break; case DMA_BIDIRECTIONAL: default: dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]); goto error; } pframe->cdb_length = scmd->cmd_len; memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); pframe->req_function = CL_FUN_SCSI_CMD; if (scsi_bufflen(scmd)) { if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], &pframe->sg_counts)) goto error; pframe->data_transfer_length = scsi_bufflen(scmd); } else { pframe->sg_counts = 0; pframe->data_transfer_length = 0; } return 0; error: scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); return -1; } /** * mvumi_queue_command - Queue entry point * @shost: Scsi host to queue command on * @scmd: SCSI command to be queued */ static int mvumi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct mvumi_cmd *cmd; struct mvumi_hba *mhba; unsigned long irq_flags; spin_lock_irqsave(shost->host_lock, irq_flags); mhba = (struct mvumi_hba *) shost->hostdata; scmd->result = 0; cmd = mvumi_get_cmd(mhba); if (unlikely(!cmd)) { spin_unlock_irqrestore(shost->host_lock, irq_flags); return SCSI_MLQUEUE_HOST_BUSY; } if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) goto out_return_cmd; cmd->scmd = scmd; mvumi_priv(scmd)->cmd_priv = cmd; mhba->instancet->fire_cmd(mhba, cmd); spin_unlock_irqrestore(shost->host_lock, irq_flags); return 0; out_return_cmd: mvumi_return_cmd(mhba, cmd); scsi_done(scmd); spin_unlock_irqrestore(shost->host_lock, irq_flags); return 0; } static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd) { struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv; struct Scsi_Host *host = scmd->device->host; struct mvumi_hba *mhba = shost_priv(host); unsigned long flags; spin_lock_irqsave(mhba->shost->host_lock, flags); if (mhba->tag_cmd[cmd->frame->tag]) { mhba->tag_cmd[cmd->frame->tag] = NULL; tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); } if (!list_empty(&cmd->queue_pointer)) list_del_init(&cmd->queue_pointer); else atomic_dec(&mhba->fw_outstanding); scmd->result = (DID_ABORT << 16); mvumi_priv(scmd)->cmd_priv = NULL; if (scsi_bufflen(scmd)) { dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), scsi_sg_count(scmd), scmd->sc_data_direction); } mvumi_return_cmd(mhba, cmd); spin_unlock_irqrestore(mhba->shost->host_lock, flags); return SCSI_EH_NOT_HANDLED; } static int mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads, sectors; sector_t cylinders; unsigned long tmp; heads = 64; sectors = 32; tmp = heads * sectors; cylinders = capacity; sector_div(cylinders, tmp); if (capacity >= 0x200000) { heads = 255; sectors = 63; tmp = heads * sectors; cylinders = capacity; sector_div(cylinders, tmp); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return 0; } static const struct scsi_host_template mvumi_template = { .module = THIS_MODULE, .name = "Marvell Storage Controller", .slave_configure = mvumi_slave_configure, .queuecommand = 
mvumi_queue_command, .eh_timed_out = mvumi_timed_out, .eh_host_reset_handler = mvumi_host_reset, .bios_param = mvumi_bios_param, .dma_boundary = PAGE_SIZE - 1, .this_id = -1, .cmd_size = sizeof(struct mvumi_cmd_priv), }; static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) { void *base = NULL; struct mvumi_hw_regs *regs; switch (mhba->pdev->device) { case PCI_DEVICE_ID_MARVELL_MV9143: mhba->mmio = mhba->base_addr[0]; base = mhba->mmio; if (!mhba->regs) { mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); if (mhba->regs == NULL) return -ENOMEM; } regs = mhba->regs; /* For Arm */ regs->ctrl_sts_reg = base + 0x20104; regs->rstoutn_mask_reg = base + 0x20108; regs->sys_soft_rst_reg = base + 0x2010C; regs->main_int_cause_reg = base + 0x20200; regs->enpointa_mask_reg = base + 0x2020C; regs->rstoutn_en_reg = base + 0xF1400; /* For Doorbell */ regs->pciea_to_arm_drbl_reg = base + 0x20400; regs->arm_to_pciea_drbl_reg = base + 0x20408; regs->arm_to_pciea_mask_reg = base + 0x2040C; regs->pciea_to_arm_msg0 = base + 0x20430; regs->pciea_to_arm_msg1 = base + 0x20434; regs->arm_to_pciea_msg0 = base + 0x20438; regs->arm_to_pciea_msg1 = base + 0x2043C; /* For Message Unit */ regs->inb_aval_count_basel = base + 0x508; regs->inb_aval_count_baseh = base + 0x50C; regs->inb_write_pointer = base + 0x518; regs->inb_read_pointer = base + 0x51C; regs->outb_coal_cfg = base + 0x568; regs->outb_copy_basel = base + 0x5B0; regs->outb_copy_baseh = base + 0x5B4; regs->outb_copy_pointer = base + 0x544; regs->outb_read_pointer = base + 0x548; regs->outb_isr_cause = base + 0x560; regs->outb_coal_cfg = base + 0x568; /* Bit setting for HW */ regs->int_comaout = 1 << 8; regs->int_comaerr = 1 << 6; regs->int_dl_cpu2pciea = 1 << 1; regs->cl_pointer_toggle = 1 << 12; regs->clic_irq = 1 << 1; regs->clic_in_err = 1 << 8; regs->clic_out_err = 1 << 12; regs->cl_slot_num_mask = 0xFFF; regs->int_drbl_int_mask = 0x3FFFFFFF; regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; break; case PCI_DEVICE_ID_MARVELL_MV9580: mhba->mmio = mhba->base_addr[2]; base = mhba->mmio; if (!mhba->regs) { mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); if (mhba->regs == NULL) return -ENOMEM; } regs = mhba->regs; /* For Arm */ regs->ctrl_sts_reg = base + 0x20104; regs->rstoutn_mask_reg = base + 0x1010C; regs->sys_soft_rst_reg = base + 0x10108; regs->main_int_cause_reg = base + 0x10200; regs->enpointa_mask_reg = base + 0x1020C; regs->rstoutn_en_reg = base + 0xF1400; /* For Doorbell */ regs->pciea_to_arm_drbl_reg = base + 0x10460; regs->arm_to_pciea_drbl_reg = base + 0x10480; regs->arm_to_pciea_mask_reg = base + 0x10484; regs->pciea_to_arm_msg0 = base + 0x10400; regs->pciea_to_arm_msg1 = base + 0x10404; regs->arm_to_pciea_msg0 = base + 0x10420; regs->arm_to_pciea_msg1 = base + 0x10424; /* For reset*/ regs->reset_request = base + 0x10108; regs->reset_enable = base + 0x1010c; /* For Message Unit */ regs->inb_aval_count_basel = base + 0x4008; regs->inb_aval_count_baseh = base + 0x400C; regs->inb_write_pointer = base + 0x4018; regs->inb_read_pointer = base + 0x401C; regs->outb_copy_basel = base + 0x4058; regs->outb_copy_baseh = base + 0x405C; regs->outb_copy_pointer = base + 0x406C; regs->outb_read_pointer = base + 0x4070; regs->outb_coal_cfg = base + 0x4080; regs->outb_isr_cause = base + 0x4088; /* Bit setting for HW */ regs->int_comaout = 1 << 4; regs->int_dl_cpu2pciea = 1 << 12; regs->int_comaerr = 1 << 29; regs->cl_pointer_toggle = 1 << 14; regs->cl_slot_num_mask = 0x3FFF; regs->clic_irq = 1 << 0; regs->clic_out_err = 1 << 1; 
regs->int_drbl_int_mask = 0x3FFFFFFF; regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; break; default: return -1; } return 0; } /** * mvumi_init_fw - Initializes the FW * @mhba: Adapter soft state * * This is the main function for initializing firmware. */ static int mvumi_init_fw(struct mvumi_hba *mhba) { int ret = 0; if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); return -EBUSY; } ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); if (ret) goto fail_ioremap; switch (mhba->pdev->device) { case PCI_DEVICE_ID_MARVELL_MV9143: mhba->instancet = &mvumi_instance_9143; mhba->io_seq = 0; mhba->max_sge = MVUMI_MAX_SG_ENTRY; mhba->request_id_enabled = 1; break; case PCI_DEVICE_ID_MARVELL_MV9580: mhba->instancet = &mvumi_instance_9580; mhba->io_seq = 0; mhba->max_sge = MVUMI_MAX_SG_ENTRY; break; default: dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", mhba->pdev->device); mhba->instancet = NULL; ret = -EINVAL; goto fail_alloc_mem; } dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", mhba->pdev->device); ret = mvumi_cfg_hw_reg(mhba); if (ret) { dev_err(&mhba->pdev->dev, "failed to allocate memory for reg\n"); ret = -ENOMEM; goto fail_alloc_mem; } mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL); if (!mhba->handshake_page) { dev_err(&mhba->pdev->dev, "failed to allocate memory for handshake\n"); ret = -ENOMEM; goto fail_alloc_page; } if (mvumi_start(mhba)) { ret = -EINVAL; goto fail_ready_state; } ret = mvumi_alloc_cmds(mhba); if (ret) goto fail_ready_state; return 0; fail_ready_state: mvumi_release_mem_resource(mhba); dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, mhba->handshake_page, mhba->handshake_page_phys); fail_alloc_page: kfree(mhba->regs); fail_alloc_mem: mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); fail_ioremap: pci_release_regions(mhba->pdev); return ret; } /** * mvumi_io_attach - Attaches this driver to SCSI mid-layer * @mhba: Adapter soft state */ static int mvumi_io_attach(struct mvumi_hba *mhba) { struct Scsi_Host *host = mhba->shost; struct scsi_device *sdev = NULL; int ret; unsigned int max_sg = (mhba->ib_max_size - sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); host->irq = mhba->pdev->irq; host->unique_id = mhba->unique_id; host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; host->max_sectors = mhba->max_transfer_size / 512; host->cmd_per_lun = (mhba->max_io - 1) ? 
(mhba->max_io - 1) : 1; host->max_id = mhba->max_target_id; host->max_cmd_len = MAX_COMMAND_SIZE; ret = scsi_add_host(host, &mhba->pdev->dev); if (ret) { dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); return ret; } mhba->fw_flag |= MVUMI_FW_ATTACH; mutex_lock(&mhba->sas_discovery_mutex); if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); else ret = 0; if (ret) { dev_err(&mhba->pdev->dev, "add virtual device failed\n"); mutex_unlock(&mhba->sas_discovery_mutex); goto fail_add_device; } mhba->dm_thread = kthread_create(mvumi_rescan_bus, mhba, "mvumi_scanthread"); if (IS_ERR(mhba->dm_thread)) { dev_err(&mhba->pdev->dev, "failed to create device scan thread\n"); ret = PTR_ERR(mhba->dm_thread); mutex_unlock(&mhba->sas_discovery_mutex); goto fail_create_thread; } atomic_set(&mhba->pnp_count, 1); wake_up_process(mhba->dm_thread); mutex_unlock(&mhba->sas_discovery_mutex); return 0; fail_create_thread: if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) sdev = scsi_device_lookup(mhba->shost, 0, mhba->max_target_id - 1, 0); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } fail_add_device: scsi_remove_host(mhba->shost); return ret; } /** * mvumi_probe_one - PCI hotplug entry point * @pdev: PCI device structure * @id: PCI ids of supported hotplugged adapter */ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *host; struct mvumi_hba *mhba; int ret; dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); ret = pci_enable_device(pdev); if (ret) return ret; ret = mvumi_pci_set_master(pdev); if (ret) goto fail_set_dma_mask; host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); if (!host) { dev_err(&pdev->dev, "scsi_host_alloc failed\n"); ret = -ENOMEM; goto fail_alloc_instance; } mhba = shost_priv(host); INIT_LIST_HEAD(&mhba->cmd_pool); INIT_LIST_HEAD(&mhba->ob_data_list); INIT_LIST_HEAD(&mhba->free_ob_list); INIT_LIST_HEAD(&mhba->res_list); INIT_LIST_HEAD(&mhba->waiting_req_list); mutex_init(&mhba->device_lock); INIT_LIST_HEAD(&mhba->mhba_dev_list); INIT_LIST_HEAD(&mhba->shost_dev_list); atomic_set(&mhba->fw_outstanding, 0); init_waitqueue_head(&mhba->int_cmd_wait_q); mutex_init(&mhba->sas_discovery_mutex); mhba->pdev = pdev; mhba->shost = host; mhba->unique_id = pci_dev_id(pdev); ret = mvumi_init_fw(mhba); if (ret) goto fail_init_fw; ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, "mvumi", mhba); if (ret) { dev_err(&pdev->dev, "failed to register IRQ\n"); goto fail_init_irq; } mhba->instancet->enable_intr(mhba); pci_set_drvdata(pdev, mhba); ret = mvumi_io_attach(mhba); if (ret) goto fail_io_attach; mvumi_backup_bar_addr(mhba); dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); return 0; fail_io_attach: mhba->instancet->disable_intr(mhba); free_irq(mhba->pdev->irq, mhba); fail_init_irq: mvumi_release_fw(mhba); fail_init_fw: scsi_host_put(host); fail_alloc_instance: fail_set_dma_mask: pci_disable_device(pdev); return ret; } static void mvumi_detach_one(struct pci_dev *pdev) { struct Scsi_Host *host; struct mvumi_hba *mhba; mhba = pci_get_drvdata(pdev); if (mhba->dm_thread) { kthread_stop(mhba->dm_thread); mhba->dm_thread = NULL; } mvumi_detach_devices(mhba); host = mhba->shost; scsi_remove_host(mhba->shost); mvumi_flush_cache(mhba); mhba->instancet->disable_intr(mhba); free_irq(mhba->pdev->irq, mhba); mvumi_release_fw(mhba); scsi_host_put(host); 
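/* Interrupts, firmware resources and the SCSI host have been released; finally disable the PCI device itself. */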
pci_disable_device(pdev); dev_dbg(&pdev->dev, "driver is removed!\n"); } /** * mvumi_shutdown - Shutdown entry point * @pdev: PCI device structure */ static void mvumi_shutdown(struct pci_dev *pdev) { struct mvumi_hba *mhba = pci_get_drvdata(pdev); mvumi_flush_cache(mhba); } static int __maybe_unused mvumi_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct mvumi_hba *mhba = pci_get_drvdata(pdev); mvumi_flush_cache(mhba); mhba->instancet->disable_intr(mhba); mvumi_unmap_pci_addr(pdev, mhba->base_addr); return 0; } static int __maybe_unused mvumi_resume(struct device *dev) { int ret; struct pci_dev *pdev = to_pci_dev(dev); struct mvumi_hba *mhba = pci_get_drvdata(pdev); ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) goto fail; ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); if (ret) goto release_regions; if (mvumi_cfg_hw_reg(mhba)) { ret = -EINVAL; goto unmap_pci_addr; } mhba->mmio = mhba->base_addr[0]; mvumi_reset(mhba); if (mvumi_start(mhba)) { ret = -EINVAL; goto unmap_pci_addr; } mhba->instancet->enable_intr(mhba); return 0; unmap_pci_addr: mvumi_unmap_pci_addr(pdev, mhba->base_addr); release_regions: pci_release_regions(pdev); fail: return ret; } static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume); static struct pci_driver mvumi_pci_driver = { .name = MV_DRIVER_NAME, .id_table = mvumi_pci_table, .probe = mvumi_probe_one, .remove = mvumi_detach_one, .shutdown = mvumi_shutdown, .driver.pm = &mvumi_pm_ops, }; module_pci_driver(mvumi_pci_driver);
linux-master
drivers/scsi/mvumi.c
// SPDX-License-Identifier: GPL-2.0-only /* sun3x_esp.c: ESP front-end for Sun3x systems. * * Copyright (C) 2007,2008 Thomas Bogendoerfer ([email protected]) */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/io.h> #include <asm/sun3x.h> #include <asm/dma.h> #include <asm/dvma.h> /* DMA controller reg offsets */ #define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */ #define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */ #define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */ #define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */ #include <scsi/scsi_host.h> #include "esp_scsi.h" #define DRV_MODULE_NAME "sun3x_esp" #define PFX DRV_MODULE_NAME ": " #define DRV_VERSION "1.000" #define DRV_MODULE_RELDATE "Nov 1, 2007" /* * m68k always assumes readl/writel operate on little endian * mmio space; this is wrong at least for Sun3x, so we * need to workaround this until a proper way is found */ #if 0 #define dma_read32(REG) \ readl(esp->dma_regs + (REG)) #define dma_write32(VAL, REG) \ writel((VAL), esp->dma_regs + (REG)) #else #define dma_read32(REG) \ *(volatile u32 *)(esp->dma_regs + (REG)) #define dma_write32(VAL, REG) \ do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0) #endif static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg) { writeb(val, esp->regs + (reg * 4UL)); } static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg) { return readb(esp->regs + (reg * 4UL)); } static int sun3x_esp_irq_pending(struct esp *esp) { if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) return 1; return 0; } static void sun3x_esp_reset_dma(struct esp *esp) { u32 val; val = dma_read32(DMA_CSR); dma_write32(val | DMA_RST_SCSI, DMA_CSR); dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); /* Enable interrupts. 
*/ val = dma_read32(DMA_CSR); dma_write32(val | DMA_INT_ENAB, DMA_CSR); } static void sun3x_esp_dma_drain(struct esp *esp) { u32 csr; int lim; csr = dma_read32(DMA_CSR); if (!(csr & DMA_FIFO_ISDRAIN)) return; dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); lim = 1000; while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { if (--lim == 0) { printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", esp->host->unique_id); break; } udelay(1); } } static void sun3x_esp_dma_invalidate(struct esp *esp) { u32 val; int lim; lim = 1000; while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { if (--lim == 0) { printk(KERN_ALERT PFX "esp%d: DMA will not " "invalidate!\n", esp->host->unique_id); break; } udelay(1); } val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); val |= DMA_FIFO_INV; dma_write32(val, DMA_CSR); val &= ~DMA_FIFO_INV; dma_write32(val, DMA_CSR); } static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { u32 csr; BUG_ON(!(cmd & ESP_CMD_DMA)); sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); csr = dma_read32(DMA_CSR); csr |= DMA_ENABLE; if (write) csr |= DMA_ST_WRITE; else csr &= ~DMA_ST_WRITE; dma_write32(csr, DMA_CSR); dma_write32(addr, DMA_ADDR); scsi_esp_cmd(esp, cmd); } static int sun3x_esp_dma_error(struct esp *esp) { u32 csr = dma_read32(DMA_CSR); if (csr & DMA_HNDL_ERROR) return 1; return 0; } static const struct esp_driver_ops sun3x_esp_ops = { .esp_write8 = sun3x_esp_write8, .esp_read8 = sun3x_esp_read8, .irq_pending = sun3x_esp_irq_pending, .reset_dma = sun3x_esp_reset_dma, .dma_drain = sun3x_esp_dma_drain, .dma_invalidate = sun3x_esp_dma_invalidate, .send_dma_cmd = sun3x_esp_send_dma_cmd, .dma_error = sun3x_esp_dma_error, }; static int esp_sun3x_probe(struct platform_device *dev) { const struct scsi_host_template *tpnt = &scsi_esp_template; struct Scsi_Host *host; struct esp *esp; struct resource *res; int err = -ENOMEM; host = scsi_host_alloc(tpnt, sizeof(struct esp)); if (!host) goto fail; host->max_id = 8; esp = shost_priv(host); esp->host = host; esp->dev = &dev->dev; esp->ops = &sun3x_esp_ops; res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res || !res->start) goto fail_unlink; esp->regs = ioremap(res->start, 0x20); if (!esp->regs) goto fail_unmap_regs; res = platform_get_resource(dev, IORESOURCE_MEM, 1); if (!res || !res->start) goto fail_unmap_regs; esp->dma_regs = ioremap(res->start, 0x10); esp->command_block = dma_alloc_coherent(esp->dev, 16, &esp->command_block_dma, GFP_KERNEL); if (!esp->command_block) goto fail_unmap_regs_dma; host->irq = err = platform_get_irq(dev, 0); if (err < 0) goto fail_unmap_command_block; err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "SUN3X ESP", esp); if (err < 0) goto fail_unmap_command_block; esp->scsi_id = 7; esp->host->this_id = esp->scsi_id; esp->scsi_id_mask = (1 << esp->scsi_id); esp->cfreq = 20000000; dev_set_drvdata(&dev->dev, esp); err = scsi_esp_register(esp); if (err) goto fail_free_irq; return 0; fail_free_irq: free_irq(host->irq, esp); fail_unmap_command_block: dma_free_coherent(esp->dev, 16, esp->command_block, esp->command_block_dma); fail_unmap_regs_dma: iounmap(esp->dma_regs); fail_unmap_regs: iounmap(esp->regs); fail_unlink: scsi_host_put(host); fail: return err; } static int esp_sun3x_remove(struct platform_device *dev) { struct esp *esp = dev_get_drvdata(&dev->dev); unsigned int irq = esp->host->irq; u32 val; scsi_esp_unregister(esp); /* Disable interrupts. 
*/ val = dma_read32(DMA_CSR); dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); free_irq(irq, esp); dma_free_coherent(esp->dev, 16, esp->command_block, esp->command_block_dma); scsi_host_put(esp->host); return 0; } static struct platform_driver esp_sun3x_driver = { .probe = esp_sun3x_probe, .remove = esp_sun3x_remove, .driver = { .name = "sun3x_esp", }, }; module_platform_driver(esp_sun3x_driver); MODULE_DESCRIPTION("Sun3x ESP SCSI driver"); MODULE_AUTHOR("Thomas Bogendoerfer ([email protected])"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:sun3x_esp");
linux-master
drivers/scsi/sun3x_esp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Virtio SCSI HBA driver * * Copyright IBM Corp. 2010 * Copyright Red Hat, Inc. 2011 * * Authors: * Stefan Hajnoczi <[email protected]> * Paolo Bonzini <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/virtio_scsi.h> #include <linux/cpu.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_devinfo.h> #include <linux/seqlock.h> #include <linux/blk-mq-virtio.h> #include "sd.h" #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 #define VIRTIO_SCSI_VQ_BASE 2 /* Command queue element */ struct virtio_scsi_cmd { struct scsi_cmnd *sc; struct completion *comp; union { struct virtio_scsi_cmd_req cmd; struct virtio_scsi_cmd_req_pi cmd_pi; struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_an_req an; } req; union { struct virtio_scsi_cmd_resp cmd; struct virtio_scsi_ctrl_tmf_resp tmf; struct virtio_scsi_ctrl_an_resp an; struct virtio_scsi_event evt; } resp; } ____cacheline_aligned_in_smp; struct virtio_scsi_event_node { struct virtio_scsi *vscsi; struct virtio_scsi_event event; struct work_struct work; }; struct virtio_scsi_vq { /* Protects vq */ spinlock_t vq_lock; struct virtqueue *vq; }; /* Driver instance state */ struct virtio_scsi { struct virtio_device *vdev; /* Get some buffers ready for event vq */ struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; u32 num_queues; struct hlist_node node; /* Protected by event_vq lock */ bool stop_events; struct virtio_scsi_vq ctrl_vq; struct virtio_scsi_vq event_vq; struct virtio_scsi_vq req_vqs[]; }; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) { return vdev->priv; } static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) { if (resid) scsi_set_resid(sc, min(resid, scsi_bufflen(sc))); } /* * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done * * Called with vq_lock held. 
*/ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", sc, resp->response, resp->status, resp->sense_len); sc->result = resp->status; virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid)); switch (resp->response) { case VIRTIO_SCSI_S_OK: set_host_byte(sc, DID_OK); break; case VIRTIO_SCSI_S_OVERRUN: set_host_byte(sc, DID_ERROR); break; case VIRTIO_SCSI_S_ABORTED: set_host_byte(sc, DID_ABORT); break; case VIRTIO_SCSI_S_BAD_TARGET: set_host_byte(sc, DID_BAD_TARGET); break; case VIRTIO_SCSI_S_RESET: set_host_byte(sc, DID_RESET); break; case VIRTIO_SCSI_S_BUSY: set_host_byte(sc, DID_BUS_BUSY); break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: set_host_byte(sc, DID_TRANSPORT_DISRUPTED); break; case VIRTIO_SCSI_S_TARGET_FAILURE: set_host_byte(sc, DID_BAD_TARGET); break; case VIRTIO_SCSI_S_NEXUS_FAILURE: set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT); break; default: scmd_printk(KERN_WARNING, sc, "Unknown response %d", resp->response); fallthrough; case VIRTIO_SCSI_S_FAILURE: set_host_byte(sc, DID_ERROR); break; } WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) > VIRTIO_SCSI_SENSE_SIZE); if (resp->sense_len) { memcpy(sc->sense_buffer, resp->sense, min_t(u32, virtio32_to_cpu(vscsi->vdev, resp->sense_len), VIRTIO_SCSI_SENSE_SIZE)); } scsi_done(sc); } static void virtscsi_vq_done(struct virtio_scsi *vscsi, struct virtio_scsi_vq *virtscsi_vq, void (*fn)(struct virtio_scsi *vscsi, void *buf)) { void *buf; unsigned int len; unsigned long flags; struct virtqueue *vq = virtscsi_vq->vq; spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); do { virtqueue_disable_cb(vq); while ((buf = virtqueue_get_buf(vq, &len)) != NULL) fn(vscsi, buf); if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); } static void virtscsi_req_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); int index = vq->index - VIRTIO_SCSI_VQ_BASE; struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; static void virtscsi_poll_requests(struct virtio_scsi *vscsi) { int i, num_vqs; num_vqs = vscsi->num_queues; for (i = 0; i < num_vqs; i++) virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], virtscsi_complete_cmd); } static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; if (cmd->comp) complete(cmd->comp); } static void virtscsi_ctrl_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; static void virtscsi_handle_event(struct work_struct *work); static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { int err; struct scatterlist sg; unsigned long flags; INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, GFP_ATOMIC); if (!err) virtqueue_kick(vscsi->event_vq.vq); spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); return err; } static int virtscsi_kick_event_all(struct 
virtio_scsi *vscsi) { int i; for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) { vscsi->event_list[i].vscsi = vscsi; virtscsi_kick_event(vscsi, &vscsi->event_list[i]); } return 0; } static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) { int i; /* Stop scheduling work before calling cancel_work_sync. */ spin_lock_irq(&vscsi->event_vq.vq_lock); vscsi->stop_events = true; spin_unlock_irq(&vscsi->event_vq.vq_lock); for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) cancel_work_sync(&vscsi->event_list[i].work); } static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned int target = event->lun[1]; unsigned int lun = (event->lun[2] << 8) | event->lun[3]; switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { case VIRTIO_SCSI_EVT_RESET_RESCAN: if (lun == 0) { scsi_scan_target(&shost->shost_gendev, 0, target, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); } else { scsi_add_device(shost, 0, target, lun); } break; case VIRTIO_SCSI_EVT_RESET_REMOVED: sdev = scsi_device_lookup(shost, 0, target, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } else { pr_err("SCSI device %d 0 %d %d not found\n", shost->host_no, target, lun); } break; default: pr_info("Unsupported virtio scsi event reason %x\n", event->reason); } } static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned int target = event->lun[1]; unsigned int lun = (event->lun[2] << 8) | event->lun[3]; u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255; u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8; sdev = scsi_device_lookup(shost, 0, target, lun); if (!sdev) { pr_err("SCSI device %d 0 %d %d not found\n", shost->host_no, target, lun); return; } /* Handle "Parameters changed", "Mode parameters changed", and "Capacity data has changed". */ if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) scsi_rescan_device(sdev); scsi_device_put(sdev); } static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned char scsi_cmd[MAX_COMMAND_SIZE]; int result, inquiry_len, inq_result_len = 256; char *inq_result = kmalloc(inq_result_len, GFP_KERNEL); if (!inq_result) return -ENOMEM; shost_for_each_device(sdev, shost) { inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; memset(scsi_cmd, 0, sizeof(scsi_cmd)); scsi_cmd[0] = INQUIRY; scsi_cmd[4] = (unsigned char) inquiry_len; memset(inq_result, 0, inq_result_len); result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, inq_result, inquiry_len, SD_TIMEOUT, SD_MAX_RETRIES, NULL); if (result == 0 && inq_result[0] >> 5) { /* PQ indicates the LUN is not attached */ scsi_remove_device(sdev); } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) { /* * If all LUNs of a virtio-scsi device are unplugged * it will respond with BAD TARGET on any INQUIRY * command. * Remove the device in this case as well. 
*/ scsi_remove_device(sdev); } } kfree(inq_result); return 0; } static void virtscsi_handle_event(struct work_struct *work) { struct virtio_scsi_event_node *event_node = container_of(work, struct virtio_scsi_event_node, work); struct virtio_scsi *vscsi = event_node->vscsi; struct virtio_scsi_event *event = &event_node->event; if (event->event & cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { int ret; event->event &= ~cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED); ret = virtscsi_rescan_hotunplug(vscsi); if (ret) return; scsi_scan_host(virtio_scsi_host(vscsi->vdev)); } switch (virtio32_to_cpu(vscsi->vdev, event->event)) { case VIRTIO_SCSI_T_NO_EVENT: break; case VIRTIO_SCSI_T_TRANSPORT_RESET: virtscsi_handle_transport_reset(vscsi, event); break; case VIRTIO_SCSI_T_PARAM_CHANGE: virtscsi_handle_param_change(vscsi, event); break; default: pr_err("Unsupported virtio scsi event %x\n", event->event); } virtscsi_kick_event(vscsi, event_node); } static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; if (!vscsi->stop_events) queue_work(system_freezable_wq, &event_node->work); } static void virtscsi_event_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); }; static int __virtscsi_add_cmd(struct virtqueue *vq, struct virtio_scsi_cmd *cmd, size_t req_size, size_t resp_size) { struct scsi_cmnd *sc = cmd->sc; struct scatterlist *sgs[6], req, resp; struct sg_table *out, *in; unsigned out_num = 0, in_num = 0; out = in = NULL; if (sc && sc->sc_data_direction != DMA_NONE) { if (sc->sc_data_direction != DMA_FROM_DEVICE) out = &sc->sdb.table; if (sc->sc_data_direction != DMA_TO_DEVICE) in = &sc->sdb.table; } /* Request header. */ sg_init_one(&req, &cmd->req, req_size); sgs[out_num++] = &req; /* Data-out buffer. */ if (out) { /* Place WRITE protection SGLs before Data OUT payload */ if (scsi_prot_sg_count(sc)) sgs[out_num++] = scsi_prot_sglist(sc); sgs[out_num++] = out->sgl; } /* Response header. 
*/ sg_init_one(&resp, &cmd->resp, resp_size); sgs[out_num + in_num++] = &resp; /* Data-in buffer */ if (in) { /* Place READ protection SGLs before Data IN payload */ if (scsi_prot_sg_count(sc)) sgs[out_num + in_num++] = scsi_prot_sglist(sc); sgs[out_num + in_num++] = in->sgl; } return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); } static void virtscsi_kick_vq(struct virtio_scsi_vq *vq) { bool needs_kick; unsigned long flags; spin_lock_irqsave(&vq->vq_lock, flags); needs_kick = virtqueue_kick_prepare(vq->vq); spin_unlock_irqrestore(&vq->vq_lock, flags); if (needs_kick) virtqueue_notify(vq->vq); } /** * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it * @vq : the struct virtqueue we're talking about * @cmd : command structure * @req_size : size of the request buffer * @resp_size : size of the response buffer * @kick : whether to kick the virtqueue immediately */ static int virtscsi_add_cmd(struct virtio_scsi_vq *vq, struct virtio_scsi_cmd *cmd, size_t req_size, size_t resp_size, bool kick) { unsigned long flags; int err; bool needs_kick = false; spin_lock_irqsave(&vq->vq_lock, flags); err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); if (!err && kick) needs_kick = virtqueue_kick_prepare(vq->vq); spin_unlock_irqrestore(&vq->vq_lock, flags); if (needs_kick) virtqueue_notify(vq->vq); return err; } static void virtio_scsi_init_hdr(struct virtio_device *vdev, struct virtio_scsi_cmd_req *cmd, struct scsi_cmnd *sc) { cmd->lun[0] = 1; cmd->lun[1] = sc->device->id; cmd->lun[2] = (sc->device->lun >> 8) | 0x40; cmd->lun[3] = sc->device->lun & 0xff; cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc); cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; cmd->prio = 0; cmd->crn = 0; } #ifdef CONFIG_BLK_DEV_INTEGRITY static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, struct virtio_scsi_cmd_req_pi *cmd_pi, struct scsi_cmnd *sc) { struct request *rq = scsi_cmd_to_rq(sc); struct blk_integrity *bi; virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); if (!rq || !scsi_prot_sg_count(sc)) return; bi = blk_get_integrity(rq->q->disk); if (sc->sc_data_direction == DMA_TO_DEVICE) cmd_pi->pi_bytesout = cpu_to_virtio32(vdev, bio_integrity_bytes(bi, blk_rq_sectors(rq))); else if (sc->sc_data_direction == DMA_FROM_DEVICE) cmd_pi->pi_bytesin = cpu_to_virtio32(vdev, bio_integrity_bytes(bi, blk_rq_sectors(rq))); } #endif static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, struct scsi_cmnd *sc) { u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc)); u16 hwq = blk_mq_unique_tag_to_hwq(tag); return &vscsi->req_vqs[hwq]; } static int virtscsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(shost); struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); bool kick; unsigned long flags; int req_size; int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); /* TODO: check feature bit and fail if unsupported? 
*/ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); cmd->sc = sc; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd_pi); } else #endif { virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd); } kick = (sc->flags & SCMD_LAST) != 0; ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick); if (ret == -EIO) { cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; spin_lock_irqsave(&req_vq->vq_lock, flags); virtscsi_complete_cmd(vscsi, cmd); spin_unlock_irqrestore(&req_vq->vq_lock, flags); } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; } return 0; } static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) { DECLARE_COMPLETION_ONSTACK(comp); int ret = FAILED; cmd->comp = &comp; if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd, sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0) goto out; wait_for_completion(&comp); if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; /* * The spec guarantees that all requests related to the TMF have * been completed, but the callback might not have run yet if * we're using independent interrupts (e.g. MSI). Poll the * virtqueues once. * * In the abort case, scsi_done() will do nothing, because the * command timed out and hence SCMD_STATE_COMPLETE has been set. */ virtscsi_poll_requests(vscsi); out: mempool_free(cmd, virtscsi_cmd_pool); return ret; } static int virtscsi_device_reset(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; sdev_printk(KERN_INFO, sc->device, "device reset\n"); cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; memset(cmd, 0, sizeof(*cmd)); cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET), .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, }; return virtscsi_tmf(vscsi, cmd); } static int virtscsi_device_alloc(struct scsi_device *sdevice) { /* * Passed through SCSI targets (e.g. with qemu's 'scsi-block') * may have transfer limits which come from the host SCSI * controller or something on the host side other than the * target itself. * * To make this work properly, the hypervisor can adjust the * target's VPD information to advertise these limits. But * for that to work, the guest has to look at the VPD pages, * which we won't do by default if it is an SPC-2 device, even * if it does actually support it. * * So, set the blist to always try to read the VPD pages. 
*/ sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES; return 0; } /** * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth * @sdev: Virtscsi target whose queue depth to change * @qdepth: New queue depth */ static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth = shost->cmd_per_lun; return scsi_change_queue_depth(sdev, min(max_depth, qdepth)); } static int virtscsi_abort(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; scmd_printk(KERN_INFO, sc, "abort\n"); cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; memset(cmd, 0, sizeof(*cmd)); cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc), }; return virtscsi_tmf(vscsi, cmd); } static void virtscsi_map_queues(struct Scsi_Host *shost) { struct virtio_scsi *vscsi = shost_priv(shost); struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2); } static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) { struct virtio_scsi *vscsi = shost_priv(shost); virtscsi_kick_vq(&vscsi->req_vqs[hwq]); } /* * The host guarantees to respond to each command, although I/O * latencies might be higher than on bare metal. Reset the timer * unconditionally to give the host a chance to perform EH. */ static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) { return SCSI_EH_RESET_TIMER; } static const struct scsi_host_template virtscsi_host_template = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", .proc_name = "virtio_scsi", .this_id = -1, .cmd_size = sizeof(struct virtio_scsi_cmd), .queuecommand = virtscsi_queuecommand, .commit_rqs = virtscsi_commit_rqs, .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, .eh_timed_out = virtscsi_eh_timed_out, .slave_alloc = virtscsi_device_alloc, .dma_boundary = UINT_MAX, .map_queues = virtscsi_map_queues, .track_queue_depth = 1, }; #define virtscsi_config_get(vdev, fld) \ ({ \ __virtio_native_type(struct virtio_scsi_config, fld) __val; \ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ __val; \ }) #define virtscsi_config_set(vdev, fld, val) \ do { \ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, struct virtqueue *vq) { spin_lock_init(&virtscsi_vq->vq_lock); virtscsi_vq->vq = vq; } static void virtscsi_remove_vqs(struct virtio_device *vdev) { /* Stop all the virtqueues. 
*/ virtio_reset_device(vdev); vdev->config->del_vqs(vdev); } static int virtscsi_init(struct virtio_device *vdev, struct virtio_scsi *vscsi) { int err; u32 i; u32 num_vqs; vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; struct irq_affinity desc = { .pre_vectors = 2 }; num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL); callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *), GFP_KERNEL); names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL); if (!callbacks || !vqs || !names) { err = -ENOMEM; goto out; } callbacks[0] = virtscsi_ctrl_done; callbacks[1] = virtscsi_event_done; names[0] = "control"; names[1] = "event"; for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { callbacks[i] = virtscsi_req_done; names[i] = "request"; } /* Discover virtqueues and write information to configuration. */ err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); if (err) goto out; virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); virtscsi_init_vq(&vscsi->event_vq, vqs[1]); for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], vqs[i]); virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); err = 0; out: kfree(names); kfree(callbacks); kfree(vqs); if (err) virtscsi_remove_vqs(vdev); return err; } static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; int err; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; } /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); num_targets = virtscsi_config_get(vdev, max_target) + 1; shost = scsi_host_alloc(&virtscsi_host_template, struct_size(vscsi, req_vqs, num_queues)); if (!shost) return -ENOMEM; sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; shost->sg_tablesize = sg_elems; vscsi = shost_priv(shost); vscsi->vdev = vdev; vscsi->num_queues = num_queues; vdev->priv = shost; err = virtscsi_init(vdev, vscsi); if (err) goto virtscsi_init_failed; shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq); cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; /* LUNs > 256 are reported with format 1, so they go in the range * 16640-32767. 
*/ shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; shost->max_id = num_targets; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; shost->nr_hw_queues = num_queues; #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { int host_prot; host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; scsi_host_set_prot(shost, host_prot); scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); } #endif err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_kick_event_all(vscsi); scsi_scan_host(shost); return 0; scsi_add_host_failed: vdev->config->del_vqs(vdev); virtscsi_init_failed: scsi_host_put(shost); return err; } static void virtscsi_remove(struct virtio_device *vdev) { struct Scsi_Host *shost = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(shost); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_cancel_event_work(vscsi); scsi_remove_host(shost); virtscsi_remove_vqs(vdev); scsi_host_put(shost); } #ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { virtscsi_remove_vqs(vdev); return 0; } static int virtscsi_restore(struct virtio_device *vdev) { struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); int err; err = virtscsi_init(vdev, vscsi); if (err) return err; virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_kick_event_all(vscsi); return err; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_CHANGE, #ifdef CONFIG_BLK_DEV_INTEGRITY VIRTIO_SCSI_F_T10_PI, #endif }; static struct virtio_driver virtio_scsi_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtscsi_probe, #ifdef CONFIG_PM_SLEEP .freeze = virtscsi_freeze, .restore = virtscsi_restore, #endif .remove = virtscsi_remove, }; static int __init virtio_scsi_init(void) { int ret = -ENOMEM; virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); if (!virtscsi_cmd_cache) { pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); goto error; } virtscsi_cmd_pool = mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, virtscsi_cmd_cache); if (!virtscsi_cmd_pool) { pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; return 0; error: mempool_destroy(virtscsi_cmd_pool); virtscsi_cmd_pool = NULL; kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; return ret; } static void __exit virtio_scsi_fini(void) { unregister_virtio_driver(&virtio_scsi_driver); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } module_init(virtio_scsi_init); module_exit(virtio_scsi_fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio SCSI HBA driver"); MODULE_LICENSE("GPL");
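The LUN bytes filled in by virtio_scsi_init_hdr() and the max_lun arithmetic in virtscsi_probe() above both use the SAM flat-space ("format 1") single-level LUN convention. The stand-alone sketch below mirrors that packing so the 0x40 bit and the 16640-32767 range quoted in the probe comment are easier to verify; it is an illustration only, and the helper name pack_virtio_scsi_lun is hypothetical rather than part of the driver.

#include <stdint.h>
#include <stdio.h>

static void pack_virtio_scsi_lun(uint8_t out[8], uint8_t target, uint16_t lun)
{
	out[0] = 1;			/* single-level LUN addressing */
	out[1] = target;		/* SCSI target id */
	out[2] = (lun >> 8) | 0x40;	/* flat-space ("format 1") LUN, high byte */
	out[3] = lun & 0xff;		/* flat-space LUN, low byte */
	out[4] = out[5] = out[6] = out[7] = 0;
}

int main(void)
{
	uint8_t buf[8];

	pack_virtio_scsi_lun(buf, 2, 300);
	printf("lun bytes: %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	/* for LUNs of 256 and above, bytes 2-3 read as a 16-bit value land in
	 * 0x4100..0x7fff, i.e. the 16640-32767 range mentioned in virtscsi_probe() */
	return 0;
}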
linux-master
drivers/scsi/virtio_scsi.c
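virtscsi_pick_vq_mq() in the file above routes each command to a request virtqueue by extracting the hardware-queue index from blk_mq_unique_tag(). The sketch below assumes the conventional packing in which the upper 16 bits carry the hardware queue number and the lower 16 bits carry the per-queue tag; the helper names are hypothetical and this is not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS	16
#define UNIQUE_TAG_MASK	((1u << UNIQUE_TAG_BITS) - 1)

/* Combine a hardware queue index and a per-queue tag into one value. */
static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

/* Recover the hardware queue index, as the driver does to pick its virtqueue. */
static uint16_t unique_tag_to_hwq(uint32_t unique)
{
	return unique >> UNIQUE_TAG_BITS;
}

static uint16_t unique_tag_to_tag(uint32_t unique)
{
	return unique & UNIQUE_TAG_MASK;
}

int main(void)
{
	uint32_t u = make_unique_tag(3, 42);

	printf("hwq=%u tag=%u\n", (unsigned)unique_tag_to_hwq(u),
	       (unsigned)unique_tag_to_tag(u));
	return 0;
}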
/*
 * Qlogic FAS408 ISA card driver
 *
 * Copyright 1994, Tom Zerucha.
 * [email protected]
 *
 * Redistributable under terms of the GNU General Public License
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 *
 * Check qlogicfas408.c for more credits and info.
 */

#include <linux/module.h>
#include <linux/blkdev.h>	/* to get disk capacity */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/stat.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"

/* Set the following to 2 to use normal interrupt (active high/totempole-
 * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open
 * drain
 */
#define INT_TYPE	2

static char qlogicfas_name[] = "qlogicfas";

/*
 * Look for qlogic card and init if found
 */
static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host,
					    int qbase, int qlirq)
{
	int qltyp;		/* type of chip */
	int qinitid;
	struct Scsi_Host *hreg;	/* registered host structure */
	struct qlogicfas408_priv *priv;

	/* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself
	 * decodes the address - I check 230 first since MIDI cards are
	 * typically at 0x330
	 *
	 * Theoretically, two Qlogic cards can coexist in the same system.
	 * This should work by simply using this as a loadable module for
	 * the second card, but I haven't tested this.
	 */
	if (!qbase || qlirq == -1)
		goto err;

	if (!request_region(qbase, 0x10, qlogicfas_name)) {
		printk(KERN_INFO "%s: address %#x is busy\n",
		       qlogicfas_name, qbase);
		goto err;
	}

	if (!qlogicfas408_detect(qbase, INT_TYPE)) {
		printk(KERN_WARNING "%s: probe failed for %#x\n",
		       qlogicfas_name, qbase);
		goto err_release_mem;
	}

	printk(KERN_INFO "%s: Using preset base address of %03x,"
			 " IRQ %d\n", qlogicfas_name, qbase, qlirq);

	qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
	qinitid = host->this_id;
	if (qinitid < 0)
		qinitid = 7;	/* if no ID, use 7 */

	qlogicfas408_setup(qbase, qinitid, INT_TYPE);

	hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
	if (!hreg)
		goto err_release_mem;
	priv = get_priv_by_host(hreg);
	hreg->io_port = qbase;
	hreg->n_io_port = 16;
	hreg->dma_channel = -1;
	if (qlirq != -1)
		hreg->irq = qlirq;
	priv->qbase = qbase;
	priv->qlirq = qlirq;
	priv->qinitid = qinitid;
	priv->shost = hreg;
	priv->int_type = INT_TYPE;

	sprintf(priv->qinfo,
		"Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
		qltyp, qbase, qlirq, QL_TURBO_PDMA);
	host->name = qlogicfas_name;

	if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg))
		goto free_scsi_host;

	if (scsi_add_host(hreg, NULL))
		goto free_interrupt;

	scsi_scan_host(hreg);

	return hreg;

free_interrupt:
	free_irq(qlirq, hreg);

free_scsi_host:
	scsi_host_put(hreg);

err_release_mem:
	release_region(qbase, 0x10);
err:
	return NULL;
}

#define MAX_QLOGICFAS	8
static struct qlogicfas408_priv *cards;
static int iobase[MAX_QLOGICFAS];
static int irq[MAX_QLOGICFAS] = { [0 ... MAX_QLOGICFAS-1] = -1 };
module_param_hw_array(iobase, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
MODULE_PARM_DESC(iobase, "I/O address");
MODULE_PARM_DESC(irq, "IRQ");

static int qlogicfas_detect(struct scsi_host_template *sht)
{
	struct Scsi_Host *shost;
	struct qlogicfas408_priv *priv;
	int num;

	for (num = 0; num < MAX_QLOGICFAS; num++) {
		shost = __qlogicfas_detect(sht, iobase[num], irq[num]);
		if (shost == NULL) {
			/* no more devices */
			break;
		}
		priv = get_priv_by_host(shost);
		priv->next = cards;
		cards = priv;
	}

	return num;
}

static int qlogicfas_release(struct Scsi_Host *shost)
{
	struct qlogicfas408_priv *priv = get_priv_by_host(shost);

	scsi_remove_host(shost);
	if (shost->irq) {
		qlogicfas408_disable_ints(priv);
		free_irq(shost->irq, shost);
	}
	if (shost->io_port && shost->n_io_port)
		release_region(shost->io_port, shost->n_io_port);
	scsi_host_put(shost);
	return 0;
}

/*
 * The driver template is also needed for PCMCIA
 */
static struct scsi_host_template qlogicfas_driver_template = {
	.module			= THIS_MODULE,
	.name			= qlogicfas_name,
	.proc_name		= qlogicfas_name,
	.info			= qlogicfas408_info,
	.queuecommand		= qlogicfas408_queuecommand,
	.eh_abort_handler	= qlogicfas408_abort,
	.eh_host_reset_handler	= qlogicfas408_host_reset,
	.bios_param		= qlogicfas408_biosparam,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.dma_boundary		= PAGE_SIZE - 1,
};

static __init int qlogicfas_init(void)
{
	if (!qlogicfas_detect(&qlogicfas_driver_template)) {
		/* no cards found */
		printk(KERN_INFO "%s: no cards were found, please specify "
				 "I/O address and IRQ using iobase= and irq= "
				 "options", qlogicfas_name);
		return -ENODEV;
	}

	return 0;
}

static __exit void qlogicfas_exit(void)
{
	struct qlogicfas408_priv *priv;

	for (priv = cards; priv != NULL; priv = priv->next)
		qlogicfas_release(priv->shost);
}

MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
MODULE_DESCRIPTION("Driver for the Qlogic FAS408 based ISA card");
MODULE_LICENSE("GPL");

module_init(qlogicfas_init);
module_exit(qlogicfas_exit);
linux-master
drivers/scsi/qlogicfas.c
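__qlogicfas_detect() in the file above acquires its resources in order (I/O region, chip probe, host allocation, IRQ, host registration) and unwinds them with a ladder of goto labels when a later step fails. A minimal user-space sketch of that idiom follows; every name in it (fake_card, claim_io, and so on) is hypothetical and only illustrates the pattern.

#include <stdio.h>
#include <stdlib.h>

struct fake_card {
	int io_claimed;
};

static int claim_io(struct fake_card *c)
{
	c->io_claimed = 1;
	return 0;
}

static int claim_irq(void)
{
	return -1;	/* simulate request_irq() failing */
}

static void release_io(struct fake_card *c)
{
	c->io_claimed = 0;
}

static struct fake_card *probe_one(void)
{
	struct fake_card *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	if (claim_io(c))
		goto err_free;
	if (claim_irq())
		goto err_release_io;
	return c;		/* fully set up */

err_release_io:
	release_io(c);		/* undo only what was already claimed */
err_free:
	free(c);
	return NULL;
}

int main(void)
{
	struct fake_card *c = probe_one();

	printf("probe %s\n", c ? "succeeded" : "failed, resources unwound");
	free(c);
	return 0;
}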
// SPDX-License-Identifier: GPL-2.0-or-later

/* SNI RM driver
 *
 * Copyright (C) 2001 by [email protected]
**-----------------------------------------------------------------------------
**
**
**-----------------------------------------------------------------------------
 */

/*
 * Based on lasi700.c
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#include <asm/page.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>

#include "53c700.h"

MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:snirm_53c710");

#define SNIRM710_CLOCK	32

static struct scsi_host_template snirm710_template = {
	.name		= "SNI RM SCSI 53c710",
	.proc_name	= "snirm_53c710",
	.this_id	= 7,
	.module		= THIS_MODULE,
};

static int snirm710_probe(struct platform_device *dev)
{
	unsigned long base;
	struct NCR_700_Host_Parameters *hostdata;
	struct Scsi_Host *host;
	struct resource *res;
	int rc;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = res->start;
	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
	if (!hostdata)
		return -ENOMEM;

	hostdata->dev = &dev->dev;
	dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
	hostdata->base = ioremap(base, 0x100);
	hostdata->differential = 0;

	hostdata->clock = SNIRM710_CLOCK;
	hostdata->force_le_on_be = 1;
	hostdata->chip710 = 1;
	hostdata->burst_length = 4;

	host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev);
	if (!host)
		goto out_kfree;
	host->this_id = 7;
	host->base = base;
	host->irq = rc = platform_get_irq(dev, 0);
	if (rc < 0)
		goto out_put_host;

	if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
		printk(KERN_ERR "snirm710: request_irq failed!\n");
		goto out_put_host;
	}

	dev_set_drvdata(&dev->dev, host);
	scsi_scan_host(host);

	return 0;

out_put_host:
	scsi_host_put(host);
out_kfree:
	iounmap(hostdata->base);
	kfree(hostdata);
	return -ENODEV;
}

static int snirm710_driver_remove(struct platform_device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)host->hostdata[0];

	scsi_remove_host(host);
	NCR_700_release(host);
	free_irq(host->irq, host);
	iounmap(hostdata->base);
	kfree(hostdata);

	return 0;
}

static struct platform_driver snirm710_driver = {
	.probe	= snirm710_probe,
	.remove	= snirm710_driver_remove,
	.driver	= {
		.name	= "snirm_53c710",
	},
};

module_platform_driver(snirm710_driver);
linux-master
drivers/scsi/sni_53c710.c
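snirm710_probe() in the file above sets hostdata->force_le_on_be before handing the mapped registers to the 53c700 core, which I read as a request for little-endian register access on the big-endian SNI RM machines. The sketch below shows the usual way to read a little-endian 32-bit value independently of host byte order; get_le32 is a hypothetical stand-in, not the kernel's readl()/le32_to_cpu().

#include <stdint.h>
#include <stdio.h>

/* Interpret four bytes laid out in little-endian device order. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* bytes as a little-endian register window might present them */
	const uint8_t regs[4] = { 0x78, 0x56, 0x34, 0x12 };

	/* prints 0x12345678 regardless of host byte order */
	printf("value = 0x%08x\n", (unsigned int)get_le32(regs));
	return 0;
}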
// SPDX-License-Identifier: GPL-2.0 /* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * [email protected] * +1 (303) 666-5836 * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * With contributions from Ray Van Tassle, Ingmar Baumgart, * Ronald van Cuijlenborg, Alan Cox and others. */ /* Ported to Atari by Roman Hodek and others. */ /* Adapted for the Sun 3 by Sam Creasey. */ /* * Design * * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. * * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. [...] These * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started from a workqueue for each NCR5380 host in the * system. It attempts to establish I_T_L or I_T_L_Q nexuses by * removing the commands from the issue queue and calling * NCR5380_select() if a nexus is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If the target is * idle for too long, the system will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. * * On command termination, the done function will be called as * appropriate. * * The command data pointer is initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. */ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series * of chips. 
To use it, you write an architecture specific functions * and macros and include this file in your driver. * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register * * NCR5380_write(register, value) - write to the specific register * * NCR5380_implementation_fields - additional fields needed for this * specific implementation of the NCR5380 * * Either real DMA *or* pseudo DMA may be implemented * * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380 * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory * NCR5380_dma_residual - residual byte count * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. */ #ifndef NCR5380_io_delay #define NCR5380_io_delay(x) #endif #ifndef NCR5380_acquire_dma_irq #define NCR5380_acquire_dma_irq(x) (1) #endif #ifndef NCR5380_release_dma_irq #define NCR5380_release_dma_irq(x) #endif static unsigned int disconnect_mask = ~0; module_param(disconnect_mask, int, 0444); static int do_abort(struct Scsi_Host *, unsigned int); static void do_reset(struct Scsi_Host *); static void bus_reset_cleanup(struct Scsi_Host *); /** * initialize_SCp - init the scsi pointer field * @cmd: command block to set up * * Set up the internal fields in the SCSI command. */ static inline void initialize_SCp(struct scsi_cmnd *cmd) { struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); if (scsi_bufflen(cmd)) { ncmd->buffer = scsi_sglist(cmd); ncmd->ptr = sg_virt(ncmd->buffer); ncmd->this_residual = ncmd->buffer->length; } else { ncmd->buffer = NULL; ncmd->ptr = NULL; ncmd->this_residual = 0; } ncmd->status = 0; ncmd->message = 0; } static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd) { struct scatterlist *s = ncmd->buffer; if (!ncmd->this_residual && s && !sg_is_last(s)) { ncmd->buffer = sg_next(s); ncmd->ptr = sg_virt(ncmd->buffer); ncmd->this_residual = ncmd->buffer->length; } } static inline void set_resid_from_SCp(struct scsi_cmnd *cmd) { struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); int resid = ncmd->this_residual; struct scatterlist *s = ncmd->buffer; if (s) while (!sg_is_last(s)) { s = sg_next(s); resid += s->length; } scsi_set_resid(cmd, resid); } /** * NCR5380_poll_politely2 - wait for two chip register values * @hostdata: host private data * @reg1: 5380 register to poll * @bit1: Bitmask to check * @val1: Expected value * @reg2: Second 5380 register to poll * @bit2: Second bitmask to check * @val2: Second expected value * @wait: Time-out in jiffies, 0 if sleeping is not allowed * * Polls the chip in a reasonably efficient manner waiting for an * event to occur. After a short quick poll we begin to yield the CPU * (if possible). In irq contexts the time-out is arbitrarily limited. * Callers may hold locks as long as they are held in irq mode. * * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. 
*/ static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata, unsigned int reg1, u8 bit1, u8 val1, unsigned int reg2, u8 bit2, u8 val2, unsigned long wait) { unsigned long n = hostdata->poll_loops; unsigned long deadline = jiffies + wait; do { if ((NCR5380_read(reg1) & bit1) == val1) return 0; if ((NCR5380_read(reg2) & bit2) == val2) return 0; cpu_relax(); } while (n--); if (!wait) return -ETIMEDOUT; /* Repeatedly sleep for 1 ms until deadline */ while (time_is_after_jiffies(deadline)) { schedule_timeout_uninterruptible(1); if ((NCR5380_read(reg1) & bit1) == val1) return 0; if ((NCR5380_read(reg2) & bit2) == val2) return 0; } return -ETIMEDOUT; } #if NDEBUG static struct { unsigned char mask; const char *name; } signals[] = { {SR_DBP, "PARITY"}, {SR_RST, "RST"}, {SR_BSY, "BSY"}, {SR_REQ, "REQ"}, {SR_MSG, "MSG"}, {SR_CD, "CD"}, {SR_IO, "IO"}, {SR_SEL, "SEL"}, {0, NULL} }, basrs[] = { {BASR_END_DMA_TRANSFER, "END OF DMA"}, {BASR_DRQ, "DRQ"}, {BASR_PARITY_ERROR, "PARITY ERROR"}, {BASR_IRQ, "IRQ"}, {BASR_PHASE_MATCH, "PHASE MATCH"}, {BASR_BUSY_ERROR, "BUSY ERROR"}, {BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL} }, icrs[] = { {ICR_ASSERT_RST, "ASSERT RST"}, {ICR_ARBITRATION_PROGRESS, "ARB. IN PROGRESS"}, {ICR_ARBITRATION_LOST, "LOST ARB."}, {ICR_ASSERT_ACK, "ASSERT ACK"}, {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"}, {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"}, {0, NULL} }, mrs[] = { {MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"}, {MR_TARGET, "TARGET"}, {MR_ENABLE_PAR_CHECK, "PARITY CHECK"}, {MR_ENABLE_PAR_INTR, "PARITY INTR"}, {MR_ENABLE_EOP_INTR, "EOP INTR"}, {MR_MONITOR_BSY, "MONITOR BSY"}, {MR_DMA_MODE, "DMA MODE"}, {MR_ARBITRATE, "ARBITRATE"}, {0, NULL} }; /** * NCR5380_print - print scsi bus signals * @instance: adapter state to dump * * Print the SCSI bus signals for debugging purposes */ static void NCR5380_print(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char status, basr, mr, icr, i; status = NCR5380_read(STATUS_REG); mr = NCR5380_read(MODE_REG); icr = NCR5380_read(INITIATOR_COMMAND_REG); basr = NCR5380_read(BUS_AND_STATUS_REG); printk(KERN_DEBUG "SR = 0x%02x : ", status); for (i = 0; signals[i].mask; ++i) if (status & signals[i].mask) printk(KERN_CONT "%s, ", signals[i].name); printk(KERN_CONT "\nBASR = 0x%02x : ", basr); for (i = 0; basrs[i].mask; ++i) if (basr & basrs[i].mask) printk(KERN_CONT "%s, ", basrs[i].name); printk(KERN_CONT "\nICR = 0x%02x : ", icr); for (i = 0; icrs[i].mask; ++i) if (icr & icrs[i].mask) printk(KERN_CONT "%s, ", icrs[i].name); printk(KERN_CONT "\nMR = 0x%02x : ", mr); for (i = 0; mrs[i].mask; ++i) if (mr & mrs[i].mask) printk(KERN_CONT "%s, ", mrs[i].name); printk(KERN_CONT "\n"); } static struct { unsigned char value; const char *name; } phases[] = { {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"}, {PHASE_UNKNOWN, "UNKNOWN"} }; /** * NCR5380_print_phase - show SCSI phase * @instance: adapter to dump * * Print the current SCSI phase for debugging purposes */ static void NCR5380_print_phase(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char status; int i; status = NCR5380_read(STATUS_REG); if (!(status & SR_REQ)) shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); else { for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i) ; 
shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); } } #endif /** * NCR5380_info - report driver and host information * @instance: relevant scsi host instance * * For use as the host template info() handler. */ static const char *NCR5380_info(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); return hostdata->info; } /** * NCR5380_init - initialise an NCR5380 * @instance: adapter to configure * @flags: control flags * * Initializes *instance and corresponding 5380 chip, * with flags OR'd into the initial flags value. * * Notes : I assume that the host, hostno, and id bits have been * set correctly. I don't care about the irq and other fields. * * Returns 0 for success */ static int NCR5380_init(struct Scsi_Host *instance, int flags) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; unsigned long deadline; unsigned long accesses_per_ms; instance->max_lun = 7; hostdata->host = instance; hostdata->id_mask = 1 << instance->this_id; hostdata->id_higher_mask = 0; for (i = hostdata->id_mask; i <= 0x80; i <<= 1) if (i > hostdata->id_mask) hostdata->id_higher_mask |= i; for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; hostdata->dma_len = 0; spin_lock_init(&hostdata->lock); hostdata->connected = NULL; hostdata->sensing = NULL; INIT_LIST_HEAD(&hostdata->autosense); INIT_LIST_HEAD(&hostdata->unissued); INIT_LIST_HEAD(&hostdata->disconnected); hostdata->flags = flags; INIT_WORK(&hostdata->main_task, NCR5380_main); hostdata->work_q = alloc_workqueue("ncr5380_%d", WQ_UNBOUND | WQ_MEM_RECLAIM, 0, instance->host_no); if (!hostdata->work_q) return -ENOMEM; snprintf(hostdata->info, sizeof(hostdata->info), "%s, irq %d, io_port 0x%lx, base 0x%lx, can_queue %d, cmd_per_lun %d, sg_tablesize %d, this_id %d, flags { %s%s%s}", instance->hostt->name, instance->irq, hostdata->io_port, hostdata->base, instance->can_queue, instance->cmd_per_lun, instance->sg_tablesize, instance->this_id, hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : ""); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); /* Calibrate register polling loop */ i = 0; deadline = jiffies + 1; do { cpu_relax(); } while (time_is_after_jiffies(deadline)); deadline += msecs_to_jiffies(256); do { NCR5380_read(STATUS_REG); ++i; cpu_relax(); } while (time_is_after_jiffies(deadline)); accesses_per_ms = i / 256; hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2; return 0; } /** * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems. * @instance: adapter to check * * If the system crashed, it may have crashed with a connected target and * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the * currently established nexus, which we know nothing about. Failing that * do a bus reset. * * Note that a bus reset will cause the chip to assert IRQ. * * Returns 0 if successful, otherwise -ENXIO. 
*/ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int pass; for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) { switch (pass) { case 1: case 3: case 5: shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, 0, 5 * HZ); break; case 2: shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n"); do_abort(instance, 1); break; case 4: shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n"); do_reset(instance); /* Wait after a reset; the SCSI standard calls for * 250ms, we wait 500ms to be on the safe side. * But some Toshiba CD-ROMs need ten times that. */ if (hostdata->flags & FLAG_TOSHIBA_DELAY) msleep(2500); else msleep(500); break; case 6: shost_printk(KERN_ERR, instance, "bus locked solid\n"); return -ENXIO; } } return 0; } /** * NCR5380_exit - remove an NCR5380 * @instance: adapter to remove * * Assumes that no more work can be queued (e.g. by NCR5380_intr). */ static void NCR5380_exit(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); cancel_work_sync(&hostdata->main_task); destroy_workqueue(hostdata->work_q); } /** * complete_cmd - finish processing a command and return it to the SCSI ML * @instance: the host instance * @cmd: command to complete */ static void complete_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) { struct NCR5380_hostdata *hostdata = shost_priv(instance); dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); if (hostdata->sensing == cmd) { /* Autosense processing ends here */ if (get_status_byte(cmd) != SAM_STAT_GOOD) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); } else { scsi_eh_restore_cmnd(cmd, &hostdata->ses); set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); } hostdata->sensing = NULL; } scsi_done(cmd); } /** * NCR5380_queue_command - queue a command * @instance: the relevant SCSI adapter * @cmd: SCSI command * * cmd is added to the per-instance issue queue, with minor * twiddling done to the host specific fields of cmd. If the * main coroutine is not running, it is restarted. */ static int NCR5380_queue_command(struct Scsi_Host *instance, struct scsi_cmnd *cmd) { struct NCR5380_hostdata *hostdata = shost_priv(instance); struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); unsigned long flags; #if (NDEBUG & NDEBUG_NO_WRITE) switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); cmd->result = (DID_ERROR << 16); scsi_done(cmd); return 0; } #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ cmd->result = 0; spin_lock_irqsave(&hostdata->lock, flags); if (!NCR5380_acquire_dma_irq(instance)) { spin_unlock_irqrestore(&hostdata->lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } /* * Insert the cmd into the issue queue. Note that REQUEST SENSE * commands are added to the head of the queue since any command will * clear the contingent allegiance condition that exists and the * sense data is only guaranteed to be valid while the condition exists. */ if (cmd->cmnd[0] == REQUEST_SENSE) list_add(&ncmd->list, &hostdata->unissued); else list_add_tail(&ncmd->list, &hostdata->unissued); spin_unlock_irqrestore(&hostdata->lock, flags); dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n", cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? 
"head" : "tail"); /* Kick off command processing */ queue_work(hostdata->work_q, &hostdata->main_task); return 0; } static inline void maybe_release_dma_irq(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); /* Caller does the locking needed to set & test these data atomically */ if (list_empty(&hostdata->disconnected) && list_empty(&hostdata->unissued) && list_empty(&hostdata->autosense) && !hostdata->connected && !hostdata->selecting) { NCR5380_release_dma_irq(instance); } } /** * dequeue_next_cmd - dequeue a command for processing * @instance: the scsi host instance * * Priority is given to commands on the autosense queue. These commands * need autosense because of a CHECK CONDITION result. * * Returns a command pointer if a command is found for a target that is * not already busy. Otherwise returns NULL. */ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); struct NCR5380_cmd *ncmd; struct scsi_cmnd *cmd; if (hostdata->sensing || list_empty(&hostdata->autosense)) { list_for_each_entry(ncmd, &hostdata->unissued, list) { cmd = NCR5380_to_scmd(ncmd); dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n", cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun); if (!(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun))) { list_del(&ncmd->list); dsprintk(NDEBUG_QUEUES, instance, "dequeue: removed %p from issue queue\n", cmd); return cmd; } } } else { /* Autosense processing begins here */ ncmd = list_first_entry(&hostdata->autosense, struct NCR5380_cmd, list); list_del(&ncmd->list); cmd = NCR5380_to_scmd(ncmd); dsprintk(NDEBUG_QUEUES, instance, "dequeue: removed %p from autosense queue\n", cmd); scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); hostdata->sensing = cmd; return cmd; } return NULL; } static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) { struct NCR5380_hostdata *hostdata = shost_priv(instance); struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); if (hostdata->sensing == cmd) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); list_add(&ncmd->list, &hostdata->autosense); hostdata->sensing = NULL; } else list_add(&ncmd->list, &hostdata->unissued); } /** * NCR5380_main - NCR state machines * * NCR5380_main is a coroutine that runs as long as more work can * be done on the NCR5380 host adapters in a system. Both * NCR5380_queue_command() and NCR5380_intr() will try to start it * in case it is not running. */ static void NCR5380_main(struct work_struct *work) { struct NCR5380_hostdata *hostdata = container_of(work, struct NCR5380_hostdata, main_task); struct Scsi_Host *instance = hostdata->host; int done; do { done = 1; spin_lock_irq(&hostdata->lock); while (!hostdata->connected && !hostdata->selecting) { struct scsi_cmnd *cmd = dequeue_next_cmd(instance); if (!cmd) break; dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd); /* * Attempt to establish an I_T_L nexus here. * On success, instance->hostdata->connected is set. * On failure, we must add the command back to the * issue queue so we can keep trying. */ /* * REQUEST SENSE commands are issued without tagged * queueing, even on SCSI-II devices because the * contingent allegiance condition exists for the * entire unit. 
*/ if (!NCR5380_select(instance, cmd)) { dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); } else { dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, "main: select failed, returning %p to queue\n", cmd); requeue_cmd(instance, cmd); } } if (hostdata->connected && !hostdata->dma_len) { dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); NCR5380_information_transfer(instance); done = 0; } if (!hostdata->connected) { NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); maybe_release_dma_irq(instance); } spin_unlock_irq(&hostdata->lock); if (!done) cond_resched(); } while (!done); } /* * NCR5380_dma_complete - finish DMA transfer * @instance: the scsi host instance * * Called by the interrupt handler when DMA finishes or a phase * mismatch occurs (which would end the DMA transfer). */ static void NCR5380_dma_complete(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); int transferred; unsigned char **data; int *count; int saved_data = 0, overrun = 0; unsigned char p; if (hostdata->read_overruns) { p = ncmd->phase; if (p & SR_IO) { udelay(10); if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == (BASR_PHASE_MATCH | BASR_ACK)) { saved_data = NCR5380_read(INPUT_DATA_REG); overrun = 1; dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); } } } #ifdef CONFIG_SUN3 if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) { pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", instance->host_no); BUG(); } if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == (BASR_PHASE_MATCH | BASR_ACK)) { pr_err("scsi%d: BASR %02x\n", instance->host_no, NCR5380_read(BUS_AND_STATUS_REG)); pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", instance->host_no); BUG(); } #endif NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_read(RESET_PARITY_INTERRUPT_REG); transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata); hostdata->dma_len = 0; data = (unsigned char **)&ncmd->ptr; count = &ncmd->this_residual; *data += transferred; *count -= transferred; if (hostdata->read_overruns) { int cnt, toPIO; if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { cnt = toPIO = hostdata->read_overruns; if (overrun) { dsprintk(NDEBUG_DMA, instance, "Got an input overrun, using saved byte\n"); *(*data)++ = saved_data; (*count)--; cnt--; toPIO--; } if (toPIO > 0) { dsprintk(NDEBUG_DMA, instance, "Doing %d byte PIO to 0x%p\n", cnt, *data); NCR5380_transfer_pio(instance, &p, &cnt, data, 0); *count -= toPIO - cnt; } } } } /** * NCR5380_intr - generic NCR5380 irq handler * @irq: interrupt number * @dev_id: device info * * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses * from the disconnected queue, and restarting NCR5380_main() * as required. * * The chip can assert IRQ in any of six different conditions. The IRQ flag * is then cleared by reading the Reset Parity/Interrupt Register (RPIR). 
* Three of these six conditions are latched in the Bus and Status Register: * - End of DMA (cleared by ending DMA Mode) * - Parity error (cleared by reading RPIR) * - Loss of BSY (cleared by reading RPIR) * Two conditions have flag bits that are not latched: * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode) * - Bus reset (non-maskable) * The remaining condition has no flag bit at all: * - Selection/reselection * * Hence, establishing the cause(s) of any interrupt is partly guesswork. * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor * claimed that "the design of the [DP8490] interrupt logic ensures * interrupts will not be lost (they can be on the DP5380)." * The L5380/53C80 datasheet from LOGIC Devices has more details. * * Checking for bus reset by reading RST is futile because of interrupt * latency, but a bus reset will reset chip logic. Checking for parity error * is unnecessary because that interrupt is never enabled. A Loss of BSY * condition will clear DMA Mode. We can tell when this occurs because the * Busy Monitor interrupt is enabled together with DMA Mode. */ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) { struct Scsi_Host *instance = dev_id; struct NCR5380_hostdata *hostdata = shost_priv(instance); int handled = 0; unsigned char basr; unsigned long flags; spin_lock_irqsave(&hostdata->lock, flags); basr = NCR5380_read(BUS_AND_STATUS_REG); if (basr & BASR_IRQ) { unsigned char mr = NCR5380_read(MODE_REG); unsigned char sr = NCR5380_read(STATUS_REG); dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", irq, basr, sr, mr); if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { /* Probably End of DMA, Phase Mismatch or Loss of BSY. * We ack IRQ after clearing Mode Register. Workarounds * for End of DMA errata need to happen in DMA Mode. */ dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); if (hostdata->connected) { NCR5380_dma_complete(instance); queue_work(hostdata->work_q, &hostdata->main_task); } else { NCR5380_write(MODE_REG, MR_BASE); NCR5380_read(RESET_PARITY_INTERRUPT_REG); } } else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { /* Probably reselected */ NCR5380_write(SELECT_ENABLE_REG, 0); NCR5380_read(RESET_PARITY_INTERRUPT_REG); dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n"); if (!hostdata->connected) { NCR5380_reselect(instance); queue_work(hostdata->work_q, &hostdata->main_task); } if (!hostdata->connected) NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); } else { /* Probably Bus Reset */ NCR5380_read(RESET_PARITY_INTERRUPT_REG); if (sr & SR_RST) { /* Certainly Bus Reset */ shost_printk(KERN_WARNING, instance, "bus reset interrupt\n"); bus_reset_cleanup(instance); } else { dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); } #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif } handled = 1; } else { dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n"); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif } spin_unlock_irqrestore(&hostdata->lock, flags); return IRQ_RETVAL(handled); } /** * NCR5380_select - attempt arbitration and selection for a given command * @instance: the Scsi_Host instance * @cmd: the scsi_cmnd to execute * * This routine establishes an I_T_L nexus for a SCSI command. This involves * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message. * * Returns true if the operation should be retried. 
* Returns false if it should not be retried. * * Side effects : * If bus busy, arbitration failed, etc, NCR5380_select() will exit * with registers as they should have been on entry - ie * SELECT_ENABLE will be set appropriately, the NCR5380 * will cease to drive any SCSI bus signals. * * If successful : the I_T_L nexus will be established, and * hostdata->connected will be set to cmd. * SELECT interrupt will be disabled. * * If failed (no target) : scsi_done() will be called, and the * cmd->result host byte set to DID_BAD_TARGET. */ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) __releases(&hostdata->lock) __acquires(&hostdata->lock) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char tmp[3], phase; unsigned char *data; int len; int err; bool ret = true; bool can_disconnect = instance->irq != NO_IRQ && cmd->cmnd[0] != REQUEST_SENSE && (disconnect_mask & BIT(scmd_id(cmd))); NCR5380_dprint(NDEBUG_ARBITRATION, instance); dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", instance->this_id); /* * Arbitration and selection phases are slow and involve dropping the * lock, so we have to watch out for EH. An exception handler may * change 'selecting' to NULL. This function will then return false * so that the caller will forget about 'cmd'. (During information * transfer phases, EH may change 'connected' to NULL.) */ hostdata->selecting = cmd; /* * Set the phase bits to 0, otherwise the NCR5380 won't drive the * data bus during SELECTION. */ NCR5380_write(TARGET_COMMAND_REG, 0); /* * Start arbitration. */ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(MODE_REG, MR_ARBITRATE); /* The chip now waits for BUS FREE phase. Then after the 800 ns * Bus Free Delay, arbitration will begin. */ spin_unlock_irq(&hostdata->lock); err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0, INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, ICR_ARBITRATION_PROGRESS, HZ); spin_lock_irq(&hostdata->lock); if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { /* Reselection interrupt */ goto out; } if (!hostdata->selecting) { /* Command was aborted */ NCR5380_write(MODE_REG, MR_BASE); return false; } if (err < 0) { NCR5380_write(MODE_REG, MR_BASE); shost_printk(KERN_ERR, instance, "select: arbitration timeout\n"); goto out; } spin_unlock_irq(&hostdata->lock); /* The SCSI-2 arbitration delay is 2.4 us */ udelay(3); /* Check for lost arbitration */ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); spin_lock_irq(&hostdata->lock); goto out; } /* After/during arbitration, BSY should be asserted. * IBM DPES-31080 Version S31Q works now * Tnx to [email protected] for finding this! 
(Roman) */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); /* * Again, bus clear + bus settle time is 1.2us, however, this is * a minimum so we'll udelay ceil(1.2) */ if (hostdata->flags & FLAG_TOSHIBA_DELAY) udelay(15); else udelay(2); spin_lock_irq(&hostdata->lock); /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) goto out; if (!hostdata->selecting) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return false; } dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); /* * Now that we have won arbitration, start Selection process, asserting * the host and target ID's on the SCSI bus. */ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); /* * Raise ATN while SEL is true before BSY goes false from arbitration, * since this is the only way to guarantee that we'll get a MESSAGE OUT * phase immediately after selection. */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); NCR5380_write(MODE_REG, MR_BASE); /* * Reselect interrupts must be turned off prior to the dropping of BSY, * otherwise we will trigger an interrupt. */ NCR5380_write(SELECT_ENABLE_REG, 0); spin_unlock_irq(&hostdata->lock); /* * The initiator shall then wait at least two deskew delays and release * the BSY signal. */ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ /* Reset BSY */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); /* * Something weird happens when we cease to drive BSY - looks * like the board/chip is letting us do another read before the * appropriate propagation delay has expired, and we're confusing * a BSY signal from ourselves as the target's response to SELECTION. * * A small delay (the 'C++' frontend breaks the pipeline with an * unnecessary jump, making it work on my 386-33/Trantor T128, the * tighter 'C' code breaks and requires this) solves the problem - * the 1 us delay is arbitrary, and only used because this delay will * be the same on other platforms and since it works here, it should * work there. * * wingel suggests that this could be due to failing to wait * one deskew delay. */ udelay(1); dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); /* * The SCSI specification calls for a 250 ms timeout for the actual * selection. */ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY, msecs_to_jiffies(250)); if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { spin_lock_irq(&hostdata->lock); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_reselect(instance); shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); goto out; } if (err < 0) { spin_lock_irq(&hostdata->lock); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Can't touch cmd if it has been reclaimed by the scsi ML */ if (!hostdata->selecting) return false; cmd->result = DID_BAD_TARGET << 16; complete_cmd(instance, cmd); dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n"); ret = false; goto out; } /* * No less than two deskew delays after the initiator detects the * BSY signal is true, it shall release the SEL signal and may * change the DATA BUS. 
-wingel */ udelay(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); /* * Since we followed the SCSI spec, and raised ATN while SEL * was true but before BSY was false during selection, the information * transfer phase should be a MESSAGE OUT phase so that we can send the * IDENTIFY message. */ /* Wait for start of REQ/ACK handshake */ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); spin_lock_irq(&hostdata->lock); if (err < 0) { shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); goto out; } if (!hostdata->selecting) { do_abort(instance, 0); return false; } dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", scmd_id(cmd)); tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun); len = 1; data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); if (len) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n"); ret = false; goto out; } dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); hostdata->connected = cmd; hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif initialize_SCp(cmd); ret = false; out: if (!hostdata->selecting) return false; hostdata->selecting = NULL; return ret; } /* * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using polled I/O * * Inputs : instance - instance of driver, *phase - pointer to * what phase is expected, *count - pointer to number of * bytes to transfer, **data - pointer to data pointer, * can_sleep - 1 or 0 when sleeping is permitted or not, respectively. * * Returns : -1 when different phase is entered without transferring * maximum number of bytes, 0 if all bytes are transferred or exit * is in same phase. * * Also, *phase, *count, *data are modified in place. * * XXX Note : handling for bus free may be useful. */ /* * Note : this code is not as quick as it could be, however it * IS 100% reliable, and for the actual data transfer where speed * counts, we will always do a pseudo DMA or DMA transfer. 
*/ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data, unsigned int can_sleep) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char p = *phase, tmp; int c = *count; unsigned char *d = *data; /* * The NCR5380 chip will only drive the SCSI bus when the * phase specified in the appropriate bits of the TARGET COMMAND * REGISTER match the STATUS REGISTER */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); do { /* * Wait for assertion of REQ, after which the phase bits will be * valid */ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ * can_sleep) < 0) break; dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); /* Check for phase mismatch */ if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) { dsprintk(NDEBUG_PIO, instance, "phase mismatch\n"); NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } /* Do actual transfer from SCSI bus to / from memory */ if (!(p & SR_IO)) NCR5380_write(OUTPUT_DATA_REG, *d); else *d = NCR5380_read(CURRENT_SCSI_DATA_REG); ++d; /* * The SCSI standard suggests that in MSGOUT phase, the initiator * should drop ATN on the last byte of the message phase * after REQ has been asserted for the handshake but before * the initiator raises ACK. */ if (!(p & SR_IO)) { if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 5 * HZ * can_sleep) < 0) break; dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); /* * We have several special cases to consider during REQ/ACK handshaking : * 1. We were in MSGOUT phase, and we are on the last byte of the * message. ATN must be dropped as ACK is dropped. * * 2. We are in a MSGIN phase, and we are on the last byte of the * message. We must exit with ACK asserted, so that the calling * code may raise ATN before dropping ACK to reject the message. * * 3. ACK and ATN are clear and the target may proceed as normal. */ if (!(p == PHASE_MSGIN && c == 1)) { if (p == PHASE_MSGOUT && c > 1) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); else NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); } } while (--c); dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); *count = c; *data = d; tmp = NCR5380_read(STATUS_REG); /* The phase read from the bus is valid if either REQ is (already) * asserted or if ACK hasn't been released yet. The latter applies if * we're in MSG IN, DATA IN or STATUS and all bytes have been received. */ if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) *phase = tmp & PHASE_MASK; else *phase = PHASE_UNKNOWN; if (!c || (*phase == p)) return 0; else return -1; } /** * do_reset - issue a reset command * @instance: adapter to reset * * Issue a reset sequence to the NCR5380 and try and get the bus * back into sane shape. * * This clears the reset interrupt flag because there may be no handler for * it. When the driver is initialized, the NCR5380_intr() handler has not yet * been installed. 
And when in EH we may have released the ST DMA interrupt. */ static void do_reset(struct Scsi_Host *instance) { struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance); unsigned long flags; local_irq_save(flags); NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); udelay(50); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); local_irq_restore(flags); } /** * do_abort - abort the currently established nexus by going to * MESSAGE OUT phase and sending an ABORT message. * @instance: relevant scsi host instance * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively * * Returns 0 on success, negative error code on failure. */ static int do_abort(struct Scsi_Host *instance, unsigned int can_sleep) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char *msgptr, phase, tmp; int len; int rc; /* Request message out phase */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); /* * Wait for the target to indicate a valid phase by asserting * REQ. Once this happens, we'll have either a MSGOUT phase * and can immediately send the ABORT message, or we'll have some * other phase and will have to source/sink data. * * We really don't care what value was on the bus or what value * the target sees, so we just handshake. */ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ * can_sleep); if (rc < 0) goto out; tmp = NCR5380_read(STATUS_REG) & PHASE_MASK; NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); if (tmp != PHASE_MSGOUT) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ * can_sleep); if (rc < 0) goto out; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); } tmp = ABORT; msgptr = &tmp; len = 1; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &msgptr, can_sleep); if (len) rc = -ENXIO; /* * If we got here, and the command completed successfully, * we're about to go into bus free state. */ out: NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return rc; } /* * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using either real * or pseudo DMA. * * Inputs : instance - instance of driver, *phase - pointer to * what phase is expected, *count - pointer to number of * bytes to transfer, **data - pointer to data pointer. * * Returns : -1 when different phase is entered without transferring * maximum number of bytes, 0 if all bytes or transferred or exit * is in same phase. * * Also, *phase, *count, *data are modified in place. */ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int c = *count; unsigned char p = *phase; unsigned char *d = *data; unsigned char tmp; int result = 0; if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { *phase = tmp; return -1; } NCR5380_to_ncmd(hostdata->connected)->phase = p; if (p & SR_IO) { if (hostdata->read_overruns) c -= hostdata->read_overruns; else if (hostdata->flags & FLAG_DMA_FIXUP) --c; } dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", (p & SR_IO) ? 
"receive" : "send", c, d); #ifdef CONFIG_SUN3 /* send start chain */ sun3scsi_dma_start(c, *data); #endif NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | MR_ENABLE_EOP_INTR); if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { /* On the Medusa, it is a must to initialize the DMA before * starting the NCR. This is also the cleaner way for the TT. */ if (p & SR_IO) result = NCR5380_dma_recv_setup(hostdata, d, c); else result = NCR5380_dma_send_setup(hostdata, d, c); } /* * On the PAS16 at least I/O recovery delays are not needed here. * Everyone else seems to want them. */ if (p & SR_IO) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_io_delay(1); NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); } else { NCR5380_io_delay(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); NCR5380_io_delay(1); NCR5380_write(START_DMA_SEND_REG, 0); NCR5380_io_delay(1); } #ifdef CONFIG_SUN3 #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif sun3_dma_active = 1; #endif if (hostdata->flags & FLAG_LATE_DMA_SETUP) { /* On the Falcon, the DMA setup must be done after the last * NCR access, else the DMA setup gets trashed! */ if (p & SR_IO) result = NCR5380_dma_recv_setup(hostdata, d, c); else result = NCR5380_dma_send_setup(hostdata, d, c); } /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */ if (result < 0) return result; /* For real DMA, result is the byte count. DMA interrupt is expected. */ if (result > 0) { hostdata->dma_len = result; return 0; } /* The result is zero iff pseudo DMA send/receive was completed. */ hostdata->dma_len = c; /* * A note regarding the DMA errata workarounds for early NMOS silicon. * * For DMA sends, we want to wait until the last byte has been * transferred out over the bus before we turn off DMA mode. Alas, there * seems to be no terribly good way of doing this on a 5380 under all * conditions. For non-scatter-gather operations, we can wait until REQ * and ACK both go false, or until a phase mismatch occurs. Gather-sends * are nastier, since the device will be expecting more data than we * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we * could test Last Byte Sent to assure transfer (I imagine this is precisely * why this signal was added to the newer chips) but on the older 538[01] * this signal does not exist. The workaround for this lack is a watchdog; * we bail out of the wait-loop after a modest amount of wait-time if * the usual exit conditions are not met. Not a terribly clean or * correct solution :-% * * DMA receive is equally tricky due to a nasty characteristic of the NCR5380. * If the chip is in DMA receive mode, it will respond to a target's * REQ by latching the SCSI data into the INPUT DATA register and asserting * ACK, even if it has _already_ been notified by the DMA controller that * the current DMA transfer has completed! If the NCR5380 is then taken * out of DMA mode, this already-acknowledged byte is lost. This is * not a problem for "one DMA transfer per READ command", because * the situation will never arise... either all of the data is DMA'ed * properly, or the target switches to MESSAGE IN phase to signal a * disconnection (either operation bringing the DMA to a clean halt). * However, in order to handle scatter-receive, we must work around the * problem. The chosen fix is to DMA fewer bytes, then check for the * condition before taking the NCR5380 out of DMA mode. 
One or two extra * bytes are transferred via PIO as necessary to fill out the original * request. */ if (hostdata->flags & FLAG_DMA_FIXUP) { if (p & SR_IO) { /* * The workaround was to transfer fewer bytes than we * intended to with the pseudo-DMA read function, wait for * the chip to latch the last byte, read it, and then disable * pseudo-DMA mode. * * After REQ is asserted, the NCR5380 asserts DRQ and ACK. * REQ is deasserted when ACK is asserted, and not reasserted * until ACK goes false. Since the NCR5380 won't lower ACK * until DACK is asserted, which won't happen unless we twiddle * the DMA port or we take the NCR5380 out of DMA mode, we * can guarantee that we won't handshake another extra * byte. */ if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, 0) < 0) { result = -1; shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); } if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 0) < 0) { result = -1; shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); } d[*count - 1] = NCR5380_read(INPUT_DATA_REG); } else { /* * Wait for the last byte to be sent. If REQ is being asserted for * the byte we're interested, we'll ACK it and it will go false. */ if (NCR5380_poll_politely2(hostdata, BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) { result = -1; shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); } } } NCR5380_dma_complete(instance); return result; } /* * Function : NCR5380_information_transfer (struct Scsi_Host *instance) * * Purpose : run through the various SCSI phases and do as the target * directs us to. Operates on the currently connected command, * instance->connected. * * Inputs : instance, instance for which we are doing commands * * Side effects : SCSI things happen, the disconnected queue will be * modified if a command disconnects, *instance->connected will * change. * * XXX Note : we need to watch for bus free or a reset condition here * to recover from an unexpected bus free condition. 
*/ static void NCR5380_information_transfer(struct Scsi_Host *instance) __releases(&hostdata->lock) __acquires(&hostdata->lock) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char msgout = NOP; int sink = 0; int len; int transfersize; unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; struct scsi_cmnd *cmd; #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif while ((cmd = hostdata->connected)) { struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd); tmp = NCR5380_read(STATUS_REG); /* We only have a valid SCSI phase when REQ is asserted */ if (tmp & SR_REQ) { phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } #ifdef CONFIG_SUN3 if (phase == PHASE_CMDOUT && sun3_dma_setup_done != cmd) { int count; advance_sg_buffer(ncmd); count = sun3scsi_dma_xfer_len(hostdata, cmd); if (count > 0) { if (cmd->sc_data_direction == DMA_TO_DEVICE) sun3scsi_dma_send_setup(hostdata, ncmd->ptr, count); else sun3scsi_dma_recv_setup(hostdata, ncmd->ptr, count); sun3_dma_setup_done = cmd; } #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif } #endif /* CONFIG_SUN3 */ if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 0; continue; } switch (phase) { case PHASE_DATAOUT: #if (NDEBUG & NDEBUG_NO_DATAOUT) shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); sink = 1; do_abort(instance, 0); cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); hostdata->connected = NULL; hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); return; #endif case PHASE_DATAIN: /* * If there is no room left in the current buffer in the * scatter-gather list, move onto the next one. */ advance_sg_buffer(ncmd); dsprintk(NDEBUG_INFORMATION, instance, "this residual %d, sg ents %d\n", ncmd->this_residual, sg_nents(ncmd->buffer)); /* * The preferred transfer method is going to be * PSEUDO-DMA for systems that are strictly PIO, * since we can let the hardware do the handshaking. * * For this to work, we need to know the transfersize * ahead of time, since the pseudo-DMA code will sit * in an unconditional loop. */ transfersize = 0; if (!cmd->device->borken) transfersize = NCR5380_dma_xfer_len(hostdata, cmd); if (transfersize > 0) { len = transfersize; if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **)&ncmd->ptr)) { /* * If the watchdog timer fires, all future * accesses to this device will use the * polled-IO. */ scmd_printk(KERN_INFO, cmd, "switching to slow handshake\n"); cmd->device->borken = 1; do_reset(instance); bus_reset_cleanup(instance); } } else { /* Transfer a small chunk so that the * irq mode lock is not held too long. 
*/ transfersize = min(ncmd->this_residual, NCR5380_PIO_CHUNK_SIZE); len = transfersize; NCR5380_transfer_pio(instance, &phase, &len, (unsigned char **)&ncmd->ptr, 0); ncmd->this_residual -= transfersize - len; } #ifdef CONFIG_SUN3 if (sun3_dma_setup_done == cmd) sun3_dma_setup_done = NULL; #endif return; case PHASE_MSGIN: len = 1; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); ncmd->message = tmp; switch (tmp) { case ABORT: set_host_byte(cmd, DID_ABORT); fallthrough; case COMMAND_COMPLETE: /* Accept message by clearing ACK */ sink = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); dsprintk(NDEBUG_QUEUES, instance, "COMMAND COMPLETE %p target %d lun %llu\n", cmd, scmd_id(cmd), cmd->device->lun); hostdata->connected = NULL; hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); set_status_byte(cmd, ncmd->status); set_resid_from_SCp(cmd); if (cmd->cmnd[0] == REQUEST_SENSE) complete_cmd(instance, cmd); else { if (ncmd->status == SAM_STAT_CHECK_CONDITION || ncmd->status == SAM_STAT_COMMAND_TERMINATED) { dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", cmd); list_add_tail(&ncmd->list, &hostdata->autosense); } else complete_cmd(instance, cmd); } /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); switch (hostdata->last_message) { case HEAD_OF_QUEUE_TAG: case ORDERED_QUEUE_TAG: case SIMPLE_QUEUE_TAG: cmd->device->simple_tags = 0; hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); break; default: break; } break; case DISCONNECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = NULL; list_add(&ncmd->list, &hostdata->disconnected); dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", cmd, scmd_id(cmd), cmd->device->lun); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif return; /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect * operation, in violation of the SCSI spec so we can safely * ignore SAVE/RESTORE pointers calls. * * Unfortunately, some disks violate the SCSI spec and * don't issue the required SAVE_POINTERS message before * disconnecting, and we have to break spec to remain * compatible. */ case SAVE_POINTERS: case RESTORE_POINTERS: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); break; case EXTENDED_MESSAGE: /* * Start the message buffer with the EXTENDED_MESSAGE * byte, since spi_print_msg() wants the whole thing. 
*/ extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); spin_unlock_irq(&hostdata->lock); dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data, 1); dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] > 0 && extended_msg[1] <= sizeof(extended_msg) - 2) { /* Accept third byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = extended_msg[1] - 1; data = extended_msg + 3; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data, 1); dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n", len); switch (extended_msg[2]) { case EXTENDED_SDTR: case EXTENDED_WDTR: tmp = 0; } } else if (len) { shost_printk(KERN_ERR, instance, "error receiving extended message\n"); tmp = 0; } else { shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", extended_msg[2], extended_msg[1]); tmp = 0; } spin_lock_irq(&hostdata->lock); if (!hostdata->connected) return; /* Reject message */ fallthrough; default: /* * If we get something weird that we aren't expecting, * log it. */ if (tmp == EXTENDED_MESSAGE) scmd_printk(KERN_INFO, cmd, "rejecting unknown extended message code %02x, length %d\n", extended_msg[2], extended_msg[1]); else if (tmp) scmd_printk(KERN_INFO, cmd, "rejecting unknown message code %02x\n", tmp); msgout = MESSAGE_REJECT; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); break; } /* switch (tmp) */ break; case PHASE_MSGOUT: len = 1; data = &msgout; hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); if (msgout == ABORT) { hostdata->connected = NULL; hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); return; } msgout = NOP; break; case PHASE_CMDOUT: len = cmd->cmd_len; data = cmd->cmnd; /* * XXX for performance reasons, on machines with a * PSEUDO-DMA architecture we should probably * use the dma transfer function. */ NCR5380_transfer_pio(instance, &phase, &len, &data, 0); break; case PHASE_STATIN: len = 1; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); ncmd->status = tmp; break; default: shost_printk(KERN_ERR, instance, "unknown phase\n"); NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } else { spin_unlock_irq(&hostdata->lock); NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); spin_lock_irq(&hostdata->lock); } } } /* * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does reselection, initializing the instance->connected * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q * nexus has been reestablished, * * Inputs : instance - this instance of the NCR5380. */ static void NCR5380_reselect(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char target_mask; unsigned char lun; unsigned char msg[3]; struct NCR5380_cmd *ncmd; struct scsi_cmnd *tmp; /* * Disable arbitration, etc. since the host adapter obviously * lost, and tell an interrupted NCR5380_select() to restart. 
*/ NCR5380_write(MODE_REG, MR_BASE); target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); if (!target_mask || target_mask & (target_mask - 1)) { shost_printk(KERN_WARNING, instance, "reselect: bad target_mask 0x%02x\n", target_mask); return; } /* * At this point, we have detected that our SCSI ID is on the bus, * SEL is true and BSY was false for at least one bus settle delay * (400 ns). * * We must assert BSY ourselves, until the target drops the SEL * signal. */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_SEL, 0, 0) < 0) { shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n"); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return; } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. */ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 0) < 0) { if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0) /* BUS FREE phase */ return; shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n"); do_abort(instance, 0); return; } #ifdef CONFIG_SUN3 /* acknowledge toggle to MSGIN */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); /* peek at the byte without really hitting the bus */ msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); #else { int len = 1; unsigned char *data = msg; unsigned char phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); if (len) { do_abort(instance, 0); return; } } #endif /* CONFIG_SUN3 */ if (!(msg[0] & 0x80)) { shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); spi_print_msg(msg); printk("\n"); do_abort(instance, 0); return; } lun = msg[0] & 0x07; /* * We need to add code for SCSI-II to track which devices have * I_T_L_Q nexuses established, and which have simple I_T_L * nexuses so we can chose to do additional data transfer. */ /* * Find the command corresponding to the I_T_L or I_T_L_Q nexus we * just reestablished, and remove it from the disconnected queue. */ tmp = NULL; list_for_each_entry(ncmd, &hostdata->disconnected, list) { struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); if (target_mask == (1 << scmd_id(cmd)) && lun == (u8)cmd->device->lun) { list_del(&ncmd->list); tmp = cmd; break; } } if (tmp) { dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, "reselect: removed %p from disconnected queue\n", tmp); } else { int target = ffs(target_mask) - 1; shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", target_mask, lun); /* * Since we have an established nexus that we can't do anything * with, we must abort it. 
*/ if (do_abort(instance, 0) == 0) hostdata->busy[target] &= ~(1 << lun); return; } #ifdef CONFIG_SUN3 if (sun3_dma_setup_done != tmp) { int count; advance_sg_buffer(ncmd); count = sun3scsi_dma_xfer_len(hostdata, tmp); if (count > 0) { if (tmp->sc_data_direction == DMA_TO_DEVICE) sun3scsi_dma_send_setup(hostdata, ncmd->ptr, count); else sun3scsi_dma_recv_setup(hostdata, ncmd->ptr, count); sun3_dma_setup_done = tmp; } } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); #endif /* CONFIG_SUN3 */ /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = tmp; dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n", scmd_id(tmp), tmp->device->lun); } /** * list_find_cmd - test for presence of a command in a linked list * @haystack: list of commands * @needle: command to search for */ static bool list_find_cmd(struct list_head *haystack, struct scsi_cmnd *needle) { struct NCR5380_cmd *ncmd; list_for_each_entry(ncmd, haystack, list) if (NCR5380_to_scmd(ncmd) == needle) return true; return false; } /** * list_remove_cmd - remove a command from linked list * @haystack: list of commands * @needle: command to remove */ static bool list_del_cmd(struct list_head *haystack, struct scsi_cmnd *needle) { if (list_find_cmd(haystack, needle)) { struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(needle); list_del(&ncmd->list); return true; } return false; } /** * NCR5380_abort - scsi host eh_abort_handler() method * @cmd: the command to be aborted * * Try to abort a given command by removing it from queues and/or sending * the target an abort message. This may not succeed in causing a target * to abort the command. Nonetheless, the low-level driver must forget about * the command because the mid-layer reclaims it and it may be re-issued. * * The normal path taken by a command is as follows. For EH we trace this * same path to locate and abort the command. * * unissued -> selecting -> [unissued -> selecting ->]... connected -> * [disconnected -> connected ->]... * [autosense -> connected ->] done * * If cmd was not found at all then presumably it has already been completed, * in which case return SUCCESS to try to avoid further EH measures. * * If the command has not completed yet, we must not fail to find it. * We have no option but to forget the aborted command (even if it still * lacks sense data). The mid-layer may re-issue a command that is in error * recovery (see scsi_send_eh_cmnd), but the logic and data structures in * this driver are such that a command can appear on one queue only. * * The lock protects driver data structures, but EH handlers also use it * to serialize their own execution and prevent their own re-entry. 
*/ static int NCR5380_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned long flags; int result = SUCCESS; spin_lock_irqsave(&hostdata->lock, flags); #if (NDEBUG & NDEBUG_ANY) scmd_printk(KERN_INFO, cmd, __func__); #endif NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_dprint_phase(NDEBUG_ANY, instance); if (list_del_cmd(&hostdata->unissued, cmd)) { dsprintk(NDEBUG_ABORT, instance, "abort: removed %p from issue queue\n", cmd); cmd->result = DID_ABORT << 16; scsi_done(cmd); /* No tag or busy flag to worry about */ goto out; } if (hostdata->selecting == cmd) { dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p == selecting\n", cmd); hostdata->selecting = NULL; cmd->result = DID_ABORT << 16; complete_cmd(instance, cmd); goto out; } if (list_del_cmd(&hostdata->disconnected, cmd)) { dsprintk(NDEBUG_ABORT, instance, "abort: removed %p from disconnected list\n", cmd); /* Can't call NCR5380_select() and send ABORT because that * means releasing the lock. Need a bus reset. */ set_host_byte(cmd, DID_ERROR); complete_cmd(instance, cmd); result = FAILED; goto out; } if (hostdata->connected == cmd) { dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); hostdata->connected = NULL; hostdata->dma_len = 0; if (do_abort(instance, 0) < 0) { set_host_byte(cmd, DID_ERROR); complete_cmd(instance, cmd); result = FAILED; goto out; } set_host_byte(cmd, DID_ABORT); complete_cmd(instance, cmd); goto out; } if (list_del_cmd(&hostdata->autosense, cmd)) { dsprintk(NDEBUG_ABORT, instance, "abort: removed %p from sense queue\n", cmd); complete_cmd(instance, cmd); } out: if (result == FAILED) dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); else { hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); } queue_work(hostdata->work_q, &hostdata->main_task); spin_unlock_irqrestore(&hostdata->lock, flags); return result; } static void bus_reset_cleanup(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; struct NCR5380_cmd *ncmd; /* reset NCR registers */ NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid * conflicts when the mid-level code tries to wake up the affected * commands! 
*/ if (hostdata->selecting) { hostdata->selecting->result = DID_RESET << 16; complete_cmd(instance, hostdata->selecting); hostdata->selecting = NULL; } list_for_each_entry(ncmd, &hostdata->disconnected, list) { struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); set_host_byte(cmd, DID_RESET); complete_cmd(instance, cmd); } INIT_LIST_HEAD(&hostdata->disconnected); list_for_each_entry(ncmd, &hostdata->autosense, list) { struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); scsi_done(cmd); } INIT_LIST_HEAD(&hostdata->autosense); if (hostdata->connected) { set_host_byte(hostdata->connected, DID_RESET); complete_cmd(instance, hostdata->connected); hostdata->connected = NULL; } for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; hostdata->dma_len = 0; queue_work(hostdata->work_q, &hostdata->main_task); } /** * NCR5380_host_reset - reset the SCSI host * @cmd: SCSI command undergoing EH * * Returns SUCCESS */ static int NCR5380_host_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned long flags; struct NCR5380_cmd *ncmd; spin_lock_irqsave(&hostdata->lock, flags); #if (NDEBUG & NDEBUG_ANY) shost_printk(KERN_INFO, instance, __func__); #endif NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_dprint_phase(NDEBUG_ANY, instance); list_for_each_entry(ncmd, &hostdata->unissued, list) { struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd); scmd->result = DID_RESET << 16; scsi_done(scmd); } INIT_LIST_HEAD(&hostdata->unissued); do_reset(instance); bus_reset_cleanup(instance); spin_unlock_irqrestore(&hostdata->lock, flags); return SUCCESS; }
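/*
 * Usage sketch (illustrative only, not code from an in-tree driver): this
 * source acts as a library core rather than a standalone module. Board
 * drivers such as g_NCR5380.c and mac_scsi.c define the chip register
 * accessors and the DMA hooks referenced above, then include this file.
 * The fragment below is a minimal, hypothetical PIO-only wrapper; the
 * register mapping and the zero-returning DMA stubs are assumptions chosen
 * for illustration and are kept out of the build by the #if 0 guard.
 */
#if 0
#define NCR5380_read(reg)		ioread8(hostdata->io + (reg))
#define NCR5380_write(reg, value)	iowrite8(value, hostdata->io + (reg))

/* Returning 0 from the transfer-length hook forces the PIO path above. */
#define NCR5380_dma_xfer_len(hostdata, cmd)		(0)
#define NCR5380_dma_recv_setup(hostdata, dst, count)	(0)
#define NCR5380_dma_send_setup(hostdata, src, count)	(0)

#include "NCR5380.c"
#endif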
linux-master
drivers/scsi/NCR5380.c
// SPDX-License-Identifier: GPL-2.0 /* * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers * * This driver supports the newer, SCSI-based firmware interface only. * * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <[email protected]> * * Based on the original DAC960 driver, which has * Copyright 1998-2001 by Leonard N. Zubkoff <[email protected]> * Portions Copyright 2002 by Mylex (An IBM Business Unit) */ #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/raid_class.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include "myrs.h" static struct raid_template *myrs_raid_template; static struct myrs_devstate_name_entry { enum myrs_devstate state; char *name; } myrs_devstate_name_list[] = { { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" }, { MYRS_DEVICE_ONLINE, "Online" }, { MYRS_DEVICE_REBUILD, "Rebuild" }, { MYRS_DEVICE_MISSING, "Missing" }, { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" }, { MYRS_DEVICE_OFFLINE, "Offline" }, { MYRS_DEVICE_CRITICAL, "Critical" }, { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" }, { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" }, { MYRS_DEVICE_STANDBY, "Standby" }, { MYRS_DEVICE_INVALID_STATE, "Invalid" }, }; static char *myrs_devstate_name(enum myrs_devstate state) { struct myrs_devstate_name_entry *entry = myrs_devstate_name_list; int i; for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) { if (entry[i].state == state) return entry[i].name; } return NULL; } static struct myrs_raid_level_name_entry { enum myrs_raid_level level; char *name; } myrs_raid_level_name_list[] = { { MYRS_RAID_LEVEL0, "RAID0" }, { MYRS_RAID_LEVEL1, "RAID1" }, { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" }, { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" }, { MYRS_RAID_LEVEL6, "RAID6" }, { MYRS_RAID_JBOD, "JBOD" }, { MYRS_RAID_NEWSPAN, "New Mylex SPAN" }, { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" }, { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" }, { MYRS_RAID_SPAN, "Mylex SPAN" }, { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" }, { MYRS_RAID_LEVELE, "RAIDE (concatenation)" }, { MYRS_RAID_PHYSICAL, "Physical device" }, }; static char *myrs_raid_level_name(enum myrs_raid_level level) { struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list; int i; for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) { if (entry[i].level == level) return entry[i].name; } return NULL; } /* * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk */ static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk) { union myrs_cmd_mbox *mbox = &cmd_blk->mbox; memset(mbox, 0, sizeof(union myrs_cmd_mbox)); cmd_blk->status = 0; } /* * myrs_qcmd - queues Command for DAC960 V2 Series Controllers. */ static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) { void __iomem *base = cs->io_base; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox; cs->write_cmd_mbox(next_mbox, mbox); if (cs->prev_cmd_mbox1->words[0] == 0 || cs->prev_cmd_mbox2->words[0] == 0) cs->get_cmd_mbox(base); cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1; cs->prev_cmd_mbox1 = next_mbox; if (++next_mbox > cs->last_cmd_mbox) next_mbox = cs->first_cmd_mbox; cs->next_cmd_mbox = next_mbox; } /* * myrs_exec_cmd - executes V2 Command and waits for completion. 
*/ static void myrs_exec_cmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) { DECLARE_COMPLETION_ONSTACK(complete); unsigned long flags; cmd_blk->complete = &complete; spin_lock_irqsave(&cs->queue_lock, flags); myrs_qcmd(cs, cmd_blk); spin_unlock_irqrestore(&cs->queue_lock, flags); wait_for_completion(&complete); } /* * myrs_report_progress - prints progress message */ static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num, unsigned char *msg, unsigned long blocks, unsigned long size) { shost_printk(KERN_INFO, cs->host, "Logical Drive %d: %s in Progress: %d%% completed\n", ldev_num, msg, (100 * (int)(blocks >> 7)) / (int)(size >> 7)); } /* * myrs_get_ctlr_info - executes a Controller Information IOCTL Command */ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs) { struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; dma_addr_t ctlr_info_addr; union myrs_sgl *sgl; unsigned char status; unsigned short ldev_present, ldev_critical, ldev_offline; ldev_present = cs->ctlr_info->ldev_present; ldev_critical = cs->ctlr_info->ldev_critical; ldev_offline = cs->ctlr_info->ldev_offline; ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info, sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE); if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr)) return MYRS_STATUS_FAILED; mutex_lock(&cs->dcmd_mutex); myrs_reset_cmd(cmd_blk); mbox->ctlr_info.id = MYRS_DCMD_TAG; mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL; mbox->ctlr_info.control.dma_ctrl_to_host = true; mbox->ctlr_info.control.no_autosense = true; mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info); mbox->ctlr_info.ctlr_num = 0; mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO; sgl = &mbox->ctlr_info.dma_addr; sgl->sge[0].sge_addr = ctlr_info_addr; sgl->sge[0].sge_count = mbox->ctlr_info.dma_size; dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n"); myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); dma_unmap_single(&cs->pdev->dev, ctlr_info_addr, sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE); if (status == MYRS_STATUS_SUCCESS) { if (cs->ctlr_info->bg_init_active + cs->ctlr_info->ldev_init_active + cs->ctlr_info->pdev_init_active + cs->ctlr_info->cc_active + cs->ctlr_info->rbld_active + cs->ctlr_info->exp_active != 0) cs->needs_update = true; if (cs->ctlr_info->ldev_present != ldev_present || cs->ctlr_info->ldev_critical != ldev_critical || cs->ctlr_info->ldev_offline != ldev_offline) shost_printk(KERN_INFO, cs->host, "Logical drive count changes (%d/%d/%d)\n", cs->ctlr_info->ldev_critical, cs->ctlr_info->ldev_offline, cs->ctlr_info->ldev_present); } return status; } /* * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command */ static unsigned char myrs_get_ldev_info(struct myrs_hba *cs, unsigned short ldev_num, struct myrs_ldev_info *ldev_info) { struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; dma_addr_t ldev_info_addr; struct myrs_ldev_info ldev_info_orig; union myrs_sgl *sgl; unsigned char status; memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info)); ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info, sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE); if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr)) return MYRS_STATUS_FAILED; mutex_lock(&cs->dcmd_mutex); myrs_reset_cmd(cmd_blk); mbox->ldev_info.id = MYRS_DCMD_TAG; mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL; mbox->ldev_info.control.dma_ctrl_to_host = true; 
mbox->ldev_info.control.no_autosense = true; mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info); mbox->ldev_info.ldev.ldev_num = ldev_num; mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID; sgl = &mbox->ldev_info.dma_addr; sgl->sge[0].sge_addr = ldev_info_addr; sgl->sge[0].sge_count = mbox->ldev_info.dma_size; dev_dbg(&cs->host->shost_gendev, "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num); myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); dma_unmap_single(&cs->pdev->dev, ldev_info_addr, sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE); if (status == MYRS_STATUS_SUCCESS) { unsigned short ldev_num = ldev_info->ldev_num; struct myrs_ldev_info *new = ldev_info; struct myrs_ldev_info *old = &ldev_info_orig; unsigned long ldev_size = new->cfg_devsize; if (new->dev_state != old->dev_state) { const char *name; name = myrs_devstate_name(new->dev_state); shost_printk(KERN_INFO, cs->host, "Logical Drive %d is now %s\n", ldev_num, name ? name : "Invalid"); } if ((new->soft_errs != old->soft_errs) || (new->cmds_failed != old->cmds_failed) || (new->deferred_write_errs != old->deferred_write_errs)) shost_printk(KERN_INFO, cs->host, "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n", ldev_num, new->soft_errs, new->cmds_failed, new->deferred_write_errs); if (new->bg_init_active) myrs_report_progress(cs, ldev_num, "Background Initialization", new->bg_init_lba, ldev_size); else if (new->fg_init_active) myrs_report_progress(cs, ldev_num, "Foreground Initialization", new->fg_init_lba, ldev_size); else if (new->migration_active) myrs_report_progress(cs, ldev_num, "Data Migration", new->migration_lba, ldev_size); else if (new->patrol_active) myrs_report_progress(cs, ldev_num, "Patrol Operation", new->patrol_lba, ldev_size); if (old->bg_init_active && !new->bg_init_active) shost_printk(KERN_INFO, cs->host, "Logical Drive %d: Background Initialization %s\n", ldev_num, (new->ldev_control.ldev_init_done ? 
"Completed" : "Failed")); } return status; } /* * myrs_get_pdev_info - executes a "Read Physical Device Information" Command */ static unsigned char myrs_get_pdev_info(struct myrs_hba *cs, unsigned char channel, unsigned char target, unsigned char lun, struct myrs_pdev_info *pdev_info) { struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; dma_addr_t pdev_info_addr; union myrs_sgl *sgl; unsigned char status; pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info, sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE); if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr)) return MYRS_STATUS_FAILED; mutex_lock(&cs->dcmd_mutex); myrs_reset_cmd(cmd_blk); mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL; mbox->pdev_info.id = MYRS_DCMD_TAG; mbox->pdev_info.control.dma_ctrl_to_host = true; mbox->pdev_info.control.no_autosense = true; mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info); mbox->pdev_info.pdev.lun = lun; mbox->pdev_info.pdev.target = target; mbox->pdev_info.pdev.channel = channel; mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID; sgl = &mbox->pdev_info.dma_addr; sgl->sge[0].sge_addr = pdev_info_addr; sgl->sge[0].sge_count = mbox->pdev_info.dma_size; dev_dbg(&cs->host->shost_gendev, "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n", channel, target, lun); myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); dma_unmap_single(&cs->pdev->dev, pdev_info_addr, sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE); return status; } /* * myrs_dev_op - executes a "Device Operation" Command */ static unsigned char myrs_dev_op(struct myrs_hba *cs, enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev) { struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; unsigned char status; mutex_lock(&cs->dcmd_mutex); myrs_reset_cmd(cmd_blk); mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL; mbox->dev_op.id = MYRS_DCMD_TAG; mbox->dev_op.control.dma_ctrl_to_host = true; mbox->dev_op.control.no_autosense = true; mbox->dev_op.ioctl_opcode = opcode; mbox->dev_op.opdev = opdev; myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); return status; } /* * myrs_translate_pdev - translates a Physical Device Channel and * TargetID into a Logical Device. 
*/ static unsigned char myrs_translate_pdev(struct myrs_hba *cs, unsigned char channel, unsigned char target, unsigned char lun, struct myrs_devmap *devmap) { struct pci_dev *pdev = cs->pdev; dma_addr_t devmap_addr; struct myrs_cmdblk *cmd_blk; union myrs_cmd_mbox *mbox; union myrs_sgl *sgl; unsigned char status; memset(devmap, 0x0, sizeof(struct myrs_devmap)); devmap_addr = dma_map_single(&pdev->dev, devmap, sizeof(struct myrs_devmap), DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, devmap_addr)) return MYRS_STATUS_FAILED; mutex_lock(&cs->dcmd_mutex); cmd_blk = &cs->dcmd_blk; mbox = &cmd_blk->mbox; mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL; mbox->pdev_info.control.dma_ctrl_to_host = true; mbox->pdev_info.control.no_autosense = true; mbox->pdev_info.dma_size = sizeof(struct myrs_devmap); mbox->pdev_info.pdev.target = target; mbox->pdev_info.pdev.channel = channel; mbox->pdev_info.pdev.lun = lun; mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV; sgl = &mbox->pdev_info.dma_addr; sgl->sge[0].sge_addr = devmap_addr; sgl->sge[0].sge_count = mbox->pdev_info.dma_size; myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); dma_unmap_single(&pdev->dev, devmap_addr, sizeof(struct myrs_devmap), DMA_FROM_DEVICE); return status; } /* * myrs_get_event - executes a Get Event Command */ static unsigned char myrs_get_event(struct myrs_hba *cs, unsigned int event_num, struct myrs_event *event_buf) { struct pci_dev *pdev = cs->pdev; dma_addr_t event_addr; struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; union myrs_sgl *sgl; unsigned char status; event_addr = dma_map_single(&pdev->dev, event_buf, sizeof(struct myrs_event), DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, event_addr)) return MYRS_STATUS_FAILED; mbox->get_event.opcode = MYRS_CMD_OP_IOCTL; mbox->get_event.dma_size = sizeof(struct myrs_event); mbox->get_event.evnum_upper = event_num >> 16; mbox->get_event.ctlr_num = 0; mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT; mbox->get_event.evnum_lower = event_num & 0xFFFF; sgl = &mbox->get_event.dma_addr; sgl->sge[0].sge_addr = event_addr; sgl->sge[0].sge_count = mbox->get_event.dma_size; myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; dma_unmap_single(&pdev->dev, event_addr, sizeof(struct myrs_event), DMA_FROM_DEVICE); return status; } /* * myrs_get_fwstatus - executes a Get Health Status Command */ static unsigned char myrs_get_fwstatus(struct myrs_hba *cs) { struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; union myrs_cmd_mbox *mbox = &cmd_blk->mbox; union myrs_sgl *sgl; unsigned char status = cmd_blk->status; myrs_reset_cmd(cmd_blk); mbox->common.opcode = MYRS_CMD_OP_IOCTL; mbox->common.id = MYRS_MCMD_TAG; mbox->common.control.dma_ctrl_to_host = true; mbox->common.control.no_autosense = true; mbox->common.dma_size = sizeof(struct myrs_fwstat); mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS; sgl = &mbox->common.dma_addr; sgl->sge[0].sge_addr = cs->fwstat_addr; sgl->sge[0].sge_count = mbox->ctlr_info.dma_size; dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n"); myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; return status; } /* * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface */ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, enable_mbox_t enable_mbox_fn) { void __iomem *base = cs->io_base; struct pci_dev *pdev = cs->pdev; union myrs_cmd_mbox *cmd_mbox; struct myrs_stat_mbox *stat_mbox; union myrs_cmd_mbox *mbox; dma_addr_t mbox_addr; unsigned char status = 
MYRS_STATUS_FAILED; if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "DMA mask out of range\n"); return false; } /* Temporary dma mapping, used only in the scope of this function */ mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), &mbox_addr, GFP_KERNEL); if (dma_mapping_error(&pdev->dev, mbox_addr)) return false; /* These are the base addresses for the command memory mailbox array */ cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox); cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size, &cs->cmd_mbox_addr, GFP_KERNEL); if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) { dev_err(&pdev->dev, "Failed to map command mailbox\n"); goto out_free; } cs->first_cmd_mbox = cmd_mbox; cmd_mbox += MYRS_MAX_CMD_MBOX - 1; cs->last_cmd_mbox = cmd_mbox; cs->next_cmd_mbox = cs->first_cmd_mbox; cs->prev_cmd_mbox1 = cs->last_cmd_mbox; cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1; /* These are the base addresses for the status memory mailbox array */ cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox); stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size, &cs->stat_mbox_addr, GFP_KERNEL); if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) { dev_err(&pdev->dev, "Failed to map status mailbox\n"); goto out_free; } cs->first_stat_mbox = stat_mbox; stat_mbox += MYRS_MAX_STAT_MBOX - 1; cs->last_stat_mbox = stat_mbox; cs->next_stat_mbox = cs->first_stat_mbox; cs->fwstat_buf = dma_alloc_coherent(&pdev->dev, sizeof(struct myrs_fwstat), &cs->fwstat_addr, GFP_KERNEL); if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) { dev_err(&pdev->dev, "Failed to map firmware health buffer\n"); cs->fwstat_buf = NULL; goto out_free; } cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL); if (!cs->ctlr_info) goto out_free; cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL); if (!cs->event_buf) goto out_free; /* Enable the Memory Mailbox Interface. 
*/ memset(mbox, 0, sizeof(union myrs_cmd_mbox)); mbox->set_mbox.id = 1; mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL; mbox->set_mbox.control.no_autosense = true; mbox->set_mbox.first_cmd_mbox_size_kb = (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10; mbox->set_mbox.first_stat_mbox_size_kb = (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10; mbox->set_mbox.second_cmd_mbox_size_kb = 0; mbox->set_mbox.second_stat_mbox_size_kb = 0; mbox->set_mbox.sense_len = 0; mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX; mbox->set_mbox.fwstat_buf_size_kb = 1; mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr; mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr; mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr; status = enable_mbox_fn(base, mbox_addr); out_free: dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), mbox, mbox_addr); if (status != MYRS_STATUS_SUCCESS) dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n", status); return (status == MYRS_STATUS_SUCCESS); } /* * myrs_get_config - reads the Configuration Information */ static int myrs_get_config(struct myrs_hba *cs) { struct myrs_ctlr_info *info = cs->ctlr_info; struct Scsi_Host *shost = cs->host; unsigned char status; unsigned char model[20]; unsigned char fw_version[12]; int i, model_len; /* Get data into dma-able area, then copy into permanent location */ mutex_lock(&cs->cinfo_mutex); status = myrs_get_ctlr_info(cs); mutex_unlock(&cs->cinfo_mutex); if (status != MYRS_STATUS_SUCCESS) { shost_printk(KERN_ERR, shost, "Failed to get controller information\n"); return -ENODEV; } /* Initialize the Controller Model Name and Full Model Name fields. */ model_len = sizeof(info->ctlr_name); if (model_len > sizeof(model)-1) model_len = sizeof(model)-1; memcpy(model, info->ctlr_name, model_len); model_len--; while (model[model_len] == ' ' || model[model_len] == '\0') model_len--; model[++model_len] = '\0'; strcpy(cs->model_name, "DAC960 "); strcat(cs->model_name, model); /* Initialize the Controller Firmware Version field. */ sprintf(fw_version, "%d.%02d-%02d", info->fw_major_version, info->fw_minor_version, info->fw_turn_number); if (info->fw_major_version == 6 && info->fw_minor_version == 0 && info->fw_turn_number < 1) { shost_printk(KERN_WARNING, shost, "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n" "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n" "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n", fw_version); return -ENODEV; } /* Initialize the Controller Channels and Targets. */ shost->max_channel = info->physchan_present + info->virtchan_present; shost->max_id = info->max_targets[0]; for (i = 1; i < 16; i++) { if (!info->max_targets[i]) continue; if (shost->max_id < info->max_targets[i]) shost->max_id = info->max_targets[i]; } /* * Initialize the Controller Queue Depth, Driver Queue Depth, * Logical Drive Count, Maximum Blocks per Command, Controller * Scatter/Gather Limit, and Driver Scatter/Gather Limit. * The Driver Queue Depth must be at most three less than * the Controller Queue Depth; tag '1' is reserved for * direct commands, and tag '2' for monitoring commands. 
*/ shost->can_queue = info->max_tcq - 3; if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3) shost->can_queue = MYRS_MAX_CMD_MBOX - 3; shost->max_sectors = info->max_transfer_size; shost->sg_tablesize = info->max_sge; if (shost->sg_tablesize > MYRS_SG_LIMIT) shost->sg_tablesize = MYRS_SG_LIMIT; shost_printk(KERN_INFO, shost, "Configuring %s PCI RAID Controller\n", model); shost_printk(KERN_INFO, shost, " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n", fw_version, info->physchan_present, info->mem_size_mb); shost_printk(KERN_INFO, shost, " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n", shost->can_queue, shost->max_sectors); shost_printk(KERN_INFO, shost, " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n", shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT); for (i = 0; i < info->physchan_max; i++) { if (!info->max_targets[i]) continue; shost_printk(KERN_INFO, shost, " Device Channel %d: max %d devices\n", i, info->max_targets[i]); } shost_printk(KERN_INFO, shost, " Physical: %d/%d channels, %d disks, %d devices\n", info->physchan_present, info->physchan_max, info->pdisk_present, info->pdev_present); shost_printk(KERN_INFO, shost, " Logical: %d/%d channels, %d disks\n", info->virtchan_present, info->virtchan_max, info->ldev_present); return 0; } /* * myrs_log_event - prints a Controller Event message */ static struct { int ev_code; unsigned char *ev_msg; } myrs_ev_list[] = { /* Physical Device Events (0x0000 - 0x007F) */ { 0x0001, "P Online" }, { 0x0002, "P Standby" }, { 0x0005, "P Automatic Rebuild Started" }, { 0x0006, "P Manual Rebuild Started" }, { 0x0007, "P Rebuild Completed" }, { 0x0008, "P Rebuild Cancelled" }, { 0x0009, "P Rebuild Failed for Unknown Reasons" }, { 0x000A, "P Rebuild Failed due to New Physical Device" }, { 0x000B, "P Rebuild Failed due to Logical Drive Failure" }, { 0x000C, "S Offline" }, { 0x000D, "P Found" }, { 0x000E, "P Removed" }, { 0x000F, "P Unconfigured" }, { 0x0010, "P Expand Capacity Started" }, { 0x0011, "P Expand Capacity Completed" }, { 0x0012, "P Expand Capacity Failed" }, { 0x0013, "P Command Timed Out" }, { 0x0014, "P Command Aborted" }, { 0x0015, "P Command Retried" }, { 0x0016, "P Parity Error" }, { 0x0017, "P Soft Error" }, { 0x0018, "P Miscellaneous Error" }, { 0x0019, "P Reset" }, { 0x001A, "P Active Spare Found" }, { 0x001B, "P Warm Spare Found" }, { 0x001C, "S Sense Data Received" }, { 0x001D, "P Initialization Started" }, { 0x001E, "P Initialization Completed" }, { 0x001F, "P Initialization Failed" }, { 0x0020, "P Initialization Cancelled" }, { 0x0021, "P Failed because Write Recovery Failed" }, { 0x0022, "P Failed because SCSI Bus Reset Failed" }, { 0x0023, "P Failed because of Double Check Condition" }, { 0x0024, "P Failed because Device Cannot Be Accessed" }, { 0x0025, "P Failed because of Gross Error on SCSI Processor" }, { 0x0026, "P Failed because of Bad Tag from Device" }, { 0x0027, "P Failed because of Command Timeout" }, { 0x0028, "P Failed because of System Reset" }, { 0x0029, "P Failed because of Busy Status or Parity Error" }, { 0x002A, "P Failed because Host Set Device to Failed State" }, { 0x002B, "P Failed because of Selection Timeout" }, { 0x002C, "P Failed because of SCSI Bus Phase Error" }, { 0x002D, "P Failed because Device Returned Unknown Status" }, { 0x002E, "P Failed because Device Not Ready" }, { 0x002F, "P Failed because Device Not Found at Startup" }, { 0x0030, "P Failed because COD Write Operation Failed" }, { 0x0031, "P Failed because BDT Write Operation Failed" }, { 0x0039, "P 
Missing at Startup" }, { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" }, { 0x003C, "P Temporarily Offline Device Automatically Made Online" }, { 0x003D, "P Standby Rebuild Started" }, /* Logical Device Events (0x0080 - 0x00FF) */ { 0x0080, "M Consistency Check Started" }, { 0x0081, "M Consistency Check Completed" }, { 0x0082, "M Consistency Check Cancelled" }, { 0x0083, "M Consistency Check Completed With Errors" }, { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" }, { 0x0085, "M Consistency Check Failed due to Physical Device Failure" }, { 0x0086, "L Offline" }, { 0x0087, "L Critical" }, { 0x0088, "L Online" }, { 0x0089, "M Automatic Rebuild Started" }, { 0x008A, "M Manual Rebuild Started" }, { 0x008B, "M Rebuild Completed" }, { 0x008C, "M Rebuild Cancelled" }, { 0x008D, "M Rebuild Failed for Unknown Reasons" }, { 0x008E, "M Rebuild Failed due to New Physical Device" }, { 0x008F, "M Rebuild Failed due to Logical Drive Failure" }, { 0x0090, "M Initialization Started" }, { 0x0091, "M Initialization Completed" }, { 0x0092, "M Initialization Cancelled" }, { 0x0093, "M Initialization Failed" }, { 0x0094, "L Found" }, { 0x0095, "L Deleted" }, { 0x0096, "M Expand Capacity Started" }, { 0x0097, "M Expand Capacity Completed" }, { 0x0098, "M Expand Capacity Failed" }, { 0x0099, "L Bad Block Found" }, { 0x009A, "L Size Changed" }, { 0x009B, "L Type Changed" }, { 0x009C, "L Bad Data Block Found" }, { 0x009E, "L Read of Data Block in BDT" }, { 0x009F, "L Write Back Data for Disk Block Lost" }, { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" }, { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" }, { 0x00A2, "L Standby Rebuild Started" }, /* Fault Management Events (0x0100 - 0x017F) */ { 0x0140, "E Fan %d Failed" }, { 0x0141, "E Fan %d OK" }, { 0x0142, "E Fan %d Not Present" }, { 0x0143, "E Power Supply %d Failed" }, { 0x0144, "E Power Supply %d OK" }, { 0x0145, "E Power Supply %d Not Present" }, { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" }, { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" }, { 0x0148, "E Temperature Sensor %d Temperature Normal" }, { 0x0149, "E Temperature Sensor %d Not Present" }, { 0x014A, "E Enclosure Management Unit %d Access Critical" }, { 0x014B, "E Enclosure Management Unit %d Access OK" }, { 0x014C, "E Enclosure Management Unit %d Access Offline" }, /* Controller Events (0x0180 - 0x01FF) */ { 0x0181, "C Cache Write Back Error" }, { 0x0188, "C Battery Backup Unit Found" }, { 0x0189, "C Battery Backup Unit Charge Level Low" }, { 0x018A, "C Battery Backup Unit Charge Level OK" }, { 0x0193, "C Installation Aborted" }, { 0x0195, "C Battery Backup Unit Physically Removed" }, { 0x0196, "C Memory Error During Warm Boot" }, { 0x019E, "C Memory Soft ECC Error Corrected" }, { 0x019F, "C Memory Hard ECC Error Corrected" }, { 0x01A2, "C Battery Backup Unit Failed" }, { 0x01AB, "C Mirror Race Recovery Failed" }, { 0x01AC, "C Mirror Race on Critical Drive" }, /* Controller Internal Processor Events */ { 0x0380, "C Internal Controller Hung" }, { 0x0381, "C Internal Controller Firmware Breakpoint" }, { 0x0390, "C Internal Controller i960 Processor Specific Error" }, { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" }, { 0, "" } }; static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev) { unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE]; int ev_idx = 0, ev_code; unsigned char ev_type, *ev_msg; struct Scsi_Host *shost = cs->host; struct scsi_device *sdev; 
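/* Sense data decoded below for "sense data received" events (code 0x001C) */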
struct scsi_sense_hdr sshdr = {0}; unsigned char sense_info[4]; unsigned char cmd_specific[4]; if (ev->ev_code == 0x1C) { if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) { memset(&sshdr, 0x0, sizeof(sshdr)); memset(sense_info, 0x0, sizeof(sense_info)); memset(cmd_specific, 0x0, sizeof(cmd_specific)); } else { memcpy(sense_info, &ev->sense_data[3], 4); memcpy(cmd_specific, &ev->sense_data[7], 4); } } if (sshdr.sense_key == VENDOR_SPECIFIC && (sshdr.asc == 0x80 || sshdr.asc == 0x81)) ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq); while (true) { ev_code = myrs_ev_list[ev_idx].ev_code; if (ev_code == ev->ev_code || ev_code == 0) break; ev_idx++; } ev_type = myrs_ev_list[ev_idx].ev_msg[0]; ev_msg = &myrs_ev_list[ev_idx].ev_msg[2]; if (ev_code == 0) { shost_printk(KERN_WARNING, shost, "Unknown Controller Event Code %04X\n", ev->ev_code); return; } switch (ev_type) { case 'P': sdev = scsi_device_lookup(shost, ev->channel, ev->target, 0); sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n", ev->ev_seq, ev_msg); if (sdev && sdev->hostdata && sdev->channel < cs->ctlr_info->physchan_present) { struct myrs_pdev_info *pdev_info = sdev->hostdata; switch (ev->ev_code) { case 0x0001: case 0x0007: pdev_info->dev_state = MYRS_DEVICE_ONLINE; break; case 0x0002: pdev_info->dev_state = MYRS_DEVICE_STANDBY; break; case 0x000C: pdev_info->dev_state = MYRS_DEVICE_OFFLINE; break; case 0x000E: pdev_info->dev_state = MYRS_DEVICE_MISSING; break; case 0x000F: pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED; break; } } break; case 'L': shost_printk(KERN_INFO, shost, "event %d: Logical Drive %d %s\n", ev->ev_seq, ev->lun, ev_msg); cs->needs_update = true; break; case 'M': shost_printk(KERN_INFO, shost, "event %d: Logical Drive %d %s\n", ev->ev_seq, ev->lun, ev_msg); cs->needs_update = true; break; case 'S': if (sshdr.sense_key == NO_SENSE || (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04 && (sshdr.ascq == 0x01 || sshdr.ascq == 0x02))) break; shost_printk(KERN_INFO, shost, "event %d: Physical Device %d:%d %s\n", ev->ev_seq, ev->channel, ev->target, ev_msg); shost_printk(KERN_INFO, shost, "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n", ev->channel, ev->target, sshdr.sense_key, sshdr.asc, sshdr.ascq); shost_printk(KERN_INFO, shost, "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n", ev->channel, ev->target, sense_info[0], sense_info[1], sense_info[2], sense_info[3], cmd_specific[0], cmd_specific[1], cmd_specific[2], cmd_specific[3]); break; case 'E': if (cs->disable_enc_msg) break; sprintf(msg_buf, ev_msg, ev->lun); shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n", ev->ev_seq, ev->target, msg_buf); break; case 'C': shost_printk(KERN_INFO, shost, "event %d: Controller %s\n", ev->ev_seq, ev_msg); break; default: shost_printk(KERN_INFO, shost, "event %d: Unknown Event Code %04X\n", ev->ev_seq, ev->ev_code); break; } } /* * SCSI sysfs interface functions */ static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); int ret; if (!sdev->hostdata) return snprintf(buf, 16, "Unknown\n"); if (sdev->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info = sdev->hostdata; const char *name; name = myrs_devstate_name(ldev_info->dev_state); if (name) ret = snprintf(buf, 32, "%s\n", name); else ret = snprintf(buf, 32, "Invalid (%02X)\n", ldev_info->dev_state); } else { struct myrs_pdev_info 
*pdev_info; const char *name; pdev_info = sdev->hostdata; name = myrs_devstate_name(pdev_info->dev_state); if (name) ret = snprintf(buf, 32, "%s\n", name); else ret = snprintf(buf, 32, "Invalid (%02X)\n", pdev_info->dev_state); } return ret; } static ssize_t raid_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_cmdblk *cmd_blk; union myrs_cmd_mbox *mbox; enum myrs_devstate new_state; unsigned short ldev_num; unsigned char status; if (!strncmp(buf, "offline", 7) || !strncmp(buf, "kill", 4)) new_state = MYRS_DEVICE_OFFLINE; else if (!strncmp(buf, "online", 6)) new_state = MYRS_DEVICE_ONLINE; else if (!strncmp(buf, "standby", 7)) new_state = MYRS_DEVICE_STANDBY; else return -EINVAL; if (sdev->channel < cs->ctlr_info->physchan_present) { struct myrs_pdev_info *pdev_info = sdev->hostdata; struct myrs_devmap *pdev_devmap = (struct myrs_devmap *)&pdev_info->rsvd13; if (pdev_info->dev_state == new_state) { sdev_printk(KERN_INFO, sdev, "Device already in %s\n", myrs_devstate_name(new_state)); return count; } status = myrs_translate_pdev(cs, sdev->channel, sdev->id, sdev->lun, pdev_devmap); if (status != MYRS_STATUS_SUCCESS) return -ENXIO; ldev_num = pdev_devmap->ldev_num; } else { struct myrs_ldev_info *ldev_info = sdev->hostdata; if (ldev_info->dev_state == new_state) { sdev_printk(KERN_INFO, sdev, "Device already in %s\n", myrs_devstate_name(new_state)); return count; } ldev_num = ldev_info->ldev_num; } mutex_lock(&cs->dcmd_mutex); cmd_blk = &cs->dcmd_blk; myrs_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->common.opcode = MYRS_CMD_OP_IOCTL; mbox->common.id = MYRS_DCMD_TAG; mbox->common.control.dma_ctrl_to_host = true; mbox->common.control.no_autosense = true; mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE; mbox->set_devstate.state = new_state; mbox->set_devstate.ldev.ldev_num = ldev_num; myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); if (status == MYRS_STATUS_SUCCESS) { if (sdev->channel < cs->ctlr_info->physchan_present) { struct myrs_pdev_info *pdev_info = sdev->hostdata; pdev_info->dev_state = new_state; } else { struct myrs_ldev_info *ldev_info = sdev->hostdata; ldev_info->dev_state = new_state; } sdev_printk(KERN_INFO, sdev, "Set device state to %s\n", myrs_devstate_name(new_state)); return count; } sdev_printk(KERN_INFO, sdev, "Failed to set device state to %s, status 0x%02x\n", myrs_devstate_name(new_state), status); return -EINVAL; } static DEVICE_ATTR_RW(raid_state); static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); const char *name = NULL; if (!sdev->hostdata) return snprintf(buf, 16, "Unknown\n"); if (sdev->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info; ldev_info = sdev->hostdata; name = myrs_raid_level_name(ldev_info->raid_level); if (!name) return snprintf(buf, 32, "Invalid (%02X)\n", ldev_info->dev_state); } else name = myrs_raid_level_name(MYRS_RAID_PHYSICAL); return snprintf(buf, 32, "%s\n", name); } static DEVICE_ATTR_RO(raid_level); static ssize_t rebuild_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info; unsigned short ldev_num; unsigned char status; if 
(sdev->channel < cs->ctlr_info->physchan_present) return snprintf(buf, 32, "physical device - not rebuilding\n"); ldev_info = sdev->hostdata; ldev_num = ldev_info->ldev_num; status = myrs_get_ldev_info(cs, ldev_num, ldev_info); if (status != MYRS_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Failed to get device information, status 0x%02x\n", status); return -EIO; } if (ldev_info->rbld_active) { return snprintf(buf, 32, "rebuilding block %zu of %zu\n", (size_t)ldev_info->rbld_lba, (size_t)ldev_info->cfg_devsize); } else return snprintf(buf, 32, "not rebuilding\n"); } static ssize_t rebuild_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info; struct myrs_cmdblk *cmd_blk; union myrs_cmd_mbox *mbox; unsigned short ldev_num; unsigned char status; int rebuild, ret; if (sdev->channel < cs->ctlr_info->physchan_present) return -EINVAL; ldev_info = sdev->hostdata; if (!ldev_info) return -ENXIO; ldev_num = ldev_info->ldev_num; ret = kstrtoint(buf, 0, &rebuild); if (ret) return ret; status = myrs_get_ldev_info(cs, ldev_num, ldev_info); if (status != MYRS_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Failed to get device information, status 0x%02x\n", status); return -EIO; } if (rebuild && ldev_info->rbld_active) { sdev_printk(KERN_INFO, sdev, "Rebuild Not Initiated; already in progress\n"); return -EALREADY; } if (!rebuild && !ldev_info->rbld_active) { sdev_printk(KERN_INFO, sdev, "Rebuild Not Cancelled; no rebuild in progress\n"); return count; } mutex_lock(&cs->dcmd_mutex); cmd_blk = &cs->dcmd_blk; myrs_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->common.opcode = MYRS_CMD_OP_IOCTL; mbox->common.id = MYRS_DCMD_TAG; mbox->common.control.dma_ctrl_to_host = true; mbox->common.control.no_autosense = true; if (rebuild) { mbox->ldev_info.ldev.ldev_num = ldev_num; mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START; } else { mbox->ldev_info.ldev.ldev_num = ldev_num; mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP; } myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); if (status) { sdev_printk(KERN_INFO, sdev, "Rebuild Not %s, status 0x%02x\n", rebuild ? "Initiated" : "Cancelled", status); ret = -EIO; } else { sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", rebuild ? 
"Initiated" : "Cancelled"); ret = count; } return ret; } static DEVICE_ATTR_RW(rebuild); static ssize_t consistency_check_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info; unsigned short ldev_num; if (sdev->channel < cs->ctlr_info->physchan_present) return snprintf(buf, 32, "physical device - not checking\n"); ldev_info = sdev->hostdata; if (!ldev_info) return -ENXIO; ldev_num = ldev_info->ldev_num; myrs_get_ldev_info(cs, ldev_num, ldev_info); if (ldev_info->cc_active) return snprintf(buf, 32, "checking block %zu of %zu\n", (size_t)ldev_info->cc_lba, (size_t)ldev_info->cfg_devsize); else return snprintf(buf, 32, "not checking\n"); } static ssize_t consistency_check_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info; struct myrs_cmdblk *cmd_blk; union myrs_cmd_mbox *mbox; unsigned short ldev_num; unsigned char status; int check, ret; if (sdev->channel < cs->ctlr_info->physchan_present) return -EINVAL; ldev_info = sdev->hostdata; if (!ldev_info) return -ENXIO; ldev_num = ldev_info->ldev_num; ret = kstrtoint(buf, 0, &check); if (ret) return ret; status = myrs_get_ldev_info(cs, ldev_num, ldev_info); if (status != MYRS_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Failed to get device information, status 0x%02x\n", status); return -EIO; } if (check && ldev_info->cc_active) { sdev_printk(KERN_INFO, sdev, "Consistency Check Not Initiated; " "already in progress\n"); return -EALREADY; } if (!check && !ldev_info->cc_active) { sdev_printk(KERN_INFO, sdev, "Consistency Check Not Cancelled; " "check not in progress\n"); return count; } mutex_lock(&cs->dcmd_mutex); cmd_blk = &cs->dcmd_blk; myrs_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->common.opcode = MYRS_CMD_OP_IOCTL; mbox->common.id = MYRS_DCMD_TAG; mbox->common.control.dma_ctrl_to_host = true; mbox->common.control.no_autosense = true; if (check) { mbox->cc.ldev.ldev_num = ldev_num; mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START; mbox->cc.restore_consistency = true; mbox->cc.initialized_area_only = false; } else { mbox->cc.ldev.ldev_num = ldev_num; mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP; } myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); if (status != MYRS_STATUS_SUCCESS) { sdev_printk(KERN_INFO, sdev, "Consistency Check Not %s, status 0x%02x\n", check ? "Initiated" : "Cancelled", status); ret = -EIO; } else { sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n", check ? 
"Initiated" : "Cancelled"); ret = count; } return ret; } static DEVICE_ATTR_RW(consistency_check); static struct attribute *myrs_sdev_attrs[] = { &dev_attr_consistency_check.attr, &dev_attr_rebuild.attr, &dev_attr_raid_state.attr, &dev_attr_raid_level.attr, NULL, }; ATTRIBUTE_GROUPS(myrs_sdev); static ssize_t serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); char serial[17]; memcpy(serial, cs->ctlr_info->serial_number, 16); serial[16] = '\0'; return snprintf(buf, 16, "%s\n", serial); } static DEVICE_ATTR_RO(serial); static ssize_t ctlr_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); return snprintf(buf, 20, "%d\n", cs->host->host_no); } static DEVICE_ATTR_RO(ctlr_num); static struct myrs_cpu_type_tbl { enum myrs_cpu_type type; char *name; } myrs_cpu_type_names[] = { { MYRS_CPUTYPE_i960CA, "i960CA" }, { MYRS_CPUTYPE_i960RD, "i960RD" }, { MYRS_CPUTYPE_i960RN, "i960RN" }, { MYRS_CPUTYPE_i960RP, "i960RP" }, { MYRS_CPUTYPE_NorthBay, "NorthBay" }, { MYRS_CPUTYPE_StrongArm, "StrongARM" }, { MYRS_CPUTYPE_i960RM, "i960RM" }, }; static ssize_t processor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); struct myrs_cpu_type_tbl *tbl; const char *first_processor = NULL; const char *second_processor = NULL; struct myrs_ctlr_info *info = cs->ctlr_info; ssize_t ret; int i; if (info->cpu[0].cpu_count) { tbl = myrs_cpu_type_names; for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) { if (tbl[i].type == info->cpu[0].cpu_type) { first_processor = tbl[i].name; break; } } } if (info->cpu[1].cpu_count) { tbl = myrs_cpu_type_names; for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) { if (tbl[i].type == info->cpu[1].cpu_type) { second_processor = tbl[i].name; break; } } } if (first_processor && second_processor) ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n" "2: %s (%s, %d cpus)\n", info->cpu[0].cpu_name, first_processor, info->cpu[0].cpu_count, info->cpu[1].cpu_name, second_processor, info->cpu[1].cpu_count); else if (first_processor && !second_processor) ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n", info->cpu[0].cpu_name, first_processor, info->cpu[0].cpu_count); else if (!first_processor && second_processor) ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n", info->cpu[1].cpu_name, second_processor, info->cpu[1].cpu_count); else ret = snprintf(buf, 64, "1: absent\n2: absent\n"); return ret; } static DEVICE_ATTR_RO(processor); static ssize_t model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); return snprintf(buf, 28, "%s\n", cs->model_name); } static DEVICE_ATTR_RO(model); static ssize_t ctlr_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type); } static DEVICE_ATTR_RO(ctlr_type); static ssize_t cache_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb); } static DEVICE_ATTR_RO(cache_size); static ssize_t 
firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); return snprintf(buf, 16, "%d.%02d-%02d\n", cs->ctlr_info->fw_major_version, cs->ctlr_info->fw_minor_version, cs->ctlr_info->fw_turn_number); } static DEVICE_ATTR_RO(firmware); static ssize_t discovery_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); struct myrs_cmdblk *cmd_blk; union myrs_cmd_mbox *mbox; unsigned char status; mutex_lock(&cs->dcmd_mutex); cmd_blk = &cs->dcmd_blk; myrs_reset_cmd(cmd_blk); mbox = &cmd_blk->mbox; mbox->common.opcode = MYRS_CMD_OP_IOCTL; mbox->common.id = MYRS_DCMD_TAG; mbox->common.control.dma_ctrl_to_host = true; mbox->common.control.no_autosense = true; mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY; myrs_exec_cmd(cs, cmd_blk); status = cmd_blk->status; mutex_unlock(&cs->dcmd_mutex); if (status != MYRS_STATUS_SUCCESS) { shost_printk(KERN_INFO, shost, "Discovery Not Initiated, status %02X\n", status); return -EINVAL; } shost_printk(KERN_INFO, shost, "Discovery Initiated\n"); cs->next_evseq = 0; cs->needs_update = true; queue_delayed_work(cs->work_q, &cs->monitor_work, 1); flush_delayed_work(&cs->monitor_work); shost_printk(KERN_INFO, shost, "Discovery Completed\n"); return count; } static DEVICE_ATTR_WO(discovery); static ssize_t flush_cache_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); unsigned char status; status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER); if (status == MYRS_STATUS_SUCCESS) { shost_printk(KERN_INFO, shost, "Cache Flush Completed\n"); return count; } shost_printk(KERN_INFO, shost, "Cache Flush failed, status 0x%02x\n", status); return -EIO; } static DEVICE_ATTR_WO(flush_cache); static ssize_t disable_enclosure_messages_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct myrs_hba *cs = shost_priv(shost); return snprintf(buf, 3, "%d\n", cs->disable_enc_msg); } static ssize_t disable_enclosure_messages_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); int value, ret; ret = kstrtoint(buf, 0, &value); if (ret) return ret; if (value > 2) return -EINVAL; cs->disable_enc_msg = value; return count; } static DEVICE_ATTR_RW(disable_enclosure_messages); static struct attribute *myrs_shost_attrs[] = { &dev_attr_serial.attr, &dev_attr_ctlr_num.attr, &dev_attr_processor.attr, &dev_attr_model.attr, &dev_attr_ctlr_type.attr, &dev_attr_cache_size.attr, &dev_attr_firmware.attr, &dev_attr_discovery.attr, &dev_attr_flush_cache.attr, &dev_attr_disable_enclosure_messages.attr, NULL, }; ATTRIBUTE_GROUPS(myrs_shost); /* * SCSI midlayer interface */ static int myrs_host_reset(struct scsi_cmnd *scmd) { struct Scsi_Host *shost = scmd->device->host; struct myrs_hba *cs = shost_priv(shost); cs->reset(cs->io_base); return SUCCESS; } static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd, struct myrs_ldev_info *ldev_info) { unsigned char modes[32], *mode_pg; bool dbd; size_t mode_len; dbd = (scmd->cmnd[1] & 0x08) == 0x08; if (dbd) { mode_len = 24; mode_pg = &modes[4]; } else { mode_len = 32; mode_pg = 
&modes[12]; } memset(modes, 0, sizeof(modes)); modes[0] = mode_len - 1; modes[2] = 0x10; /* Enable FUA */ if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO) modes[2] |= 0x80; if (!dbd) { unsigned char *block_desc = &modes[4]; modes[3] = 8; put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]); put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]); } mode_pg[0] = 0x08; mode_pg[1] = 0x12; if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED) mode_pg[2] |= 0x01; if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED || ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED) mode_pg[2] |= 0x04; if (ldev_info->cacheline_size) { mode_pg[2] |= 0x08; put_unaligned_be16(1 << ldev_info->cacheline_size, &mode_pg[14]); } scsi_sg_copy_from_buffer(scmd, modes, mode_len); } static int myrs_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct request *rq = scsi_cmd_to_rq(scmd); struct myrs_hba *cs = shost_priv(shost); struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd); union myrs_cmd_mbox *mbox = &cmd_blk->mbox; struct scsi_device *sdev = scmd->device; union myrs_sgl *hw_sge; dma_addr_t sense_addr; struct scatterlist *sgl; unsigned long flags, timeout; int nsge; if (!scmd->device->hostdata) { scmd->result = (DID_NO_CONNECT << 16); scsi_done(scmd); return 0; } switch (scmd->cmnd[0]) { case REPORT_LUNS: scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0); scsi_done(scmd); return 0; case MODE_SENSE: if (scmd->device->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info = sdev->hostdata; if ((scmd->cmnd[2] & 0x3F) != 0x3F && (scmd->cmnd[2] & 0x3F) != 0x08) { /* Illegal request, invalid field in CDB */ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); } else { myrs_mode_sense(cs, scmd, ldev_info); scmd->result = (DID_OK << 16); } scsi_done(scmd); return 0; } break; } myrs_reset_cmd(cmd_blk); cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC, &sense_addr); if (!cmd_blk->sense) return SCSI_MLQUEUE_HOST_BUSY; cmd_blk->sense_addr = sense_addr; timeout = rq->timeout; if (scmd->cmd_len <= 10) { if (scmd->device->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info = sdev->hostdata; mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10; mbox->SCSI_10.pdev.lun = ldev_info->lun; mbox->SCSI_10.pdev.target = ldev_info->target; mbox->SCSI_10.pdev.channel = ldev_info->channel; mbox->SCSI_10.pdev.ctlr = 0; } else { mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU; mbox->SCSI_10.pdev.lun = sdev->lun; mbox->SCSI_10.pdev.target = sdev->id; mbox->SCSI_10.pdev.channel = sdev->channel; } mbox->SCSI_10.id = rq->tag + 3; mbox->SCSI_10.control.dma_ctrl_to_host = (scmd->sc_data_direction == DMA_FROM_DEVICE); if (rq->cmd_flags & REQ_FUA) mbox->SCSI_10.control.fua = true; mbox->SCSI_10.dma_size = scsi_bufflen(scmd); mbox->SCSI_10.sense_addr = cmd_blk->sense_addr; mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE; mbox->SCSI_10.cdb_len = scmd->cmd_len; if (timeout > 60) { mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES; mbox->SCSI_10.tmo.tmo_val = timeout / 60; } else { mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS; mbox->SCSI_10.tmo.tmo_val = timeout; } memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len); hw_sge = &mbox->SCSI_10.dma_addr; cmd_blk->dcdb = NULL; } else { dma_addr_t dcdb_dma; cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC, &dcdb_dma); if (!cmd_blk->dcdb) { dma_pool_free(cs->sense_pool, cmd_blk->sense, cmd_blk->sense_addr); cmd_blk->sense = NULL; cmd_blk->sense_addr = 0; return 
SCSI_MLQUEUE_HOST_BUSY; } cmd_blk->dcdb_dma = dcdb_dma; if (scmd->device->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info = sdev->hostdata; mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256; mbox->SCSI_255.pdev.lun = ldev_info->lun; mbox->SCSI_255.pdev.target = ldev_info->target; mbox->SCSI_255.pdev.channel = ldev_info->channel; mbox->SCSI_255.pdev.ctlr = 0; } else { mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU; mbox->SCSI_255.pdev.lun = sdev->lun; mbox->SCSI_255.pdev.target = sdev->id; mbox->SCSI_255.pdev.channel = sdev->channel; } mbox->SCSI_255.id = rq->tag + 3; mbox->SCSI_255.control.dma_ctrl_to_host = (scmd->sc_data_direction == DMA_FROM_DEVICE); if (rq->cmd_flags & REQ_FUA) mbox->SCSI_255.control.fua = true; mbox->SCSI_255.dma_size = scsi_bufflen(scmd); mbox->SCSI_255.sense_addr = cmd_blk->sense_addr; mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE; mbox->SCSI_255.cdb_len = scmd->cmd_len; mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma; if (timeout > 60) { mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES; mbox->SCSI_255.tmo.tmo_val = timeout / 60; } else { mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS; mbox->SCSI_255.tmo.tmo_val = timeout; } memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len); hw_sge = &mbox->SCSI_255.dma_addr; } if (scmd->sc_data_direction == DMA_NONE) goto submit; nsge = scsi_dma_map(scmd); if (nsge == 1) { sgl = scsi_sglist(scmd); hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl); hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl); } else { struct myrs_sge *hw_sgl; dma_addr_t hw_sgl_addr; int i; if (nsge > 2) { hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC, &hw_sgl_addr); if (WARN_ON(!hw_sgl)) { if (cmd_blk->dcdb) { dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb, cmd_blk->dcdb_dma); cmd_blk->dcdb = NULL; cmd_blk->dcdb_dma = 0; } dma_pool_free(cs->sense_pool, cmd_blk->sense, cmd_blk->sense_addr); cmd_blk->sense = NULL; cmd_blk->sense_addr = 0; return SCSI_MLQUEUE_HOST_BUSY; } cmd_blk->sgl = hw_sgl; cmd_blk->sgl_addr = hw_sgl_addr; if (scmd->cmd_len <= 10) mbox->SCSI_10.control.add_sge_mem = true; else mbox->SCSI_255.control.add_sge_mem = true; hw_sge->ext.sge0_len = nsge; hw_sge->ext.sge0_addr = cmd_blk->sgl_addr; } else hw_sgl = hw_sge->sge; scsi_for_each_sg(scmd, sgl, nsge, i) { if (WARN_ON(!hw_sgl)) { scsi_dma_unmap(scmd); scmd->result = (DID_ERROR << 16); scsi_done(scmd); return 0; } hw_sgl->sge_addr = (u64)sg_dma_address(sgl); hw_sgl->sge_count = (u64)sg_dma_len(sgl); hw_sgl++; } } submit: spin_lock_irqsave(&cs->queue_lock, flags); myrs_qcmd(cs, cmd_blk); spin_unlock_irqrestore(&cs->queue_lock, flags); return 0; } static unsigned short myrs_translate_ldev(struct myrs_hba *cs, struct scsi_device *sdev) { unsigned short ldev_num; unsigned int chan_offset = sdev->channel - cs->ctlr_info->physchan_present; ldev_num = sdev->id + chan_offset * sdev->host->max_id; return ldev_num; } static int myrs_slave_alloc(struct scsi_device *sdev) { struct myrs_hba *cs = shost_priv(sdev->host); unsigned char status; if (sdev->channel > sdev->host->max_channel) return 0; if (sdev->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info; unsigned short ldev_num; if (sdev->lun > 0) return -ENXIO; ldev_num = myrs_translate_ldev(cs, sdev); ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL); if (!ldev_info) return -ENOMEM; status = myrs_get_ldev_info(cs, ldev_num, ldev_info); if (status != MYRS_STATUS_SUCCESS) { sdev->hostdata = NULL; kfree(ldev_info); } else { enum raid_level level; dev_dbg(&sdev->sdev_gendev, "Logical device 
mapping %d:%d:%d -> %d\n", ldev_info->channel, ldev_info->target, ldev_info->lun, ldev_info->ldev_num); sdev->hostdata = ldev_info; switch (ldev_info->raid_level) { case MYRS_RAID_LEVEL0: level = RAID_LEVEL_LINEAR; break; case MYRS_RAID_LEVEL1: level = RAID_LEVEL_1; break; case MYRS_RAID_LEVEL3: case MYRS_RAID_LEVEL3F: case MYRS_RAID_LEVEL3L: level = RAID_LEVEL_3; break; case MYRS_RAID_LEVEL5: case MYRS_RAID_LEVEL5L: level = RAID_LEVEL_5; break; case MYRS_RAID_LEVEL6: level = RAID_LEVEL_6; break; case MYRS_RAID_LEVELE: case MYRS_RAID_NEWSPAN: case MYRS_RAID_SPAN: level = RAID_LEVEL_LINEAR; break; case MYRS_RAID_JBOD: level = RAID_LEVEL_JBOD; break; default: level = RAID_LEVEL_UNKNOWN; break; } raid_set_level(myrs_raid_template, &sdev->sdev_gendev, level); if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) { const char *name; name = myrs_devstate_name(ldev_info->dev_state); sdev_printk(KERN_DEBUG, sdev, "logical device in state %s\n", name ? name : "Invalid"); } } } else { struct myrs_pdev_info *pdev_info; pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL); if (!pdev_info) return -ENOMEM; status = myrs_get_pdev_info(cs, sdev->channel, sdev->id, sdev->lun, pdev_info); if (status != MYRS_STATUS_SUCCESS) { sdev->hostdata = NULL; kfree(pdev_info); return -ENXIO; } sdev->hostdata = pdev_info; } return 0; } static int myrs_slave_configure(struct scsi_device *sdev) { struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info; if (sdev->channel > sdev->host->max_channel) return -ENXIO; if (sdev->channel < cs->ctlr_info->physchan_present) { /* Skip HBA device */ if (sdev->type == TYPE_RAID) return -ENXIO; sdev->no_uld_attach = 1; return 0; } if (sdev->lun != 0) return -ENXIO; ldev_info = sdev->hostdata; if (!ldev_info) return -ENXIO; if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED || ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED) sdev->wce_default_on = 1; sdev->tagged_supported = 1; return 0; } static void myrs_slave_destroy(struct scsi_device *sdev) { kfree(sdev->hostdata); } static const struct scsi_host_template myrs_template = { .module = THIS_MODULE, .name = "DAC960", .proc_name = "myrs", .queuecommand = myrs_queuecommand, .eh_host_reset_handler = myrs_host_reset, .slave_alloc = myrs_slave_alloc, .slave_configure = myrs_slave_configure, .slave_destroy = myrs_slave_destroy, .cmd_size = sizeof(struct myrs_cmdblk), .shost_groups = myrs_shost_groups, .sdev_groups = myrs_sdev_groups, .this_id = -1, }; static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev, const struct pci_device_id *entry) { struct Scsi_Host *shost; struct myrs_hba *cs; shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba)); if (!shost) return NULL; shost->max_cmd_len = 16; shost->max_lun = 256; cs = shost_priv(shost); mutex_init(&cs->dcmd_mutex); mutex_init(&cs->cinfo_mutex); cs->host = shost; return cs; } /* * RAID template functions */ /** * myrs_is_raid - return boolean indicating device is raid volume * @dev: the device struct object */ static int myrs_is_raid(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); return (sdev->channel >= cs->ctlr_info->physchan_present) ? 
1 : 0; } /** * myrs_get_resync - get raid volume resync percent complete * @dev: the device struct object */ static void myrs_get_resync(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info = sdev->hostdata; u64 percent_complete = 0; if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) return; if (ldev_info->rbld_active) { unsigned short ldev_num = ldev_info->ldev_num; myrs_get_ldev_info(cs, ldev_num, ldev_info); percent_complete = ldev_info->rbld_lba * 100; do_div(percent_complete, ldev_info->cfg_devsize); } raid_set_resync(myrs_raid_template, dev, percent_complete); } /** * myrs_get_state - get raid volume status * @dev: the device struct object */ static void myrs_get_state(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct myrs_hba *cs = shost_priv(sdev->host); struct myrs_ldev_info *ldev_info = sdev->hostdata; enum raid_state state = RAID_STATE_UNKNOWN; if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) state = RAID_STATE_UNKNOWN; else { switch (ldev_info->dev_state) { case MYRS_DEVICE_ONLINE: state = RAID_STATE_ACTIVE; break; case MYRS_DEVICE_SUSPECTED_CRITICAL: case MYRS_DEVICE_CRITICAL: state = RAID_STATE_DEGRADED; break; case MYRS_DEVICE_REBUILD: state = RAID_STATE_RESYNCING; break; case MYRS_DEVICE_UNCONFIGURED: case MYRS_DEVICE_INVALID_STATE: state = RAID_STATE_UNKNOWN; break; default: state = RAID_STATE_OFFLINE; } } raid_set_state(myrs_raid_template, dev, state); } static struct raid_function_template myrs_raid_functions = { .cookie = &myrs_template, .is_raid = myrs_is_raid, .get_resync = myrs_get_resync, .get_state = myrs_get_state, }; /* * PCI interface functions */ static void myrs_flush_cache(struct myrs_hba *cs) { myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER); } static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk, struct scsi_cmnd *scmd) { unsigned char status; if (!cmd_blk) return; scsi_dma_unmap(scmd); status = cmd_blk->status; if (cmd_blk->sense) { if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) { unsigned int sense_len = SCSI_SENSE_BUFFERSIZE; if (sense_len > cmd_blk->sense_len) sense_len = cmd_blk->sense_len; memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len); } dma_pool_free(cs->sense_pool, cmd_blk->sense, cmd_blk->sense_addr); cmd_blk->sense = NULL; cmd_blk->sense_addr = 0; } if (cmd_blk->dcdb) { dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb, cmd_blk->dcdb_dma); cmd_blk->dcdb = NULL; cmd_blk->dcdb_dma = 0; } if (cmd_blk->sgl) { dma_pool_free(cs->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr); cmd_blk->sgl = NULL; cmd_blk->sgl_addr = 0; } if (cmd_blk->residual) scsi_set_resid(scmd, cmd_blk->residual); if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE || status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2) scmd->result = (DID_BAD_TARGET << 16); else scmd->result = (DID_OK << 16) | status; scsi_done(scmd); } static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) { if (!cmd_blk) return; if (cmd_blk->complete) { complete(cmd_blk->complete); cmd_blk->complete = NULL; } } static void myrs_monitor(struct work_struct *work) { struct myrs_hba *cs = container_of(work, struct myrs_hba, monitor_work.work); struct Scsi_Host *shost = cs->host; struct myrs_ctlr_info *info = cs->ctlr_info; unsigned int epoch = cs->fwstat_buf->epoch; unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL; unsigned char status; dev_dbg(&shost->shost_gendev, "monitor tick\n"); status = 
myrs_get_fwstatus(cs); if (cs->needs_update) { cs->needs_update = false; mutex_lock(&cs->cinfo_mutex); status = myrs_get_ctlr_info(cs); mutex_unlock(&cs->cinfo_mutex); } if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) { status = myrs_get_event(cs, cs->next_evseq, cs->event_buf); if (status == MYRS_STATUS_SUCCESS) { myrs_log_event(cs, cs->event_buf); cs->next_evseq++; interval = 1; } } if (time_after(jiffies, cs->secondary_monitor_time + MYRS_SECONDARY_MONITOR_INTERVAL)) cs->secondary_monitor_time = jiffies; if (info->bg_init_active + info->ldev_init_active + info->pdev_init_active + info->cc_active + info->rbld_active + info->exp_active != 0) { struct scsi_device *sdev; shost_for_each_device(sdev, shost) { struct myrs_ldev_info *ldev_info; int ldev_num; if (sdev->channel < info->physchan_present) continue; ldev_info = sdev->hostdata; if (!ldev_info) continue; ldev_num = ldev_info->ldev_num; myrs_get_ldev_info(cs, ldev_num, ldev_info); } cs->needs_update = true; } if (epoch == cs->epoch && cs->fwstat_buf->next_evseq == cs->next_evseq && (cs->needs_update == false || time_before(jiffies, cs->primary_monitor_time + MYRS_PRIMARY_MONITOR_INTERVAL))) { interval = MYRS_SECONDARY_MONITOR_INTERVAL; } if (interval > 1) cs->primary_monitor_time = jiffies; queue_delayed_work(cs->work_q, &cs->monitor_work, interval); } static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs) { struct Scsi_Host *shost = cs->host; size_t elem_size, elem_align; elem_align = sizeof(struct myrs_sge); elem_size = shost->sg_tablesize * elem_align; cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev, elem_size, elem_align, 0); if (cs->sg_pool == NULL) { shost_printk(KERN_ERR, shost, "Failed to allocate SG pool\n"); return false; } cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev, MYRS_SENSE_SIZE, sizeof(int), 0); if (cs->sense_pool == NULL) { dma_pool_destroy(cs->sg_pool); cs->sg_pool = NULL; shost_printk(KERN_ERR, shost, "Failed to allocate sense data pool\n"); return false; } cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev, MYRS_DCDB_SIZE, sizeof(unsigned char), 0); if (!cs->dcdb_pool) { dma_pool_destroy(cs->sg_pool); cs->sg_pool = NULL; dma_pool_destroy(cs->sense_pool); cs->sense_pool = NULL; shost_printk(KERN_ERR, shost, "Failed to allocate DCDB pool\n"); return false; } snprintf(cs->work_q_name, sizeof(cs->work_q_name), "myrs_wq_%d", shost->host_no); cs->work_q = create_singlethread_workqueue(cs->work_q_name); if (!cs->work_q) { dma_pool_destroy(cs->dcdb_pool); cs->dcdb_pool = NULL; dma_pool_destroy(cs->sg_pool); cs->sg_pool = NULL; dma_pool_destroy(cs->sense_pool); cs->sense_pool = NULL; shost_printk(KERN_ERR, shost, "Failed to create workqueue\n"); return false; } /* Initialize the Monitoring Timer. 
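 * myrs_monitor() re-queues this delayed work itself, so it only needs to be started once here.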
*/ INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor); queue_delayed_work(cs->work_q, &cs->monitor_work, 1); return true; } static void myrs_destroy_mempools(struct myrs_hba *cs) { cancel_delayed_work_sync(&cs->monitor_work); destroy_workqueue(cs->work_q); dma_pool_destroy(cs->sg_pool); dma_pool_destroy(cs->dcdb_pool); dma_pool_destroy(cs->sense_pool); } static void myrs_unmap(struct myrs_hba *cs) { kfree(cs->event_buf); kfree(cs->ctlr_info); if (cs->fwstat_buf) { dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat), cs->fwstat_buf, cs->fwstat_addr); cs->fwstat_buf = NULL; } if (cs->first_stat_mbox) { dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size, cs->first_stat_mbox, cs->stat_mbox_addr); cs->first_stat_mbox = NULL; } if (cs->first_cmd_mbox) { dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size, cs->first_cmd_mbox, cs->cmd_mbox_addr); cs->first_cmd_mbox = NULL; } } static void myrs_cleanup(struct myrs_hba *cs) { struct pci_dev *pdev = cs->pdev; /* Free the memory mailbox, status, and related structures */ myrs_unmap(cs); if (cs->mmio_base) { if (cs->disable_intr) cs->disable_intr(cs); iounmap(cs->mmio_base); cs->mmio_base = NULL; } if (cs->irq) free_irq(cs->irq, cs); if (cs->io_addr) release_region(cs->io_addr, 0x80); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); scsi_host_put(cs->host); } static struct myrs_hba *myrs_detect(struct pci_dev *pdev, const struct pci_device_id *entry) { struct myrs_privdata *privdata = (struct myrs_privdata *)entry->driver_data; irq_handler_t irq_handler = privdata->irq_handler; unsigned int mmio_size = privdata->mmio_size; struct myrs_hba *cs = NULL; cs = myrs_alloc_host(pdev, entry); if (!cs) { dev_err(&pdev->dev, "Unable to allocate Controller\n"); return NULL; } cs->pdev = pdev; if (pci_enable_device(pdev)) goto Failure; cs->pci_addr = pci_resource_start(pdev, 0); pci_set_drvdata(pdev, cs); spin_lock_init(&cs->queue_lock); /* Map the Controller Register Window. */ if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size); if (cs->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); goto Failure; } cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK); if (privdata->hw_init(pdev, cs, cs->io_base)) goto Failure; /* Acquire shared access to the IRQ Channel. */ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) { dev_err(&pdev->dev, "Unable to acquire IRQ Channel %d\n", pdev->irq); goto Failure; } cs->irq = pdev->irq; return cs; Failure: dev_err(&pdev->dev, "Failed to initialize Controller\n"); myrs_cleanup(cs); return NULL; } /* * myrs_err_status reports Controller BIOS Messages passed through * the Error Status Register when the driver performs the BIOS handshaking. * It returns true for fatal errors and false otherwise. 
*/ static bool myrs_err_status(struct myrs_hba *cs, unsigned char status, unsigned char parm0, unsigned char parm1) { struct pci_dev *pdev = cs->pdev; switch (status) { case 0x00: dev_info(&pdev->dev, "Physical Device %d:%d Not Responding\n", parm1, parm0); break; case 0x08: dev_notice(&pdev->dev, "Spinning Up Drives\n"); break; case 0x30: dev_notice(&pdev->dev, "Configuration Checksum Error\n"); break; case 0x60: dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n"); break; case 0x70: dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n"); break; case 0x90: dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n", parm1, parm0); break; case 0xA0: dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n"); break; case 0xB0: dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n"); break; case 0xD0: dev_notice(&pdev->dev, "New Controller Configuration Found\n"); break; case 0xF0: dev_err(&pdev->dev, "Fatal Memory Parity Error\n"); return true; default: dev_err(&pdev->dev, "Unknown Initialization Error %02X\n", status); return true; } return false; } /* * Hardware-specific functions */ /* * DAC960 GEM Series Controllers. */ static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base) { __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24); writel(val, base + DAC960_GEM_IDB_READ_OFFSET); } static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base) { __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24); writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET); } static inline void DAC960_GEM_reset_ctrl(void __iomem *base) { __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24); writel(val, base + DAC960_GEM_IDB_READ_OFFSET); } static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base) { __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24); writel(val, base + DAC960_GEM_IDB_READ_OFFSET); } static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base) { __le32 val; val = readl(base + DAC960_GEM_IDB_READ_OFFSET); return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL; } static inline bool DAC960_GEM_init_in_progress(void __iomem *base) { __le32 val; val = readl(base + DAC960_GEM_IDB_READ_OFFSET); return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS; } static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base) { __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24); writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); } static inline void DAC960_GEM_ack_intr(void __iomem *base) { __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ | DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24); writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); } static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base) { __le32 val; val = readl(base + DAC960_GEM_ODB_READ_OFFSET); return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL; } static inline void DAC960_GEM_enable_intr(void __iomem *base) { __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ | DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24); writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET); } static inline void DAC960_GEM_disable_intr(void __iomem *base) { __le32 val = 0; writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET); } static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, union myrs_cmd_mbox *mbox) { memcpy(&mem_mbox->words[1], &mbox->words[1], sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); /* Barrier to avoid reordering */ wmb(); mem_mbox->words[0] = mbox->words[0]; /* Barrier to force PCI access */ mb(); } static 
inline void DAC960_GEM_write_hw_mbox(void __iomem *base, dma_addr_t cmd_mbox_addr) { dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET); } static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base) { return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2); } static inline bool DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error, unsigned char *param0, unsigned char *param1) { __le32 val; val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET); if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING)) return false; *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24); *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0); *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1); writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET); return true; } static inline unsigned char DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr) { unsigned char status; while (DAC960_GEM_hw_mbox_is_full(base)) udelay(1); DAC960_GEM_write_hw_mbox(base, mbox_addr); DAC960_GEM_hw_mbox_new_cmd(base); while (!DAC960_GEM_hw_mbox_status_available(base)) udelay(1); status = DAC960_GEM_read_cmd_status(base); DAC960_GEM_ack_hw_mbox_intr(base); DAC960_GEM_ack_hw_mbox_status(base); return status; } static int DAC960_GEM_hw_init(struct pci_dev *pdev, struct myrs_hba *cs, void __iomem *base) { int timeout = 0; unsigned char status, parm0, parm1; DAC960_GEM_disable_intr(base); DAC960_GEM_ack_hw_mbox_status(base); udelay(1000); while (DAC960_GEM_init_in_progress(base) && timeout < MYRS_MAILBOX_TIMEOUT) { if (DAC960_GEM_read_error_status(base, &status, &parm0, &parm1) && myrs_err_status(cs, status, parm0, parm1)) return -EIO; udelay(10); timeout++; } if (timeout == MYRS_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) { dev_err(&pdev->dev, "Unable to Enable Memory Mailbox Interface\n"); DAC960_GEM_reset_ctrl(base); return -EAGAIN; } DAC960_GEM_enable_intr(base); cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox; cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd; cs->disable_intr = DAC960_GEM_disable_intr; cs->reset = DAC960_GEM_reset_ctrl; return 0; } static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg) { struct myrs_hba *cs = arg; void __iomem *base = cs->io_base; struct myrs_stat_mbox *next_stat_mbox; unsigned long flags; spin_lock_irqsave(&cs->queue_lock, flags); DAC960_GEM_ack_intr(base); next_stat_mbox = cs->next_stat_mbox; while (next_stat_mbox->id > 0) { unsigned short id = next_stat_mbox->id; struct scsi_cmnd *scmd = NULL; struct myrs_cmdblk *cmd_blk = NULL; if (id == MYRS_DCMD_TAG) cmd_blk = &cs->dcmd_blk; else if (id == MYRS_MCMD_TAG) cmd_blk = &cs->mcmd_blk; else { scmd = scsi_host_find_tag(cs->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) { cmd_blk->status = next_stat_mbox->status; cmd_blk->sense_len = next_stat_mbox->sense_len; cmd_blk->residual = next_stat_mbox->residual; } else dev_err(&cs->pdev->dev, "Unhandled command completion %d\n", id); memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); if (++next_stat_mbox > cs->last_stat_mbox) next_stat_mbox = cs->first_stat_mbox; if (cmd_blk) { if (id < 3) myrs_handle_cmdblk(cs, cmd_blk); else myrs_handle_scsi(cs, cmd_blk, scmd); } } cs->next_stat_mbox = next_stat_mbox; spin_unlock_irqrestore(&cs->queue_lock, flags); return IRQ_HANDLED; } static struct myrs_privdata DAC960_GEM_privdata = { .hw_init = DAC960_GEM_hw_init, .irq_handler = DAC960_GEM_intr_handler, .mmio_size = 
DAC960_GEM_mmio_size, }; /* * DAC960 BA Series Controllers. */ static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base) { writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET); } static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base) { writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET); } static inline void DAC960_BA_reset_ctrl(void __iomem *base) { writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET); } static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base) { writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET); } static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base) { u8 val; val = readb(base + DAC960_BA_IDB_OFFSET); return !(val & DAC960_BA_IDB_HWMBOX_EMPTY); } static inline bool DAC960_BA_init_in_progress(void __iomem *base) { u8 val; val = readb(base + DAC960_BA_IDB_OFFSET); return !(val & DAC960_BA_IDB_INIT_DONE); } static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base) { writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET); } static inline void DAC960_BA_ack_intr(void __iomem *base) { writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET); } static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base) { u8 val; val = readb(base + DAC960_BA_ODB_OFFSET); return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL; } static inline void DAC960_BA_enable_intr(void __iomem *base) { writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET); } static inline void DAC960_BA_disable_intr(void __iomem *base) { writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET); } static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, union myrs_cmd_mbox *mbox) { memcpy(&mem_mbox->words[1], &mbox->words[1], sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); /* Barrier to avoid reordering */ wmb(); mem_mbox->words[0] = mbox->words[0]; /* Barrier to force PCI access */ mb(); } static inline void DAC960_BA_write_hw_mbox(void __iomem *base, dma_addr_t cmd_mbox_addr) { dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET); } static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base) { return readw(base + DAC960_BA_CMDSTS_OFFSET + 2); } static inline bool DAC960_BA_read_error_status(void __iomem *base, unsigned char *error, unsigned char *param0, unsigned char *param1) { u8 val; val = readb(base + DAC960_BA_ERRSTS_OFFSET); if (!(val & DAC960_BA_ERRSTS_PENDING)) return false; val &= ~DAC960_BA_ERRSTS_PENDING; *error = val; *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0); *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1); writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET); return true; } static inline unsigned char DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr) { unsigned char status; while (DAC960_BA_hw_mbox_is_full(base)) udelay(1); DAC960_BA_write_hw_mbox(base, mbox_addr); DAC960_BA_hw_mbox_new_cmd(base); while (!DAC960_BA_hw_mbox_status_available(base)) udelay(1); status = DAC960_BA_read_cmd_status(base); DAC960_BA_ack_hw_mbox_intr(base); DAC960_BA_ack_hw_mbox_status(base); return status; } static int DAC960_BA_hw_init(struct pci_dev *pdev, struct myrs_hba *cs, void __iomem *base) { int timeout = 0; unsigned char status, parm0, parm1; DAC960_BA_disable_intr(base); DAC960_BA_ack_hw_mbox_status(base); udelay(1000); while (DAC960_BA_init_in_progress(base) && timeout < MYRS_MAILBOX_TIMEOUT) { if (DAC960_BA_read_error_status(base, &status, &parm0, &parm1) && myrs_err_status(cs, status, parm0, parm1)) 
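			/* The controller BIOS reported a fatal error; abort initialization */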
return -EIO; udelay(10); timeout++; } if (timeout == MYRS_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) { dev_err(&pdev->dev, "Unable to Enable Memory Mailbox Interface\n"); DAC960_BA_reset_ctrl(base); return -EAGAIN; } DAC960_BA_enable_intr(base); cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox; cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd; cs->disable_intr = DAC960_BA_disable_intr; cs->reset = DAC960_BA_reset_ctrl; return 0; } static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg) { struct myrs_hba *cs = arg; void __iomem *base = cs->io_base; struct myrs_stat_mbox *next_stat_mbox; unsigned long flags; spin_lock_irqsave(&cs->queue_lock, flags); DAC960_BA_ack_intr(base); next_stat_mbox = cs->next_stat_mbox; while (next_stat_mbox->id > 0) { unsigned short id = next_stat_mbox->id; struct scsi_cmnd *scmd = NULL; struct myrs_cmdblk *cmd_blk = NULL; if (id == MYRS_DCMD_TAG) cmd_blk = &cs->dcmd_blk; else if (id == MYRS_MCMD_TAG) cmd_blk = &cs->mcmd_blk; else { scmd = scsi_host_find_tag(cs->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) { cmd_blk->status = next_stat_mbox->status; cmd_blk->sense_len = next_stat_mbox->sense_len; cmd_blk->residual = next_stat_mbox->residual; } else dev_err(&cs->pdev->dev, "Unhandled command completion %d\n", id); memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); if (++next_stat_mbox > cs->last_stat_mbox) next_stat_mbox = cs->first_stat_mbox; if (cmd_blk) { if (id < 3) myrs_handle_cmdblk(cs, cmd_blk); else myrs_handle_scsi(cs, cmd_blk, scmd); } } cs->next_stat_mbox = next_stat_mbox; spin_unlock_irqrestore(&cs->queue_lock, flags); return IRQ_HANDLED; } static struct myrs_privdata DAC960_BA_privdata = { .hw_init = DAC960_BA_hw_init, .irq_handler = DAC960_BA_intr_handler, .mmio_size = DAC960_BA_mmio_size, }; /* * DAC960 LP Series Controllers. 
*/ static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base) { writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET); } static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base) { writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET); } static inline void DAC960_LP_reset_ctrl(void __iomem *base) { writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET); } static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base) { writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET); } static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base) { u8 val; val = readb(base + DAC960_LP_IDB_OFFSET); return val & DAC960_LP_IDB_HWMBOX_FULL; } static inline bool DAC960_LP_init_in_progress(void __iomem *base) { u8 val; val = readb(base + DAC960_LP_IDB_OFFSET); return val & DAC960_LP_IDB_INIT_IN_PROGRESS; } static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base) { writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET); } static inline void DAC960_LP_ack_intr(void __iomem *base) { writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET); } static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base) { u8 val; val = readb(base + DAC960_LP_ODB_OFFSET); return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL; } static inline void DAC960_LP_enable_intr(void __iomem *base) { writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET); } static inline void DAC960_LP_disable_intr(void __iomem *base) { writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET); } static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, union myrs_cmd_mbox *mbox) { memcpy(&mem_mbox->words[1], &mbox->words[1], sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); /* Barrier to avoid reordering */ wmb(); mem_mbox->words[0] = mbox->words[0]; /* Barrier to force PCI access */ mb(); } static inline void DAC960_LP_write_hw_mbox(void __iomem *base, dma_addr_t cmd_mbox_addr) { dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET); } static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base) { return readw(base + DAC960_LP_CMDSTS_OFFSET + 2); } static inline bool DAC960_LP_read_error_status(void __iomem *base, unsigned char *error, unsigned char *param0, unsigned char *param1) { u8 val; val = readb(base + DAC960_LP_ERRSTS_OFFSET); if (!(val & DAC960_LP_ERRSTS_PENDING)) return false; val &= ~DAC960_LP_ERRSTS_PENDING; *error = val; *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0); *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1); writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET); return true; } static inline unsigned char DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr) { unsigned char status; while (DAC960_LP_hw_mbox_is_full(base)) udelay(1); DAC960_LP_write_hw_mbox(base, mbox_addr); DAC960_LP_hw_mbox_new_cmd(base); while (!DAC960_LP_hw_mbox_status_available(base)) udelay(1); status = DAC960_LP_read_cmd_status(base); DAC960_LP_ack_hw_mbox_intr(base); DAC960_LP_ack_hw_mbox_status(base); return status; } static int DAC960_LP_hw_init(struct pci_dev *pdev, struct myrs_hba *cs, void __iomem *base) { int timeout = 0; unsigned char status, parm0, parm1; DAC960_LP_disable_intr(base); DAC960_LP_ack_hw_mbox_status(base); udelay(1000); while (DAC960_LP_init_in_progress(base) && timeout < MYRS_MAILBOX_TIMEOUT) { if (DAC960_LP_read_error_status(base, &status, &parm0, &parm1) && myrs_err_status(cs, status, parm0, parm1)) return -EIO; udelay(10); timeout++; } if (timeout == 
MYRS_MAILBOX_TIMEOUT) { dev_err(&pdev->dev, "Timeout waiting for Controller Initialisation\n"); return -ETIMEDOUT; } if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) { dev_err(&pdev->dev, "Unable to Enable Memory Mailbox Interface\n"); DAC960_LP_reset_ctrl(base); return -ENODEV; } DAC960_LP_enable_intr(base); cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox; cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd; cs->disable_intr = DAC960_LP_disable_intr; cs->reset = DAC960_LP_reset_ctrl; return 0; } static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg) { struct myrs_hba *cs = arg; void __iomem *base = cs->io_base; struct myrs_stat_mbox *next_stat_mbox; unsigned long flags; spin_lock_irqsave(&cs->queue_lock, flags); DAC960_LP_ack_intr(base); next_stat_mbox = cs->next_stat_mbox; while (next_stat_mbox->id > 0) { unsigned short id = next_stat_mbox->id; struct scsi_cmnd *scmd = NULL; struct myrs_cmdblk *cmd_blk = NULL; if (id == MYRS_DCMD_TAG) cmd_blk = &cs->dcmd_blk; else if (id == MYRS_MCMD_TAG) cmd_blk = &cs->mcmd_blk; else { scmd = scsi_host_find_tag(cs->host, id - 3); if (scmd) cmd_blk = scsi_cmd_priv(scmd); } if (cmd_blk) { cmd_blk->status = next_stat_mbox->status; cmd_blk->sense_len = next_stat_mbox->sense_len; cmd_blk->residual = next_stat_mbox->residual; } else dev_err(&cs->pdev->dev, "Unhandled command completion %d\n", id); memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); if (++next_stat_mbox > cs->last_stat_mbox) next_stat_mbox = cs->first_stat_mbox; if (cmd_blk) { if (id < 3) myrs_handle_cmdblk(cs, cmd_blk); else myrs_handle_scsi(cs, cmd_blk, scmd); } } cs->next_stat_mbox = next_stat_mbox; spin_unlock_irqrestore(&cs->queue_lock, flags); return IRQ_HANDLED; } static struct myrs_privdata DAC960_LP_privdata = { .hw_init = DAC960_LP_hw_init, .irq_handler = DAC960_LP_intr_handler, .mmio_size = DAC960_LP_mmio_size, }; /* * Module functions */ static int myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry) { struct myrs_hba *cs; int ret; cs = myrs_detect(dev, entry); if (!cs) return -ENODEV; ret = myrs_get_config(cs); if (ret < 0) { myrs_cleanup(cs); return ret; } if (!myrs_create_mempools(dev, cs)) { ret = -ENOMEM; goto failed; } ret = scsi_add_host(cs->host, &dev->dev); if (ret) { dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret); myrs_destroy_mempools(cs); goto failed; } scsi_scan_host(cs->host); return 0; failed: myrs_cleanup(cs); return ret; } static void myrs_remove(struct pci_dev *pdev) { struct myrs_hba *cs = pci_get_drvdata(pdev); if (cs == NULL) return; shost_printk(KERN_NOTICE, cs->host, "Flushing Cache..."); myrs_flush_cache(cs); myrs_destroy_mempools(cs); myrs_cleanup(cs); } static const struct pci_device_id myrs_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_MYLEX_DAC960_GEM, PCI_VENDOR_ID_MYLEX, PCI_ANY_ID), .driver_data = (unsigned long) &DAC960_GEM_privdata, }, { PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata), }, { PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata), }, {0, }, }; MODULE_DEVICE_TABLE(pci, myrs_id_table); static struct pci_driver myrs_pci_driver = { .name = "myrs", .id_table = myrs_id_table, .probe = myrs_probe, .remove = myrs_remove, }; static int __init myrs_init_module(void) { int ret; myrs_raid_template = raid_class_attach(&myrs_raid_functions); if (!myrs_raid_template) return -ENODEV; ret = pci_register_driver(&myrs_pci_driver); if (ret) raid_class_release(myrs_raid_template); return ret; } static void __exit myrs_cleanup_module(void) { pci_unregister_driver(&myrs_pci_driver); 
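	/* Drop the raid_class template reference taken in myrs_init_module() */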
	raid_class_release(myrs_raid_template);
}

module_init(myrs_init_module);
module_exit(myrs_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
MODULE_AUTHOR("Hannes Reinecke <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/scsi/myrs.c
// SPDX-License-Identifier: GPL-2.0-only /* * scsi_sysfs.c * * SCSI sysfs interface routines. * * Created to pull SCSI mid layer sysfs routines into one file. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/bsg.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dh.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_devinfo.h> #include "scsi_priv.h" #include "scsi_logging.h" static struct device_type scsi_dev_type; static const struct { enum scsi_device_state value; char *name; } sdev_states[] = { { SDEV_CREATED, "created" }, { SDEV_RUNNING, "running" }, { SDEV_CANCEL, "cancel" }, { SDEV_DEL, "deleted" }, { SDEV_QUIESCE, "quiesce" }, { SDEV_OFFLINE, "offline" }, { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, { SDEV_BLOCK, "blocked" }, { SDEV_CREATED_BLOCK, "created-blocked" }, }; const char *scsi_device_state_name(enum scsi_device_state state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { if (sdev_states[i].value == state) { name = sdev_states[i].name; break; } } return name; } static const struct { enum scsi_host_state value; char *name; } shost_states[] = { { SHOST_CREATED, "created" }, { SHOST_RUNNING, "running" }, { SHOST_CANCEL, "cancel" }, { SHOST_DEL, "deleted" }, { SHOST_RECOVERY, "recovery" }, { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, { SHOST_DEL_RECOVERY, "deleted/recovery", }, }; const char *scsi_host_state_name(enum scsi_host_state state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(shost_states); i++) { if (shost_states[i].value == state) { name = shost_states[i].name; break; } } return name; } #ifdef CONFIG_SCSI_DH static const struct { unsigned char value; char *name; } sdev_access_states[] = { { SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" }, { SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" }, { SCSI_ACCESS_STATE_STANDBY, "standby" }, { SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" }, { SCSI_ACCESS_STATE_LBA, "lba-dependent" }, { SCSI_ACCESS_STATE_OFFLINE, "offline" }, { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, }; static const char *scsi_access_state_name(unsigned char state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) { if (sdev_access_states[i].value == state) { name = sdev_access_states[i].name; break; } } return name; } #endif static int check_set(unsigned long long *val, char *src) { char *last; if (strcmp(src, "-") == 0) { *val = SCAN_WILD_CARD; } else { /* * Doesn't check for int overflow */ *val = simple_strtoull(src, &last, 0); if (*last != '\0') return 1; } return 0; } static int scsi_scan(struct Scsi_Host *shost, const char *str) { char s1[15], s2[15], s3[17], junk; unsigned long long channel, id, lun; int res; res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); if (res != 3) return -EINVAL; if (check_set(&channel, s1)) return -EINVAL; if (check_set(&id, s2)) return -EINVAL; if (check_set(&lun, s3)) return -EINVAL; if (shost->transportt->user_scan) res = shost->transportt->user_scan(shost, channel, id, lun); else res = scsi_scan_host_selected(shost, channel, id, lun, SCSI_SCAN_MANUAL); return res; } /* * shost_show_function: macro to create an attr function that can be used to * show a non-bit field. 
*/ #define shost_show_function(name, field, format_string) \ static ssize_t \ show_##name (struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev); \ return snprintf (buf, 20, format_string, shost->field); \ } /* * shost_rd_attr: macro to create a function and attribute variable for a * read only field. */ #define shost_rd_attr2(name, field, format_string) \ shost_show_function(name, field, format_string) \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); #define shost_rd_attr(field, format_string) \ shost_rd_attr2(field, field, format_string) /* * Create the actual show/store functions and data structures. */ static ssize_t store_scan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); int res; res = scsi_scan(shost, buf); if (res == 0) res = count; return res; }; static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); static ssize_t store_shost_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int i; struct Scsi_Host *shost = class_to_shost(dev); enum scsi_host_state state = 0; for (i = 0; i < ARRAY_SIZE(shost_states); i++) { const int len = strlen(shost_states[i].name); if (strncmp(shost_states[i].name, buf, len) == 0 && buf[len] == '\n') { state = shost_states[i].value; break; } } if (!state) return -EINVAL; if (scsi_host_set_state(shost, state)) return -EINVAL; return count; } static ssize_t show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); const char *name = scsi_host_state_name(shost->shost_state); if (!name) return -EINVAL; return snprintf(buf, 20, "%s\n", name); } /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ static struct device_attribute dev_attr_hstate = __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); static ssize_t show_shost_mode(unsigned int mode, char *buf) { ssize_t len = 0; if (mode & MODE_INITIATOR) len = sprintf(buf, "%s", "Initiator"); if (mode & MODE_TARGET) len += sprintf(buf + len, "%s%s", len ? 
", " : "", "Target"); len += sprintf(buf + len, "\n"); return len; } static ssize_t show_shost_supported_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); unsigned int supported_mode = shost->hostt->supported_mode; if (supported_mode == MODE_UNKNOWN) /* by default this should be initiator */ supported_mode = MODE_INITIATOR; return show_shost_mode(supported_mode, buf); } static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); static ssize_t show_shost_active_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); if (shost->active_mode == MODE_UNKNOWN) return snprintf(buf, 20, "unknown\n"); else return show_shost_mode(shost->active_mode, buf); } static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); static int check_reset_type(const char *str) { if (sysfs_streq(str, "adapter")) return SCSI_ADAPTER_RESET; else if (sysfs_streq(str, "firmware")) return SCSI_FIRMWARE_RESET; else return 0; } static ssize_t store_host_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); const struct scsi_host_template *sht = shost->hostt; int ret = -EINVAL; int type; type = check_reset_type(buf); if (!type) goto exit_store_host_reset; if (sht->host_reset) ret = sht->host_reset(shost, type); else ret = -EOPNOTSUPP; exit_store_host_reset: if (ret == 0) ret = count; return ret; } static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); static ssize_t show_shost_eh_deadline(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); if (shost->eh_deadline == -1) return snprintf(buf, strlen("off") + 2, "off\n"); return sprintf(buf, "%u\n", shost->eh_deadline / HZ); } static ssize_t store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); int ret = -EINVAL; unsigned long deadline, flags; if (shost->transportt && (shost->transportt->eh_strategy_handler || !shost->hostt->eh_host_reset_handler)) return ret; if (!strncmp(buf, "off", strlen("off"))) deadline = -1; else { ret = kstrtoul(buf, 10, &deadline); if (ret) return ret; if (deadline * HZ > UINT_MAX) return -EINVAL; } spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_in_recovery(shost)) ret = -EBUSY; else { if (deadline == -1) shost->eh_deadline = -1; else shost->eh_deadline = deadline * HZ; ret = count; } spin_unlock_irqrestore(shost->host_lock, flags); return ret; } static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(cmd_per_lun, "%hd\n"); shost_rd_attr(can_queue, "%d\n"); shost_rd_attr(sg_tablesize, "%hu\n"); shost_rd_attr(sg_prot_tablesize, "%hu\n"); shost_rd_attr(prot_capabilities, "%u\n"); shost_rd_attr(prot_guard_type, "%hd\n"); shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); static ssize_t show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); return snprintf(buf, 20, "%d\n", scsi_host_busy(shost)); } static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); static ssize_t show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "1\n"); } static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL); static ssize_t 
show_nr_hw_queues(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct blk_mq_tag_set *tag_set = &shost->tag_set; return snprintf(buf, 20, "%d\n", tag_set->nr_hw_queues); } static DEVICE_ATTR(nr_hw_queues, S_IRUGO, show_nr_hw_queues, NULL); static struct attribute *scsi_sysfs_shost_attrs[] = { &dev_attr_use_blk_mq.attr, &dev_attr_unique_id.attr, &dev_attr_host_busy.attr, &dev_attr_cmd_per_lun.attr, &dev_attr_can_queue.attr, &dev_attr_sg_tablesize.attr, &dev_attr_sg_prot_tablesize.attr, &dev_attr_proc_name.attr, &dev_attr_scan.attr, &dev_attr_hstate.attr, &dev_attr_supported_mode.attr, &dev_attr_active_mode.attr, &dev_attr_prot_capabilities.attr, &dev_attr_prot_guard_type.attr, &dev_attr_host_reset.attr, &dev_attr_eh_deadline.attr, &dev_attr_nr_hw_queues.attr, NULL }; static const struct attribute_group scsi_shost_attr_group = { .attrs = scsi_sysfs_shost_attrs, }; const struct attribute_group *scsi_shost_groups[] = { &scsi_shost_attr_group, NULL }; static void scsi_device_cls_release(struct device *class_dev) { struct scsi_device *sdev; sdev = class_to_sdev(class_dev); put_device(&sdev->sdev_gendev); } static void scsi_device_dev_release(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); struct device *parent; struct list_head *this, *tmp; struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL; unsigned long flags; might_sleep(); scsi_dh_release_device(sdev); parent = sdev->sdev_gendev.parent; spin_lock_irqsave(sdev->host->host_lock, flags); list_del(&sdev->siblings); list_del(&sdev->same_target_siblings); list_del(&sdev->starved_entry); spin_unlock_irqrestore(sdev->host->host_lock, flags); cancel_work_sync(&sdev->event_work); list_for_each_safe(this, tmp, &sdev->event_list) { struct scsi_event *evt; evt = list_entry(this, struct scsi_event, node); list_del(&evt->node); kfree(evt); } blk_put_queue(sdev->request_queue); /* NULL queue means the device can't be used */ sdev->request_queue = NULL; sbitmap_free(&sdev->budget_map); mutex_lock(&sdev->inquiry_mutex); vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1, lockdep_is_held(&sdev->inquiry_mutex)); vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2, lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); if (vpd_pg0) kfree_rcu(vpd_pg0, rcu); if (vpd_pg83) kfree_rcu(vpd_pg83, rcu); if (vpd_pg80) kfree_rcu(vpd_pg80, rcu); if (vpd_pg89) kfree_rcu(vpd_pg89, rcu); if (vpd_pgb0) kfree_rcu(vpd_pgb0, rcu); if (vpd_pgb1) kfree_rcu(vpd_pgb1, rcu); if (vpd_pgb2) kfree_rcu(vpd_pgb2, rcu); kfree(sdev->inquiry); kfree(sdev); if (parent) put_device(parent); } static struct class sdev_class = { .name = "scsi_device", .dev_release = scsi_device_cls_release, }; /* all probing is done in the individual ->probe routines */ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) { struct scsi_device *sdp; if (dev->type != &scsi_dev_type) 
return 0; sdp = to_scsi_device(dev); if (sdp->no_uld_attach) return 0; return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; } static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct scsi_device *sdev; if (dev->type != &scsi_dev_type) return 0; sdev = to_scsi_device(dev); add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); return 0; } struct bus_type scsi_bus_type = { .name = "scsi", .match = scsi_bus_match, .uevent = scsi_bus_uevent, #ifdef CONFIG_PM .pm = &scsi_bus_pm_ops, #endif }; int scsi_sysfs_register(void) { int error; error = bus_register(&scsi_bus_type); if (!error) { error = class_register(&sdev_class); if (error) bus_unregister(&scsi_bus_type); } return error; } void scsi_sysfs_unregister(void) { class_unregister(&sdev_class); bus_unregister(&scsi_bus_type); } /* * sdev_show_function: macro to create an attr function that can be used to * show a non-bit field. */ #define sdev_show_function(field, format_string) \ static ssize_t \ sdev_show_##field (struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev; \ sdev = to_scsi_device(dev); \ return snprintf (buf, 20, format_string, sdev->field); \ } \ /* * sdev_rd_attr: macro to create a function and attribute variable for a * read only field. */ #define sdev_rd_attr(field, format_string) \ sdev_show_function(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); /* * sdev_rw_attr: create a function and attribute variable for a * read/write field. */ #define sdev_rw_attr(field, format_string) \ sdev_show_function(field, format_string) \ \ static ssize_t \ sdev_store_##field (struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct scsi_device *sdev; \ sdev = to_scsi_device(dev); \ sscanf (buf, format_string, &sdev->field); \ return count; \ } \ static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); /* Currently we don't export bit fields, but we might in future, * so leave this code in */ #if 0 /* * sdev_rd_attr: create a function and attribute variable for a * read/write bit field. */ #define sdev_rw_attr_bit(field) \ sdev_show_function(field, "%d\n") \ \ static ssize_t \ sdev_store_##field (struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int ret; \ struct scsi_device *sdev; \ ret = scsi_sdev_check_buf_bit(buf); \ if (ret >= 0) { \ sdev = to_scsi_device(dev); \ sdev->field = ret; \ ret = count; \ } \ return ret; \ } \ static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); /* * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1", * else return -EINVAL. */ static int scsi_sdev_check_buf_bit(const char *buf) { if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { if (buf[0] == '1') return 1; else if (buf[0] == '0') return 0; else return -EINVAL; } else return -EINVAL; } #endif /* * Create the actual show/store functions and data structures. 
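 *
 * The sdev_rd_attr() invocations that follow expand to an sdev_show_<field>()
 * helper plus a read-only DEVICE_ATTR for each of the simple scsi_device
 * fields (type, scsi_level, vendor, model, rev, cdl_supported).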
*/ sdev_rd_attr (type, "%d\n"); sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (vendor, "%.8s\n"); sdev_rd_attr (model, "%.16s\n"); sdev_rd_attr (rev, "%.4s\n"); sdev_rd_attr (cdl_supported, "%d\n"); static ssize_t sdev_show_device_busy(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev)); } static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); static ssize_t sdev_show_device_blocked(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked)); } static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL); /* * TODO: can we make these symlinks to the block layer ones? */ static ssize_t sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); } static ssize_t sdev_store_timeout (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev; int timeout; sdev = to_scsi_device(dev); sscanf (buf, "%d\n", &timeout); blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); return count; } static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); static ssize_t sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); } static ssize_t sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev; unsigned int eh_timeout; int err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; sdev = to_scsi_device(dev); err = kstrtouint(buf, 10, &eh_timeout); if (err) return err; sdev->eh_timeout = eh_timeout * HZ; return count; } static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); static ssize_t store_rescan_field (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_rescan_device(to_scsi_device(dev)); return count; } static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); static ssize_t sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kernfs_node *kn; struct scsi_device *sdev = to_scsi_device(dev); /* * We need to try to get module, avoiding the module been removed * during delete. */ if (scsi_device_get(sdev)) return -ENODEV; kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); WARN_ON_ONCE(!kn); /* * Concurrent writes into the "delete" sysfs attribute may trigger * concurrent calls to device_remove_file() and scsi_remove_device(). * device_remove_file() handles concurrent removal calls by * serializing these and by ignoring the second and later removal * attempts. Concurrent calls of scsi_remove_device() are * serialized. The second and later calls of scsi_remove_device() are * ignored because the first call of that function changes the device * state into SDEV_DEL. 
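 *
 * Breaking the active protection on the "delete" attribute above is what
 * allows device_remove_file() to run from inside this store callback
 * without deadlocking on the attribute's own active reference.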
*/ device_remove_file(dev, attr); scsi_remove_device(sdev); if (kn) sysfs_unbreak_active_protection(kn); scsi_device_put(sdev); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); static ssize_t store_state_field(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int i, ret; struct scsi_device *sdev = to_scsi_device(dev); enum scsi_device_state state = 0; bool rescan_dev = false; for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { const int len = strlen(sdev_states[i].name); if (strncmp(sdev_states[i].name, buf, len) == 0 && buf[len] == '\n') { state = sdev_states[i].value; break; } } switch (state) { case SDEV_RUNNING: case SDEV_OFFLINE: break; default: return -EINVAL; } mutex_lock(&sdev->state_mutex); switch (sdev->sdev_state) { case SDEV_RUNNING: case SDEV_OFFLINE: break; default: mutex_unlock(&sdev->state_mutex); return -EINVAL; } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { ret = scsi_device_set_state(sdev, state); if (ret == 0 && state == SDEV_RUNNING) rescan_dev = true; } mutex_unlock(&sdev->state_mutex); if (rescan_dev) { /* * If the device state changes to SDEV_RUNNING, we need to * run the queue to avoid I/O hang, and rescan the device * to revalidate it. Running the queue first is necessary * because another thread may be waiting inside * blk_mq_freeze_queue_wait() and because that call may be * waiting for pending I/O to finish. */ blk_mq_run_hw_queues(sdev->request_queue, true); scsi_rescan_device(sdev); } return ret == 0 ? count : -EINVAL; } static ssize_t show_state_field(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); const char *name = scsi_device_state_name(sdev->sdev_state); if (!name) return -EINVAL; return snprintf(buf, 20, "%s\n", name); } static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); static ssize_t show_queue_type_field(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); const char *name = "none"; if (sdev->simple_tags) name = "simple"; return snprintf(buf, 20, "%s\n", name); } static ssize_t store_queue_type_field(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->tagged_supported) return -EINVAL; sdev_printk(KERN_INFO, sdev, "ignoring write to deprecated queue_type attribute"); return count; } static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, store_queue_type_field); #define sdev_vpd_pg_attr(_page) \ static ssize_t \ show_vpd_##_page(struct file *filp, struct kobject *kobj, \ struct bin_attribute *bin_attr, \ char *buf, loff_t off, size_t count) \ { \ struct device *dev = kobj_to_dev(kobj); \ struct scsi_device *sdev = to_scsi_device(dev); \ struct scsi_vpd *vpd_page; \ int ret = -EINVAL; \ \ rcu_read_lock(); \ vpd_page = rcu_dereference(sdev->vpd_##_page); \ if (vpd_page) \ ret = memory_read_from_buffer(buf, count, &off, \ vpd_page->data, vpd_page->len); \ rcu_read_unlock(); \ return ret; \ } \ static struct bin_attribute dev_attr_vpd_##_page = { \ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ .size = 0, \ .read = show_vpd_##_page, \ }; sdev_vpd_pg_attr(pg83); sdev_vpd_pg_attr(pg80); sdev_vpd_pg_attr(pg89); sdev_vpd_pg_attr(pgb0); sdev_vpd_pg_attr(pgb1); sdev_vpd_pg_attr(pgb2); sdev_vpd_pg_attr(pg0); static ssize_t show_inquiry(struct file *filep, struct kobject *kobj, struct 
bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->inquiry) return -EINVAL; return memory_read_from_buffer(buf, count, &off, sdev->inquiry, sdev->inquiry_len); } static struct bin_attribute dev_attr_inquiry = { .attr = { .name = "inquiry", .mode = S_IRUGO, }, .size = 0, .read = show_inquiry, }; static ssize_t show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); } static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); #define show_sdev_iostat(field) \ static ssize_t \ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ unsigned long long count = atomic_read(&sdev->field); \ return snprintf(buf, 20, "0x%llx\n", count); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) show_sdev_iostat(iorequest_cnt); show_sdev_iostat(iodone_cnt); show_sdev_iostat(ioerr_cnt); show_sdev_iostat(iotmo_cnt); static ssize_t sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type); } static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); #define DECLARE_EVT_SHOW(name, Cap_name) \ static ssize_t \ sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\ return snprintf(buf, 20, "%d\n", val); \ } #define DECLARE_EVT_STORE(name, Cap_name) \ static ssize_t \ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ const char *buf, size_t count) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ int val = simple_strtoul(buf, NULL, 0); \ if (val == 0) \ clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ else if (val == 1) \ set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ else \ return -EINVAL; \ return count; \ } #define DECLARE_EVT(name, Cap_name) \ DECLARE_EVT_SHOW(name, Cap_name) \ DECLARE_EVT_STORE(name, Cap_name) \ static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \ sdev_store_evt_##name); #define REF_EVT(name) &dev_attr_evt_##name.attr DECLARE_EVT(media_change, MEDIA_CHANGE) DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) static ssize_t sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int depth, retval; struct scsi_device *sdev = to_scsi_device(dev); const struct scsi_host_template *sht = sdev->host->hostt; if (!sht->change_queue_depth) return -EINVAL; depth = simple_strtoul(buf, NULL, 0); if (depth < 1 || depth > sdev->host->can_queue) return -EINVAL; retval = sht->change_queue_depth(sdev, depth); if (retval < 0) return retval; sdev->max_queue_depth = sdev->queue_depth; return count; } sdev_show_function(queue_depth, "%d\n"); static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, sdev_store_queue_depth); static ssize_t sdev_show_wwid(struct device *dev, struct device_attribute *attr, char 
*buf) { struct scsi_device *sdev = to_scsi_device(dev); ssize_t count; count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE); if (count > 0) { buf[count] = '\n'; count++; } return count; } static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); #define BLIST_FLAG_NAME(name) \ [const_ilog2((__force __u64)BLIST_##name)] = #name static const char *const sdev_bflags_name[] = { #include "scsi_devinfo_tbl.c" }; #undef BLIST_FLAG_NAME static ssize_t sdev_show_blacklist(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); int i; ssize_t len = 0; for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) { const char *name = NULL; if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i))) continue; if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i]) name = sdev_bflags_name[i]; if (name) len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s", len ? " " : "", name); else len += scnprintf(buf + len, PAGE_SIZE - len, "%sINVALID_BIT(%d)", len ? " " : "", i); } if (len) len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); return len; } static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL); #ifdef CONFIG_SCSI_DH static ssize_t sdev_show_dh_state(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->handler) return snprintf(buf, 20, "detached\n"); return snprintf(buf, 20, "%s\n", sdev->handler->name); } static ssize_t sdev_store_dh_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); int err = -EINVAL; if (sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) return -ENODEV; if (!sdev->handler) { /* * Attach to a device handler */ err = scsi_dh_attach(sdev->request_queue, buf); } else if (!strncmp(buf, "activate", 8)) { /* * Activate a device handler */ if (sdev->handler->activate) err = sdev->handler->activate(sdev, NULL, NULL); else err = 0; } else if (!strncmp(buf, "detach", 6)) { /* * Detach from a device handler */ sdev_printk(KERN_WARNING, sdev, "can't detach handler %s.\n", sdev->handler->name); err = -EINVAL; } return err < 0 ? err : count; } static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state, sdev_store_dh_state); static ssize_t sdev_show_access_state(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); unsigned char access_state; const char *access_state_name; if (!sdev->handler) return -EINVAL; access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK); access_state_name = scsi_access_state_name(access_state); return sprintf(buf, "%s\n", access_state_name ? 
access_state_name : "unknown"); } static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL); static ssize_t sdev_show_preferred_path(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); if (!sdev->handler) return -EINVAL; if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL); #endif static ssize_t sdev_show_queue_ramp_up_period(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev; sdev = to_scsi_device(dev); return snprintf(buf, 20, "%u\n", jiffies_to_msecs(sdev->queue_ramp_up_period)); } static ssize_t sdev_store_queue_ramp_up_period(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); unsigned int period; if (kstrtouint(buf, 10, &period)) return -EINVAL; sdev->queue_ramp_up_period = msecs_to_jiffies(period); return count; } static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, sdev_show_queue_ramp_up_period, sdev_store_queue_ramp_up_period); static ssize_t sdev_show_cdl_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return sysfs_emit(buf, "%d\n", (int)sdev->cdl_enable); } static ssize_t sdev_store_cdl_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; bool v; if (kstrtobool(buf, &v)) return -EINVAL; ret = scsi_cdl_enable(to_scsi_device(dev), v); if (ret) return ret; return count; } static DEVICE_ATTR(cdl_enable, S_IRUGO | S_IWUSR, sdev_show_cdl_enable, sdev_store_cdl_enable); static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); if (attr == &dev_attr_queue_depth.attr && !sdev->host->hostt->change_queue_depth) return S_IRUGO; if (attr == &dev_attr_queue_ramp_up_period.attr && !sdev->host->hostt->change_queue_depth) return 0; return attr->mode; } static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int i) { struct device *dev = kobj_to_dev(kobj); struct scsi_device *sdev = to_scsi_device(dev); if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0) return 0; if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) return 0; if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) return 0; if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89) return 0; if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0) return 0; if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1) return 0; if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2) return 0; return S_IRUGO; } /* Default template for device attributes. 
May NOT be modified */ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_device_blocked.attr, &dev_attr_type.attr, &dev_attr_scsi_level.attr, &dev_attr_device_busy.attr, &dev_attr_vendor.attr, &dev_attr_model.attr, &dev_attr_rev.attr, &dev_attr_rescan.attr, &dev_attr_delete.attr, &dev_attr_state.attr, &dev_attr_timeout.attr, &dev_attr_eh_timeout.attr, &dev_attr_iocounterbits.attr, &dev_attr_iorequest_cnt.attr, &dev_attr_iodone_cnt.attr, &dev_attr_ioerr_cnt.attr, &dev_attr_iotmo_cnt.attr, &dev_attr_modalias.attr, &dev_attr_queue_depth.attr, &dev_attr_queue_type.attr, &dev_attr_wwid.attr, &dev_attr_blacklist.attr, #ifdef CONFIG_SCSI_DH &dev_attr_dh_state.attr, &dev_attr_access_state.attr, &dev_attr_preferred_path.attr, #endif &dev_attr_queue_ramp_up_period.attr, &dev_attr_cdl_supported.attr, &dev_attr_cdl_enable.attr, REF_EVT(media_change), REF_EVT(inquiry_change_reported), REF_EVT(capacity_change_reported), REF_EVT(soft_threshold_reached), REF_EVT(mode_parameter_change_reported), REF_EVT(lun_change_reported), NULL }; static struct bin_attribute *scsi_sdev_bin_attrs[] = { &dev_attr_vpd_pg0, &dev_attr_vpd_pg83, &dev_attr_vpd_pg80, &dev_attr_vpd_pg89, &dev_attr_vpd_pgb0, &dev_attr_vpd_pgb1, &dev_attr_vpd_pgb2, &dev_attr_inquiry, NULL }; static struct attribute_group scsi_sdev_attr_group = { .attrs = scsi_sdev_attrs, .bin_attrs = scsi_sdev_bin_attrs, .is_visible = scsi_sdev_attr_is_visible, .is_bin_visible = scsi_sdev_bin_attr_is_visible, }; static const struct attribute_group *scsi_sdev_attr_groups[] = { &scsi_sdev_attr_group, NULL }; static int scsi_target_add(struct scsi_target *starget) { int error; if (starget->state != STARGET_CREATED) return 0; error = device_add(&starget->dev); if (error) { dev_err(&starget->dev, "target device_add failed, error %d\n", error); return error; } transport_add_device(&starget->dev); starget->state = STARGET_RUNNING; pm_runtime_set_active(&starget->dev); pm_runtime_enable(&starget->dev); device_enable_async_suspend(&starget->dev); return 0; } /** * scsi_sysfs_add_sdev - add scsi device to sysfs * @sdev: scsi_device to add * * Return value: * 0 on Success / non-zero on Failure **/ int scsi_sysfs_add_sdev(struct scsi_device *sdev) { int error; struct scsi_target *starget = sdev->sdev_target; error = scsi_target_add(starget); if (error) return error; transport_configure_device(&starget->dev); device_enable_async_suspend(&sdev->sdev_gendev); scsi_autopm_get_target(starget); pm_runtime_set_active(&sdev->sdev_gendev); if (!sdev->rpm_autosuspend) pm_runtime_forbid(&sdev->sdev_gendev); pm_runtime_enable(&sdev->sdev_gendev); scsi_autopm_put_target(starget); scsi_autopm_get_device(sdev); scsi_dh_add_device(sdev); error = device_add(&sdev->sdev_gendev); if (error) { sdev_printk(KERN_INFO, sdev, "failed to add device: %d\n", error); return error; } device_enable_async_suspend(&sdev->sdev_dev); error = device_add(&sdev->sdev_dev); if (error) { sdev_printk(KERN_INFO, sdev, "failed to add class device: %d\n", error); device_del(&sdev->sdev_gendev); return error; } transport_add_device(&sdev->sdev_gendev); sdev->is_visible = 1; if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) { sdev->bsg_dev = scsi_bsg_register_queue(sdev); if (IS_ERR(sdev->bsg_dev)) { error = PTR_ERR(sdev->bsg_dev); sdev_printk(KERN_INFO, sdev, "Failed to register bsg queue, errno=%d\n", error); sdev->bsg_dev = NULL; } } scsi_autopm_put_device(sdev); return error; } void __scsi_remove_device(struct scsi_device *sdev) { struct device *dev = &sdev->sdev_gendev; int res; /* * This cleanup path is not reentrant and 
while it is impossible * to get a new reference with scsi_device_get() someone can still * hold a previously acquired one. */ if (sdev->sdev_state == SDEV_DEL) return; if (sdev->is_visible) { /* * If scsi_internal_target_block() is running concurrently, * wait until it has finished before changing the device state. */ mutex_lock(&sdev->state_mutex); /* * If blocked, we go straight to DEL and restart the queue so * any commands issued during driver shutdown (like sync * cache) are errored immediately. */ res = scsi_device_set_state(sdev, SDEV_CANCEL); if (res != 0) { res = scsi_device_set_state(sdev, SDEV_DEL); if (res == 0) scsi_start_queue(sdev); } mutex_unlock(&sdev->state_mutex); if (res != 0) return; if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev) bsg_unregister_queue(sdev->bsg_dev); device_unregister(&sdev->sdev_dev); transport_remove_device(dev); device_del(dev); } else put_device(&sdev->sdev_dev); /* * Stop accepting new requests and wait until all queuecommand() and * scsi_run_queue() invocations have finished before tearing down the * device. */ mutex_lock(&sdev->state_mutex); scsi_device_set_state(sdev, SDEV_DEL); mutex_unlock(&sdev->state_mutex); blk_mq_destroy_queue(sdev->request_queue); kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); cancel_work_sync(&sdev->requeue_work); if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); /* * Paired with the kref_get() in scsi_sysfs_initialize(). We have * removed sysfs visibility from the device, so make the target * invisible if this was the last device underneath it. */ scsi_target_reap(scsi_target(sdev)); put_device(dev); } /** * scsi_remove_device - unregister a device from the scsi bus * @sdev: scsi_device to unregister **/ void scsi_remove_device(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; mutex_lock(&shost->scan_mutex); __scsi_remove_device(sdev); mutex_unlock(&shost->scan_mutex); } EXPORT_SYMBOL(scsi_remove_device); static void __scsi_remove_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; struct scsi_device *sdev; spin_lock_irqsave(shost->host_lock, flags); restart: list_for_each_entry(sdev, &shost->__devices, siblings) { /* * We cannot call scsi_device_get() here, as * we might've been called from rmmod() causing * scsi_device_get() to fail the module_is_live() * check. */ if (sdev->channel != starget->channel || sdev->id != starget->id) continue; if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL || !get_device(&sdev->sdev_gendev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); scsi_remove_device(sdev); put_device(&sdev->sdev_gendev); spin_lock_irqsave(shost->host_lock, flags); goto restart; } spin_unlock_irqrestore(shost->host_lock, flags); } /** * scsi_remove_target - try to remove a target and all its devices * @dev: generic starget or parent of generic stargets to be removed * * Note: This is slightly racy. It is possible that if the user * requests the addition of another device then the target won't be * removed. 
*/ void scsi_remove_target(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev->parent); struct scsi_target *starget; unsigned long flags; restart: spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL || starget->state == STARGET_REMOVE || starget->state == STARGET_CREATED_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); if (starget->state == STARGET_CREATED) starget->state = STARGET_CREATED_REMOVE; else starget->state = STARGET_REMOVE; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); goto restart; } } spin_unlock_irqrestore(shost->host_lock, flags); } EXPORT_SYMBOL(scsi_remove_target); int scsi_register_driver(struct device_driver *drv) { drv->bus = &scsi_bus_type; return driver_register(drv); } EXPORT_SYMBOL(scsi_register_driver); int scsi_register_interface(struct class_interface *intf) { intf->class = &sdev_class; return class_interface_register(intf); } EXPORT_SYMBOL(scsi_register_interface); /** * scsi_sysfs_add_host - add scsi host to subsystem * @shost: scsi host struct to add to subsystem **/ int scsi_sysfs_add_host(struct Scsi_Host *shost) { transport_register_device(&shost->shost_gendev); transport_configure_device(&shost->shost_gendev); return 0; } static struct device_type scsi_dev_type = { .name = "scsi_device", .release = scsi_device_dev_release, .groups = scsi_sdev_attr_groups, }; void scsi_sysfs_device_initialize(struct scsi_device *sdev) { unsigned long flags; struct Scsi_Host *shost = sdev->host; const struct scsi_host_template *hostt = shost->hostt; struct scsi_target *starget = sdev->sdev_target; device_initialize(&sdev->sdev_gendev); sdev->sdev_gendev.bus = &scsi_bus_type; sdev->sdev_gendev.type = &scsi_dev_type; scsi_enable_async_suspend(&sdev->sdev_gendev); dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); sdev->sdev_gendev.groups = hostt->sdev_groups; device_initialize(&sdev->sdev_dev); sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); sdev->sdev_dev.class = &sdev_class; dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); /* * Get a default scsi_level from the target (derived from sibling * devices). This is the best we can do for guessing how to set * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the * setting doesn't matter, because all the bits are zero anyway. * But it does matter for higher LUNs. */ sdev->scsi_level = starget->scsi_level; if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN && !shost->no_scsi2_lun_in_cdb) sdev->lun_in_cdb = 1; transport_setup_device(&sdev->sdev_gendev); spin_lock_irqsave(shost->host_lock, flags); list_add_tail(&sdev->same_target_siblings, &starget->devices); list_add_tail(&sdev->siblings, &shost->__devices); spin_unlock_irqrestore(shost->host_lock, flags); /* * device can now only be removed via __scsi_remove_device() so hold * the target. 
Target will be held in CREATED state until something * beneath it becomes visible (in which case it moves to RUNNING) */ kref_get(&starget->reap_ref); } int scsi_is_sdev_device(const struct device *dev) { return dev->type == &scsi_dev_type; } EXPORT_SYMBOL(scsi_is_sdev_device); /* A blank transport template that is used in drivers that don't * yet implement Transport Attributes */ struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
linux-master
drivers/scsi/scsi_sysfs.c
// SPDX-License-Identifier: GPL-2.0-or-later /* dmx3191d.c - driver for the Domex DMX3191D SCSI card. Copyright (C) 2000 by Massimo Piccioni <[email protected]> Portions Copyright (C) 2004 by Christoph Hellwig <[email protected]> Based on the generic NCR5380 driver by Drew Eckhardt et al. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <asm/io.h> #include <scsi/scsi_host.h> /* * Definitions for the generic 5380 driver. */ #define NCR5380_read(reg) inb(hostdata->base + (reg)) #define NCR5380_write(reg, value) outb(value, hostdata->base + (reg)) #define NCR5380_dma_xfer_len NCR5380_dma_xfer_none #define NCR5380_dma_recv_setup NCR5380_dma_setup_none #define NCR5380_dma_send_setup NCR5380_dma_setup_none #define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_implementation_fields /* none */ #include "NCR5380.h" #include "NCR5380.c" #define DMX3191D_DRIVER_NAME "dmx3191d" #define DMX3191D_REGION_LEN 8 static const struct scsi_host_template dmx3191d_driver_template = { .module = THIS_MODULE, .proc_name = DMX3191D_DRIVER_NAME, .name = "Domex DMX3191D", .info = NCR5380_info, .queuecommand = NCR5380_queue_command, .eh_abort_handler = NCR5380_abort, .eh_host_reset_handler = NCR5380_host_reset, .can_queue = 32, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), }; static int dmx3191d_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *shost; struct NCR5380_hostdata *hostdata; unsigned long io; int error = -ENODEV; if (pci_enable_device(pdev)) goto out; io = pci_resource_start(pdev, 0); if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) { printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n", io, io + DMX3191D_REGION_LEN); goto out_disable_device; } shost = scsi_host_alloc(&dmx3191d_driver_template, sizeof(struct NCR5380_hostdata)); if (!shost) goto out_release_region; hostdata = shost_priv(shost); hostdata->base = io; /* This card does not seem to raise an interrupt on pdev->irq. * Steam-powered SCSI controllers run without an IRQ anyway. */ shost->irq = NO_IRQ; error = NCR5380_init(shost, 0); if (error) goto out_host_put; NCR5380_maybe_reset_bus(shost); pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) goto out_exit; scsi_scan_host(shost); return 0; out_exit: NCR5380_exit(shost); out_host_put: scsi_host_put(shost); out_release_region: release_region(io, DMX3191D_REGION_LEN); out_disable_device: pci_disable_device(pdev); out: return error; } static void dmx3191d_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct NCR5380_hostdata *hostdata = shost_priv(shost); unsigned long io = hostdata->base; scsi_remove_host(shost); NCR5380_exit(shost); scsi_host_put(shost); release_region(io, DMX3191D_REGION_LEN); pci_disable_device(pdev); } static struct pci_device_id dmx3191d_pci_tbl[] = { {PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, { } }; MODULE_DEVICE_TABLE(pci, dmx3191d_pci_tbl); static struct pci_driver dmx3191d_pci_driver = { .name = DMX3191D_DRIVER_NAME, .id_table = dmx3191d_pci_tbl, .probe = dmx3191d_probe_one, .remove = dmx3191d_remove_one, }; module_pci_driver(dmx3191d_pci_driver); MODULE_AUTHOR("Massimo Piccioni <[email protected]>"); MODULE_DESCRIPTION("Domex DMX3191D SCSI driver"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/dmx3191d.c
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards * Copyright 2013 Ondrej Zary * * Original driver by * Aaron Dewell <[email protected]> * Gaerti <[email protected]> * * HW documentation available in book: * * SPIDER Command Protocol * by Chandru M. Sippy * SCSI Storage Products (MCP) * Western Digital Corporation * 09-15-95 * * http://web.archive.org/web/20070717175254/http://sun1.rrzn.uni-hannover.de/gaertner.juergen/wd719x/Linux/Docu/Spider/ */ /* * Driver workflow: * 1. SCSI command is transformed to SCB (Spider Control Block) by the * queuecommand function. * 2. The address of the SCB is stored in a list to be able to access it, if * something goes wrong. * 3. The address of the SCB is written to the Controller, which loads the SCB * via BM-DMA and processes it. * 4. After it has finished, it generates an interrupt, and sets registers. * * flaws: * - abort/reset functions * * ToDo: * - tagged queueing */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/eeprom_93cx6.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "wd719x.h" /* low-level register access */ static inline u8 wd719x_readb(struct wd719x *wd, u8 reg) { return ioread8(wd->base + reg); } static inline u32 wd719x_readl(struct wd719x *wd, u8 reg) { return ioread32(wd->base + reg); } static inline void wd719x_writeb(struct wd719x *wd, u8 reg, u8 val) { iowrite8(val, wd->base + reg); } static inline void wd719x_writew(struct wd719x *wd, u8 reg, u16 val) { iowrite16(val, wd->base + reg); } static inline void wd719x_writel(struct wd719x *wd, u8 reg, u32 val) { iowrite32(val, wd->base + reg); } /* wait until the command register is ready */ static inline int wd719x_wait_ready(struct wd719x *wd) { int i = 0; do { if (wd719x_readb(wd, WD719X_AMR_COMMAND) == WD719X_CMD_READY) return 0; udelay(1); } while (i++ < WD719X_WAIT_FOR_CMD_READY); dev_err(&wd->pdev->dev, "command register is not ready: 0x%02x\n", wd719x_readb(wd, WD719X_AMR_COMMAND)); return -ETIMEDOUT; } /* poll interrupt status register until command finishes */ static inline int wd719x_wait_done(struct wd719x *wd, int timeout) { u8 status; while (timeout > 0) { status = wd719x_readb(wd, WD719X_AMR_INT_STATUS); if (status) break; timeout--; udelay(1); } if (timeout <= 0) { dev_err(&wd->pdev->dev, "direct command timed out\n"); return -ETIMEDOUT; } if (status != WD719X_INT_NOERRORS) { u8 sue = wd719x_readb(wd, WD719X_AMR_SCB_ERROR); /* we get this after wd719x_dev_reset, it's not an error */ if (sue == WD719X_SUE_TERM) return 0; /* we get this after wd719x_bus_reset, it's not an error */ if (sue == WD719X_SUE_RESET) return 0; dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n", status, sue); return -EIO; } return 0; } static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun, u8 tag, dma_addr_t data, int timeout) { int ret = 0; /* clear interrupt status register (allow command register to clear) */ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); /* Wait for the Command register to become free */ if (wd719x_wait_ready(wd)) return -ETIMEDOUT; /* disable interrupts except for RESET/ABORT (it breaks them) */ if (opcode != WD719X_CMD_BUSRESET && opcode != WD719X_CMD_ABORT && opcode != WD719X_CMD_ABORT_TAG && opcode != WD719X_CMD_RESET) dev |= WD719X_DISABLE_INT; wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev); wd719x_writeb(wd, 
WD719X_AMR_CMD_PARAM_2, lun); wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag); if (data) wd719x_writel(wd, WD719X_AMR_SCB_IN, data); /* clear interrupt status register again */ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); /* Now, write the command */ wd719x_writeb(wd, WD719X_AMR_COMMAND, opcode); if (timeout) /* wait for the command to complete */ ret = wd719x_wait_done(wd, timeout); /* clear interrupt status register (clean up) */ if (opcode != WD719X_CMD_READ_FIRMVER) wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); return ret; } static void wd719x_destroy(struct wd719x *wd) { /* stop the RISC */ if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0, WD719X_WAIT_FOR_RISC)) dev_warn(&wd->pdev->dev, "RISC sleep command failed\n"); /* disable RISC */ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); WARN_ON_ONCE(!list_empty(&wd->active_scbs)); /* free internal buffers */ dma_free_coherent(&wd->pdev->dev, wd->fw_size, wd->fw_virt, wd->fw_phys); wd->fw_virt = NULL; dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, wd->hash_phys); wd->hash_virt = NULL; dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param), wd->params, wd->params_phys); wd->params = NULL; free_irq(wd->pdev->irq, wd); } /* finish a SCSI command, unmap buffers */ static void wd719x_finish_cmd(struct wd719x_scb *scb, int result) { struct scsi_cmnd *cmd = scb->cmd; struct wd719x *wd = shost_priv(cmd->device->host); list_del(&scb->list); dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL); scsi_dma_unmap(cmd); dma_unmap_single(&wd->pdev->dev, scb->dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); cmd->result = result << 16; scsi_done(cmd); } /* Build a SCB and send it to the card */ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { int i, count_sg; unsigned long flags; struct wd719x_scb *scb = scsi_cmd_priv(cmd); struct wd719x *wd = shost_priv(sh); scb->cmd = cmd; scb->CDB_tag = 0; /* Tagged queueing not supported yet */ scb->devid = cmd->device->id; scb->lun = cmd->device->lun; /* copy the command */ memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len); /* map SCB */ scb->phys = dma_map_single(&wd->pdev->dev, scb, sizeof(*scb), DMA_BIDIRECTIONAL); if (dma_mapping_error(&wd->pdev->dev, scb->phys)) goto out_error; /* map sense buffer */ scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE; scb->dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&wd->pdev->dev, scb->dma_handle)) goto out_unmap_scb; scb->sense_buf = cpu_to_le32(scb->dma_handle); /* request autosense */ scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE; /* check direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION | WD719X_SCB_FLAGS_PCI_TO_SCSI; else if (cmd->sc_data_direction == DMA_FROM_DEVICE) scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION; /* Scather/gather */ count_sg = scsi_dma_map(cmd); if (count_sg < 0) goto out_unmap_sense; BUG_ON(count_sg > WD719X_SG); if (count_sg) { struct scatterlist *sg; scb->data_length = cpu_to_le32(count_sg * sizeof(struct wd719x_sglist)); scb->data_p = cpu_to_le32(scb->phys + offsetof(struct wd719x_scb, sg_list)); scsi_for_each_sg(cmd, sg, count_sg, i) { scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg)); scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); } scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER; } else { /* zero length */ scb->data_length = 0; scb->data_p = 0; } 
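
	/*
	 * Hand the SCB to the card: under the host lock, make sure the command
	 * register is free, queue the SCB on the active list, write its bus
	 * address to the SCB-in mailbox and issue the PROCESS_SCB opcode.  If
	 * the register is busy, the midlayer retries via SCSI_MLQUEUE_HOST_BUSY.
	 */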
spin_lock_irqsave(wd->sh->host_lock, flags); /* check if the Command register is free */ if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) { spin_unlock_irqrestore(wd->sh->host_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } list_add(&scb->list, &wd->active_scbs); /* write pointer to the AMR */ wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys); /* send SCB opcode */ wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB); spin_unlock_irqrestore(wd->sh->host_lock, flags); return 0; out_unmap_sense: dma_unmap_single(&wd->pdev->dev, scb->dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); out_unmap_scb: dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb), DMA_BIDIRECTIONAL); out_error: cmd->result = DID_ERROR << 16; scsi_done(cmd); return 0; } static int wd719x_chip_init(struct wd719x *wd) { int i, ret; u32 risc_init[3]; const struct firmware *fw_wcs, *fw_risc; const char fwname_wcs[] = "wd719x-wcs.bin"; const char fwname_risc[] = "wd719x-risc.bin"; memset(wd->hash_virt, 0, WD719X_HASH_TABLE_SIZE); /* WCS (sequencer) firmware */ ret = request_firmware(&fw_wcs, fwname_wcs, &wd->pdev->dev); if (ret) { dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n", fwname_wcs, ret); return ret; } /* RISC firmware */ ret = request_firmware(&fw_risc, fwname_risc, &wd->pdev->dev); if (ret) { dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n", fwname_risc, ret); release_firmware(fw_wcs); return ret; } wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size; if (!wd->fw_virt) wd->fw_virt = dma_alloc_coherent(&wd->pdev->dev, wd->fw_size, &wd->fw_phys, GFP_KERNEL); if (!wd->fw_virt) { ret = -ENOMEM; goto wd719x_init_end; } /* make a fresh copy of WCS and RISC code */ memcpy(wd->fw_virt, fw_wcs->data, fw_wcs->size); memcpy(wd->fw_virt + ALIGN(fw_wcs->size, 4), fw_risc->data, fw_risc->size); /* Reset the Spider Chip and adapter itself */ wd719x_writeb(wd, WD719X_PCI_PORT_RESET, WD719X_PCI_RESET); udelay(WD719X_WAIT_FOR_RISC); /* Clear PIO mode bits set by BIOS */ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, 0); /* ensure RISC is not running */ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); /* ensure command port is ready */ wd719x_writeb(wd, WD719X_AMR_COMMAND, 0); if (wd719x_wait_ready(wd)) { ret = -ETIMEDOUT; goto wd719x_init_end; } /* Transfer the first 2K words of RISC code to kick start the uP */ risc_init[0] = wd->fw_phys; /* WCS FW */ risc_init[1] = wd->fw_phys + ALIGN(fw_wcs->size, 4); /* RISC FW */ risc_init[2] = wd->hash_phys; /* hash table */ /* clear DMA status */ wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3STATUS, 0); /* address to read firmware from */ wd719x_writel(wd, WD719X_PCI_EXTERNAL_ADDR, risc_init[1]); /* base address to write firmware to (on card) */ wd719x_writew(wd, WD719X_PCI_INTERNAL_ADDR, WD719X_PRAM_BASE_ADDR); /* size: first 2K words */ wd719x_writew(wd, WD719X_PCI_DMA_TRANSFER_SIZE, 2048 * 2); /* start DMA */ wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3CMD, WD719X_START_CHANNEL2_3DMA); /* wait for DMA to complete */ i = WD719X_WAIT_FOR_RISC; while (i-- > 0) { u8 status = wd719x_readb(wd, WD719X_PCI_CHANNEL2_3STATUS); if (status == WD719X_START_CHANNEL2_3DONE) break; if (status == WD719X_START_CHANNEL2_3ABORT) { dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA aborted\n"); ret = -EIO; goto wd719x_init_end; } udelay(1); } if (i < 1) { dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA timeout\n"); ret = -ETIMEDOUT; goto wd719x_init_end; } /* firmware is loaded, now initialize and wake up the RISC */ /* write RISC initialization long words to Spider */ 
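	/*
	 * risc_init[] holds the bus addresses of the WCS firmware, the RISC
	 * firmware and the hash table; they are handed to the chip through the
	 * SCB-in mailbox before the INIT_RISC command is issued.
	 */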
wd719x_writel(wd, WD719X_AMR_SCB_IN, risc_init[0]); wd719x_writel(wd, WD719X_AMR_SCB_IN + 4, risc_init[1]); wd719x_writel(wd, WD719X_AMR_SCB_IN + 8, risc_init[2]); /* disable interrupts during initialization of RISC */ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, WD719X_DISABLE_INT); /* issue INITIALIZE RISC comand */ wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_INIT_RISC); /* enable advanced mode (wake up RISC) */ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, WD719X_ENABLE_ADVANCE_MODE); udelay(WD719X_WAIT_FOR_RISC); ret = wd719x_wait_done(wd, WD719X_WAIT_FOR_RISC); /* clear interrupt status register */ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); if (ret) { dev_warn(&wd->pdev->dev, "Unable to initialize RISC\n"); goto wd719x_init_end; } /* RISC is up and running */ /* Read FW version from RISC */ ret = wd719x_direct_cmd(wd, WD719X_CMD_READ_FIRMVER, 0, 0, 0, 0, WD719X_WAIT_FOR_RISC); if (ret) { dev_warn(&wd->pdev->dev, "Unable to read firmware version\n"); goto wd719x_init_end; } dev_info(&wd->pdev->dev, "RISC initialized with firmware version %.2x.%.2x\n", wd719x_readb(wd, WD719X_AMR_SCB_OUT + 1), wd719x_readb(wd, WD719X_AMR_SCB_OUT)); /* RESET SCSI bus */ ret = wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0, WD719X_WAIT_FOR_SCSI_RESET); if (ret) { dev_warn(&wd->pdev->dev, "SCSI bus reset failed\n"); goto wd719x_init_end; } /* use HostParameter structure to set Spider's Host Parameter Block */ ret = wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0, sizeof(struct wd719x_host_param), 0, wd->params_phys, WD719X_WAIT_FOR_RISC); if (ret) { dev_warn(&wd->pdev->dev, "Failed to set HOST PARAMETERS\n"); goto wd719x_init_end; } /* initiate SCAM (does nothing if disabled in BIOS) */ /* bug?: we should pass a mask of static IDs which we don't have */ ret = wd719x_direct_cmd(wd, WD719X_CMD_INIT_SCAM, 0, 0, 0, 0, WD719X_WAIT_FOR_SCSI_RESET); if (ret) { dev_warn(&wd->pdev->dev, "SCAM initialization failed\n"); goto wd719x_init_end; } /* clear AMR_BIOS_SHARE_INT register */ wd719x_writeb(wd, WD719X_AMR_BIOS_SHARE_INT, 0); wd719x_init_end: release_firmware(fw_wcs); release_firmware(fw_risc); return ret; } static int wd719x_abort(struct scsi_cmnd *cmd) { int action, result; unsigned long flags; struct wd719x_scb *scb = scsi_cmd_priv(cmd); struct wd719x *wd = shost_priv(cmd->device->host); struct device *dev = &wd->pdev->dev; dev_info(dev, "abort command, tag: %x\n", scsi_cmd_to_rq(cmd)->tag); action = WD719X_CMD_ABORT; spin_lock_irqsave(wd->sh->host_lock, flags); result = wd719x_direct_cmd(wd, action, cmd->device->id, cmd->device->lun, scsi_cmd_to_rq(cmd)->tag, scb->phys, 0); wd719x_finish_cmd(scb, DID_ABORT); spin_unlock_irqrestore(wd->sh->host_lock, flags); if (result) return FAILED; return SUCCESS; } static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device) { int result; unsigned long flags; struct wd719x *wd = shost_priv(cmd->device->host); struct wd719x_scb *scb, *tmp; dev_info(&wd->pdev->dev, "%s reset requested\n", (opcode == WD719X_CMD_BUSRESET) ? 
"bus" : "device"); spin_lock_irqsave(wd->sh->host_lock, flags); result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0, WD719X_WAIT_FOR_SCSI_RESET); /* flush all SCBs (or all for a device if dev_reset) */ list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) { if (opcode == WD719X_CMD_BUSRESET || scb->cmd->device->id == device) wd719x_finish_cmd(scb, DID_RESET); } spin_unlock_irqrestore(wd->sh->host_lock, flags); if (result) return FAILED; return SUCCESS; } static int wd719x_dev_reset(struct scsi_cmnd *cmd) { return wd719x_reset(cmd, WD719X_CMD_RESET, cmd->device->id); } static int wd719x_bus_reset(struct scsi_cmnd *cmd) { return wd719x_reset(cmd, WD719X_CMD_BUSRESET, 0); } static int wd719x_host_reset(struct scsi_cmnd *cmd) { struct wd719x *wd = shost_priv(cmd->device->host); struct wd719x_scb *scb, *tmp; unsigned long flags; dev_info(&wd->pdev->dev, "host reset requested\n"); spin_lock_irqsave(wd->sh->host_lock, flags); /* stop the RISC */ if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0, WD719X_WAIT_FOR_RISC)) dev_warn(&wd->pdev->dev, "RISC sleep command failed\n"); /* disable RISC */ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); /* flush all SCBs */ list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) wd719x_finish_cmd(scb, DID_RESET); spin_unlock_irqrestore(wd->sh->host_lock, flags); /* Try to reinit the RISC */ return wd719x_chip_init(wd) == 0 ? SUCCESS : FAILED; } static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { if (capacity >= 0x200000) { geom[0] = 255; /* heads */ geom[1] = 63; /* sectors */ } else { geom[0] = 64; /* heads */ geom[1] = 32; /* sectors */ } geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ return 0; } /* process a SCB-completion interrupt */ static inline void wd719x_interrupt_SCB(struct wd719x *wd, union wd719x_regs regs, struct wd719x_scb *scb) { int result; /* now have to find result from card */ switch (regs.bytes.SUE) { case WD719X_SUE_NOERRORS: result = DID_OK; break; case WD719X_SUE_REJECTED: dev_err(&wd->pdev->dev, "command rejected\n"); result = DID_ERROR; break; case WD719X_SUE_SCBQFULL: dev_err(&wd->pdev->dev, "SCB queue is full\n"); result = DID_ERROR; break; case WD719X_SUE_TERM: dev_dbg(&wd->pdev->dev, "SCB terminated by direct command\n"); result = DID_ABORT; /* or DID_RESET? 
*/ break; case WD719X_SUE_CHAN1ABORT: case WD719X_SUE_CHAN23ABORT: result = DID_ABORT; dev_err(&wd->pdev->dev, "DMA abort\n"); break; case WD719X_SUE_CHAN1PAR: case WD719X_SUE_CHAN23PAR: result = DID_PARITY; dev_err(&wd->pdev->dev, "DMA parity error\n"); break; case WD719X_SUE_TIMEOUT: result = DID_TIME_OUT; dev_dbg(&wd->pdev->dev, "selection timeout\n"); break; case WD719X_SUE_RESET: dev_dbg(&wd->pdev->dev, "bus reset occurred\n"); result = DID_RESET; break; case WD719X_SUE_BUSERROR: dev_dbg(&wd->pdev->dev, "SCSI bus error\n"); result = DID_ERROR; break; case WD719X_SUE_WRONGWAY: dev_err(&wd->pdev->dev, "wrong data transfer direction\n"); result = DID_ERROR; break; case WD719X_SUE_BADPHASE: dev_err(&wd->pdev->dev, "invalid SCSI phase\n"); result = DID_ERROR; break; case WD719X_SUE_TOOLONG: dev_err(&wd->pdev->dev, "record too long\n"); result = DID_ERROR; break; case WD719X_SUE_BUSFREE: dev_err(&wd->pdev->dev, "unexpected bus free\n"); result = DID_NO_CONNECT; /* or DID_ERROR ???*/ break; case WD719X_SUE_ARSDONE: dev_dbg(&wd->pdev->dev, "auto request sense\n"); if (regs.bytes.SCSI == 0) result = DID_OK; else result = DID_PARITY; break; case WD719X_SUE_IGNORED: dev_err(&wd->pdev->dev, "target id %d ignored command\n", scb->cmd->device->id); result = DID_NO_CONNECT; break; case WD719X_SUE_WRONGTAGS: dev_err(&wd->pdev->dev, "reversed tags\n"); result = DID_ERROR; break; case WD719X_SUE_BADTAGS: dev_err(&wd->pdev->dev, "tag type not supported by target\n"); result = DID_ERROR; break; case WD719X_SUE_NOSCAMID: dev_err(&wd->pdev->dev, "no SCAM soft ID available\n"); result = DID_ERROR; break; default: dev_warn(&wd->pdev->dev, "unknown SUE error code: 0x%x\n", regs.bytes.SUE); result = DID_ERROR; break; } wd719x_finish_cmd(scb, result); } static irqreturn_t wd719x_interrupt(int irq, void *dev_id) { struct wd719x *wd = dev_id; union wd719x_regs regs; unsigned long flags; u32 SCB_out; spin_lock_irqsave(wd->sh->host_lock, flags); /* read SCB pointer back from card */ SCB_out = wd719x_readl(wd, WD719X_AMR_SCB_OUT); /* read all status info at once */ regs.all = cpu_to_le32(wd719x_readl(wd, WD719X_AMR_OP_CODE)); switch (regs.bytes.INT) { case WD719X_INT_NONE: spin_unlock_irqrestore(wd->sh->host_lock, flags); return IRQ_NONE; case WD719X_INT_LINKNOSTATUS: dev_err(&wd->pdev->dev, "linked command completed with no status\n"); break; case WD719X_INT_BADINT: dev_err(&wd->pdev->dev, "unsolicited interrupt\n"); break; case WD719X_INT_NOERRORS: case WD719X_INT_LINKNOERRORS: case WD719X_INT_ERRORSLOGGED: case WD719X_INT_SPIDERFAILED: /* was the cmd completed a direct or SCB command? 
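	 * If it was an SCB, the completed SCB is found by matching the bus
	 * address the card reports in WD719X_AMR_SCB_OUT against the entries
	 * on the active_scbs list.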
*/ if (regs.bytes.OPC == WD719X_CMD_PROCESS_SCB) { struct wd719x_scb *scb; list_for_each_entry(scb, &wd->active_scbs, list) if (SCB_out == scb->phys) break; if (SCB_out == scb->phys) wd719x_interrupt_SCB(wd, regs, scb); else dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n"); } else dev_dbg(&wd->pdev->dev, "direct command 0x%x completed\n", regs.bytes.OPC); break; case WD719X_INT_PIOREADY: dev_err(&wd->pdev->dev, "card indicates PIO data ready but we never use PIO\n"); /* interrupt will not be cleared until all data is read */ break; default: dev_err(&wd->pdev->dev, "unknown interrupt reason: %d\n", regs.bytes.INT); } /* clear interrupt so another can happen */ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); spin_unlock_irqrestore(wd->sh->host_lock, flags); return IRQ_HANDLED; } static void wd719x_eeprom_reg_read(struct eeprom_93cx6 *eeprom) { struct wd719x *wd = eeprom->data; u8 reg = wd719x_readb(wd, WD719X_PCI_GPIO_DATA); eeprom->reg_data_out = reg & WD719X_EE_DO; } static void wd719x_eeprom_reg_write(struct eeprom_93cx6 *eeprom) { struct wd719x *wd = eeprom->data; u8 reg = 0; if (eeprom->reg_data_in) reg |= WD719X_EE_DI; if (eeprom->reg_data_clock) reg |= WD719X_EE_CLK; if (eeprom->reg_chip_select) reg |= WD719X_EE_CS; wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, reg); } /* read config from EEPROM so it can be downloaded by the RISC on (re-)init */ static void wd719x_read_eeprom(struct wd719x *wd) { struct eeprom_93cx6 eeprom; u8 gpio; struct wd719x_eeprom_header header; eeprom.data = wd; eeprom.register_read = wd719x_eeprom_reg_read; eeprom.register_write = wd719x_eeprom_reg_write; eeprom.width = PCI_EEPROM_WIDTH_93C46; /* set all outputs to low */ wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, 0); /* configure GPIO pins */ gpio = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL); /* GPIO outputs */ gpio &= (~(WD719X_EE_CLK | WD719X_EE_DI | WD719X_EE_CS)); /* GPIO input */ gpio |= WD719X_EE_DO; wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, gpio); /* read EEPROM header */ eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header, sizeof(header)); if (header.sig1 == 'W' && header.sig2 == 'D') eeprom_93cx6_multireadb(&eeprom, header.cfg_offset, (u8 *)wd->params, sizeof(struct wd719x_host_param)); else { /* default EEPROM values */ dev_warn(&wd->pdev->dev, "EEPROM signature is invalid (0x%02x 0x%02x), using default values\n", header.sig1, header.sig2); wd->params->ch_1_th = 0x10; /* 16 DWs = 64 B */ wd->params->scsi_conf = 0x4c; /* 48ma, spue, parity check */ wd->params->own_scsi_id = 0x07; /* ID 7, SCAM disabled */ wd->params->sel_timeout = 0x4d; /* 250 ms */ wd->params->sleep_timer = 0x01; wd->params->cdb_size = cpu_to_le16(0x5555); /* all 6 B */ wd->params->scsi_pad = 0x1b; if (wd->type == WD719X_TYPE_7193) /* narrow card - disable */ wd->params->wide = cpu_to_le32(0x00000000); else /* initiate & respond to WIDE messages */ wd->params->wide = cpu_to_le32(0xffffffff); wd->params->sync = cpu_to_le32(0xffffffff); wd->params->soft_mask = 0x00; /* all disabled */ wd->params->unsol_mask = 0x00; /* all disabled */ } /* disable TAGGED messages */ wd->params->tag_en = cpu_to_le16(0x0000); } /* Read card type from GPIO bits 1 and 3 */ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd) { u8 card = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL); card |= WD719X_GPIO_ID_BITS; wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, card); card = wd719x_readb(wd, WD719X_PCI_GPIO_DATA) & WD719X_GPIO_ID_BITS; switch (card) { case 0x08: return WD719X_TYPE_7193; case 0x02: return WD719X_TYPE_7197; case 0x00: return 
WD719X_TYPE_7296; default: dev_warn(&wd->pdev->dev, "unknown card type 0x%x\n", card); return WD719X_TYPE_UNKNOWN; } } static int wd719x_board_found(struct Scsi_Host *sh) { struct wd719x *wd = shost_priv(sh); static const char * const card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" }; int ret; INIT_LIST_HEAD(&wd->active_scbs); sh->base = pci_resource_start(wd->pdev, 0); wd->type = wd719x_detect_type(wd); wd->sh = sh; sh->irq = wd->pdev->irq; wd->fw_virt = NULL; /* memory area for host (EEPROM) parameters */ wd->params = dma_alloc_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param), &wd->params_phys, GFP_KERNEL); if (!wd->params) { dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n"); return -ENOMEM; } /* memory area for the RISC for hash table of outstanding requests */ wd->hash_virt = dma_alloc_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, &wd->hash_phys, GFP_KERNEL); if (!wd->hash_virt) { dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n"); ret = -ENOMEM; goto fail_free_params; } ret = request_irq(wd->pdev->irq, wd719x_interrupt, IRQF_SHARED, "wd719x", wd); if (ret) { dev_warn(&wd->pdev->dev, "unable to assign IRQ %d\n", wd->pdev->irq); goto fail_free_hash; } /* read parameters from EEPROM */ wd719x_read_eeprom(wd); ret = wd719x_chip_init(wd); if (ret) goto fail_free_irq; sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK; dev_info(&wd->pdev->dev, "%s at I/O 0x%lx, IRQ %u, SCSI ID %d\n", card_types[wd->type], sh->base, sh->irq, sh->this_id); return 0; fail_free_irq: free_irq(wd->pdev->irq, wd); fail_free_hash: dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, wd->hash_phys); fail_free_params: dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param), wd->params, wd->params_phys); return ret; } static const struct scsi_host_template wd719x_template = { .module = THIS_MODULE, .name = "Western Digital 719x", .cmd_size = sizeof(struct wd719x_scb), .queuecommand = wd719x_queuecommand, .eh_abort_handler = wd719x_abort, .eh_device_reset_handler = wd719x_dev_reset, .eh_bus_reset_handler = wd719x_bus_reset, .eh_host_reset_handler = wd719x_host_reset, .bios_param = wd719x_biosparam, .proc_name = "wd719x", .can_queue = 255, .this_id = 7, .sg_tablesize = WD719X_SG, }; static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d) { int err; struct Scsi_Host *sh; struct wd719x *wd; err = pci_enable_device(pdev); if (err) goto fail; err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n"); goto disable_device; } err = pci_request_regions(pdev, "wd719x"); if (err) goto disable_device; pci_set_master(pdev); err = -ENODEV; if (pci_resource_len(pdev, 0) == 0) goto release_region; err = -ENOMEM; sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x)); if (!sh) goto release_region; wd = shost_priv(sh); wd->base = pci_iomap(pdev, 0, 0); if (!wd->base) goto free_host; wd->pdev = pdev; err = wd719x_board_found(sh); if (err) goto unmap; err = scsi_add_host(sh, &wd->pdev->dev); if (err) goto destroy; scsi_scan_host(sh); pci_set_drvdata(pdev, sh); return 0; destroy: wd719x_destroy(wd); unmap: pci_iounmap(pdev, wd->base); free_host: scsi_host_put(sh); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } static void wd719x_pci_remove(struct pci_dev *pdev) { struct Scsi_Host *sh = pci_get_drvdata(pdev); struct wd719x *wd = shost_priv(sh); scsi_remove_host(sh); wd719x_destroy(wd); 
pci_iounmap(pdev, wd->base); pci_release_regions(pdev); pci_disable_device(pdev); scsi_host_put(sh); } static const struct pci_device_id wd719x_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) }, {} }; MODULE_DEVICE_TABLE(pci, wd719x_pci_table); static struct pci_driver wd719x_pci_driver = { .name = "wd719x", .id_table = wd719x_pci_table, .probe = wd719x_pci_probe, .remove = wd719x_pci_remove, }; module_pci_driver(wd719x_pci_driver); MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver"); MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("wd719x-wcs.bin"); MODULE_FIRMWARE("wd719x-risc.bin");
linux-master
drivers/scsi/wd719x.c
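A quick way to see what the geometry heuristic in wd719x_biosparam() above produces is to run the same translation in user space. The following stand-alone sketch is hypothetical and not part of the driver: it selects 255 heads/63 sectors for disks of at least 0x200000 sectors and 64/32 otherwise, then derives the cylinder count with a plain 64-bit division, where the in-kernel code goes through sector_div().

/* Hypothetical user-space sketch of the BIOS geometry translation used by
 * wd719x_biosparam(); capacities are in 512-byte sectors. */
#include <stdio.h>
#include <stdint.h>

struct chs { int heads, sectors, cylinders; };

static struct chs wd719x_style_geometry(uint64_t capacity)
{
	struct chs g;

	if (capacity >= 0x200000) {		/* 1 GiB or larger */
		g.heads = 255;
		g.sectors = 63;
	} else {
		g.heads = 64;
		g.sectors = 32;
	}
	/* the in-kernel code goes through sector_div() at this point;
	 * a plain division is used for this demo */
	g.cylinders = (int)(capacity / (uint64_t)(g.heads * g.sectors));
	return g;
}

int main(void)
{
	const uint64_t sizes[3] = { 0x100000, 0x200000, 0x4000000 };	/* 512 MiB, 1 GiB, 32 GiB */

	for (int i = 0; i < 3; i++) {
		struct chs g = wd719x_style_geometry(sizes[i]);

		printf("%llu sectors -> C/H/S %d/%d/%d\n",
		       (unsigned long long)sizes[i],
		       g.cylinders, g.heads, g.sectors);
	}
	return 0;
}

0x200000 sectors is exactly 1 GiB with 512-byte sectors, the traditional cut-over point between the small-disk 64/32 mapping and the large-disk 255/63 mapping.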
// SPDX-License-Identifier: GPL-2.0 /* * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc. * * Copyright 1993, 1994 Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * [email protected] * +1 (303) 786-7975 * * For more information, please consult the SCSI-CAM draft. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/pagemap.h> #include <linux/msdos_partition.h> #include <asm/unaligned.h> #include <scsi/scsicam.h> /** * scsi_bios_ptable - Read PC partition table out of first sector of device. * @dev: from this device * * Description: Reads the first sector from the device and returns %0x42 bytes * starting at offset %0x1be. * Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error. */ unsigned char *scsi_bios_ptable(struct block_device *dev) { struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping; unsigned char *res = NULL; struct folio *folio; folio = read_mapping_folio(mapping, 0, NULL); if (IS_ERR(folio)) return NULL; res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL); folio_put(folio); return res; } EXPORT_SYMBOL(scsi_bios_ptable); /** * scsi_partsize - Parse cylinders/heads/sectors from PC partition table * @bdev: block device to parse * @capacity: size of the disk in sectors * @geom: output in form of [hds, cylinders, sectors] * * Determine the BIOS mapping/geometry used to create the partition * table, storing the results in @geom. * * Returns: %false on failure, %true on success. */ bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]) { int cyl, ext_cyl, end_head, end_cyl, end_sector; unsigned int logical_end, physical_end, ext_physical_end; struct msdos_partition *p, *largest = NULL; void *buf; int ret = false; buf = scsi_bios_ptable(bdev); if (!buf) return false; if (*(unsigned short *) (buf + 64) == 0xAA55) { int largest_cyl = -1, i; for (i = 0, p = buf; i < 4; i++, p++) { if (!p->sys_ind) continue; #ifdef DEBUG printk("scsicam_bios_param : partition %d has system \n", i); #endif cyl = p->cyl + ((p->sector & 0xc0) << 2); if (cyl > largest_cyl) { largest_cyl = cyl; largest = p; } } } if (largest) { end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2); end_head = largest->end_head; end_sector = largest->end_sector & 0x3f; if (end_head + 1 == 0 || end_sector == 0) goto out_free_buf; #ifdef DEBUG printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n", end_head, end_cyl, end_sector); #endif physical_end = end_cyl * (end_head + 1) * end_sector + end_head * end_sector + end_sector; /* This is the actual _sector_ number at the end */ logical_end = get_unaligned_le32(&largest->start_sect) + get_unaligned_le32(&largest->nr_sects); /* This is for >1023 cylinders */ ext_cyl = (logical_end - (end_head * end_sector + end_sector)) / (end_head + 1) / end_sector; ext_physical_end = ext_cyl * (end_head + 1) * end_sector + end_head * end_sector + end_sector; #ifdef DEBUG printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n" ,logical_end, physical_end, ext_physical_end, ext_cyl); #endif if (logical_end == physical_end || (end_cyl == 1023 && ext_physical_end == logical_end)) { geom[0] = end_head + 1; geom[1] = end_sector; geom[2] = (unsigned long)capacity / ((end_head + 1) * end_sector); ret = true; goto out_free_buf; } #ifdef DEBUG printk("scsicam_bios_param : logical (%u) != physical (%u)\n", logical_end, physical_end); #endif } 
out_free_buf: kfree(buf); return ret; } EXPORT_SYMBOL(scsi_partsize); /* * Function : static int setsize(unsigned long capacity,unsigned int *cyls, * unsigned int *hds, unsigned int *secs); * * Purpose : to determine a near-optimal int 0x13 mapping for a * SCSI disk in terms of lost space of size capacity, storing * the results in *cyls, *hds, and *secs. * * Returns : -1 on failure, 0 on success. * * Extracted from * * WORKING X3T9.2 * DRAFT 792D * see http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf * * Revision 6 * 10-MAR-94 * Information technology - * SCSI-2 Common access method * transport and SCSI interface module * * ANNEX A : * * setsize() converts a read capacity value to int 13h * head-cylinder-sector requirements. It minimizes the value for * number of heads and maximizes the number of cylinders. This * will support rather large disks before the number of heads * will not fit in 4 bits (or 6 bits). This algorithm also * minimizes the number of sectors that will be unused at the end * of the disk while allowing for very large disks to be * accommodated. This algorithm does not use physical geometry. */ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds, unsigned int *secs) { unsigned int rv = 0; unsigned long heads, sectors, cylinders, temp; cylinders = 1024L; /* Set number of cylinders to max */ sectors = 62L; /* Maximize sectors per track */ temp = cylinders * sectors; /* Compute divisor for heads */ heads = capacity / temp; /* Compute value for number of heads */ if (capacity % temp) { /* If no remainder, done! */ heads++; /* Else, increment number of heads */ temp = cylinders * heads; /* Compute divisor for sectors */ sectors = capacity / temp; /* Compute value for sectors per track */ if (capacity % temp) { /* If no remainder, done! */ sectors++; /* Else, increment number of sectors */ temp = heads * sectors; /* Compute divisor for cylinders */ cylinders = capacity / temp; /* Compute number of cylinders */ } } if (cylinders == 0) rv = (unsigned) -1; /* Give error if 0 cylinders */ *cyls = (unsigned int) cylinders; /* Stuff return values */ *secs = (unsigned int) sectors; *hds = (unsigned int) heads; return (rv); } /** * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors. * @bdev: which device * @capacity: size of the disk in sectors * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders * * Description : determine the BIOS mapping/geometry used for a drive in a * SCSI-CAM system, storing the results in ip as required * by the HDIO_GETGEO ioctl(). * * Returns : -1 on failure, 0 on success. */ int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) { u64 capacity64 = capacity; /* Suppress gcc warning */ int ret = 0; /* try to infer mapping from partition table */ if (scsi_partsize(bdev, capacity, ip)) return 0; if (capacity64 < (1ULL << 32)) { /* * Pick some standard mapping with at most 1024 cylinders, and * at most 62 sectors per track - this works up to 7905 MB. */ ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2, (unsigned int *)ip + 0, (unsigned int *)ip + 1); } /* * If something went wrong, then apparently we have to return a geometry * with more than 1024 cylinders. */ if (ret || ip[0] > 255 || ip[1] > 63) { if ((capacity >> 11) > 65534) { ip[0] = 255; ip[1] = 63; } else { ip[0] = 64; ip[1] = 32; } if (capacity > 65535*63*255) ip[2] = 65535; else ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); } return 0; } EXPORT_SYMBOL(scsicam_bios_param);
linux-master
drivers/scsi/scsicam.c
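The Annex A setsize() routine in scsicam.c above is self-contained enough to be exercised directly. The sketch below is a hypothetical user-space copy of that computation (cam_setsize is just a local name, not a kernel symbol); it starts from 1024 cylinders and 62 sectors per track, derives the head count, then re-derives sectors and cylinders so that little capacity is wasted, as described in the SCSI-2 CAM draft excerpt quoted in the source.

/* Hypothetical user-space copy of the SCSI-CAM Annex A setsize() computation
 * from scsicam.c above; cam_setsize is a local name, not a kernel symbol. */
#include <stdio.h>

static int cam_setsize(unsigned long capacity, unsigned int *cyls,
		       unsigned int *hds, unsigned int *secs)
{
	unsigned long heads, sectors, cylinders, temp;

	cylinders = 1024L;			/* start from the int 13h maximum */
	sectors = 62L;				/* and a near-maximal sectors/track */
	temp = cylinders * sectors;
	heads = capacity / temp;		/* derive the head count */
	if (capacity % temp) {			/* not an exact fit: grow heads... */
		heads++;
		temp = cylinders * heads;
		sectors = capacity / temp;	/* ...then re-derive sectors... */
		if (capacity % temp) {
			sectors++;
			temp = heads * sectors;
			cylinders = capacity / temp;	/* ...and finally cylinders */
		}
	}
	if (cylinders == 0)
		return -1;			/* capacity too small to map */

	*cyls = (unsigned int)cylinders;
	*secs = (unsigned int)sectors;
	*hds = (unsigned int)heads;
	return 0;
}

int main(void)
{
	unsigned int c, h, s;

	/* 4194304 sectors is a 2 GiB disk with 512-byte sectors */
	if (cam_setsize(4194304UL, &c, &h, &s) == 0)
		printf("2 GiB -> %u cylinders, %u heads, %u sectors/track\n", c, h, s);
	return 0;
}

scsicam_bios_param() only consults this helper for capacities below 2^32 sectors, and falls back to a fixed mapping when the computed result would exceed 255 heads or 63 sectors per track.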
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for Adaptec AHA-1542 SCSI host adapters * * Copyright (C) 1992 Tommy Thorn * Copyright (C) 1993, 1994, 1995 Eric Youngdale * Copyright (C) 2015 Ondrej Zary */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/io.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "aha1542.h" #define MAXBOARDS 4 static bool isapnp = 1; module_param(isapnp, bool, 0); MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)"); static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 }; module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)"); /* time AHA spends on the AT-bus during data transfer */ static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */ module_param_array(bus_on, int, NULL, 0); MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])"); /* time AHA spends off the bus (not to monopolize it) during data transfer */ static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */ module_param_array(bus_off, int, NULL, 0); MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])"); /* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */ static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 }; module_param_array(dma_speed, int, NULL, 0); MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])"); #define BIOS_TRANSLATION_6432 1 /* Default case these days */ #define BIOS_TRANSLATION_25563 2 /* Big disk case */ struct aha1542_hostdata { /* This will effectively start both of them at the first mailbox */ int bios_translation; /* Mapping bios uses - for compatibility */ int aha1542_last_mbi_used; int aha1542_last_mbo_used; struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES]; struct mailbox *mb; dma_addr_t mb_handle; struct ccb *ccb; dma_addr_t ccb_handle; }; #define AHA1542_MAX_SECTORS 16 struct aha1542_cmd { /* bounce buffer */ void *data_buffer; dma_addr_t data_buffer_handle; }; static inline void aha1542_intr_reset(u16 base) { outb(IRST, CONTROL(base)); } static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout) { bool delayed = true; if (timeout == 0) { timeout = 3000000; delayed = false; } while (1) { u8 bits = inb(port) & mask; if ((bits & allof) == allof && ((bits & noneof) == 0)) break; if (delayed) mdelay(1); if (--timeout == 0) return false; } return true; } static int aha1542_outb(unsigned int base, u8 val) { if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) return 1; outb(val, DATA(base)); return 0; } static int aha1542_out(unsigned int base, u8 *buf, int len) { while (len--) { if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) return 1; outb(*buf++, DATA(base)); } if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0)) return 1; return 0; } /* * Only used at boot time, so we do not need to worry about latency as much * here */ static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout) { while (len--) { if (!wait_mask(STATUS(base), DF, DF, 0, timeout)) return 1; *buf++ = inb(DATA(base)); } return 0; } static int makecode(unsigned hosterr, unsigned scsierr) { switch (hosterr) { case 0x0: case 
0xa: /* Linked command complete without error and linked normally */ case 0xb: /* Linked command complete without error, interrupt generated */ hosterr = 0; break; case 0x11: /* Selection time out-The initiator selection or target * reselection was not complete within the SCSI Time out period */ hosterr = DID_TIME_OUT; break; case 0x12: /* Data overrun/underrun-The target attempted to transfer more data * than was allocated by the Data Length field or the sum of the * Scatter / Gather Data Length fields. */ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */ case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was * invalid. This usually indicates a software failure. */ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. * This usually indicates a software failure. */ case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set * of linked CCB's does not specify the same logical unit number as * the first. */ case 0x18: /* Invalid Target Direction received from Host-The direction of a * Target Mode CCB was invalid. */ case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was * received to service data transfer between the same target LUN * and initiator SCSI ID in the same direction. */ case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero * length segment or invalid segment list boundaries was received. * A CCB parameter was invalid. */ #ifdef DEBUG printk("Aha1542: %x %x\n", hosterr, scsierr); #endif hosterr = DID_ERROR; /* Couldn't find any better */ break; case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus * phase sequence was requested by the target. The host adapter * will generate a SCSI Reset Condition, notifying the host with * a SCRD interrupt */ hosterr = DID_RESET; break; default: printk(KERN_ERR "aha1542: makecode: unknown hoststatus %x\n", hosterr); break; } return scsierr | (hosterr << 16); } static int aha1542_test_port(struct Scsi_Host *sh) { int i; /* Quick and dirty test for presence of the card. */ if (inb(STATUS(sh->io_port)) == 0xff) return 0; /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */ /* In case some other card was probing here, reset interrupts */ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); mdelay(20); /* Wait a little bit for things to settle down. 
*/ /* Expect INIT and IDLE, any of the others are bad */ if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) return 0; /* Shouldn't have generated any interrupts during reset */ if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) return 0; /* * Perform a host adapter inquiry instead so we do not need to set * up the mailboxes ahead of time */ aha1542_outb(sh->io_port, CMD_INQUIRY); for (i = 0; i < 4; i++) { if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) return 0; (void)inb(DATA(sh->io_port)); } /* Reading port should reset DF */ if (inb(STATUS(sh->io_port)) & DF) return 0; /* When HACC, command is completed, and we're though testing */ if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) return 0; /* Clear interrupts */ outb(IRST, CONTROL(sh->io_port)); return 1; } static void aha1542_free_cmd(struct scsi_cmnd *cmd) { struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); if (cmd->sc_data_direction == DMA_FROM_DEVICE) { struct request *rq = scsi_cmd_to_rq(cmd); void *buf = acmd->data_buffer; struct req_iterator iter; struct bio_vec bv; rq_for_each_segment(bv, rq, iter) { memcpy_to_bvec(&bv, buf); buf += bv.bv_len; } } scsi_dma_unmap(cmd); } static irqreturn_t aha1542_interrupt(int irq, void *dev_id) { struct Scsi_Host *sh = dev_id; struct aha1542_hostdata *aha1542 = shost_priv(sh); int errstatus, mbi, mbo, mbistatus; int number_serviced; unsigned long flags; struct scsi_cmnd *tmp_cmd; int flag; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; #ifdef DEBUG { flag = inb(INTRFLAGS(sh->io_port)); shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); if (!(flag & ANYINTR)) printk("no interrupt?"); if (flag & MBIF) printk("MBIF "); if (flag & MBOA) printk("MBOF "); if (flag & HACC) printk("HACC "); if (flag & SCRD) printk("SCRD "); printk("status %02x\n", inb(STATUS(sh->io_port))); } #endif number_serviced = 0; spin_lock_irqsave(sh->host_lock, flags); while (1) { flag = inb(INTRFLAGS(sh->io_port)); /* * Check for unusual interrupts. If any of these happen, we should * probably do something special, but for now just printing a message * is sufficient. A SCSI reset detected is something that we really * need to deal with in some way. */ if (flag & ~MBIF) { if (flag & MBOA) printk("MBOF "); if (flag & HACC) printk("HACC "); if (flag & SCRD) printk("SCRD "); } aha1542_intr_reset(sh->io_port); mbi = aha1542->aha1542_last_mbi_used + 1; if (mbi >= 2 * AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES; do { if (mb[mbi].status != 0) break; mbi++; if (mbi >= 2 * AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES; } while (mbi != aha1542->aha1542_last_mbi_used); if (mb[mbi].status == 0) { spin_unlock_irqrestore(sh->host_lock, flags); /* Hmm, no mail. 
Must have read it the last time around */ if (!number_serviced) shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); return IRQ_HANDLED; } mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb); mbistatus = mb[mbi].status; mb[mbi].status = 0; aha1542->aha1542_last_mbi_used = mbi; #ifdef DEBUG if (ccb[mbo].tarstat | ccb[mbo].hastat) shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status); #endif if (mbistatus == 3) continue; /* Aborted command not found */ #ifdef DEBUG shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); #endif tmp_cmd = aha1542->int_cmds[mbo]; if (!tmp_cmd) { spin_unlock_irqrestore(sh->host_lock, flags); shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, ccb[mbo].hastat, ccb[mbo].idlun, mbo); return IRQ_HANDLED; } aha1542_free_cmd(tmp_cmd); /* * Fetch the sense data, and tuck it away, in the required slot. The * Adaptec automatically fetches it, and there is no guarantee that * we will still have it in the cdb when we come back */ if (ccb[mbo].tarstat == 2) memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen], SCSI_SENSE_BUFFERSIZE); /* is there mail :-) */ /* more error checking left out here */ if (mbistatus != 1) /* This is surely wrong, but I don't know what's right */ errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat); else errstatus = 0; #ifdef DEBUG if (errstatus) shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, ccb[mbo].hastat, ccb[mbo].tarstat); if (ccb[mbo].tarstat == 2) print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12); if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus); #endif tmp_cmd->result = errstatus; aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as * far as queuecommand is concerned */ scsi_done(tmp_cmd); number_serviced++; } } static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); struct aha1542_hostdata *aha1542 = shost_priv(sh); u8 direction; u8 target = cmd->device->id; u8 lun = cmd->device->lun; unsigned long flags; int bufflen = scsi_bufflen(cmd); int mbo; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; if (*cmd->cmnd == REQUEST_SENSE) { /* Don't do the command - we have the sense data already */ cmd->result = 0; scsi_done(cmd); return 0; } #ifdef DEBUG { int i = -1; if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10) i = xscsi2int(cmd->cmnd + 2); else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6) i = scsi2int(cmd->cmnd + 2); shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", target, *cmd->cmnd, i, bufflen); print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); } #endif if (cmd->sc_data_direction == DMA_TO_DEVICE) { struct request *rq = scsi_cmd_to_rq(cmd); void *buf = acmd->data_buffer; struct req_iterator iter; struct bio_vec bv; rq_for_each_segment(bv, rq, iter) { memcpy_from_bvec(buf, &bv); buf += bv.bv_len; } } /* * Use the outgoing mailboxes in a round-robin fashion, because this * is how the host adapter will scan for them */ spin_lock_irqsave(sh->host_lock, flags); mbo = aha1542->aha1542_last_mbo_used + 1; if (mbo >= AHA1542_MAILBOXES) mbo = 0; do { if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) break; mbo++; if (mbo >= 
AHA1542_MAILBOXES) mbo = 0; } while (mbo != aha1542->aha1542_last_mbo_used); if (mb[mbo].status || aha1542->int_cmds[mbo]) panic("Unable to find empty mailbox for aha1542.\n"); aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from * screwing with this cdb. */ aha1542->aha1542_last_mbo_used = mbo; #ifdef DEBUG shost_printk(KERN_DEBUG, sh, "Sending command (%d)...", mbo); #endif /* This gets trashed for some reason */ any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb)); memset(&ccb[mbo], 0, sizeof(struct ccb)); ccb[mbo].cdblen = cmd->cmd_len; direction = 0; if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6) direction = 8; else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6) direction = 16; memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen); ccb[mbo].op = 0; /* SCSI Initiator Command */ any2scsi(ccb[mbo].datalen, bufflen); if (bufflen) any2scsi(ccb[mbo].dataptr, acmd->data_buffer_handle); else any2scsi(ccb[mbo].dataptr, 0); ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */ ccb[mbo].rsalen = 16; ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0; ccb[mbo].commlinkid = 0; #ifdef DEBUG print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10); printk("aha1542_queuecommand: now waiting for interrupt "); #endif mb[mbo].status = 1; aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI); spin_unlock_irqrestore(sh->host_lock, flags); return 0; } /* Initialize mailboxes */ static void setup_mailboxes(struct Scsi_Host *sh) { struct aha1542_hostdata *aha1542 = shost_priv(sh); u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0}; int i; for (i = 0; i < AHA1542_MAILBOXES; i++) { aha1542->mb[i].status = 0; any2scsi(aha1542->mb[i].ccbptr, aha1542->ccb_handle + i * sizeof(struct ccb)); aha1542->mb[AHA1542_MAILBOXES + i].status = 0; } aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ any2scsi(mb_cmd + 2, aha1542->mb_handle); if (aha1542_out(sh->io_port, mb_cmd, 5)) shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); aha1542_intr_reset(sh->io_port); } static int aha1542_getconfig(struct Scsi_Host *sh) { u8 inquiry_result[3]; int i; i = inb(STATUS(sh->io_port)); if (i & DF) { i = inb(DATA(sh->io_port)); } aha1542_outb(sh->io_port, CMD_RETCONF); aha1542_in(sh->io_port, inquiry_result, 3, 0); if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) shost_printk(KERN_ERR, sh, "error querying board settings\n"); aha1542_intr_reset(sh->io_port); switch (inquiry_result[0]) { case 0x80: sh->dma_channel = 7; break; case 0x40: sh->dma_channel = 6; break; case 0x20: sh->dma_channel = 5; break; case 0x01: sh->dma_channel = 0; break; case 0: /* * This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel. * Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. 
*/ sh->dma_channel = 0xFF; break; default: shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); return -1; } switch (inquiry_result[1]) { case 0x40: sh->irq = 15; break; case 0x20: sh->irq = 14; break; case 0x8: sh->irq = 12; break; case 0x4: sh->irq = 11; break; case 0x2: sh->irq = 10; break; case 0x1: sh->irq = 9; break; default: shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); return -1; } sh->this_id = inquiry_result[2] & 7; return 0; } /* * This function should only be called for 1542C boards - we can detect * the special firmware settings and unlock the board */ static int aha1542_mbenable(struct Scsi_Host *sh) { static u8 mbenable_cmd[3]; static u8 mbenable_result[2]; int retval; retval = BIOS_TRANSLATION_6432; aha1542_outb(sh->io_port, CMD_EXTBIOS); if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) return retval; if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) goto fail; aha1542_intr_reset(sh->io_port); if ((mbenable_result[0] & 0x08) || mbenable_result[1]) { mbenable_cmd[0] = CMD_MBENABLE; mbenable_cmd[1] = 0; mbenable_cmd[2] = mbenable_result[1]; if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03)) retval = BIOS_TRANSLATION_25563; if (aha1542_out(sh->io_port, mbenable_cmd, 3)) goto fail; } while (0) { fail: shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); } aha1542_intr_reset(sh->io_port); return retval; } /* Query the board to find out if it is a 1542 or a 1740, or whatever. */ static int aha1542_query(struct Scsi_Host *sh) { struct aha1542_hostdata *aha1542 = shost_priv(sh); u8 inquiry_result[4]; int i; i = inb(STATUS(sh->io_port)); if (i & DF) { i = inb(DATA(sh->io_port)); } aha1542_outb(sh->io_port, CMD_INQUIRY); aha1542_in(sh->io_port, inquiry_result, 4, 0); if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) shost_printk(KERN_ERR, sh, "error querying card type\n"); aha1542_intr_reset(sh->io_port); aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */ /* * For an AHA1740 series board, we ignore the board since there is a * hardware bug which can lead to wrong blocks being returned if the board * is operating in the 1542 emulation mode. Since there is an extended mode * driver, we simply ignore the board and let the 1740 driver pick it up. 
*/ if (inquiry_result[0] == 0x43) { shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); return 1; } /* * Always call this - boards that do not support extended bios translation * will ignore the command, and we will set the proper default */ aha1542->bios_translation = aha1542_mbenable(sh); return 0; } static u8 dma_speed_hw(int dma_speed) { switch (dma_speed) { case 5: return 0x00; case 6: return 0x04; case 7: return 0x01; case 8: return 0x02; case 10: return 0x03; } return 0xff; /* invalid */ } /* Set the Bus on/off-times as not to ruin floppy performance */ static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) { if (bus_on > 0) { u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) }; aha1542_intr_reset(sh->io_port); if (aha1542_out(sh->io_port, oncmd, 2)) goto fail; } if (bus_off > 0) { u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) }; aha1542_intr_reset(sh->io_port); if (aha1542_out(sh->io_port, offcmd, 2)) goto fail; } if (dma_speed_hw(dma_speed) != 0xff) { u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) }; aha1542_intr_reset(sh->io_port); if (aha1542_out(sh->io_port, dmacmd, 2)) goto fail; } aha1542_intr_reset(sh->io_port); return; fail: shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); aha1542_intr_reset(sh->io_port); } /* return non-zero on detection */ static struct Scsi_Host *aha1542_hw_init(const struct scsi_host_template *tpnt, struct device *pdev, int indx) { unsigned int base_io = io[indx]; struct Scsi_Host *sh; struct aha1542_hostdata *aha1542; char dma_info[] = "no DMA"; if (base_io == 0) return NULL; if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542")) return NULL; sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); if (!sh) goto release; aha1542 = shost_priv(sh); sh->unique_id = base_io; sh->io_port = base_io; sh->n_io_port = AHA1542_REGION_SIZE; aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1; aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1; if (!aha1542_test_port(sh)) goto unregister; aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); if (aha1542_query(sh)) goto unregister; if (aha1542_getconfig(sh) == -1) goto unregister; if (sh->dma_channel != 0xFF) snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", sh->this_id, base_io, sh->irq, dma_info); if (aha1542->bios_translation == BIOS_TRANSLATION_25563) shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); if (dma_set_mask_and_coherent(pdev, DMA_BIT_MASK(24)) < 0) goto unregister; aha1542->mb = dma_alloc_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox), &aha1542->mb_handle, GFP_KERNEL); if (!aha1542->mb) goto unregister; aha1542->ccb = dma_alloc_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb), &aha1542->ccb_handle, GFP_KERNEL); if (!aha1542->ccb) goto free_mb; setup_mailboxes(sh); if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); goto free_ccb; } if (sh->dma_channel != 0xFF) { if (request_dma(sh->dma_channel, "aha1542")) { shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); goto free_irq; } if (sh->dma_channel == 0 || sh->dma_channel >= 5) { set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); enable_dma(sh->dma_channel); } } if (scsi_add_host(sh, pdev)) goto free_dma; scsi_scan_host(sh); return sh; free_dma: if (sh->dma_channel 
!= 0xff) free_dma(sh->dma_channel); free_irq: free_irq(sh->irq, sh); free_ccb: dma_free_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb), aha1542->ccb, aha1542->ccb_handle); free_mb: dma_free_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox), aha1542->mb, aha1542->mb_handle); unregister: scsi_host_put(sh); release: release_region(base_io, AHA1542_REGION_SIZE); return NULL; } static int aha1542_release(struct Scsi_Host *sh) { struct aha1542_hostdata *aha1542 = shost_priv(sh); struct device *dev = sh->dma_dev; scsi_remove_host(sh); if (sh->dma_channel != 0xff) free_dma(sh->dma_channel); dma_free_coherent(dev, AHA1542_MAILBOXES * sizeof(struct ccb), aha1542->ccb, aha1542->ccb_handle); dma_free_coherent(dev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox), aha1542->mb, aha1542->mb_handle); if (sh->irq) free_irq(sh->irq, sh); if (sh->io_port && sh->n_io_port) release_region(sh->io_port, sh->n_io_port); scsi_host_put(sh); return 0; } /* * This is a device reset. This is handled by sending a special command * to the device. */ static int aha1542_dev_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *sh = cmd->device->host; struct aha1542_hostdata *aha1542 = shost_priv(sh); unsigned long flags; struct mailbox *mb = aha1542->mb; u8 target = cmd->device->id; u8 lun = cmd->device->lun; int mbo; struct ccb *ccb = aha1542->ccb; spin_lock_irqsave(sh->host_lock, flags); mbo = aha1542->aha1542_last_mbo_used + 1; if (mbo >= AHA1542_MAILBOXES) mbo = 0; do { if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) break; mbo++; if (mbo >= AHA1542_MAILBOXES) mbo = 0; } while (mbo != aha1542->aha1542_last_mbo_used); if (mb[mbo].status || aha1542->int_cmds[mbo]) panic("Unable to find empty mailbox for aha1542.\n"); aha1542->int_cmds[mbo] = cmd; /* This will effectively * prevent someone else from * screwing with this cdb. */ aha1542->aha1542_last_mbo_used = mbo; /* This gets trashed for some reason */ any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb)); memset(&ccb[mbo], 0, sizeof(struct ccb)); ccb[mbo].op = 0x81; /* BUS DEVICE RESET */ ccb[mbo].idlun = (target & 7) << 5 | (lun & 7); /*SCSI Target Id */ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0; ccb[mbo].commlinkid = 0; /* * Now tell the 1542 to flush all pending commands for this * target */ aha1542_outb(sh->io_port, CMD_START_SCSI); spin_unlock_irqrestore(sh->host_lock, flags); scmd_printk(KERN_WARNING, cmd, "Trying device reset for target\n"); return SUCCESS; } static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd) { struct Scsi_Host *sh = cmd->device->host; struct aha1542_hostdata *aha1542 = shost_priv(sh); unsigned long flags; int i; spin_lock_irqsave(sh->host_lock, flags); /* * This does a scsi reset for all devices on the bus. * In principle, we could also reset the 1542 - should * we do this? Try this first, and we can add that later * if it turns out to be useful. */ outb(reset_cmd, CONTROL(cmd->device->host->io_port)); if (!wait_mask(STATUS(cmd->device->host->io_port), STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) { spin_unlock_irqrestore(sh->host_lock, flags); return FAILED; } /* * We need to do this too before the 1542 can interact with * us again after host reset. */ if (reset_cmd & HRST) setup_mailboxes(cmd->device->host); /* * Now try to pick up the pieces. For all pending commands, * free any internal data structures, and basically clear things * out. We do not try and restart any commands or anything - * the strategy handler takes care of that crap. 
*/ shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no); for (i = 0; i < AHA1542_MAILBOXES; i++) { if (aha1542->int_cmds[i] != NULL) { struct scsi_cmnd *tmp_cmd; tmp_cmd = aha1542->int_cmds[i]; if (tmp_cmd->device->soft_reset) { /* * If this device implements the soft reset option, * then it is still holding onto the command, and * may yet complete it. In this case, we don't * flush the data. */ continue; } aha1542_free_cmd(tmp_cmd); aha1542->int_cmds[i] = NULL; aha1542->mb[i].status = 0; } } spin_unlock_irqrestore(sh->host_lock, flags); return SUCCESS; } static int aha1542_bus_reset(struct scsi_cmnd *cmd) { return aha1542_reset(cmd, SCRST); } static int aha1542_host_reset(struct scsi_cmnd *cmd) { return aha1542_reset(cmd, HRST | SCRST); } static int aha1542_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { struct aha1542_hostdata *aha1542 = shost_priv(sdev->host); if (capacity >= 0x200000 && aha1542->bios_translation == BIOS_TRANSLATION_25563) { /* Please verify that this is the same as what DOS returns */ geom[0] = 255; /* heads */ geom[1] = 63; /* sectors */ } else { geom[0] = 64; /* heads */ geom[1] = 32; /* sectors */ } geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ return 0; } MODULE_LICENSE("GPL"); static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); acmd->data_buffer = dma_alloc_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS, &acmd->data_buffer_handle, GFP_KERNEL); if (!acmd->data_buffer) return -ENOMEM; return 0; } static int aha1542_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); dma_free_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS, acmd->data_buffer, acmd->data_buffer_handle); return 0; } static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .proc_name = "aha1542", .name = "Adaptec 1542", .cmd_size = sizeof(struct aha1542_cmd), .queuecommand = aha1542_queuecommand, .eh_device_reset_handler= aha1542_dev_reset, .eh_bus_reset_handler = aha1542_bus_reset, .eh_host_reset_handler = aha1542_host_reset, .bios_param = aha1542_biosparam, .init_cmd_priv = aha1542_init_cmd_priv, .exit_cmd_priv = aha1542_exit_cmd_priv, .can_queue = AHA1542_MAILBOXES, .this_id = 7, .max_sectors = AHA1542_MAX_SECTORS, .sg_tablesize = SG_ALL, }; static int aha1542_isa_match(struct device *pdev, unsigned int ndev) { struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); if (!sh) return 0; dev_set_drvdata(pdev, sh); return 1; } static void aha1542_isa_remove(struct device *pdev, unsigned int ndev) { aha1542_release(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); } static struct isa_driver aha1542_isa_driver = { .match = aha1542_isa_match, .remove = aha1542_isa_remove, .driver = { .name = "aha1542" }, }; static int isa_registered; #ifdef CONFIG_PNP static const struct pnp_device_id aha1542_pnp_ids[] = { { .id = "ADP1542" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids); static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) { int indx; struct Scsi_Host *sh; for (indx = 0; indx < ARRAY_SIZE(io); indx++) { if (io[indx]) continue; if (pnp_activate_dev(pdev) < 0) continue; io[indx] = pnp_port_start(pdev, 0); /* * The card can be queried for its DMA, we have * the DMA set up that is enough */ dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 
0x%03X", io[indx]); } sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); if (!sh) return -ENODEV; pnp_set_drvdata(pdev, sh); return 0; } static void aha1542_pnp_remove(struct pnp_dev *pdev) { aha1542_release(pnp_get_drvdata(pdev)); pnp_set_drvdata(pdev, NULL); } static struct pnp_driver aha1542_pnp_driver = { .name = "aha1542", .id_table = aha1542_pnp_ids, .probe = aha1542_pnp_probe, .remove = aha1542_pnp_remove, }; static int pnp_registered; #endif /* CONFIG_PNP */ static int __init aha1542_init(void) { int ret = 0; #ifdef CONFIG_PNP if (isapnp) { ret = pnp_register_driver(&aha1542_pnp_driver); if (!ret) pnp_registered = 1; } #endif ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS); if (!ret) isa_registered = 1; #ifdef CONFIG_PNP if (pnp_registered) ret = 0; #endif if (isa_registered) ret = 0; return ret; } static void __exit aha1542_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_driver(&aha1542_pnp_driver); #endif if (isa_registered) isa_unregister_driver(&aha1542_isa_driver); } module_init(aha1542_init); module_exit(aha1542_exit);
linux-master
drivers/scsi/aha1542.c
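aha1542_queuecommand() above picks an outgoing mailbox by scanning forward from the slot after the last one used, wrapping at AHA1542_MAILBOXES, because the adapter walks the mailbox array in the same order. The sketch below is a hypothetical stand-alone rendering of that round-robin scan; MAILBOXES, busy[] and last_mbo_used are stand-ins for the real mailbox array, int_cmds[] and aha1542_last_mbo_used state.

/* Hypothetical stand-alone sketch of the round-robin outgoing-mailbox scan
 * used by aha1542_queuecommand(); MAILBOXES, busy[] and last_mbo_used stand
 * in for AHA1542_MAILBOXES, the mb[]/int_cmds[] state and
 * aha1542_last_mbo_used. */
#include <stdio.h>
#include <stdbool.h>

#define MAILBOXES 8

static bool busy[MAILBOXES];
static int last_mbo_used = MAILBOXES - 1;

/* Scan forward from the slot after the last one used, wrapping around, and
 * claim the first free mailbox; return -1 if every slot is occupied. */
static int find_free_mailbox(void)
{
	int mbo = last_mbo_used + 1;

	if (mbo >= MAILBOXES)
		mbo = 0;
	do {
		if (!busy[mbo]) {
			busy[mbo] = true;
			last_mbo_used = mbo;
			return mbo;
		}
		mbo++;
		if (mbo >= MAILBOXES)
			mbo = 0;
	} while (mbo != last_mbo_used);

	return -1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("queued into mailbox %d\n", find_free_mailbox());

	busy[1] = false;	/* pretend the command in mailbox 1 completed */
	printf("next free mailbox: %d\n", find_free_mailbox());
	return 0;
}

Remembering the last slot used keeps newly queued work close to where the adapter's own scan pointer is expected to be, instead of restarting the search at mailbox 0 every time.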
// SPDX-License-Identifier: GPL-2.0-only /* * HighPoint RR3xxx/4xxx controller driver for Linux * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved. * * Please report bugs/comments/suggestions to [email protected] * * For more information, visit http://www.highpoint-tech.com */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/div64.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_host.h> #include "hptiop.h" MODULE_AUTHOR("HighPoint Technologies, Inc."); MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver"); static char driver_name[] = "hptiop"; static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver"; static const char driver_ver[] = "v1.10.0"; static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, struct hpt_iop_request_scsi_command *req); static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag); static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag); static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec) { u32 req = 0; int i; for (i = 0; i < millisec; i++) { req = readl(&hba->u.itl.iop->inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; msleep(1); } if (req != IOPMU_QUEUE_EMPTY) { writel(req, &hba->u.itl.iop->outbound_queue); readl(&hba->u.itl.iop->outbound_intstatus); return 0; } return -1; } static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec) { return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); } static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec) { return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); } static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag) { if (tag & IOPMU_QUEUE_ADDR_HOST_BIT) hptiop_host_request_callback_itl(hba, tag & ~IOPMU_QUEUE_ADDR_HOST_BIT); else hptiop_iop_request_callback_itl(hba, tag); } static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba) { u32 req; while ((req = readl(&hba->u.itl.iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header __iomem * p; p = (struct hpt_iop_request_header __iomem *) ((char __iomem *)hba->u.itl.iop + req); if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) { if (readl(&p->context)) hptiop_request_callback_itl(hba, req); else writel(1, &p->context); } else hptiop_request_callback_itl(hba, req); } } } static int iop_intr_itl(struct hptiop_hba *hba) { struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop; void __iomem *plx = hba->u.itl.plx; u32 status; int ret = 0; if (plx && readl(plx + 0x11C5C) & 0xf) writel(1, plx + 0x11C60); status = readl(&iop->outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u32 msg = readl(&iop->outbound_msgaddr0); dprintk("received outbound msg %x\n", msg); writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus); hptiop_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return 
ret; } static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu) { u32 outbound_tail = readl(&mu->outbound_tail); u32 outbound_head = readl(&mu->outbound_head); if (outbound_tail != outbound_head) { u64 p; memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; writel(outbound_tail, &mu->outbound_tail); return p; } else return 0; } static void mv_inbound_write(u64 p, struct hptiop_hba *hba) { u32 inbound_head = readl(&hba->u.mv.mu->inbound_head); u32 head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8); writel(head, &hba->u.mv.mu->inbound_head); writel(MVIOP_MU_INBOUND_INT_POSTQUEUE, &hba->u.mv.regs->inbound_doorbell); } static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag) { u32 req_type = (tag >> 5) & 0x7; struct hpt_iop_request_scsi_command *req; dprintk("hptiop_request_callback_mv: tag=%llx\n", tag); BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0); switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->msg_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: req = hba->reqs[tag >> 8].req_virt; if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)) req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); hptiop_finish_scsi_req(hba, tag>>8, req); break; default: break; } } static int iop_intr_mv(struct hptiop_hba *hba) { u32 status; int ret = 0; status = readl(&hba->u.mv.regs->outbound_doorbell); writel(~status, &hba->u.mv.regs->outbound_doorbell); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u32 msg; msg = readl(&hba->u.mv.mu->outbound_msg); dprintk("received outbound msg %x\n", msg); hptiop_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { u64 tag; while ((tag = mv_outbound_read(hba->u.mv.mu))) hptiop_request_callback_mv(hba, tag); ret = 1; } return ret; } static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag) { u32 req_type = _tag & 0xf; struct hpt_iop_request_scsi_command *req; switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->msg_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: req = hba->reqs[(_tag >> 4) & 0xff].req_virt; if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT)) req->header.result = IOP_RESULT_SUCCESS; hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req); break; default: break; } } static int iop_intr_mvfrey(struct hptiop_hba *hba) { u32 _tag, status, cptr, cur_rptr; int ret = 0; if (hba->initialized) writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); status = readl(&(hba->u.mvfrey.mu->f0_doorbell)); if (status) { writel(status, &(hba->u.mvfrey.mu->f0_doorbell)); if (status & CPU_TO_F0_DRBL_MSG_BIT) { u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a)); dprintk("received outbound msg %x\n", msg); hptiop_message_callback(hba, msg); } ret = 1; } status = readl(&(hba->u.mvfrey.mu->isr_cause)); if (status) { writel(status, &(hba->u.mvfrey.mu->isr_cause)); do { cptr = *hba->u.mvfrey.outlist_cptr & 0xff; cur_rptr = hba->u.mvfrey.outlist_rptr; while (cur_rptr != cptr) { cur_rptr++; if (cur_rptr == hba->u.mvfrey.list_count) cur_rptr = 0; _tag = hba->u.mvfrey.outlist[cur_rptr].val; BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS)); hptiop_request_callback_mvfrey(hba, _tag); ret = 1; } hba->u.mvfrey.outlist_rptr = cur_rptr; } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); } if (hba->initialized) writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); return ret; } 
static int iop_send_sync_request_itl(struct hptiop_hba *hba, void __iomem *_req, u32 millisec) { struct hpt_iop_request_header __iomem *req = _req; u32 i; writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags); writel(0, &req->context); writel((unsigned long)req - (unsigned long)hba->u.itl.iop, &hba->u.itl.iop->inbound_queue); readl(&hba->u.itl.iop->outbound_intstatus); for (i = 0; i < millisec; i++) { iop_intr_itl(hba); if (readl(&req->context)) return 0; msleep(1); } return -1; } static int iop_send_sync_request_mv(struct hptiop_hba *hba, u32 size_bits, u32 millisec) { struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req; u32 i; hba->msg_done = 0; reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST); mv_inbound_write(hba->u.mv.internal_req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba); for (i = 0; i < millisec; i++) { iop_intr_mv(hba); if (hba->msg_done) return 0; msleep(1); } return -1; } static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba, u32 size_bits, u32 millisec) { struct hpt_iop_request_header *reqhdr = hba->u.mvfrey.internal_req.req_virt; u32 i; hba->msg_done = 0; reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST); hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req)); for (i = 0; i < millisec; i++) { iop_intr_mvfrey(hba); if (hba->msg_done) break; msleep(1); } return hba->msg_done ? 0 : -1; } static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg) { writel(msg, &hba->u.itl.iop->inbound_msgaddr0); readl(&hba->u.itl.iop->outbound_intstatus); } static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg) { writel(msg, &hba->u.mv.mu->inbound_msg); writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell); readl(&hba->u.mv.regs->inbound_doorbell); } static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg) { writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a)); readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a)); } static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec) { u32 i; hba->msg_done = 0; hba->ops->disable_intr(hba); hba->ops->post_msg(hba, msg); for (i = 0; i < millisec; i++) { spin_lock_irq(hba->host->host_lock); hba->ops->iop_intr(hba); spin_unlock_irq(hba->host->host_lock); if (hba->msg_done) break; msleep(1); } hba->ops->enable_intr(hba); return hba->msg_done? 
0 : -1; } static int iop_get_config_itl(struct hptiop_hba *hba, struct hpt_iop_request_get_config *config) { u32 req32; struct hpt_iop_request_get_config __iomem *req; req32 = readl(&hba->u.itl.iop->inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; req = (struct hpt_iop_request_get_config __iomem *) ((unsigned long)hba->u.itl.iop + req32); writel(0, &req->header.flags); writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type); writel(sizeof(struct hpt_iop_request_get_config), &req->header.size); writel(IOP_RESULT_PENDING, &req->header.result); if (iop_send_sync_request_itl(hba, req, 20000)) { dprintk("Get config send cmd failed\n"); return -1; } memcpy_fromio(config, req, sizeof(*config)); writel(req32, &hba->u.itl.iop->outbound_queue); return 0; } static int iop_get_config_mv(struct hptiop_hba *hba, struct hpt_iop_request_get_config *config) { struct hpt_iop_request_get_config *req = hba->u.mv.internal_req; req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG); req->header.size = cpu_to_le32(sizeof(struct hpt_iop_request_get_config)); req->header.result = cpu_to_le32(IOP_RESULT_PENDING); req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5); req->header.context_hi32 = 0; if (iop_send_sync_request_mv(hba, 0, 20000)) { dprintk("Get config send cmd failed\n"); return -1; } memcpy(config, req, sizeof(struct hpt_iop_request_get_config)); return 0; } static int iop_get_config_mvfrey(struct hptiop_hba *hba, struct hpt_iop_request_get_config *config) { struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; if (info->header.size != sizeof(struct hpt_iop_request_get_config) || info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) return -1; config->interface_version = info->interface_version; config->firmware_version = info->firmware_version; config->max_requests = info->max_requests; config->request_size = info->request_size; config->max_sg_count = info->max_sg_count; config->data_transfer_length = info->data_transfer_length; config->alignment_mask = info->alignment_mask; config->max_devices = info->max_devices; config->sdram_size = info->sdram_size; return 0; } static int iop_set_config_itl(struct hptiop_hba *hba, struct hpt_iop_request_set_config *config) { u32 req32; struct hpt_iop_request_set_config __iomem *req; req32 = readl(&hba->u.itl.iop->inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; req = (struct hpt_iop_request_set_config __iomem *) ((unsigned long)hba->u.itl.iop + req32); memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header), (u8 *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); writel(0, &req->header.flags); writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type); writel(sizeof(struct hpt_iop_request_set_config), &req->header.size); writel(IOP_RESULT_PENDING, &req->header.result); if (iop_send_sync_request_itl(hba, req, 20000)) { dprintk("Set config send cmd failed\n"); return -1; } writel(req32, &hba->u.itl.iop->outbound_queue); return 0; } static int iop_set_config_mv(struct hptiop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req = hba->u.mv.internal_req; memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); req->header.size = cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); req->header.result 
= cpu_to_le32(IOP_RESULT_PENDING); req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); req->header.context_hi32 = 0; if (iop_send_sync_request_mv(hba, 0, 20000)) { dprintk("Set config send cmd failed\n"); return -1; } return 0; } static int iop_set_config_mvfrey(struct hptiop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req = hba->u.mvfrey.internal_req.req_virt; memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); req->header.size = cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); req->header.result = cpu_to_le32(IOP_RESULT_PENDING); req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); req->header.context_hi32 = 0; if (iop_send_sync_request_mvfrey(hba, 0, 20000)) { dprintk("Set config send cmd failed\n"); return -1; } return 0; } static void hptiop_enable_intr_itl(struct hptiop_hba *hba) { writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0), &hba->u.itl.iop->outbound_intmask); } static void hptiop_enable_intr_mv(struct hptiop_hba *hba) { writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG, &hba->u.mv.regs->outbound_intmask); } static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba) { writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable)); writel(0x1, &(hba->u.mvfrey.mu->isr_enable)); writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); } static int hptiop_initialize_iop(struct hptiop_hba *hba) { /* enable interrupts */ hba->ops->enable_intr(hba); hba->initialized = 1; /* start background tasks */ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { printk(KERN_ERR "scsi%d: fail to start background task\n", hba->host->host_no); return -1; } return 0; } static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index) { u32 mem_base_phy, length; void __iomem *mem_base_virt; struct pci_dev *pcidev = hba->pcidev; if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) { printk(KERN_ERR "scsi%d: pci resource invalid\n", hba->host->host_no); return NULL; } mem_base_phy = pci_resource_start(pcidev, index); length = pci_resource_len(pcidev, index); mem_base_virt = ioremap(mem_base_phy, length); if (!mem_base_virt) { printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", hba->host->host_no); return NULL; } return mem_base_virt; } static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba) { struct pci_dev *pcidev = hba->pcidev; hba->u.itl.iop = hptiop_map_pci_bar(hba, 0); if (hba->u.itl.iop == NULL) return -1; if ((pcidev->device & 0xff00) == 0x4400) { hba->u.itl.plx = hba->u.itl.iop; hba->u.itl.iop = hptiop_map_pci_bar(hba, 2); if (hba->u.itl.iop == NULL) { iounmap(hba->u.itl.plx); return -1; } } return 0; } static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba) { if (hba->u.itl.plx) iounmap(hba->u.itl.plx); iounmap(hba->u.itl.iop); } static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba) { hba->u.mv.regs = hptiop_map_pci_bar(hba, 0); if (hba->u.mv.regs == NULL) return -1; hba->u.mv.mu = hptiop_map_pci_bar(hba, 2); if (hba->u.mv.mu == NULL) { iounmap(hba->u.mv.regs); return -1; } return 0; } static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba) { hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0); if (hba->u.mvfrey.config == NULL) return -1; hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2); if (hba->u.mvfrey.mu == NULL) { iounmap(hba->u.mvfrey.config); return -1; } return 0; } 
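/*
 * Summary of the PCI BAR usage implemented by the map_pci_bar helpers above:
 *
 *   ITL:    BAR0 is the IOP register window.  On 0x44xx devices BAR0 maps
 *           the PLX bridge instead (kept in u.itl.plx) and the IOP lives
 *           behind BAR2.
 *   MV:     BAR0 maps the controller registers, BAR2 the messaging unit.
 *   MVFrey: BAR0 maps the firmware's get_config structure, BAR2 the
 *           messaging unit.
 *
 * Every region must be memory mapped (IORESOURCE_MEM); hptiop_map_pci_bar()
 * rejects anything else before calling ioremap().
 */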
static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba) { iounmap(hba->u.mv.regs); iounmap(hba->u.mv.mu); } static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba) { iounmap(hba->u.mvfrey.config); iounmap(hba->u.mvfrey.mu); } static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg) { dprintk("iop message 0x%x\n", msg); if (msg == IOPMU_INBOUND_MSG0_NOP || msg == IOPMU_INBOUND_MSG0_RESET_COMM) hba->msg_done = 1; if (!hba->initialized) return; if (msg == IOPMU_INBOUND_MSG0_RESET) { atomic_set(&hba->resetting, 0); wake_up(&hba->reset_wq); } else if (msg <= IOPMU_INBOUND_MSG0_MAX) hba->msg_done = 1; } static struct hptiop_request *get_req(struct hptiop_hba *hba) { struct hptiop_request *ret; dprintk("get_req : req=%p\n", hba->req_list); ret = hba->req_list; if (ret) hba->req_list = ret->next; return ret; } static void free_req(struct hptiop_hba *hba, struct hptiop_request *req) { dprintk("free_req(%d, %p)\n", req->index, req); req->next = hba->req_list; hba->req_list = req; } static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, struct hpt_iop_request_scsi_command *req) { struct scsi_cmnd *scp; dprintk("hptiop_finish_scsi_req: req=%p, type=%d, " "result=%d, context=0x%x tag=%d\n", req, req->header.type, req->header.result, req->header.context, tag); BUG_ON(!req->header.result); BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND)); scp = hba->reqs[tag].scp; if (HPT_SCP(scp)->mapped) scsi_dma_unmap(scp); switch (le32_to_cpu(req->header.result)) { case IOP_RESULT_SUCCESS: scsi_set_resid(scp, scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); scp->result = (DID_OK<<16); break; case IOP_RESULT_BAD_TARGET: scp->result = (DID_BAD_TARGET<<16); break; case IOP_RESULT_BUSY: scp->result = (DID_BUS_BUSY<<16); break; case IOP_RESULT_RESET: scp->result = (DID_RESET<<16); break; case IOP_RESULT_FAIL: scp->result = (DID_ERROR<<16); break; case IOP_RESULT_INVALID_REQUEST: scp->result = (DID_ABORT<<16); break; case IOP_RESULT_CHECK_CONDITION: scsi_set_resid(scp, scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); scp->result = SAM_STAT_CHECK_CONDITION; memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE); goto skip_resid; default: scp->result = DID_ABORT << 16; break; } scsi_set_resid(scp, scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); skip_resid: dprintk("scsi_done(%p)\n", scp); scsi_done(scp); free_req(hba, &hba->reqs[tag]); } static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag) { struct hpt_iop_request_scsi_command *req; u32 tag; if (hba->iopintf_v2) { tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT; req = hba->reqs[tag].req_virt; if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT)) req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); } else { tag = _tag; req = hba->reqs[tag].req_virt; } hptiop_finish_scsi_req(hba, tag, req); } static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag) { struct hpt_iop_request_header __iomem *req; struct hpt_iop_request_ioctl_command __iomem *p; struct hpt_ioctl_k *arg; req = (struct hpt_iop_request_header __iomem *) ((unsigned long)hba->u.itl.iop + tag); dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, " "result=%d, context=0x%x tag=%d\n", req, readl(&req->type), readl(&req->result), readl(&req->context), tag); BUG_ON(!readl(&req->result)); BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND); p = (struct hpt_iop_request_ioctl_command __iomem *)req; arg = (struct hpt_ioctl_k *)(unsigned long) (readl(&req->context) | 
((u64)readl(&req->context_hi32)<<32)); if (readl(&req->result) == IOP_RESULT_SUCCESS) { arg->result = HPT_IOCTL_RESULT_OK; if (arg->outbuf_size) memcpy_fromio(arg->outbuf, &p->buf[(readl(&p->inbuf_size) + 3)& ~3], arg->outbuf_size); if (arg->bytes_returned) *arg->bytes_returned = arg->outbuf_size; } else arg->result = HPT_IOCTL_RESULT_FAILED; arg->done(arg); writel(tag, &hba->u.itl.iop->outbound_queue); } static irqreturn_t hptiop_intr(int irq, void *dev_id) { struct hptiop_hba *hba = dev_id; int handled; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); handled = hba->ops->iop_intr(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); return handled; } static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg) { struct Scsi_Host *host = scp->device->host; struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; struct scatterlist *sg; int idx, nseg; nseg = scsi_dma_map(scp); BUG_ON(nseg < 0); if (!nseg) return 0; HPT_SCP(scp)->sgcnt = nseg; HPT_SCP(scp)->mapped = 1; BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors); scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) { psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) | hba->ops->host_phy_flag; psg[idx].size = cpu_to_le32(sg_dma_len(sg)); psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ? cpu_to_le32(1) : 0; } return HPT_SCP(scp)->sgcnt; } static void hptiop_post_req_itl(struct hptiop_hba *hba, struct hptiop_request *_req) { struct hpt_iop_request_header *reqhdr = _req->req_virt; reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | (u32)_req->index); reqhdr->context_hi32 = 0; if (hba->iopintf_v2) { u32 size, size_bits; size = le32_to_cpu(reqhdr->size); if (size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; writel(_req->req_shifted_phy | size_bits, &hba->u.itl.iop->inbound_queue); } else writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT, &hba->u.itl.iop->inbound_queue); } static void hptiop_post_req_mv(struct hptiop_hba *hba, struct hptiop_request *_req) { struct hpt_iop_request_header *reqhdr = _req->req_virt; u32 size, size_bit; reqhdr->context = cpu_to_le32(_req->index<<8 | IOP_REQUEST_TYPE_SCSI_COMMAND<<5); reqhdr->context_hi32 = 0; size = le32_to_cpu(reqhdr->size); if (size <= 256) size_bit = 0; else if (size <= 256*2) size_bit = 1; else if (size <= 256*3) size_bit = 2; else size_bit = 3; mv_inbound_write((_req->req_shifted_phy << 5) | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); } static void hptiop_post_req_mvfrey(struct hptiop_hba *hba, struct hptiop_request *_req) { struct hpt_iop_request_header *reqhdr = _req->req_virt; u32 index; reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((_req->req_shifted_phy >> 11) & 0xffff0000)); reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | (_req->index << 4) | reqhdr->type); reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) & 0xffffffff); hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = (dma_addr_t)_req->req_shifted_phy << 5; hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; writel(hba->u.mvfrey.inlist_wptr, &(hba->u.mvfrey.mu->inbound_write_ptr)); readl(&(hba->u.mvfrey.mu->inbound_write_ptr)); } static int 
hptiop_reset_comm_itl(struct hptiop_hba *hba) { return 0; } static int hptiop_reset_comm_mv(struct hptiop_hba *hba) { return 0; } static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba) { u32 list_count = hba->u.mvfrey.list_count; if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) return -1; /* wait 100ms for MCU ready */ msleep(100); writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff), &(hba->u.mvfrey.mu->inbound_base)); writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16), &(hba->u.mvfrey.mu->inbound_base_high)); writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff), &(hba->u.mvfrey.mu->outbound_base)); writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16), &(hba->u.mvfrey.mu->outbound_base_high)); writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff), &(hba->u.mvfrey.mu->outbound_shadow_base)); writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16), &(hba->u.mvfrey.mu->outbound_shadow_base_high)); hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE; *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE; hba->u.mvfrey.outlist_rptr = list_count - 1; return 0; } static int hptiop_queuecommand_lck(struct scsi_cmnd *scp) { struct Scsi_Host *host = scp->device->host; struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; struct hpt_iop_request_scsi_command *req; int sg_count = 0; struct hptiop_request *_req; _req = get_req(hba); if (_req == NULL) { dprintk("hptiop_queuecmd : no free req\n"); return SCSI_MLQUEUE_HOST_BUSY; } _req->scp = scp; dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) " "req_index=%d, req=%p\n", scp, host->host_no, scp->device->channel, scp->device->id, scp->device->lun, cpu_to_be32(((u32 *)scp->cmnd)[0]), cpu_to_be32(((u32 *)scp->cmnd)[1]), cpu_to_be32(((u32 *)scp->cmnd)[2]), cpu_to_be32(((u32 *)scp->cmnd)[3]), _req->index, _req->req_virt); scp->result = 0; if (scp->device->channel || (scp->device->id > hba->max_devices) || ((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) { scp->result = DID_BAD_TARGET << 16; free_req(hba, _req); goto cmd_done; } req = _req->req_virt; /* build S/G table */ sg_count = hptiop_buildsgl(scp, req->sg_list); if (!sg_count) HPT_SCP(scp)->mapped = 0; req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND); req->header.result = cpu_to_le32(IOP_RESULT_PENDING); req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp)); req->channel = scp->device->channel; req->target = scp->device->id; req->lun = scp->device->lun; req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count)); memcpy(req->cdb, scp->cmnd, sizeof(req->cdb)); hba->ops->post_req(hba, _req); return 0; cmd_done: dprintk("scsi_done(scp=%p)\n", scp); scsi_done(scp); return 0; } static DEF_SCSI_QCMD(hptiop_queuecommand) static const char *hptiop_info(struct Scsi_Host *host) { return driver_name_long; } static int hptiop_reset_hba(struct hptiop_hba *hba) { if (atomic_xchg(&hba->resetting, 1) == 0) { atomic_inc(&hba->reset_count); hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET); } wait_event_timeout(hba->reset_wq, atomic_read(&hba->resetting) == 0, 60 * HZ); if (atomic_read(&hba->resetting)) { /* IOP is in unknown state, abort reset */ printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); return -1; } if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { dprintk("scsi%d: fail to start background task\n", hba->host->host_no); } return 0; } static int 
hptiop_reset(struct scsi_cmnd *scp) { struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata; printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n", scp->device->host->host_no, -1, -1); return hptiop_reset_hba(hba)? FAILED : SUCCESS; } static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) { struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; if (queue_depth > hba->max_requests) queue_depth = hba->max_requests; return scsi_change_queue_depth(sdev, queue_depth); } static ssize_t hptiop_show_version(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); } static ssize_t hptiop_show_fw_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", hba->firmware_version >> 24, (hba->firmware_version >> 16) & 0xff, (hba->firmware_version >> 8) & 0xff, hba->firmware_version & 0xff); } static struct device_attribute hptiop_attr_version = { .attr = { .name = "driver-version", .mode = S_IRUGO, }, .show = hptiop_show_version, }; static struct device_attribute hptiop_attr_fw_version = { .attr = { .name = "firmware-version", .mode = S_IRUGO, }, .show = hptiop_show_fw_version, }; static struct attribute *hptiop_host_attrs[] = { &hptiop_attr_version.attr, &hptiop_attr_fw_version.attr, NULL }; ATTRIBUTE_GROUPS(hptiop_host); static int hptiop_slave_config(struct scsi_device *sdev) { if (sdev->type == TYPE_TAPE) blk_queue_max_hw_sectors(sdev->request_queue, 8192); return 0; } static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = driver_name, .queuecommand = hptiop_queuecommand, .eh_host_reset_handler = hptiop_reset, .info = hptiop_info, .emulated = 0, .proc_name = driver_name, .shost_groups = hptiop_host_groups, .slave_configure = hptiop_slave_config, .this_id = -1, .change_queue_depth = hptiop_adjust_disk_queue_depth, .cmd_size = sizeof(struct hpt_cmd_priv), }; static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba) { return 0; } static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba) { hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev, 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL); if (hba->u.mv.internal_req) return 0; else return -1; } static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba) { u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl); char *p; dma_addr_t phy; BUG_ON(hba->max_request_size == 0); if (list_count == 0) { BUG_ON(1); return -1; } list_count >>= 16; hba->u.mvfrey.list_count = list_count; hba->u.mvfrey.internal_mem_size = 0x800 + list_count * sizeof(struct mvfrey_inlist_entry) + list_count * sizeof(struct mvfrey_outlist_entry) + sizeof(int); p = dma_alloc_coherent(&hba->pcidev->dev, hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL); if (!p) return -1; hba->u.mvfrey.internal_req.req_virt = p; hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5; hba->u.mvfrey.internal_req.scp = NULL; hba->u.mvfrey.internal_req.next = NULL; p += 0x800; phy += 0x800; hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; hba->u.mvfrey.inlist_phy = phy; p += list_count * sizeof(struct mvfrey_inlist_entry); phy += list_count * sizeof(struct mvfrey_inlist_entry); hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; hba->u.mvfrey.outlist_phy = phy; p += list_count * sizeof(struct mvfrey_outlist_entry); phy += list_count * sizeof(struct 
mvfrey_outlist_entry); hba->u.mvfrey.outlist_cptr = (__le32 *)p; hba->u.mvfrey.outlist_cptr_phy = phy; return 0; } static int hptiop_internal_memfree_itl(struct hptiop_hba *hba) { return 0; } static int hptiop_internal_memfree_mv(struct hptiop_hba *hba) { if (hba->u.mv.internal_req) { dma_free_coherent(&hba->pcidev->dev, 0x800, hba->u.mv.internal_req, hba->u.mv.internal_req_phy); return 0; } else return -1; } static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba) { if (hba->u.mvfrey.internal_req.req_virt) { dma_free_coherent(&hba->pcidev->dev, hba->u.mvfrey.internal_mem_size, hba->u.mvfrey.internal_req.req_virt, (dma_addr_t) hba->u.mvfrey.internal_req.req_shifted_phy << 5); return 0; } else return -1; } static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct Scsi_Host *host = NULL; struct hptiop_hba *hba; struct hptiop_adapter_ops *iop_ops; struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; dma_addr_t start_phy; void *start_virt; u32 offset, i, req_size; int rc; dprintk("hptiop_probe(%p)\n", pcidev); if (pci_enable_device(pcidev)) { printk(KERN_ERR "hptiop: fail to enable pci device\n"); return -ENODEV; } printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n", pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7, pcidev->irq); pci_set_master(pcidev); /* Enable 64bit DMA if possible */ iop_ops = (struct hptiop_adapter_ops *)id->driver_data; rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)); if (rc) rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)); if (rc) { printk(KERN_ERR "hptiop: fail to set dma_mask\n"); goto disable_pci_device; } if (pci_request_regions(pcidev, driver_name)) { printk(KERN_ERR "hptiop: pci_request_regions failed\n"); goto disable_pci_device; } host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba)); if (!host) { printk(KERN_ERR "hptiop: fail to alloc scsi host\n"); goto free_pci_regions; } hba = (struct hptiop_hba *)host->hostdata; memset(hba, 0, sizeof(struct hptiop_hba)); hba->ops = iop_ops; hba->pcidev = pcidev; hba->host = host; hba->initialized = 0; hba->iopintf_v2 = 0; atomic_set(&hba->resetting, 0); atomic_set(&hba->reset_count, 0); init_waitqueue_head(&hba->reset_wq); init_waitqueue_head(&hba->ioctl_wq); host->max_lun = 128; host->max_channel = 0; host->io_port = 0; host->n_io_port = 0; host->irq = pcidev->irq; if (hba->ops->map_pci_bar(hba)) goto free_scsi_host; if (hba->ops->iop_wait_ready(hba, 20000)) { printk(KERN_ERR "scsi%d: firmware not ready\n", hba->host->host_no); goto unmap_pci_bar; } if (hba->ops->family == MV_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { printk(KERN_ERR "scsi%d: internal_memalloc failed\n", hba->host->host_no); goto unmap_pci_bar; } } if (hba->ops->get_config(hba, &iop_config)) { printk(KERN_ERR "scsi%d: get config failed\n", hba->host->host_no); goto unmap_pci_bar; } hba->max_requests = min(le32_to_cpu(iop_config.max_requests), HPTIOP_MAX_REQUESTS); hba->max_devices = le32_to_cpu(iop_config.max_devices); hba->max_request_size = le32_to_cpu(iop_config.request_size); hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); hba->firmware_version = le32_to_cpu(iop_config.firmware_version); hba->interface_version = le32_to_cpu(iop_config.interface_version); hba->sdram_size = le32_to_cpu(iop_config.sdram_size); if (hba->ops->family == MVFREY_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { printk(KERN_ERR "scsi%d: internal_memalloc failed\n", hba->host->host_no); goto unmap_pci_bar; } if 
(hba->ops->reset_comm(hba)) { printk(KERN_ERR "scsi%d: reset comm failed\n", hba->host->host_no); goto unmap_pci_bar; } } if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) hba->iopintf_v2 = 1; host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9; host->max_id = le32_to_cpu(iop_config.max_devices); host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count); host->can_queue = le32_to_cpu(iop_config.max_requests); host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); host->max_cmd_len = 16; req_size = struct_size_t(struct hpt_iop_request_scsi_command, sg_list, hba->max_sg_descriptors); if ((req_size & 0x1f) != 0) req_size = (req_size + 0x1f) & ~0x1f; memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config)); set_config.iop_id = cpu_to_le32(host->host_no); set_config.vbus_id = cpu_to_le16(host->host_no); set_config.max_host_request_size = cpu_to_le16(req_size); if (hba->ops->set_config(hba, &set_config)) { printk(KERN_ERR "scsi%d: set config failed\n", hba->host->host_no); goto unmap_pci_bar; } pci_set_drvdata(pcidev, host); if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, driver_name, hba)) { printk(KERN_ERR "scsi%d: request irq %d failed\n", hba->host->host_no, pcidev->irq); goto unmap_pci_bar; } /* Allocate request mem */ dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); hba->req_size = req_size; hba->req_list = NULL; for (i = 0; i < hba->max_requests; i++) { start_virt = dma_alloc_coherent(&pcidev->dev, hba->req_size + 0x20, &start_phy, GFP_KERNEL); if (!start_virt) { printk(KERN_ERR "scsi%d: fail to alloc request mem\n", hba->host->host_no); goto free_request_mem; } hba->dma_coherent[i] = start_virt; hba->dma_coherent_handle[i] = start_phy; if ((start_phy & 0x1f) != 0) { offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; start_phy += offset; start_virt += offset; } hba->reqs[i].next = NULL; hba->reqs[i].req_virt = start_virt; hba->reqs[i].req_shifted_phy = start_phy >> 5; hba->reqs[i].index = i; free_req(hba, &hba->reqs[i]); } /* Enable Interrupt and start background task */ if (hptiop_initialize_iop(hba)) goto free_request_mem; if (scsi_add_host(host, &pcidev->dev)) { printk(KERN_ERR "scsi%d: scsi_add_host failed\n", hba->host->host_no); goto free_request_mem; } scsi_scan_host(host); dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); return 0; free_request_mem: for (i = 0; i < hba->max_requests; i++) { if (hba->dma_coherent[i] && hba->dma_coherent_handle[i]) dma_free_coherent(&hba->pcidev->dev, hba->req_size + 0x20, hba->dma_coherent[i], hba->dma_coherent_handle[i]); else break; } free_irq(hba->pcidev->irq, hba); unmap_pci_bar: hba->ops->internal_memfree(hba); hba->ops->unmap_pci_bar(hba); free_scsi_host: scsi_host_put(host); free_pci_regions: pci_release_regions(pcidev); disable_pci_device: pci_disable_device(pcidev); dprintk("scsi%d: hptiop_probe fail\n", host ? 
host->host_no : 0); return -ENODEV; } static void hptiop_shutdown(struct pci_dev *pcidev) { struct Scsi_Host *host = pci_get_drvdata(pcidev); struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; dprintk("hptiop_shutdown(%p)\n", hba); /* stop the iop */ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) printk(KERN_ERR "scsi%d: shutdown the iop timeout\n", hba->host->host_no); /* disable all outbound interrupts */ hba->ops->disable_intr(hba); } static void hptiop_disable_intr_itl(struct hptiop_hba *hba) { u32 int_mask; int_mask = readl(&hba->u.itl.iop->outbound_intmask); writel(int_mask | IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE, &hba->u.itl.iop->outbound_intmask); readl(&hba->u.itl.iop->outbound_intmask); } static void hptiop_disable_intr_mv(struct hptiop_hba *hba) { writel(0, &hba->u.mv.regs->outbound_intmask); readl(&hba->u.mv.regs->outbound_intmask); } static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba) { writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable)); readl(&(hba->u.mvfrey.mu->f0_doorbell_enable)); writel(0, &(hba->u.mvfrey.mu->isr_enable)); readl(&(hba->u.mvfrey.mu->isr_enable)); writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable)); readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable)); } static void hptiop_remove(struct pci_dev *pcidev) { struct Scsi_Host *host = pci_get_drvdata(pcidev); struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; u32 i; dprintk("scsi%d: hptiop_remove\n", hba->host->host_no); scsi_remove_host(host); hptiop_shutdown(pcidev); free_irq(hba->pcidev->irq, hba); for (i = 0; i < hba->max_requests; i++) { if (hba->dma_coherent[i] && hba->dma_coherent_handle[i]) dma_free_coherent(&hba->pcidev->dev, hba->req_size + 0x20, hba->dma_coherent[i], hba->dma_coherent_handle[i]); else break; } hba->ops->internal_memfree(hba); hba->ops->unmap_pci_bar(hba); pci_release_regions(hba->pcidev); pci_set_drvdata(hba->pcidev, NULL); pci_disable_device(hba->pcidev); scsi_host_put(host); } static struct hptiop_adapter_ops hptiop_itl_ops = { .family = INTEL_BASED_IOP, .iop_wait_ready = iop_wait_ready_itl, .internal_memalloc = hptiop_internal_memalloc_itl, .internal_memfree = hptiop_internal_memfree_itl, .map_pci_bar = hptiop_map_pci_bar_itl, .unmap_pci_bar = hptiop_unmap_pci_bar_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = iop_get_config_itl, .set_config = iop_set_config_itl, .iop_intr = iop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, .hw_dma_bit_mask = 64, .reset_comm = hptiop_reset_comm_itl, .host_phy_flag = cpu_to_le64(0), }; static struct hptiop_adapter_ops hptiop_mv_ops = { .family = MV_BASED_IOP, .iop_wait_ready = iop_wait_ready_mv, .internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .map_pci_bar = hptiop_map_pci_bar_mv, .unmap_pci_bar = hptiop_unmap_pci_bar_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = iop_get_config_mv, .set_config = iop_set_config_mv, .iop_intr = iop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, .hw_dma_bit_mask = 33, .reset_comm = hptiop_reset_comm_mv, .host_phy_flag = cpu_to_le64(0), }; static struct hptiop_adapter_ops hptiop_mvfrey_ops = { .family = MVFREY_BASED_IOP, .iop_wait_ready = iop_wait_ready_mvfrey, .internal_memalloc = hptiop_internal_memalloc_mvfrey, .internal_memfree = hptiop_internal_memfree_mvfrey, .map_pci_bar = hptiop_map_pci_bar_mvfrey, .unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey, 
.enable_intr = hptiop_enable_intr_mvfrey, .disable_intr = hptiop_disable_intr_mvfrey, .get_config = iop_get_config_mvfrey, .set_config = iop_set_config_mvfrey, .iop_intr = iop_intr_mvfrey, .post_msg = hptiop_post_msg_mvfrey, .post_req = hptiop_post_req_mvfrey, .hw_dma_bit_mask = 64, .reset_comm = hptiop_reset_comm_mvfrey, .host_phy_flag = cpu_to_le64(1), }; static struct pci_device_id hptiop_id_table[] = { { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops }, { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops }, { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops }, { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops }, { PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops }, {}, }; MODULE_DEVICE_TABLE(pci, hptiop_id_table); static struct pci_driver hptiop_pci_driver = { .name = driver_name, .id_table = hptiop_id_table, .probe = hptiop_probe, .remove = hptiop_remove, .shutdown = hptiop_shutdown, }; static int __init hptiop_module_init(void) { printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); return pci_register_driver(&hptiop_pci_driver); } static void __exit hptiop_module_exit(void) { pci_unregister_driver(&hptiop_pci_driver); } module_init(hptiop_module_init); module_exit(hptiop_module_exit); MODULE_LICENSE("GPL");
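/*
 * Summary of the request posting paths implemented in this file (see the
 * hptiop_post_req_* helpers):
 *
 *   ITL:    the request's physical address, pre-shifted right by 5, is
 *           written to the inbound_queue register tagged with
 *           IOPMU_QUEUE_ADDR_HOST_BIT; interface v2 firmware instead uses
 *           IOPMU_QUEUE_REQUEST_SIZE_BIT / IOPMU_QUEUE_ADDR_HOST_BIT as a
 *           coarse request-size code (<256, <512, >=512 bytes).
 *   MV:     the 32-byte-aligned physical address is OR'ed with
 *           MVIOP_MU_QUEUE_ADDR_HOST_BIT and a 2-bit size code (256-byte
 *           steps) and posted through mv_inbound_write().
 *   MVFrey: requests go through a host-resident inbound list; each entry
 *           carries the aligned address and the length in dwords, and the
 *           updated write pointer (with CL_POINTER_TOGGLE tracking wrap)
 *           is written to inbound_write_ptr.
 */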
linux-master
drivers/scsi/hptiop.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * History: * Started: Aug 9 by Lawrence Foard ([email protected]), * to allow user process control of SCSI devices. * Development Sponsored by Killy Corp. NY NY * * Original driver (sg.c): * Copyright (C) 1992 Lawrence Foard * Version 2 and 3 extensions to driver: * Copyright (C) 1998 - 2014 Douglas Gilbert */ static int sg_version_num = 30536; /* 2 digits for each component */ #define SG_VERSION_STR "3.5.36" /* * D. P. Gilbert ([email protected]), notes: * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First * the kernel/module needs to be built with CONFIG_SCSI_LOGGING * (otherwise the macros compile to empty statements). * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/mtio.h> #include <linux/ioctl.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/moduleparam.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/blktrace_api.h> #include <linux/mutex.h> #include <linux/atomic.h> #include <linux/ratelimit.h> #include <linux/uio.h> #include <linux/cred.h> /* for sg_check_file_access() */ #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_tcq.h> #include <scsi/sg.h> #include "scsi_logging.h" #ifdef CONFIG_SCSI_PROC_FS #include <linux/proc_fs.h> static char *sg_version_date = "20140603"; static int sg_proc_init(void); #endif #define SG_ALLOW_DIO_DEF 0 #define SG_MAX_DEVS (1 << MINORBITS) /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater * than 16 bytes are "variable length" whose length is a multiple of 4 */ #define SG_MAX_CDB_SIZE 252 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) static int sg_big_buff = SG_DEF_RESERVED_SIZE; /* N.B. This variable is readable and writeable via /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer of this size (or less if there is not enough memory) will be reserved for use by this file descriptor. [Deprecated usage: this variable is also readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into the kernel (i.e. it is not a module).] 
*/ static int def_reserved_size = -1; /* picks up init parameter */ static int sg_allow_dio = SG_ALLOW_DIO_DEF; static int scatter_elem_sz = SG_SCATTER_SZ; static int scatter_elem_sz_prev = SG_SCATTER_SZ; #define SG_SECTOR_SZ 512 static int sg_add_device(struct device *); static void sg_remove_device(struct device *); static DEFINE_IDR(sg_index_idr); static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock file descriptor list for device */ static struct class_interface sg_interface = { .add_dev = sg_add_device, .remove_dev = sg_remove_device, }; typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ unsigned bufflen; /* Size of (aggregate) data buffer */ struct page **pages; int page_order; char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ unsigned char cmd_opcode; /* first byte of command */ } Sg_scatter_hold; struct sg_device; /* forward declarations */ struct sg_fd; typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ struct list_head entry; /* list entry */ struct sg_fd *parentfp; /* NULL -> not in use */ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ char orphan; /* 1 -> drop on sight, 0 -> normal */ char sg_io_owned; /* 1 -> packet belongs to SG_IO */ /* done protected by rq_list_lock */ char done; /* 0->before bh, 1->before read, 2->read */ struct request *rq; struct bio *bio; struct execute_work ew; } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ struct list_head sfd_siblings; /* protected by device's sfd_lock */ struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ struct mutex f_mutex; /* protect against changes in this fd */ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ Sg_scatter_hold reserve; /* buffer held for this file descriptor */ struct list_head rq_list; /* head of request list */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ char res_in_use; /* 1 -> 'reserve' array in use */ struct kref f_ref; struct execute_work ew; } Sg_fd; typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ struct mutex open_rel_lock; /* held when in open() or release() */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ struct list_head sfds; rwlock_t sfd_lock; /* protect access to sfd list */ atomic_t detaching; /* 0->device usable, 1->device detaching */ bool exclude; /* 1->open(O_EXCL) succeeded and is active */ int open_cnt; /* count of opens (perhaps < num(sfds) ) */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 
char name[DISK_NAME_LEN]; struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ struct kref d_ref; } Sg_device; /* tasklet or soft irq callback */ static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status); static int sg_start_req(Sg_request *srp, unsigned char *cmd); static int sg_finish_rem_req(Sg_request * srp); static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp); static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp); static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking); static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp); static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static Sg_device *sg_get_dev(int dev); static void sg_device_destroy(struct kref *kref); #define SZ_SG_HEADER sizeof(struct sg_header) #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) #define SZ_SG_IOVEC sizeof(sg_iovec_t) #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) #define sg_printk(prefix, sdp, fmt, a...) \ sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a) /* * The SCSI interfaces that use read() and write() as an asynchronous variant of * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways * to trigger read() and write() calls from various contexts with elevated * privileges. This can lead to kernel memory corruption (e.g. if these * interfaces are called through splice()) and privilege escalation inside * userspace (e.g. if a process with access to such a device passes a file * descriptor to a SUID binary as stdin/stdout/stderr). * * This function provides protection for the legacy API by restricting the * calling context. 
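 *
 * Concretely, the check below compares the credentials the file was opened
 * with (filp->f_cred) against the caller's current real credentials and
 * fails the request with -EPERM if they differ.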
*/ static int sg_check_file_access(struct file *filp, const char *caller) { if (filp->f_cred != current_real_cred()) { pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", caller, task_tgid_vnr(current), current->comm); return -EPERM; } return 0; } static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; if (!scsi_cmd_allowed(cmd, filp->f_mode & FMODE_WRITE)) return -EPERM; return 0; } static int open_wait(Sg_device *sdp, int flags) { int retval = 0; if (flags & O_EXCL) { while (sdp->open_cnt > 0) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->open_cnt)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } else { while (sdp->exclude) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->exclude)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } return retval; } /* Returns 0 on success, else a negated errno value */ static int sg_open(struct inode *inode, struct file *filp) { int dev = iminor(inode); int flags = filp->f_flags; struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; int retval; nonseekable_open(inode, filp); if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) return -EPERM; /* Can't lock it with read only access */ sdp = sg_get_dev(dev); if (IS_ERR(sdp)) return PTR_ERR(sdp); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_open: flags=0x%x\n", flags)); /* This driver's module count bumped by fops_get in <linux/fs.h> */ /* Prevent the device driver from vanishing while we sleep */ retval = scsi_device_get(sdp->device); if (retval) goto sg_put; retval = scsi_autopm_get_device(sdp->device); if (retval) goto sdp_put; /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. */ if (!((flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) { retval = -ENXIO; /* we are in error recovery for this device */ goto error_out; } mutex_lock(&sdp->open_rel_lock); if (flags & O_NONBLOCK) { if (flags & O_EXCL) { if (sdp->open_cnt > 0) { retval = -EBUSY; goto error_mutex_locked; } } else { if (sdp->exclude) { retval = -EBUSY; goto error_mutex_locked; } } } else { retval = open_wait(sdp, flags); if (retval) /* -ERESTARTSYS or -ENODEV */ goto error_mutex_locked; } /* N.B. 
at this point we are holding the open_rel_lock */ if (flags & O_EXCL) sdp->exclude = true; if (sdp->open_cnt < 1) { /* no existing opens */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } sfp = sg_add_sfp(sdp); if (IS_ERR(sfp)) { retval = PTR_ERR(sfp); goto out_undo; } filp->private_data = sfp; sdp->open_cnt++; mutex_unlock(&sdp->open_rel_lock); retval = 0; sg_put: kref_put(&sdp->d_ref, sg_device_destroy); return retval; out_undo: if (flags & O_EXCL) { sdp->exclude = false; /* undo if error */ wake_up_interruptible(&sdp->open_wait); } error_mutex_locked: mutex_unlock(&sdp->open_rel_lock); error_out: scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); goto sg_put; } /* Release resources associated with a successful sg_open() * Returns 0 on success, else a negated errno value */ static int sg_release(struct inode *inode, struct file *filp) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); mutex_lock(&sdp->open_rel_lock); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; /* possibly many open()s waiting on exlude clearing, start many; * only open(O_EXCL)s wait on 0==open_cnt so only start one */ if (sdp->exclude) { sdp->exclude = false; wake_up_interruptible_all(&sdp->open_wait); } else if (0 == sdp->open_cnt) { wake_up_interruptible(&sdp->open_wait); } mutex_unlock(&sdp->open_rel_lock); return 0; } static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count) { struct sg_header __user *old_hdr = buf; int reply_len; if (count >= SZ_SG_HEADER) { /* negative reply_len means v3 format, otherwise v1/v2 */ if (get_user(reply_len, &old_hdr->reply_len)) return -EFAULT; if (reply_len >= 0) return get_user(*pack_id, &old_hdr->pack_id); if (in_compat_syscall() && count >= sizeof(struct compat_sg_io_hdr)) { struct compat_sg_io_hdr __user *hp = buf; return get_user(*pack_id, &hp->pack_id); } if (count >= sizeof(struct sg_io_hdr)) { struct sg_io_hdr __user *hp = buf; return get_user(*pack_id, &hp->pack_id); } } /* no valid header was passed, so ignore the pack_id */ *pack_id = -1; return 0; } static ssize_t sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int req_pack_id = -1; bool busy; sg_io_hdr_t *hp; struct sg_header *old_hdr; int retval; /* * This could cause a response to be stranded. Close the associated * file descriptor to free up any resources being held. */ retval = sg_check_file_access(filp, __func__); if (retval) return retval; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); if (sfp->force_packid) retval = get_sg_io_pack_id(&req_pack_id, buf, count); if (retval) return retval; srp = sg_get_rq_mark(sfp, req_pack_id, &busy); if (!srp) { /* now wait on packet to arrive */ if (filp->f_flags & O_NONBLOCK) return -EAGAIN; retval = wait_event_interruptible(sfp->read_wait, ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) || (!busy && atomic_read(&sdp->detaching)))); if (!srp) /* signal or detaching */ return retval ? 
retval : -ENODEV; } if (srp->header.interface_id != '\0') return sg_new_read(sfp, buf, count, srp); hp = &srp->header; old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL); if (!old_hdr) return -ENOMEM; old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; old_hdr->twelve_byte = ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; old_hdr->target_status = hp->masked_status; old_hdr->host_status = hp->host_status; old_hdr->driver_status = hp->driver_status; if ((CHECK_CONDITION & hp->masked_status) || (srp->sense_b[0] & 0x70) == 0x70) { old_hdr->driver_status = DRIVER_SENSE; memcpy(old_hdr->sense_buffer, srp->sense_b, sizeof (old_hdr->sense_buffer)); } switch (hp->host_status) { /* This setup of 'result' is for backward compatibility and is best ignored by the user who should use target, host + driver status */ case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: old_hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: old_hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: old_hdr->result = EIO; break; case DID_ERROR: old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; break; default: old_hdr->result = EIO; break; } /* Now copy the result back to the user buffer. */ if (count >= SZ_SG_HEADER) { if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } buf += SZ_SG_HEADER; if (count > old_hdr->reply_len) count = old_hdr->reply_len; if (count > SZ_SG_HEADER) { if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } } } else count = (old_hdr->result == 0) ? 0 : -EIO; sg_finish_rem_req(srp); sg_remove_request(sfp, srp); retval = count; free_old_hdr: kfree(old_hdr); return retval; } static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; int err = 0, err2; int len; if (in_compat_syscall()) { if (count < sizeof(struct compat_sg_io_hdr)) { err = -EINVAL; goto err_out; } } else if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } hp->sb_len_wr = 0; if ((hp->mx_sb_len > 0) && hp->sbp) { if ((CHECK_CONDITION & hp->masked_status) || (srp->sense_b[0] & 0x70) == 0x70) { int sb_len = SCSI_SENSE_BUFFERSIZE; sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ len = (len > sb_len) ? sb_len : len; if (copy_to_user(hp->sbp, srp->sense_b, len)) { err = -EFAULT; goto err_out; } hp->driver_status = DRIVER_SENSE; hp->sb_len_wr = len; } } if (hp->masked_status || hp->host_status || hp->driver_status) hp->info |= SG_INFO_CHECK; err = put_sg_io_hdr(hp, buf); err_out: err2 = sg_finish_rem_req(srp); sg_remove_request(sfp, srp); return err ? : err2 ? 
: count; } static ssize_t sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { int mxsize, cmd_size, k; int input_size, blocking; unsigned char opcode; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; int retval; retval = sg_check_file_access(filp, __func__); if (retval) return retval; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_write: count=%d\n", (int) count)); if (atomic_read(&sdp->detaching)) return -ENODEV; if (!((filp->f_flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) return -ENXIO; if (count < SZ_SG_HEADER) return -EIO; if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) return sg_new_write(sfp, filp, buf, count, blocking, 0, 0, NULL); if (count < (SZ_SG_HEADER + 6)) return -EIO; /* The minimum scsi command length is 6 bytes. */ buf += SZ_SG_HEADER; if (get_user(opcode, buf)) return -EFAULT; if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, "sg_write: queue full\n")); return -EDOM; } mutex_lock(&sfp->f_mutex); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() effected */ } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } mutex_unlock(&sfp->f_mutex); SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ input_size = count - cmd_size; mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; mxsize -= SZ_SG_HEADER; input_size -= SZ_SG_HEADER; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } hp = &srp->header; hp->interface_id = '\0'; /* indicator of old interface tunnelled */ hp->cmd_len = (unsigned char) cmd_size; hp->iovec_count = 0; hp->mx_sb_len = 0; if (input_size > 0) hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; else hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; hp->sbp = NULL; hp->timeout = old_hdr.reply_len; /* structure abuse ... */ hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; if (copy_from_user(cmnd, buf, cmd_size)) { sg_remove_request(sfp, srp); return -EFAULT; } /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? 
k : count; } static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp) { int k; Sg_request *srp; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; int timeout; unsigned long ul_timeout; if (count < SZ_SG_IO_HDR) return -EINVAL; sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_new_write: queue full\n")); return -EDOM; } srp->sg_io_owned = sg_io_owned; hp = &srp->header; if (get_sg_io_hdr(hp, buf)) { sg_remove_request(sfp, srp); return -EFAULT; } if (hp->interface_id != 'S') { sg_remove_request(sfp, srp); return -ENOSYS; } if (hp->flags & SG_FLAG_MMAP_IO) { if (hp->dxfer_len > sfp->reserve.bufflen) { sg_remove_request(sfp, srp); return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ } if (hp->flags & SG_FLAG_DIRECT_IO) { sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } if (sfp->res_in_use) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } } ul_timeout = msecs_to_jiffies(srp->header.timeout); timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX; if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { sg_remove_request(sfp, srp); return -EMSGSIZE; } if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } if (read_only && sg_allow_access(file, cmnd)) { sg_remove_request(sfp, srp); return -EPERM; } k = sg_common_write(sfp, srp, cmnd, timeout, blocking); if (k < 0) return k; if (o_srp) *o_srp = srp; return count; } static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, at_head; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); if (hp->dxfer_len >= SZ_256M) { sg_remove_request(sfp, srp); return -EINVAL; } k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); sg_remove_request(sfp, srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { if (srp->bio) { blk_mq_free_request(srp->rq); srp->rq = NULL; } sg_finish_rem_req(srp); sg_remove_request(sfp, srp); return -ENODEV; } hp->duration = jiffies_to_msecs(jiffies); if (hp->interface_id != '\0' && /* v3 (or later) interface */ (SG_FLAG_Q_AT_TAIL & hp->flags)) at_head = 0; else at_head = 1; srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
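 * The reference taken here keeps the sg_fd from being freed before the
 * completion handler has run, even if the file is closed in the meantime.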
*/ srp->rq->end_io = sg_rq_end_io; blk_execute_rq_nowait(srp->rq, at_head); return 0; } static int srp_done(Sg_fd *sfp, Sg_request *srp) { unsigned long flags; int ret; read_lock_irqsave(&sfp->rq_list_lock, flags); ret = srp->done; read_unlock_irqrestore(&sfp->rq_list_lock, flags); return ret; } static int max_sectors_bytes(struct request_queue *q) { unsigned int max_sectors = queue_max_sectors(q); max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); return max_sectors << 9; } static void sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) { Sg_request *srp; int val; unsigned int ms; val = 0; list_for_each_entry(srp, &sfp->rq_list, entry) { if (val >= SG_MAX_QUEUE) break; rinfo[val].req_state = srp->done + 1; rinfo[val].problem = srp->header.masked_status & srp->header.host_status & srp->header.driver_status; if (srp->done) rinfo[val].duration = srp->header.duration; else { ms = jiffies_to_msecs(jiffies); rinfo[val].duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; } rinfo[val].orphan = srp->orphan; rinfo[val].sg_io_owned = srp->sg_io_owned; rinfo[val].pack_id = srp->header.pack_id; rinfo[val].usr_ptr = srp->header.usr_ptr; val++; } } #ifdef CONFIG_COMPAT struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ char req_state; char orphan; char sg_io_owned; char problem; int pack_id; compat_uptr_t usr_ptr; unsigned int duration; int unused; }; static int put_compat_request_table(struct compat_sg_req_info __user *o, struct sg_req_info *rinfo) { int i; for (i = 0; i < SG_MAX_QUEUE; i++) { if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) || put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) || put_user(rinfo[i].duration, &o[i].duration) || put_user(rinfo[i].unused, &o[i].unused)) return -EFAULT; } return 0; } #endif static long sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, unsigned int cmd_in, void __user *p) { int __user *ip = p; int result, val, read_only; Sg_request *srp; unsigned long iflags; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); switch (cmd_in) { case SG_IO: if (atomic_read(&sdp->detaching)) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, srp_done(sfp, srp)); write_lock_irq(&sfp->rq_list_lock); if (srp->done) { srp->done = 2; write_unlock_irq(&sfp->rq_list_lock); result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); return (result < 0) ? result : 0; } srp->orphan = 1; write_unlock_irq(&sfp->rq_list_lock); return result; /* -ERESTARTSYS because signal hit process */ case SG_SET_TIMEOUT: result = get_user(val, ip); if (result) return result; if (val < 0) return -EIO; if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ)) val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ), INT_MAX); sfp->timeout_user = val; sfp->timeout = mult_frac(val, HZ, USER_HZ); return 0; case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ /* strange ..., for backward compatibility */ return sfp->timeout_user; case SG_SET_FORCE_LOW_DMA: /* * N.B. This ioctl never worked properly, but failed to * return an error value. So returning '0' to keep compability * with legacy applications. 
*/ return 0; case SG_GET_LOW_DMA: return put_user(0, ip); case SG_GET_SCSI_ID: { sg_scsi_id_t v; if (atomic_read(&sdp->detaching)) return -ENODEV; memset(&v, 0, sizeof(v)); v.host_no = sdp->device->host->host_no; v.channel = sdp->device->channel; v.scsi_id = sdp->device->id; v.lun = sdp->device->lun; v.scsi_type = sdp->device->type; v.h_cmd_per_lun = sdp->device->host->cmd_per_lun; v.d_queue_depth = sdp->device->queue_depth; if (copy_to_user(p, &v, sizeof(sg_scsi_id_t))) return -EFAULT; return 0; } case SG_SET_FORCE_PACK_ID: result = get_user(val, ip); if (result) return result; sfp->force_packid = val ? 1 : 0; return 0; case SG_GET_PACK_ID: read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(srp->header.pack_id, ip); } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(-1, ip); case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); val = 0; list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(val, ip); case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, ip); case SG_SET_RESERVED_SIZE: result = get_user(val, ip); if (result) return result; if (val < 0) return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); mutex_lock(&sfp->f_mutex); if (val != sfp->reserve.bufflen) { if (sfp->mmap_called || sfp->res_in_use) { mutex_unlock(&sfp->f_mutex); return -EBUSY; } sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } mutex_unlock(&sfp->f_mutex); return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, max_sectors_bytes(sdp->device->request_queue)); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); if (result) return result; sfp->cmd_q = val ? 1 : 0; return 0; case SG_GET_COMMAND_Q: return put_user((int) sfp->cmd_q, ip); case SG_SET_KEEP_ORPHAN: result = get_user(val, ip); if (result) return result; sfp->keep_orphan = val; return 0; case SG_GET_KEEP_ORPHAN: return put_user((int) sfp->keep_orphan, ip); case SG_NEXT_CMD_LEN: result = get_user(val, ip); if (result) return result; if (val > SG_MAX_CDB_SIZE) return -ENOMEM; sfp->next_cmd_len = (val > 0) ? val : 0; return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, ip); case SG_GET_ACCESS_COUNT: /* faked - we don't have a real access count anymore */ val = (sdp->device ? 1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: { sg_req_info_t *rinfo; rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO, GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); sg_fill_request_table(sfp, rinfo); read_unlock_irqrestore(&sfp->rq_list_lock, iflags); #ifdef CONFIG_COMPAT if (in_compat_syscall()) result = put_compat_request_table(p, rinfo); else #endif result = copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); return result; } case SG_EMULATED_HOST: if (atomic_read(&sdp->detaching)) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) return result; sdp->sgdebug = (char) val; return 0; case BLKSECTGET: return put_user(max_sectors_bytes(sdp->device->request_queue), ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, sdp->name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, p); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SCSI_IOCTL_PROBE_HOST: case SG_GET_TRANSFORM: case SG_SCSI_RESET: if (atomic_read(&sdp->detaching)) return -ENODEV; break; default: if (read_only) return -EPERM; /* don't know so take safe approach */ break; } result = scsi_ioctl_block_when_processing_errors(sdp->device, cmd_in, filp->f_flags & O_NDELAY); if (result) return result; return -ENOIOCTLCMD; } static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; Sg_device *sdp; Sg_fd *sfp; int ret; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); if (ret != -ENOIOCTLCMD) return ret; return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p); } static __poll_t sg_poll(struct file *filp, poll_table * wait) { __poll_t res = 0; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int count = 0; unsigned long iflags; sfp = filp->private_data; if (!sfp) return EPOLLERR; sdp = sfp->parentdp; if (!sdp) return EPOLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, entry) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) res = EPOLLIN | EPOLLRDNORM; ++count; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (atomic_read(&sdp->detaching)) res |= EPOLLHUP; else if (!sfp->cmd_q) { if (0 == count) res |= EPOLLOUT | EPOLLWRNORM; } else if (count < SG_MAX_QUEUE) res |= EPOLLOUT | EPOLLWRNORM; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_poll: res=0x%x\n", (__force u32) res)); return res; } static int sg_fasync(int fd, struct file *filp, int mode) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_fasync: mode=%d\n", mode)); return fasync_helper(fd, filp, mode, &sfp->async_qp); } static vm_fault_t sg_vma_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; Sg_fd *sfp; unsigned long offset, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) return VM_FAULT_SIGBUS; rsv_schp = &sfp->reserve; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rsv_schp->bufflen) return VM_FAULT_SIGBUS; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_vma_fault: offset=%lu, scatg=%d\n", offset, rsv_schp->k_use_sg)); sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = 
vma->vm_end - sa; len = (len < length) ? len : length; if (offset < len) { struct page *page = nth_page(rsv_schp->pages[k], offset >> PAGE_SHIFT); get_page(page); /* increment page count */ vmf->page = page; return 0; /* success */ } sa += len; offset -= len; } return VM_FAULT_SIGBUS; } static const struct vm_operations_struct sg_mmap_vm_ops = { .fault = sg_vma_fault, }; static int sg_mmap(struct file *filp, struct vm_area_struct *vma) { Sg_fd *sfp; unsigned long req_sz, len, sa; Sg_scatter_hold *rsv_schp; int k, length; int ret = 0; if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) return -ENXIO; req_sz = vma->vm_end - vma->vm_start; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_mmap starting, vm_start=%p, len=%d\n", (void *) vma->vm_start, (int) req_sz)); if (vma->vm_pgoff) return -EINVAL; /* want no offset */ rsv_schp = &sfp->reserve; mutex_lock(&sfp->f_mutex); if (req_sz > rsv_schp->bufflen) { ret = -ENOMEM; /* cannot map more than reserved buffer */ goto out; } sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; sa += len; } sfp->mmap_called = 1; vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; out: mutex_unlock(&sfp->f_mutex); return ret; } static void sg_rq_end_io_usercontext(struct work_struct *work) { struct sg_request *srp = container_of(work, struct sg_request, ew.work); struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); sg_remove_request(sfp, srp); kref_put(&sfp->f_ref, sg_remove_sfp); } /* * This function is a "bottom half" handler that is called by the mid * level when a command is completed (or has failed). */ static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); struct sg_request *srp = rq->end_io_data; Sg_device *sdp; Sg_fd *sfp; unsigned long iflags; unsigned int ms; char *sense; int result, resid, done = 1; if (WARN_ON(srp->done != 0)) return RQ_END_IO_NONE; sfp = srp->parentfp; if (WARN_ON(sfp == NULL)) return RQ_END_IO_NONE; sdp = sfp->parentdp; if (unlikely(atomic_read(&sdp->detaching))) pr_info("%s: device detaching\n", __func__); sense = scmd->sense_buffer; result = scmd->result; resid = scmd->resid_len; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_cmd_done: pack_id=%d, res=0x%x\n", srp->header.pack_id, result)); srp->header.resid = resid; ms = jiffies_to_msecs(jiffies); srp->header.duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; if (0 != result) { struct scsi_sense_hdr sshdr; srp->header.status = 0xff & result; srp->header.masked_status = sg_status_byte(result); srp->header.msg_status = COMMAND_COMPLETE; srp->header.host_status = host_byte(result); srp->header.driver_status = driver_byte(result); if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) __scsi_print_sense(sdp->device, __func__, sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ if (driver_byte(result) != 0 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) && !scsi_sense_is_deferred(&sshdr) && sshdr.sense_key == UNIT_ATTENTION && sdp->device->removable) { /* Detected possible disc change. 
Set the bit - this */ /* may be used if there are filesystems using this device */ sdp->device->changed = 1; } } if (scmd->sense_len) memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); /* Rely on write phase to clean out srp status values, so no "else" */ /* * Free the request as soon as it is complete so that its resources * can be reused without waiting for userspace to read() the * result. But keep the associated bio (if any) around until * blk_rq_unmap_user() can be called from user context. */ srp->rq = NULL; blk_mq_free_request(rq); write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. */ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } return RQ_END_IO_NONE; } static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = sg_open, .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, .llseek = no_llseek, }; static struct class *sg_sysfs_class; static int sg_sysfs_valid = 0; static Sg_device * sg_alloc(struct scsi_device *scsidp) { struct request_queue *q = scsidp->request_queue; Sg_device *sdp; unsigned long iflags; int error; u32 k; sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); if (!sdp) { sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " "failure\n", __func__); return ERR_PTR(-ENOMEM); } idr_preload(GFP_KERNEL); write_lock_irqsave(&sg_index_lock, iflags); error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); if (error < 0) { if (error == -ENOSPC) { sdev_printk(KERN_WARNING, scsidp, "Unable to attach sg device type=%d, minor number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); error = -ENODEV; } else { sdev_printk(KERN_WARNING, scsidp, "%s: idr " "allocation Sg_device failure: %d\n", __func__, error); } goto out_unlock; } k = error; SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, "sg_alloc: dev=%d \n", k)); sprintf(sdp->name, "sg%d", k); sdp->device = scsidp; mutex_init(&sdp->open_rel_lock); INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->open_wait); atomic_set(&sdp->detaching, 0); rwlock_init(&sdp->sfd_lock); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); error = 0; out_unlock: write_unlock_irqrestore(&sg_index_lock, iflags); idr_preload_end(); if (error) { kfree(sdp); return ERR_PTR(error); } return sdp; } static int sg_add_device(struct device *cl_dev) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = NULL; struct cdev * cdev = NULL; int error; unsigned long iflags; if (!blk_get_queue(scsidp->request_queue)) { pr_warn("%s: get scsi_device queue failed\n", __func__); return -ENODEV; } error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { pr_warn("%s: cdev_alloc failed\n", __func__); goto out; } cdev->owner = THIS_MODULE; cdev->ops = &sg_fops; sdp = sg_alloc(scsidp); if (IS_ERR(sdp)) { pr_warn("%s: sg_alloc failed\n", __func__); error = PTR_ERR(sdp); goto out; } error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); if (error) goto cdev_add_err; sdp->cdev = cdev; if (sg_sysfs_valid) { struct device *sg_class_member; 
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), sdp, "%s", sdp->name); if (IS_ERR(sg_class_member)) { pr_err("%s: device_create failed\n", __func__); error = PTR_ERR(sg_class_member); goto cdev_add_err; } error = sysfs_create_link(&scsidp->sdev_gendev.kobj, &sg_class_member->kobj, "generic"); if (error) pr_err("%s: unable to make symlink 'generic' back " "to sg%d\n", __func__, sdp->index); } else pr_warn("%s: sg_sys Invalid\n", __func__); sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " "type %d\n", sdp->index, scsidp->type); dev_set_drvdata(cl_dev, sdp); return 0; cdev_add_err: write_lock_irqsave(&sg_index_lock, iflags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, iflags); kfree(sdp); out: if (cdev) cdev_del(cdev); blk_put_queue(scsidp->request_queue); return error; } static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); struct request_queue *q = sdp->device->request_queue; unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() * even though the refcount is 0. Therefore, do idr_remove() BEFORE * any other cleanup. */ blk_trace_remove(q); blk_put_queue(q); write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); kfree(sdp); } static void sg_remove_device(struct device *cl_dev) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = dev_get_drvdata(cl_dev); unsigned long iflags; Sg_fd *sfp; int val; if (!sdp) return; /* want sdp->detaching non-zero as soon as possible */ val = atomic_inc_return(&sdp->detaching); if (val > 1) return; /* only want to do following once per device */ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "%s\n", __func__)); read_lock_irqsave(&sdp->sfd_lock, iflags); list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible_all(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } wake_up_interruptible_all(&sdp->open_wait); read_unlock_irqrestore(&sdp->sfd_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); cdev_del(sdp->cdev); sdp->cdev = NULL; kref_put(&sdp->d_ref, sg_device_destroy); } module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO | S_IWUSR); module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Douglas Gilbert"); MODULE_DESCRIPTION("SCSI generic (sg) driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SG_VERSION_STR); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static struct ctl_table sg_sysctls[] = { { .procname = "sg-big-buff", .data = &sg_big_buff, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, {} }; static struct ctl_table_header *hdr; static void register_sg_sysctls(void) { if (!hdr) hdr = register_sysctl("kernel", sg_sysctls); } static void unregister_sg_sysctls(void) { if (hdr) unregister_sysctl_table(hdr); } #else #define 
register_sg_sysctls() do { } while (0) #define unregister_sg_sysctls() do { } while (0) #endif /* CONFIG_SYSCTL */ static int __init init_sg(void) { int rc; if (scatter_elem_sz < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = scatter_elem_sz; } if (def_reserved_size >= 0) sg_big_buff = def_reserved_size; else def_reserved_size = sg_big_buff; rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; sg_sysfs_class = class_create("scsi_generic"); if ( IS_ERR(sg_sysfs_class) ) { rc = PTR_ERR(sg_sysfs_class); goto err_out; } sg_sysfs_valid = 1; rc = scsi_register_interface(&sg_interface); if (0 == rc) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_init(); #endif /* CONFIG_SCSI_PROC_FS */ return 0; } class_destroy(sg_sysfs_class); register_sg_sysctls(); err_out: unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); return rc; } static void __exit exit_sg(void) { unregister_sg_sysctls(); #ifdef CONFIG_SCSI_PROC_FS remove_proc_subtree("scsi/sg", NULL); #endif /* CONFIG_SCSI_PROC_FS */ scsi_unregister_interface(&sg_interface); class_destroy(sg_sysfs_class); sg_sysfs_valid = 0; unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); idr_destroy(&sg_index_idr); } static int sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? ITER_SOURCE : ITER_DEST; struct scsi_cmnd *scmd; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then scsi_alloc_request() * will sleep until an active command completes, freeing up a request. * Although waiting in an asynchronous interface is less than ideal, we * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might * not expect an EWOULDBLOCK from this condition. */ rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); if (IS_ERR(rq)) return PTR_ERR(rq); scmd = blk_mq_rq_to_pdu(rq); if (hp->cmd_len > sizeof(scmd->cmnd)) { blk_mq_free_request(rq); return -EINVAL; } memcpy(scmd->cmnd, cmd, hp->cmd_len); scmd->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; scmd->allowed = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { mutex_lock(&sfp->f_mutex); if (dxfer_len <= rsv_schp->bufflen && !sfp->res_in_use) { sfp->res_in_use = 1; sg_link_reserve(sfp, srp, dxfer_len); } else if (hp->flags & SG_FLAG_MMAP_IO) { res = -EBUSY; /* sfp->res_in_use == 1 */ if (dxfer_len > rsv_schp->bufflen) res = -ENOMEM; mutex_unlock(&sfp->f_mutex); return res; } else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) { mutex_unlock(&sfp->f_mutex); return res; } } mutex_unlock(&sfp->f_mutex); md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC, iov_count, iov_count, 1, rw); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; } static int sg_finish_rem_req(Sg_request *srp) { int ret = 0; Sg_fd *sfp = srp->parentfp; Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); if (srp->bio) ret = blk_rq_unmap_user(srp->bio); if (srp->rq) blk_mq_free_request(srp->rq); if (srp->res_used) sg_unlink_reserve(sfp, srp); else sg_remove_scat(sfp, req_schp); return ret; } static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) { int sg_bufflen = tablesize * sizeof(struct page *); gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; schp->pages = kzalloc(sg_bufflen, gfp_flags); if (!schp->pages) return -ENOMEM; schp->sglist_len = sg_bufflen; return tablesize; /* number of scat_gath elements allocated */ } static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) { int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; int sg_tablesize = sfp->parentdp->sg_tablesize; int blk_size = buff_size, order; gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO; if (blk_size < 0) return -EFAULT; if (0 == blk_size) ++blk_size; /* don't know why */ /* round request up to next highest SG_SECTOR_SZ byte boundary */ blk_size = ALIGN(blk_size, SG_SECTOR_SZ); SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: buff_size=%d, blk_size=%d\n", buff_size, blk_size)); /* N.B. ret_sz carried into this block ... */ mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); if (mx_sc_elems < 0) return mx_sc_elems; /* most likely -ENOMEM */ num = scatter_elem_sz; if (unlikely(num != scatter_elem_sz_prev)) { if (num < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = PAGE_SIZE; } else scatter_elem_sz_prev = num; } order = get_order(num); retry: ret_sz = 1 << (PAGE_SHIFT + order); for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; k++, rem_sz -= ret_sz) { num = (rem_sz > scatter_elem_sz_prev) ? 
scatter_elem_sz_prev : rem_sz; schp->pages[k] = alloc_pages(gfp_mask, order); if (!schp->pages[k]) goto out; if (num == scatter_elem_sz_prev) { if (unlikely(ret_sz > scatter_elem_sz_prev)) { scatter_elem_sz = ret_sz; scatter_elem_sz_prev = ret_sz; } } SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", k, num, ret_sz)); } /* end of for loop */ schp->page_order = order; schp->k_use_sg = k; SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; return 0; out: for (i = 0; i < k; i++) __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; return -ENOMEM; } static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) { SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); if (schp->pages && schp->sglist_len > 0) { if (!schp->dio_in_use) { int k; for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k=%d, pg=0x%p\n", k, schp->pages[k])); __free_pages(schp->pages[k], schp->page_order); } kfree(schp->pages); } } memset(schp, 0, sizeof (*schp)); } static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) { Sg_scatter_hold *schp = &srp->data; int k, num; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_read_oxfer: num_read_xfer=%d\n", num_read_xfer)); if ((!outp) || (num_read_xfer <= 0)) return 0; num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { if (copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { if (copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; if (num_read_xfer <= 0) break; outp += num; } } return 0; } static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold *schp = &sfp->reserve; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_reserve: req_size=%d\n", req_size)); do { if (req_size < PAGE_SIZE) req_size = PAGE_SIZE; if (0 == sg_build_indirect(schp, sfp, req_size)) return; else sg_remove_scat(sfp, schp); req_size >>= 1; /* divide by 2 */ } while (req_size > (PAGE_SIZE / 2)); } static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) { Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; int k, num, rem; srp->res_used = 1; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: size=%d\n", size)); rem = size; num = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg; k++) { if (rem <= num) { req_schp->k_use_sg = k + 1; req_schp->sglist_len = rsv_schp->sglist_len; req_schp->pages = rsv_schp->pages; req_schp->bufflen = size; req_schp->page_order = rsv_schp->page_order; break; } else rem -= num; } if (k >= rsv_schp->k_use_sg) SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: BAD size\n")); } static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) { Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_unlink_reserve: req->k_use_sg=%d\n", (int) req_schp->k_use_sg)); req_schp->k_use_sg = 0; req_schp->bufflen = 0; req_schp->pages = NULL; req_schp->page_order = 0; req_schp->sglist_len = 0; srp->res_used = 0; /* Called without mutex lock to avoid deadlock */ 
sfp->res_in_use = 0; } static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy) { Sg_request *resp; unsigned long iflags; *busy = false; write_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(resp, &sfp->rq_list, entry) { /* look for requests that are not SG_IO owned */ if ((!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { switch (resp->done) { case 0: /* request active */ *busy = true; break; case 1: /* request done; response ready to return */ resp->done = 2; /* guard against other readers */ write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; case 2: /* response already being returned */ break; } } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return NULL; } /* always adds to end of list */ static Sg_request * sg_add_request(Sg_fd * sfp) { int k; unsigned long iflags; Sg_request *rp = sfp->req_arr; write_lock_irqsave(&sfp->rq_list_lock, iflags); if (!list_empty(&sfp->rq_list)) { if (!sfp->cmd_q) goto out_unlock; for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { if (!rp->parentfp) break; } if (k >= SG_MAX_QUEUE) goto out_unlock; } memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; rp->header.duration = jiffies_to_msecs(jiffies); list_add_tail(&rp->entry, &sfp->rq_list); write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return rp; out_unlock: write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return NULL; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { unsigned long iflags; int res = 0; if (!sfp || !srp || list_empty(&sfp->rq_list)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); if (!list_empty(&srp->entry)) { list_del(&srp->entry); srp->parentfp = NULL; res = 1; } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); /* * If the device is detaching, wakeup any readers in case we just * removed the last response, which would leave nothing for them to * return other than -ENODEV. 
*/ if (unlikely(atomic_read(&sfp->parentdp->detaching))) wake_up_interruptible_all(&sfp->read_wait); return res; } static Sg_fd * sg_add_sfp(Sg_device * sdp) { Sg_fd *sfp; unsigned long iflags; int bufflen; sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) return ERR_PTR(-ENOMEM); init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); INIT_LIST_HEAD(&sfp->rq_list); kref_init(&sfp->f_ref); mutex_init(&sfp->f_mutex); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); kfree(sfp); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); write_unlock_irqrestore(&sdp->sfd_lock, iflags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, max_sectors_bytes(sdp->device->request_queue)); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); kref_get(&sdp->d_ref); __module_get(THIS_MODULE); return sfp; } static void sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; Sg_request *srp; unsigned long iflags; /* Cleanup any responses which were never read(). */ write_lock_irqsave(&sfp->rq_list_lock, iflags); while (!list_empty(&sfp->rq_list)) { srp = list_first_entry(&sfp->rq_list, Sg_request, entry); sg_finish_rem_req(srp); list_del(&srp->entry); srp->parentfp = NULL; } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); sg_remove_scat(sfp, &sfp->reserve); } SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: sfp=0x%p\n", sfp)); kfree(sfp); scsi_device_put(sdp->device); kref_put(&sdp->d_ref, sg_device_destroy); module_put(THIS_MODULE); } static void sg_remove_sfp(struct kref *kref) { struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); struct sg_device *sdp = sfp->parentdp; unsigned long iflags; write_lock_irqsave(&sdp->sfd_lock, iflags); list_del(&sfp->sfd_siblings); write_unlock_irqrestore(&sdp->sfd_lock, iflags); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); } #ifdef CONFIG_SCSI_PROC_FS static int sg_idr_max_id(int id, void *p, void *data) { int *k = data; if (*k < id) *k = id; return 0; } static int sg_last_dev(void) { int k = -1; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); idr_for_each(&sg_index_idr, sg_idr_max_id, &k); read_unlock_irqrestore(&sg_index_lock, iflags); return k + 1; /* origin 1 */ } #endif /* must be called with sg_index_lock held */ static Sg_device *sg_lookup_dev(int dev) { return idr_find(&sg_index_idr, dev); } static Sg_device * sg_get_dev(int dev) { struct sg_device *sdp; unsigned long flags; read_lock_irqsave(&sg_index_lock, flags); sdp = sg_lookup_dev(dev); if (!sdp) sdp = ERR_PTR(-ENXIO); else if (atomic_read(&sdp->detaching)) { /* If sdp->detaching, then the refcount may already be 0, in * which case it would be a bug to do kref_get(). 
*/ sdp = ERR_PTR(-ENODEV); } else kref_get(&sdp->d_ref); read_unlock_irqrestore(&sg_index_lock, flags); return sdp; } #ifdef CONFIG_SCSI_PROC_FS static int sg_proc_seq_show_int(struct seq_file *s, void *v); static int sg_proc_single_open_adio(struct inode *inode, struct file *file); static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct proc_ops adio_proc_ops = { .proc_open = sg_proc_single_open_adio, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = sg_proc_write_adio, .proc_release = single_release, }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct proc_ops dressz_proc_ops = { .proc_open = sg_proc_single_open_dressz, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = sg_proc_write_dressz, .proc_release = single_release, }; static int sg_proc_seq_show_version(struct seq_file *s, void *v); static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); static int sg_proc_seq_show_dev(struct seq_file *s, void *v); static void * dev_seq_start(struct seq_file *s, loff_t *pos); static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); static void dev_seq_stop(struct seq_file *s, void *v); static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_dev, }; static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); static const struct seq_operations devstrs_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_devstrs, }; static int sg_proc_seq_show_debug(struct seq_file *s, void *v); static const struct seq_operations debug_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_debug, }; static int sg_proc_init(void) { struct proc_dir_entry *p; p = proc_mkdir("scsi/sg", NULL); if (!p) return 1; proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops); proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops); proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops); proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr); proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops); proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops); proc_create_single("version", S_IRUGO, p, sg_proc_seq_show_version); return 0; } static int sg_proc_seq_show_int(struct seq_file *s, void *v) { seq_printf(s, "%d\n", *((int *)s->private)); return 0; } static int sg_proc_single_open_adio(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long num; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &num); if (err) return err; sg_allow_dio = num ? 
1 : 0; return count; } static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_big_buff); } static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long k = ULONG_MAX; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &k); if (err) return err; if (k <= 1048576) { /* limit "big buff" to 1 MB */ sg_big_buff = k; return count; } return -ERANGE; } static int sg_proc_seq_show_version(struct seq_file *s, void *v) { seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, sg_version_date); return 0; } static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) { seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n"); return 0; } struct sg_proc_deviter { loff_t index; size_t max; }; static void * dev_seq_start(struct seq_file *s, loff_t *pos) { struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); s->private = it; if (! it) return NULL; it->index = *pos; it->max = sg_last_dev(); if (it->index >= it->max) return NULL; return it; } static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct sg_proc_deviter * it = s->private; *pos = ++it->index; return (it->index < it->max) ? it : NULL; } static void dev_seq_stop(struct seq_file *s, void *v) { kfree(s->private); } static int sg_proc_seq_show_dev(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if ((NULL == sdp) || (NULL == sdp->device) || (atomic_read(&sdp->detaching))) seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); else { scsidp = sdp->device; seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, (int) scsidp->type, 1, (int) scsidp->queue_depth, (int) scsi_device_busy(scsidp), (int) scsi_device_online(scsidp)); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; scsidp = sdp ? sdp->device : NULL; if (sdp && scsidp && (!atomic_read(&sdp->detaching))) seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", scsidp->vendor, scsidp->model, scsidp->rev); else seq_puts(s, "<no active device>\n"); read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } /* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, new_interface, blen, usg; Sg_request *srp; Sg_fd *fp; const sg_io_hdr_t *hp; const char * cp; unsigned int ms; k = 0; list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { k++; read_lock(&fp->rq_list_lock); /* irqs already disabled */ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " "(res)sgat=%d low_dma=%d\n", k, jiffies_to_msecs(fp->timeout), fp->reserve.bufflen, (int) fp->reserve.k_use_sg, 0); seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", (int) fp->cmd_q, (int) fp->force_packid, (int) fp->keep_orphan); list_for_each_entry(srp, &fp->rq_list, entry) { hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 
0 : 1; if (srp->res_used) { if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else cp = " rb>> "; } else { if (SG_INFO_DIRECT_IO_MASK & hp->info) cp = " dio>> "; else cp = " "; } seq_puts(s, cp); blen = srp->data.bufflen; usg = srp->data.k_use_sg; seq_puts(s, srp->done ? ((1 == srp->done) ? "rcv:" : "fin:") : "act:"); seq_printf(s, " id=%d blen=%d", srp->header.pack_id, blen); if (srp->done) seq_printf(s, " dur=%d", hp->duration); else { ms = jiffies_to_msecs(jiffies); seq_printf(s, " t_o/elap=%d/%d", (new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout)), (ms > hp->duration ? ms - hp->duration : 0)); } seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, (int) srp->data.cmd_opcode); } if (list_empty(&fp->rq_list)) seq_puts(s, " No requests active\n"); read_unlock(&fp->rq_list_lock); } } static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", (int)it->max, sg_big_buff); read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if (NULL == sdp) goto skip; read_lock(&sdp->sfd_lock); if (!list_empty(&sdp->sfds)) { seq_printf(s, " >>> device=%s ", sdp->name); if (atomic_read(&sdp->detaching)) seq_puts(s, "detaching pending close "); else if (sdp->device) { struct scsi_device *scsidp = sdp->device; seq_printf(s, "%d:%d:%d:%llu em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); } seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); sg_proc_debug_helper(s, sdp); } read_unlock(&sdp->sfd_lock); skip: read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } #endif /* CONFIG_SCSI_PROC_FS */ module_init(init_sg); module_exit(exit_sg);
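/*
 * A minimal userspace sketch of how the v3 sg_io_hdr/SG_IO path implemented
 * above (sg_new_write()/sg_new_read() behind the SG_IO ioctl) is typically
 * driven.  This is an illustration only, not part of the driver: the
 * "/dev/sg0" node name is an assumption (use whichever sg node exists on
 * the system) and error handling is trimmed for brevity.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char inq_cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation length */
	unsigned char inq_buf[96];
	unsigned char sense[32];
	sg_io_hdr_t io_hdr;
	int fd;

	fd = open("/dev/sg0", O_RDWR);		/* device node name is an assumption */
	if (fd < 0)
		return 1;

	memset(&io_hdr, 0, sizeof(io_hdr));
	io_hdr.interface_id = 'S';		/* required: sg_new_write() rejects anything else */
	io_hdr.cmd_len = sizeof(inq_cdb);	/* driver requires >= 6 and <= SG_MAX_CDB_SIZE */
	io_hdr.cmdp = inq_cdb;
	io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	io_hdr.dxfer_len = sizeof(inq_buf);
	io_hdr.dxferp = inq_buf;
	io_hdr.mx_sb_len = sizeof(sense);
	io_hdr.sbp = sense;
	io_hdr.timeout = 20000;			/* milliseconds */

	/* Blocks until sg_rq_end_io() marks the request done. */
	if (ioctl(fd, SG_IO, &io_hdr) < 0) {
		close(fd);
		return 1;
	}

	printf("status=0x%x host_status=0x%x driver_status=0x%x resid=%d\n",
	       io_hdr.status, io_hdr.host_status, io_hdr.driver_status,
	       io_hdr.resid);
	close(fd);
	return 0;
}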
linux-master
drivers/scsi/sg.c
// SPDX-License-Identifier: GPL-2.0-or-later /************************************************************************** * Initio 9100 device driver for Linux. * * Copyright (c) 1994-1998 Initio Corporation * Copyright (c) 1998 Bas Vermeulen <[email protected]> * Copyright (c) 2004 Christoph Hellwig <[email protected]> * Copyright (c) 2007 Red Hat * ************************************************************************* * * DESCRIPTION: * * This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host * adapters * * 08/06/97 hc - v1.01h * - Support inic-940 and inic-935 * 09/26/97 hc - v1.01i * - Make correction from J.W. Schultz suggestion * 10/13/97 hc - Support reset function * 10/21/97 hc - v1.01j * - Support 32 LUN (SCSI 3) * 01/14/98 hc - v1.01k * - Fix memory allocation problem * 03/04/98 hc - v1.01l * - Fix tape rewind which will hang the system problem * - Set can_queue to initio_num_scb * 06/25/98 hc - v1.01m * - Get it work for kernel version >= 2.1.75 * - Dynamic assign SCSI bus reset holding time in initio_init() * 07/02/98 hc - v1.01n * - Support 0002134A * 08/07/98 hc - v1.01o * - Change the initio_abort_srb routine to use scsi_done. <01> * 09/07/98 hl - v1.02 * - Change the INI9100U define and proc_dir_entry to * reflect the newer Kernel 2.1.118, but the v1.o1o * should work with Kernel 2.1.118. * 09/20/98 wh - v1.02a * - Support Abort command. * - Handle reset routine. * 09/21/98 hl - v1.03 * - remove comments. * 12/09/98 bv - v1.03a * - Removed unused code * 12/13/98 bv - v1.03b * - Remove cli() locking for kernels >= 2.1.95. This uses * spinlocks to serialize access to the pSRB_head and * pSRB_tail members of the HCS structure. * 09/01/99 bv - v1.03d * - Fixed a deadlock problem in SMP. * 21/01/99 bv - v1.03e * - Add support for the Domex 3192U PCI SCSI * This is a slightly modified patch by * Brian Macy <[email protected]> * 22/02/99 bv - v1.03f * - Didn't detect the INIC-950 in 2.0.x correctly. * Now fixed. 
* 05/07/99 bv - v1.03g * - Changed the assumption that HZ = 100 * 10/17/03 mc - v1.04 * - added new DMA API support * 06/01/04 jmd - v1.04a * - Re-add reset_bus support **************************************************************************/ #include <linux/module.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "initio.h" #define SENSE_SIZE 14 #define i91u_MAXQUEUE 2 #define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a" #ifdef DEBUG_i91u static unsigned int i91u_debug = DEBUG_DEFAULT; #endif static int initio_tag_enable = 1; #ifdef DEBUG_i91u static int setup_debug = 0; #endif static void i91uSCBPost(u8 * pHcb, u8 * pScb); #define DEBUG_INTERRUPT 0 #define DEBUG_QUEUE 0 #define DEBUG_STATE 0 #define INT_DISC 0 /*--- forward references ---*/ static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun); static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host); static int tulip_main(struct initio_host * host); static int initio_next_state(struct initio_host * host); static int initio_state_1(struct initio_host * host); static int initio_state_2(struct initio_host * host); static int initio_state_3(struct initio_host * host); static int initio_state_4(struct initio_host * host); static int initio_state_5(struct initio_host * host); static int initio_state_6(struct initio_host * host); static int initio_state_7(struct initio_host * host); static int initio_xfer_data_in(struct initio_host * host); static int initio_xfer_data_out(struct initio_host * host); static int initio_xpad_in(struct initio_host * host); static int initio_xpad_out(struct initio_host * host); static int initio_status_msg(struct initio_host * host); static int initio_msgin(struct initio_host * host); static int initio_msgin_sync(struct initio_host * host); static int initio_msgin_accept(struct initio_host * host); static int initio_msgout_reject(struct initio_host * host); static int initio_msgin_extend(struct initio_host * host); static int initio_msgout_ide(struct initio_host * host); static int initio_msgout_abort_targ(struct initio_host * host); static int initio_msgout_abort_tag(struct initio_host * host); static int initio_bus_device_reset(struct initio_host * host); static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb); static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb); static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb); static int int_initio_busfree(struct initio_host * host); static int int_initio_scsi_rst(struct initio_host * host); static int int_initio_bad_seq(struct initio_host * host); static int int_initio_resel(struct initio_host * host); static int initio_sync_done(struct initio_host * host); static int wdtr_done(struct initio_host * host); static int wait_tulip(struct initio_host * host); static int initio_wait_done_disc(struct initio_host * host); static int initio_wait_disc(struct initio_host * host); static void 
tulip_scsi(struct initio_host * host); static int initio_post_scsi_rst(struct initio_host * host); static void initio_se2_ew_en(unsigned long base); static void initio_se2_ew_ds(unsigned long base); static int initio_se2_rd_all(unsigned long base); static void initio_se2_update_all(unsigned long base); /* setup default pattern */ static void initio_read_eeprom(unsigned long base); /* ---- INTERNAL VARIABLES ---- */ static NVRAM i91unvram; static NVRAM *i91unvramp; static u8 i91udftNvRam[64] = { /*----------- header -----------*/ 0x25, 0xc9, /* Signature */ 0x40, /* Size */ 0x01, /* Revision */ /* -- Host Adapter Structure -- */ 0x95, /* ModelByte0 */ 0x00, /* ModelByte1 */ 0x00, /* ModelInfo */ 0x01, /* NumOfCh */ NBC1_DEFAULT, /* BIOSConfig1 */ 0, /* BIOSConfig2 */ 0, /* HAConfig1 */ 0, /* HAConfig2 */ /* SCSI channel 0 and target Structure */ 7, /* SCSIid */ NCC1_DEFAULT, /* SCSIconfig1 */ 0, /* SCSIconfig2 */ 0x10, /* NumSCSItarget */ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, /* SCSI channel 1 and target Structure */ 7, /* SCSIid */ NCC1_DEFAULT, /* SCSIconfig1 */ 0, /* SCSIconfig2 */ 0x10, /* NumSCSItarget */ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* - CheckSum - */ static u8 initio_rate_tbl[8] = /* fast 20 */ { /* nanosecond divide by 4 */ 12, /* 50ns, 20M */ 18, /* 75ns, 13.3M */ 25, /* 100ns, 10M */ 31, /* 125ns, 8M */ 37, /* 150ns, 6.6M */ 43, /* 175ns, 5.7M */ 50, /* 200ns, 5M */ 62 /* 250ns, 4M */ }; static void initio_do_pause(unsigned amount) { /* Pause for amount jiffies */ unsigned long the_time = jiffies + amount; while (time_before_eq(jiffies, the_time)) cpu_relax(); } /*-- forward reference --*/ /****************************************************************** Input: instruction for Serial E2PROM EX: se2_rd(0 call se2_instr() to send address and read command StartBit OP_Code Address Data --------- -------- ------------------ ------- 1 1 , 0 A5,A4,A3,A2,A1,A0 D15-D0 +----------------------------------------------------- | CS -----+ +--+ +--+ +--+ +--+ +--+ ^ | ^ | ^ | ^ | ^ | | | | | | | | | | | CLK -------+ +--+ +--+ +--+ +--+ +-- (leading edge trigger) +--1-----1--+ | SB OP | OP A5 A4 DI ----+ +--0------------------ (address and cmd sent to nvram) -------------------------------------------+ | DO +--- (data sent from nvram) ******************************************************************/ /** * initio_se2_instr - bitbang an instruction * @base: Base of InitIO controller * @instr: Instruction for serial E2PROM * * Bitbang an instruction out to the serial E2Prom */ static void initio_se2_instr(unsigned long base, u8 instr) { int i; u8 b; outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */ udelay(30); outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */ udelay(30); for (i = 0; i < 8; i++) { if (instr & 0x80) b = SE2CS | SE2DO; /* -CLK+dataBit */ else b = SE2CS; /* -CLK */ outb(b, base + TUL_NVRAM); udelay(30); outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */ udelay(30); instr <<= 1; } outb(SE2CS, base + TUL_NVRAM); /* -CLK */ udelay(30); } /** * initio_se2_ew_en - Enable erase/write * @base: Base address of InitIO controller * * Enable erase/write state of serial EEPROM */ void 
initio_se2_ew_en(unsigned long base) { initio_se2_instr(base, 0x30); /* EWEN */ outb(0, base + TUL_NVRAM); /* -CS */ udelay(30); } /** * initio_se2_ew_ds - Disable erase/write * @base: Base address of InitIO controller * * Disable erase/write state of serial EEPROM */ void initio_se2_ew_ds(unsigned long base) { initio_se2_instr(base, 0); /* EWDS */ outb(0, base + TUL_NVRAM); /* -CS */ udelay(30); } /** * initio_se2_rd - read E2PROM word * @base: Base of InitIO controller * @addr: Address of word in E2PROM * * Read a word from the NV E2PROM device */ static u16 initio_se2_rd(unsigned long base, u8 addr) { u8 instr, rb; u16 val = 0; int i; instr = (u8) (addr | 0x80); initio_se2_instr(base, instr); /* READ INSTR */ for (i = 15; i >= 0; i--) { outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */ udelay(30); outb(SE2CS, base + TUL_NVRAM); /* -CLK */ /* sample data after the following edge of clock */ rb = inb(base + TUL_NVRAM); rb &= SE2DI; val += (rb << i); udelay(30); /* 6/20/95 */ } outb(0, base + TUL_NVRAM); /* no chip select */ udelay(30); return val; } /** * initio_se2_wr - read E2PROM word * @base: Base of InitIO controller * @addr: Address of word in E2PROM * @val: Value to write * * Write a word to the NV E2PROM device. Used when recovering from * a problem with the NV. */ static void initio_se2_wr(unsigned long base, u8 addr, u16 val) { u8 rb; u8 instr; int i; instr = (u8) (addr | 0x40); initio_se2_instr(base, instr); /* WRITE INSTR */ for (i = 15; i >= 0; i--) { if (val & 0x8000) outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */ else outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */ udelay(30); outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */ udelay(30); val <<= 1; } outb(SE2CS, base + TUL_NVRAM); /* -CLK */ udelay(30); outb(0, base + TUL_NVRAM); /* -CS */ udelay(30); outb(SE2CS, base + TUL_NVRAM); /* +CS */ udelay(30); for (;;) { outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */ udelay(30); outb(SE2CS, base + TUL_NVRAM); /* -CLK */ udelay(30); if ((rb = inb(base + TUL_NVRAM)) & SE2DI) break; /* write complete */ } outb(0, base + TUL_NVRAM); /* -CS */ } /** * initio_se2_rd_all - read hostadapter NV configuration * @base: Base address of InitIO controller * * Reads the E2PROM data into main memory. Ensures that the checksum * and header marker are valid. Returns 1 on success -1 on error. */ static int initio_se2_rd_all(unsigned long base) { int i; u16 chksum = 0; u16 *np; i91unvramp = &i91unvram; np = (u16 *) i91unvramp; for (i = 0; i < 32; i++) *np++ = initio_se2_rd(base, i); /* Is signature "ini" ok ? */ if (i91unvramp->NVM_Signature != INI_SIGNATURE) return -1; /* Is ckecksum ok ? */ np = (u16 *) i91unvramp; for (i = 0; i < 31; i++) chksum += *np++; if (i91unvramp->NVM_CheckSum != chksum) return -1; return 1; } /** * initio_se2_update_all - Update E2PROM * @base: Base of InitIO controller * * Update the E2PROM by wrting any changes into the E2PROM * chip, rewriting the checksum. 
*/ static void initio_se2_update_all(unsigned long base) { /* setup default pattern */ int i; u16 chksum = 0; u16 *np, *np1; i91unvramp = &i91unvram; /* Calculate checksum first */ np = (u16 *) i91udftNvRam; for (i = 0; i < 31; i++) chksum += *np++; *np = chksum; initio_se2_ew_en(base); /* Enable write */ np = (u16 *) i91udftNvRam; np1 = (u16 *) i91unvramp; for (i = 0; i < 32; i++, np++, np1++) { if (*np != *np1) initio_se2_wr(base, i, *np); } initio_se2_ew_ds(base); /* Disable write */ } /** * initio_read_eeprom - Retrieve configuration * @base: Base of InitIO Host Adapter * * Retrieve the host adapter configuration data from E2Prom. If the * data is invalid then the defaults are used and are also restored * into the E2PROM. This forms the access point for the SCSI driver * into the E2PROM layer, the other functions for the E2PROM are all * internal use. * * Must be called single threaded, uses a shared global area. */ static void initio_read_eeprom(unsigned long base) { u8 gctrl; i91unvramp = &i91unvram; /* Enable EEProm programming */ gctrl = inb(base + TUL_GCTRL); outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL); if (initio_se2_rd_all(base) != 1) { initio_se2_update_all(base); /* setup default pattern */ initio_se2_rd_all(base); /* load again */ } /* Disable EEProm programming */ gctrl = inb(base + TUL_GCTRL); outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL); } /** * initio_stop_bm - stop bus master * @host: InitIO we are stopping * * Stop any pending DMA operation, aborting the DMA if necessary */ static void initio_stop_bm(struct initio_host * host) { if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd); /* wait Abort DMA xfer done */ while ((inb(host->addr + TUL_Int) & XABT) == 0) cpu_relax(); } outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); } /** * initio_reset_scsi - Reset SCSI host controller * @host: InitIO host to reset * @seconds: Recovery time * * Perform a full reset of the SCSI subsystem. */ static int initio_reset_scsi(struct initio_host * host, int seconds) { outb(TSC_RST_BUS, host->addr + TUL_SCtrl0); while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT)) cpu_relax(); /* reset tulip chip */ outb(0, host->addr + TUL_SSignal); /* Stall for a while, wait for target's firmware ready,make it 2 sec ! */ /* SONY 5200 tape drive won't work if only stall for 1 sec */ /* FIXME: this is a very long busy wait right now */ initio_do_pause(seconds * HZ); inb(host->addr + TUL_SInt); return SCSI_RESET_SUCCESS; } /** * initio_init - set up an InitIO host adapter * @host: InitIO host adapter * @bios_addr: BIOS address * * Set up the host adapter and devices according to the configuration * retrieved from the E2PROM. * * Locking: Calls E2PROM layer code which is not re-enterable so must * run single threaded for now. 
*/ static void initio_init(struct initio_host * host, u8 *bios_addr) { int i; u8 *flags; u8 *heads; /* Get E2Prom configuration */ initio_read_eeprom(host->addr); if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8) host->max_tar = 8; else host->max_tar = 16; host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1; host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID; host->idmask = ~(1 << host->scsi_id); #ifdef CHK_PARITY /* Enable parity error response */ outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD); #endif /* Mask all the interrupt */ outb(0x1F, host->addr + TUL_Mask); initio_stop_bm(host); /* --- Initialize the tulip --- */ outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0); /* program HBA's SCSI ID */ outb(host->scsi_id << 4, host->addr + TUL_SScsiId); /* Enable Initiator Mode ,phase latch,alternate sync period mode, disable SCSI reset */ if (host->config & HCC_EN_PAR) host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR); else host->sconf1 = (TSC_INITDEFAULT); outb(host->sconf1, host->addr + TUL_SConfig); /* Enable HW reselect */ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); outb(0, host->addr + TUL_SPeriod); /* selection time out = 250 ms */ outb(153, host->addr + TUL_STimeOut); /* Enable SCSI terminator */ outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)), host->addr + TUL_XCtrl); outb(((host->config & HCC_AUTO_TERM) >> 4) | (inb(host->addr + TUL_GCTRL1) & 0xFE), host->addr + TUL_GCTRL1); for (i = 0, flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config), heads = bios_addr + 0x180; i < host->max_tar; i++, flags++) { host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE); if (host->targets[i].flags & TCF_EN_255) host->targets[i].drv_flags = TCF_DRV_255_63; else host->targets[i].drv_flags = 0; host->targets[i].js_period = 0; host->targets[i].sconfig0 = host->sconf1; host->targets[i].heads = *heads++; if (host->targets[i].heads == 255) host->targets[i].drv_flags = TCF_DRV_255_63; else host->targets[i].drv_flags = 0; host->targets[i].sectors = *heads++; host->targets[i].flags &= ~TCF_BUSY; host->act_tags[i] = 0; host->max_tags[i] = 0xFF; } /* for */ printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n", host->addr, host->pci_dev->irq, host->bios_addr, host->scsi_id); /* Reset SCSI Bus */ if (host->config & HCC_SCSI_RESET) { printk(KERN_INFO "i91u: Reset SCSI Bus ... \n"); initio_reset_scsi(host, 10); } outb(0x17, host->addr + TUL_SCFG1); outb(0xE9, host->addr + TUL_SIntEnable); } /** * initio_alloc_scb - Allocate an SCB * @host: InitIO host we are allocating for * * Walk the SCB list for the controller and allocate a free SCB if * one exists. 
*/ static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host) { struct scsi_ctrl_blk *scb; unsigned long flags; spin_lock_irqsave(&host->avail_lock, flags); if ((scb = host->first_avail) != NULL) { #if DEBUG_QUEUE printk("find scb at %p\n", scb); #endif if ((host->first_avail = scb->next) == NULL) host->last_avail = NULL; scb->next = NULL; scb->status = SCB_RENT; } spin_unlock_irqrestore(&host->avail_lock, flags); return scb; } /** * initio_release_scb - Release an SCB * @host: InitIO host that owns the SCB * @cmnd: SCB command block being returned * * Return an allocated SCB to the host free list */ static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd) { unsigned long flags; #if DEBUG_QUEUE printk("Release SCB %p; ", cmnd); #endif spin_lock_irqsave(&(host->avail_lock), flags); cmnd->srb = NULL; cmnd->status = 0; cmnd->next = NULL; if (host->last_avail != NULL) { host->last_avail->next = cmnd; host->last_avail = cmnd; } else { host->first_avail = cmnd; host->last_avail = cmnd; } spin_unlock_irqrestore(&(host->avail_lock), flags); } /***************************************************************************/ static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) { #if DEBUG_QUEUE printk("Append pend SCB %p; ", scbp); #endif scbp->status = SCB_PEND; scbp->next = NULL; if (host->last_pending != NULL) { host->last_pending->next = scbp; host->last_pending = scbp; } else { host->first_pending = scbp; host->last_pending = scbp; } } /***************************************************************************/ static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) { #if DEBUG_QUEUE printk("Push pend SCB %p; ", scbp); #endif scbp->status = SCB_PEND; if ((scbp->next = host->first_pending) != NULL) { host->first_pending = scbp; } else { host->first_pending = scbp; host->last_pending = scbp; } } static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host) { struct scsi_ctrl_blk *first; first = host->first_pending; while (first != NULL) { if (first->opcode != ExecSCSI) return first; if (first->tagmsg == 0) { if ((host->act_tags[first->target] == 0) && !(host->targets[first->target].flags & TCF_BUSY)) return first; } else { if ((host->act_tags[first->target] >= host->max_tags[first->target]) | (host->targets[first->target].flags & TCF_BUSY)) { first = first->next; continue; } return first; } first = first->next; } return first; } static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) { struct scsi_ctrl_blk *tmp, *prev; #if DEBUG_QUEUE printk("unlink pend SCB %p; ", scb); #endif prev = tmp = host->first_pending; while (tmp != NULL) { if (scb == tmp) { /* Unlink this SCB */ if (tmp == host->first_pending) { if ((host->first_pending = tmp->next) == NULL) host->last_pending = NULL; } else { prev->next = tmp->next; if (tmp == host->last_pending) host->last_pending = prev; } tmp->next = NULL; break; } prev = tmp; tmp = tmp->next; } } static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) { #if DEBUG_QUEUE printk("append busy SCB %p; ", scbp); #endif if (scbp->tagmsg) host->act_tags[scbp->target]++; else host->targets[scbp->target].flags |= TCF_BUSY; scbp->status = SCB_BUSY; scbp->next = NULL; if (host->last_busy != NULL) { host->last_busy->next = scbp; host->last_busy = scbp; } else { host->first_busy = scbp; host->last_busy = scbp; } } 
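/*
 * SCB queueing overview: free SCBs sit on the avail list, queued commands
 * are appended to the pending list, commands accepted for execution move to
 * the busy list (bumping the per-target tag count or setting TCF_BUSY), and
 * finished commands are parked on the done list until tulip_main() invokes
 * their post routine.
 */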
/***************************************************************************/ static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host) { struct scsi_ctrl_blk *tmp; if ((tmp = host->first_busy) != NULL) { if ((host->first_busy = tmp->next) == NULL) host->last_busy = NULL; tmp->next = NULL; if (tmp->tagmsg) host->act_tags[tmp->target]--; else host->targets[tmp->target].flags &= ~TCF_BUSY; } #if DEBUG_QUEUE printk("Pop busy SCB %p; ", tmp); #endif return tmp; } /***************************************************************************/ static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) { struct scsi_ctrl_blk *tmp, *prev; #if DEBUG_QUEUE printk("unlink busy SCB %p; ", scb); #endif prev = tmp = host->first_busy; while (tmp != NULL) { if (scb == tmp) { /* Unlink this SCB */ if (tmp == host->first_busy) { if ((host->first_busy = tmp->next) == NULL) host->last_busy = NULL; } else { prev->next = tmp->next; if (tmp == host->last_busy) host->last_busy = prev; } tmp->next = NULL; if (tmp->tagmsg) host->act_tags[tmp->target]--; else host->targets[tmp->target].flags &= ~TCF_BUSY; break; } prev = tmp; tmp = tmp->next; } return; } struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun) { struct scsi_ctrl_blk *tmp; u16 scbp_tarlun; tmp = host->first_busy; while (tmp != NULL) { scbp_tarlun = (tmp->lun << 8) | (tmp->target); if (scbp_tarlun == tarlun) { /* Unlink this SCB */ break; } tmp = tmp->next; } #if DEBUG_QUEUE printk("find busy SCB %p; ", tmp); #endif return tmp; } static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp) { #if DEBUG_QUEUE printk("append done SCB %p; ", scbp); #endif scbp->status = SCB_DONE; scbp->next = NULL; if (host->last_done != NULL) { host->last_done->next = scbp; host->last_done = scbp; } else { host->first_done = scbp; host->last_done = scbp; } } struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host) { struct scsi_ctrl_blk *tmp; if ((tmp = host->first_done) != NULL) { if ((host->first_done = tmp->next) == NULL) host->last_done = NULL; tmp->next = NULL; } #if DEBUG_QUEUE printk("find done SCB %p; ",tmp); #endif return tmp; } static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp) { unsigned long flags; struct scsi_ctrl_blk *tmp, *prev; spin_lock_irqsave(&host->semaph_lock, flags); if ((host->semaph == 0) && (host->active == NULL)) { /* disable Jasmin SCSI Int */ outb(0x1F, host->addr + TUL_Mask); spin_unlock_irqrestore(&host->semaph_lock, flags); /* FIXME: synchronize_irq needed ? 
*/ tulip_main(host); spin_lock_irqsave(&host->semaph_lock, flags); host->semaph = 1; outb(0x0F, host->addr + TUL_Mask); spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_SNOOZE; } prev = tmp = host->first_pending; /* Check Pend queue */ while (tmp != NULL) { /* 07/27/98 */ if (tmp->srb == srbp) { if (tmp == host->active) { spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_BUSY; } else if (tmp == host->first_pending) { if ((host->first_pending = tmp->next) == NULL) host->last_pending = NULL; } else { prev->next = tmp->next; if (tmp == host->last_pending) host->last_pending = prev; } tmp->hastat = HOST_ABORTED; tmp->flags |= SCF_DONE; if (tmp->flags & SCF_POST) (*tmp->post) ((u8 *) host, (u8 *) tmp); spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_SUCCESS; } prev = tmp; tmp = tmp->next; } prev = tmp = host->first_busy; /* Check Busy queue */ while (tmp != NULL) { if (tmp->srb == srbp) { if (tmp == host->active) { spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_BUSY; } else if (tmp->tagmsg == 0) { spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_BUSY; } else { host->act_tags[tmp->target]--; if (tmp == host->first_busy) { if ((host->first_busy = tmp->next) == NULL) host->last_busy = NULL; } else { prev->next = tmp->next; if (tmp == host->last_busy) host->last_busy = prev; } tmp->next = NULL; tmp->hastat = HOST_ABORTED; tmp->flags |= SCF_DONE; if (tmp->flags & SCF_POST) (*tmp->post) ((u8 *) host, (u8 *) tmp); spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_SUCCESS; } } prev = tmp; tmp = tmp->next; } spin_unlock_irqrestore(&host->semaph_lock, flags); return SCSI_ABORT_NOT_RUNNING; } /***************************************************************************/ static int initio_bad_seq(struct initio_host * host) { struct scsi_ctrl_blk *scb; printk("initio_bad_seg c=%d\n", host->index); if ((scb = host->active) != NULL) { initio_unlink_busy_scb(host, scb); scb->hastat = HOST_BAD_PHAS; scb->tastat = 0; initio_append_done_scb(host, scb); } initio_stop_bm(host); initio_reset_scsi(host, 8); /* 7/29/98 */ return initio_post_scsi_rst(host); } /************************************************************************/ static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb) { unsigned long flags; scb->mode = 0; scb->sgidx = 0; scb->sgmax = scb->sglen; spin_lock_irqsave(&host->semaph_lock, flags); initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */ /* VVVVV 07/21/98 */ if (host->semaph == 1) { /* Disable Jasmin SCSI Int */ outb(0x1F, host->addr + TUL_Mask); host->semaph = 0; spin_unlock_irqrestore(&host->semaph_lock, flags); tulip_main(host); spin_lock_irqsave(&host->semaph_lock, flags); host->semaph = 1; outb(0x0F, host->addr + TUL_Mask); } spin_unlock_irqrestore(&host->semaph_lock, flags); return; } /***************************************************************************/ static int initio_isr(struct initio_host * host) { if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) { if (host->semaph == 1) { outb(0x1F, host->addr + TUL_Mask); /* Disable Tulip SCSI Int */ host->semaph = 0; tulip_main(host); host->semaph = 1; outb(0x0F, host->addr + TUL_Mask); return 1; } } return 0; } static int tulip_main(struct initio_host * host) { struct scsi_ctrl_blk *scb; for (;;) { tulip_scsi(host); /* Call tulip_scsi */ /* Walk the list of completed SCBs */ while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */ if (scb->tastat == 
INI_QUEUE_FULL) { host->max_tags[scb->target] = host->act_tags[scb->target] - 1; scb->tastat = 0; initio_append_pend_scb(host, scb); continue; } if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */ if (scb->tastat == 2) { /* clr sync. nego flag */ if (scb->flags & SCF_SENSE) { u8 len; len = scb->senselen; if (len == 0) len = 1; scb->buflen = scb->senselen; scb->bufptr = scb->senseptr; scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */ /* so, we won't report wrong direction in xfer_data_in, and won't report HOST_DO_DU in state_6 */ scb->mode = SCM_RSENS; scb->ident &= 0xBF; /* Disable Disconnect */ scb->tagmsg = 0; scb->tastat = 0; scb->cdblen = 6; scb->cdb[0] = SCSICMD_RequestSense; scb->cdb[1] = 0; scb->cdb[2] = 0; scb->cdb[3] = 0; scb->cdb[4] = len; scb->cdb[5] = 0; initio_push_pend_scb(host, scb); break; } } } else { /* in request sense mode */ if (scb->tastat == 2) { /* check contition status again after sending requset sense cmd 0x3 */ scb->hastat = HOST_BAD_PHAS; } scb->tastat = 2; } scb->flags |= SCF_DONE; if (scb->flags & SCF_POST) { /* FIXME: only one post method and lose casts */ (*scb->post) ((u8 *) host, (u8 *) scb); } } /* while */ /* find_active: */ if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING) continue; if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */ return 1; /* return to OS, enable interrupt */ /* Check pending SCB */ if (initio_find_first_pend_scb(host) == NULL) return 1; /* return to OS, enable interrupt */ } /* End of for loop */ /* statement won't reach here */ } static void tulip_scsi(struct initio_host * host) { struct scsi_ctrl_blk *scb; struct target_control *active_tc; /* make sure to service interrupt asap */ if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) { host->phase = host->jsstatus0 & TSS_PH_MASK; host->jsstatus1 = inb(host->addr + TUL_SStatus1); host->jsint = inb(host->addr + TUL_SInt); if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */ int_initio_scsi_rst(host); return; } if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */ if (int_initio_resel(host) == 0) initio_next_state(host); return; } if (host->jsint & TSS_SEL_TIMEOUT) { int_initio_busfree(host); return; } if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ int_initio_busfree(host); /* unexpected bus free or sel timeout */ return; } if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */ if (host->active) initio_next_state(host); return; } } if (host->active != NULL) return; if ((scb = initio_find_first_pend_scb(host)) == NULL) return; /* program HBA's SCSI ID & target SCSI ID */ outb((host->scsi_id << 4) | (scb->target & 0x0F), host->addr + TUL_SScsiId); if (scb->opcode == ExecSCSI) { active_tc = &host->targets[scb->target]; if (scb->tagmsg) active_tc->drv_flags |= TCF_DRV_EN_TAG; else active_tc->drv_flags &= ~TCF_DRV_EN_TAG; outb(active_tc->js_period, host->addr + TUL_SPeriod); if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */ initio_select_atn_stop(host, scb); } else { if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */ initio_select_atn_stop(host, scb); } else { if (scb->tagmsg) initio_select_atn3(host, scb); else initio_select_atn(host, scb); } } if (scb->flags & SCF_POLL) { while (wait_tulip(host) != -1) { if (initio_next_state(host) == -1) break; } } } else if (scb->opcode == BusDevRst) { initio_select_atn_stop(host, scb); scb->next_state = 8; if (scb->flags & 
SCF_POLL) { while (wait_tulip(host) != -1) { if (initio_next_state(host) == -1) break; } } } else if (scb->opcode == AbortCmd) { if (initio_abort_srb(host, scb->srb) != 0) { initio_unlink_pend_scb(host, scb); initio_release_scb(host, scb); } else { scb->opcode = BusDevRst; initio_select_atn_stop(host, scb); scb->next_state = 8; } } else { initio_unlink_pend_scb(host, scb); scb->hastat = 0x16; /* bad command */ initio_append_done_scb(host, scb); } return; } /** * initio_next_state - Next SCSI state * @host: InitIO host we are processing * * Progress the active command block along the state machine * until we hit a state which we must wait for activity to occur. * * Returns zero or a negative code. */ static int initio_next_state(struct initio_host * host) { int next; next = host->active->next_state; for (;;) { switch (next) { case 1: next = initio_state_1(host); break; case 2: next = initio_state_2(host); break; case 3: next = initio_state_3(host); break; case 4: next = initio_state_4(host); break; case 5: next = initio_state_5(host); break; case 6: next = initio_state_6(host); break; case 7: next = initio_state_7(host); break; case 8: return initio_bus_device_reset(host); default: return initio_bad_seq(host); } if (next <= 0) return next; } } /** * initio_state_1 - SCSI state machine * @host: InitIO host we are controlling * * Perform SCSI state processing for Select/Attention/Stop */ static int initio_state_1(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; struct target_control *active_tc = host->active_tc; #if DEBUG_STATE printk("-s1-"); #endif /* Move the SCB from pending to busy */ initio_unlink_pend_scb(host, scb); initio_append_busy_scb(host, scb); outb(active_tc->sconfig0, host->addr + TUL_SConfig ); /* ATN on */ if (host->phase == MSG_OUT) { outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1); outb(scb->ident, host->addr + TUL_SFifo); if (scb->tagmsg) { outb(scb->tagmsg, host->addr + TUL_SFifo); outb(scb->tagid, host->addr + TUL_SFifo); } if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { active_tc->flags |= TCF_WDTR_DONE; outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); outb(2, host->addr + TUL_SFifo); /* Extended msg length */ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* Sync request */ outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */ } else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { active_tc->flags |= TCF_SYNC_DONE; outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); outb(3, host->addr + TUL_SFifo); /* extended msg length */ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo); outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */ } outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; } outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); /* Into before CDB xfer */ return 3; } /** * initio_state_2 - SCSI state machine * @host: InitIO host we are controlling * * state after selection with attention * state after selection with attention3 */ static int initio_state_2(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; struct target_control *active_tc = host->active_tc; #if DEBUG_STATE printk("-s2-"); #endif initio_unlink_pend_scb(host, scb); initio_append_busy_scb(host, scb); outb(active_tc->sconfig0, host->addr + TUL_SConfig); if (host->jsstatus1 & TSS_CMD_PH_CMP) return 4; 
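	/* Command phase not finished yet: flush the SCSI FIFO and update the
	   bus signal register before falling through to state 3, which sends
	   the CDB */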
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); /* Into before CDB xfer */ return 3; } /** * initio_state_3 - SCSI state machine * @host: InitIO host we are controlling * * state before CDB xfer is done */ static int initio_state_3(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; struct target_control *active_tc = host->active_tc; int i; #if DEBUG_STATE printk("-s3-"); #endif for (;;) { switch (host->phase) { case CMD_OUT: /* Command out phase */ for (i = 0; i < (int) scb->cdblen; i++) outb(scb->cdb[i], host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; if (host->phase == CMD_OUT) return initio_bad_seq(host); return 4; case MSG_IN: /* Message in phase */ scb->next_state = 3; if (initio_msgin(host) == -1) return -1; break; case STATUS_IN: /* Status phase */ if (initio_status_msg(host) == -1) return -1; break; case MSG_OUT: /* Message out phase */ if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) { outb(NOP, host->addr + TUL_SFifo); /* msg nop */ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; } else { active_tc->flags |= TCF_SYNC_DONE; outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); outb(3, host->addr + TUL_SFifo); /* ext. msg len */ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo); outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal); } break; default: return initio_bad_seq(host); } } } /** * initio_state_4 - SCSI state machine * @host: InitIO host we are controlling * * SCSI state machine. State 4 */ static int initio_state_4(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; #if DEBUG_STATE printk("-s4-"); #endif if ((scb->flags & SCF_DIR) == SCF_NO_XF) { return 6; /* Go to state 6 (After data) */ } for (;;) { if (scb->buflen == 0) return 6; switch (host->phase) { case STATUS_IN: /* Status phase */ if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */ scb->hastat = HOST_DO_DU; if ((initio_status_msg(host)) == -1) return -1; break; case MSG_IN: /* Message in phase */ scb->next_state = 0x4; if (initio_msgin(host) == -1) return -1; break; case MSG_OUT: /* Message out phase */ if (host->jsstatus0 & TSS_PAR_ERROR) { scb->buflen = 0; scb->hastat = HOST_DO_DU; if (initio_msgout_ide(host) == -1) return -1; return 6; } else { outb(NOP, host->addr + TUL_SFifo); /* msg nop */ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; } break; case DATA_IN: /* Data in phase */ return initio_xfer_data_in(host); case DATA_OUT: /* Data out phase */ return initio_xfer_data_out(host); default: return initio_bad_seq(host); } } } /** * initio_state_5 - SCSI state machine * @host: InitIO host we are controlling * * State after dma xfer done or phase change before xfer done */ static int initio_state_5(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; long cnt, xcnt; /* cannot use unsigned !! 
code: if (xcnt < 0) */ #if DEBUG_STATE printk("-s5-"); #endif /*------ get remaining count -------*/ cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF; if (inb(host->addr + TUL_XCmd) & 0x20) { /* ----------------------- DATA_IN ----------------------------- */ /* check scsi parity error */ if (host->jsstatus0 & TSS_PAR_ERROR) scb->hastat = HOST_DO_DU; if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */ /* tell Hardware scsi xfer has been terminated */ outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl); /* wait until DMA xfer not pending */ while (inb(host->addr + TUL_XStatus) & XPEND) cpu_relax(); } } else { /*-------- DATA OUT -----------*/ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) { if (host->active_tc->js_period & TSC_WIDE_SCSI) cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1; else cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F); } if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ outb(TAX_X_ABT, host->addr + TUL_XCmd); /* wait Abort DMA xfer done */ while ((inb(host->addr + TUL_Int) & XABT) == 0) cpu_relax(); } if ((cnt == 1) && (host->phase == DATA_OUT)) { outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; cnt = 0; } else { if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); } } if (cnt == 0) { scb->buflen = 0; return 6; /* After Data */ } /* Update active data pointer */ xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */ scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */ if (scb->flags & SCF_SG) { struct sg_entry *sgp; unsigned long i; sgp = &scb->sglist[scb->sgidx]; for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) { xcnt -= (long) sgp->len; if (xcnt < 0) { /* this sgp xfer half done */ xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */ sgp->data += (u32) xcnt; /* new ptr to be xfer */ sgp->len -= (u32) xcnt; /* new len to be xfer */ scb->bufptr += ((u32) (i - scb->sgidx) << 3); /* new SG table ptr */ scb->sglen = (u8) (scb->sgmax - i); /* new SG table len */ scb->sgidx = (u16) i; /* for next disc and come in this loop */ return 4; /* Go to state 4 */ } /* else (xcnt >= 0 , i.e. 
this sgp already xferred */ } /* for */ return 6; /* Go to state 6 */ } else { scb->bufptr += (u32) xcnt; } return 4; /* Go to state 4 */ } /** * initio_state_6 - SCSI state machine * @host: InitIO host we are controlling * * State after Data phase */ static int initio_state_6(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; #if DEBUG_STATE printk("-s6-"); #endif for (;;) { switch (host->phase) { case STATUS_IN: /* Status phase */ if ((initio_status_msg(host)) == -1) return -1; break; case MSG_IN: /* Message in phase */ scb->next_state = 6; if ((initio_msgin(host)) == -1) return -1; break; case MSG_OUT: /* Message out phase */ outb(NOP, host->addr + TUL_SFifo); /* msg nop */ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; break; case DATA_IN: /* Data in phase */ return initio_xpad_in(host); case DATA_OUT: /* Data out phase */ return initio_xpad_out(host); default: return initio_bad_seq(host); } } } /** * initio_state_7 - SCSI state machine * @host: InitIO host we are controlling * */ static int initio_state_7(struct initio_host * host) { int cnt, i; #if DEBUG_STATE printk("-s7-"); #endif /* flush SCSI FIFO */ cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F; if (cnt) { for (i = 0; i < cnt; i++) inb(host->addr + TUL_SFifo); } switch (host->phase) { case DATA_IN: /* Data in phase */ case DATA_OUT: /* Data out phase */ return initio_bad_seq(host); default: return 6; /* Go to state 6 */ } } /** * initio_xfer_data_in - Commence data input * @host: InitIO host in use * * Commence a block of data transfer. The transfer itself will * be managed by the controller and we will get a completion (or * failure) interrupt. */ static int initio_xfer_data_in(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; if ((scb->flags & SCF_DIR) == SCF_DOUT) return 6; /* wrong direction */ outl(scb->buflen, host->addr + TUL_SCnt0); outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */ if (scb->flags & SCF_SG) { /* S/G xfer */ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH); outl(scb->bufptr, host->addr + TUL_XAddH); outb(TAX_SG_IN, host->addr + TUL_XCmd); } else { outl(scb->buflen, host->addr + TUL_XCntH); outl(scb->bufptr, host->addr + TUL_XAddH); outb(TAX_X_IN, host->addr + TUL_XCmd); } scb->next_state = 0x5; return 0; /* return to OS, wait xfer done , let jas_isr come in */ } /** * initio_xfer_data_out - Commence data output * @host: InitIO host in use * * Commence a block of data transfer. The transfer itself will * be managed by the controller and we will get a completion (or * failure) interrupt. 
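 * The SCB's next_state is set to 5 so that initio_state_5() can compute the
 * residual count and adjust the data pointers when the transfer completes or
 * the target changes phase.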
*/ static int initio_xfer_data_out(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; if ((scb->flags & SCF_DIR) == SCF_DIN) return 6; /* wrong direction */ outl(scb->buflen, host->addr + TUL_SCnt0); outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd); if (scb->flags & SCF_SG) { /* S/G xfer */ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH); outl(scb->bufptr, host->addr + TUL_XAddH); outb(TAX_SG_OUT, host->addr + TUL_XCmd); } else { outl(scb->buflen, host->addr + TUL_XCntH); outl(scb->bufptr, host->addr + TUL_XAddH); outb(TAX_X_OUT, host->addr + TUL_XCmd); } scb->next_state = 0x5; return 0; /* return to OS, wait xfer done , let jas_isr come in */ } int initio_xpad_in(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; struct target_control *active_tc = host->active_tc; if ((scb->flags & SCF_DIR) != SCF_NO_DCHK) scb->hastat = HOST_DO_DU; /* over run */ for (;;) { if (active_tc->js_period & TSC_WIDE_SCSI) outl(2, host->addr + TUL_SCnt0); else outl(1, host->addr + TUL_SCnt0); outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; if (host->phase != DATA_IN) { outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); return 6; } inb(host->addr + TUL_SFifo); } } int initio_xpad_out(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; struct target_control *active_tc = host->active_tc; if ((scb->flags & SCF_DIR) != SCF_NO_DCHK) scb->hastat = HOST_DO_DU; /* over run */ for (;;) { if (active_tc->js_period & TSC_WIDE_SCSI) outl(2, host->addr + TUL_SCnt0); else outl(1, host->addr + TUL_SCnt0); outb(0, host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); if ((wait_tulip(host)) == -1) return -1; if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); return 6; } } } int initio_status_msg(struct initio_host * host) { /* status & MSG_IN */ struct scsi_ctrl_blk *scb = host->active; u8 msg; outb(TSC_CMD_COMP, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; /* get status */ scb->tastat = inb(host->addr + TUL_SFifo); if (host->phase == MSG_OUT) { if (host->jsstatus0 & TSS_PAR_ERROR) outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo); else outb(NOP, host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return wait_tulip(host); } if (host->phase == MSG_IN) { msg = inb(host->addr + TUL_SFifo); if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */ if ((initio_msgin_accept(host)) == -1) return -1; if (host->phase != MSG_OUT) return initio_bad_seq(host); outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return wait_tulip(host); } if (msg == 0) { /* Command complete */ if ((scb->tastat & 0x18) == 0x10) /* No link support */ return initio_bad_seq(host); outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); return initio_wait_done_disc(host); } if (msg == LINKED_CMD_COMPLETE || msg == LINKED_FLG_CMD_COMPLETE) { if ((scb->tastat & 0x18) == 0x10) return initio_msgin_accept(host); } } return initio_bad_seq(host); } /* scsi bus free */ int int_initio_busfree(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; if (scb != NULL) { if (scb->status & SCB_SELECT) { /* selection timeout */ initio_unlink_pend_scb(host, scb); scb->hastat = HOST_SEL_TOUT; initio_append_done_scb(host, scb); } else { /* Unexpected bus free */ initio_unlink_busy_scb(host, scb); scb->hastat = HOST_BUS_FREE; 
initio_append_done_scb(host, scb); } host->active = NULL; host->active_tc = NULL; } outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ return -1; } /** * int_initio_scsi_rst - SCSI reset occurred * @host: Host seeing the reset * * A SCSI bus reset has occurred. Clean up any pending transfer * the hardware is doing by DMA and then abort all active and * disconnected commands. The mid layer should sort the rest out * for us */ static int int_initio_scsi_rst(struct initio_host * host) { struct scsi_ctrl_blk *scb; int i; /* if DMA xfer is pending, abort DMA xfer */ if (inb(host->addr + TUL_XStatus) & 0x01) { outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd); /* wait Abort DMA xfer done */ while ((inb(host->addr + TUL_Int) & 0x04) == 0) cpu_relax(); outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); } /* Abort all active & disconnected scb */ while ((scb = initio_pop_busy_scb(host)) != NULL) { scb->hastat = HOST_BAD_PHAS; initio_append_done_scb(host, scb); } host->active = NULL; host->active_tc = NULL; /* clr sync nego. done flag */ for (i = 0; i < host->max_tar; i++) host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); return -1; } /** * int_initio_resel - Reselection occurred * @host: InitIO host adapter * * A SCSI reselection event has been signalled and the interrupt * is now being processed. Work out which command block needs attention * and continue processing that command. */ int int_initio_resel(struct initio_host * host) { struct scsi_ctrl_blk *scb; struct target_control *active_tc; u8 tag, msg = 0; u8 tar, lun; if ((scb = host->active) != NULL) { /* FIXME: Why check and not just clear ? */ if (scb->status & SCB_SELECT) /* if waiting for selection complete */ scb->status &= ~SCB_SELECT; host->active = NULL; } /* --------- get target id---------------------- */ tar = inb(host->addr + TUL_SBusId); /* ------ get LUN from Identify message----------- */ lun = inb(host->addr + TUL_SIdent) & 0x0F; /* 07/22/98 from 0x1F -> 0x0F */ active_tc = &host->targets[tar]; host->active_tc = active_tc; outb(active_tc->sconfig0, host->addr + TUL_SConfig); outb(active_tc->js_period, host->addr + TUL_SPeriod); /* ------------- tag queueing ? 
------------------- */ if (active_tc->drv_flags & TCF_DRV_EN_TAG) { if ((initio_msgin_accept(host)) == -1) return -1; if (host->phase != MSG_IN) goto no_tag; outl(1, host->addr + TUL_SCnt0); outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */ if (msg < SIMPLE_QUEUE_TAG || msg > ORDERED_QUEUE_TAG) /* Is simple Tag */ goto no_tag; if (initio_msgin_accept(host) == -1) return -1; if (host->phase != MSG_IN) goto no_tag; outl(1, host->addr + TUL_SCnt0); outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */ scb = host->scb + tag; if (scb->target != tar || scb->lun != lun) { return initio_msgout_abort_tag(host); } if (scb->status != SCB_BUSY) { /* 03/24/95 */ return initio_msgout_abort_tag(host); } host->active = scb; if ((initio_msgin_accept(host)) == -1) return -1; } else { /* No tag */ no_tag: if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) { return initio_msgout_abort_targ(host); } host->active = scb; if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) { if ((initio_msgin_accept(host)) == -1) return -1; } } return 0; } /** * int_initio_bad_seq - out of phase * @host: InitIO host flagging event * * We have ended up out of phase somehow. Reset the host controller * and throw all our toys out of the pram. Let the midlayer clean up */ static int int_initio_bad_seq(struct initio_host * host) { /* target wrong phase */ struct scsi_ctrl_blk *scb; int i; initio_reset_scsi(host, 10); while ((scb = initio_pop_busy_scb(host)) != NULL) { scb->hastat = HOST_BAD_PHAS; initio_append_done_scb(host, scb); } for (i = 0; i < host->max_tar; i++) host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); return -1; } /** * initio_msgout_abort_targ - abort a tag * @host: InitIO host * * Abort when the target/lun does not match or when our SCB is not * busy. Used by untagged commands. */ static int initio_msgout_abort_targ(struct initio_host * host) { outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); if (initio_msgin_accept(host) == -1) return -1; if (host->phase != MSG_OUT) return initio_bad_seq(host); outb(ABORT_TASK_SET, host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return initio_wait_disc(host); } /** * initio_msgout_abort_tag - abort a tag * @host: InitIO host * * Abort when the target/lun does not match or when our SCB is not * busy. Used for tagged commands. 
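 * Unlike initio_msgout_abort_targ() this sends ABORT TASK, so only the
 * offending tagged command is aborted rather than the whole task set.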
*/ static int initio_msgout_abort_tag(struct initio_host * host) { outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); if (initio_msgin_accept(host) == -1) return -1; if (host->phase != MSG_OUT) return initio_bad_seq(host); outb(ABORT_TASK, host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return initio_wait_disc(host); } /** * initio_msgin - Message in * @host: InitIO Host * * Process incoming message */ static int initio_msgin(struct initio_host * host) { struct target_control *active_tc; for (;;) { outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); outl(1, host->addr + TUL_SCnt0); outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; switch (inb(host->addr + TUL_SFifo)) { case DISCONNECT: /* Disconnect msg */ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); return initio_wait_disc(host); case SAVE_POINTERS: case RESTORE_POINTERS: case NOP: initio_msgin_accept(host); break; case MESSAGE_REJECT: /* Clear ATN first */ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal); active_tc = host->active_tc; if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); initio_msgin_accept(host); break; case EXTENDED_MESSAGE: /* extended msg */ initio_msgin_extend(host); break; case IGNORE_WIDE_RESIDUE: initio_msgin_accept(host); break; case COMMAND_COMPLETE: outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); return initio_wait_done_disc(host); default: initio_msgout_reject(host); break; } if (host->phase != MSG_IN) return host->phase; } /* statement won't reach here */ } static int initio_msgout_reject(struct initio_host * host) { outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); if (initio_msgin_accept(host) == -1) return -1; if (host->phase == MSG_OUT) { outb(MESSAGE_REJECT, host->addr + TUL_SFifo); /* Msg reject */ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return wait_tulip(host); } return host->phase; } static int initio_msgout_ide(struct initio_host * host) { outb(INITIATOR_ERROR, host->addr + TUL_SFifo); /* Initiator Detected Error */ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return wait_tulip(host); } static int initio_msgin_extend(struct initio_host * host) { u8 len, idx; if (initio_msgin_accept(host) != MSG_IN) return host->phase; /* Get extended msg length */ outl(1, host->addr + TUL_SCnt0); outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; len = inb(host->addr + TUL_SFifo); host->msg[0] = len; for (idx = 1; len != 0; len--) { if ((initio_msgin_accept(host)) != MSG_IN) return host->phase; outl(1, host->addr + TUL_SCnt0); outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd); if (wait_tulip(host) == -1) return -1; host->msg[idx++] = inb(host->addr + TUL_SFifo); } if (host->msg[1] == 1) { /* if it's synchronous data transfer request */ u8 r; if (host->msg[0] != 3) /* if length is not right */ return initio_msgout_reject(host); if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */ host->msg[3] = 0; } else { if (initio_msgin_sync(host) == 0 && (host->active_tc->flags & TCF_SYNC_DONE)) { initio_sync_done(host); return initio_msgin_accept(host); } } r = inb(host->addr + TUL_SSignal); outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN, host->addr + TUL_SSignal); if (initio_msgin_accept(host) != MSG_OUT) 
return host->phase; /* sync msg out */ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); initio_sync_done(host); outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); outb(3, host->addr + TUL_SFifo); outb(EXTENDED_SDTR, host->addr + TUL_SFifo); outb(host->msg[2], host->addr + TUL_SFifo); outb(host->msg[3], host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return wait_tulip(host); } if (host->msg[0] != 2 || host->msg[1] != 3) return initio_msgout_reject(host); /* if it's WIDE DATA XFER REQ */ if (host->active_tc->flags & TCF_NO_WDTR) { host->msg[2] = 0; } else { if (host->msg[2] > 2) /* > 32 bits */ return initio_msgout_reject(host); if (host->msg[2] == 2) { /* == 32 */ host->msg[2] = 1; } else { if ((host->active_tc->flags & TCF_NO_WDTR) == 0) { wdtr_done(host); if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); return initio_msgin_accept(host); } } } outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal); if (initio_msgin_accept(host) != MSG_OUT) return host->phase; /* WDTR msg out */ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo); outb(2, host->addr + TUL_SFifo); outb(EXTENDED_WDTR, host->addr + TUL_SFifo); outb(host->msg[2], host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return wait_tulip(host); } static int initio_msgin_sync(struct initio_host * host) { char default_period; default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE]; if (host->msg[3] > MAX_OFFSET) { host->msg[3] = MAX_OFFSET; if (host->msg[2] < default_period) { host->msg[2] = default_period; return 1; } if (host->msg[2] >= 59) /* Change to async */ host->msg[3] = 0; return 1; } /* offset requests asynchronous transfers ? */ if (host->msg[3] == 0) { return 0; } if (host->msg[2] < default_period) { host->msg[2] = default_period; return 1; } if (host->msg[2] >= 59) { host->msg[3] = 0; return 1; } return 0; } static int wdtr_done(struct initio_host * host) { host->active_tc->flags &= ~TCF_SYNC_DONE; host->active_tc->flags |= TCF_WDTR_DONE; host->active_tc->js_period = 0; if (host->msg[2]) /* if 16 bit */ host->active_tc->js_period |= TSC_WIDE_SCSI; host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD; outb(host->active_tc->sconfig0, host->addr + TUL_SConfig); outb(host->active_tc->js_period, host->addr + TUL_SPeriod); return 1; } static int initio_sync_done(struct initio_host * host) { int i; host->active_tc->flags |= TCF_SYNC_DONE; if (host->msg[3]) { host->active_tc->js_period |= host->msg[3]; for (i = 0; i < 8; i++) { if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */ break; } host->active_tc->js_period |= (i << 4); host->active_tc->sconfig0 |= TSC_ALT_PERIOD; } outb(host->active_tc->sconfig0, host->addr + TUL_SConfig); outb(host->active_tc->js_period, host->addr + TUL_SPeriod); return -1; } static int initio_post_scsi_rst(struct initio_host * host) { struct scsi_ctrl_blk *scb; struct target_control *active_tc; int i; host->active = NULL; host->active_tc = NULL; host->flags = 0; while ((scb = initio_pop_busy_scb(host)) != NULL) { scb->hastat = HOST_BAD_PHAS; initio_append_done_scb(host, scb); } /* clear sync done flag */ active_tc = &host->targets[0]; for (i = 0; i < host->max_tar; active_tc++, i++) { active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); /* Initialize the sync. 
xfer register values to an asyn xfer */ active_tc->js_period = 0; active_tc->sconfig0 = host->sconf1; host->act_tags[0] = 0; /* 07/22/98 */ host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */ } /* for */ return -1; } static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb) { scb->status |= SCB_SELECT; scb->next_state = 0x1; host->active = scb; host->active_tc = &host->targets[scb->target]; outb(TSC_SELATNSTOP, host->addr + TUL_SCmd); } static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb) { int i; scb->status |= SCB_SELECT; scb->next_state = 0x2; outb(scb->ident, host->addr + TUL_SFifo); for (i = 0; i < (int) scb->cdblen; i++) outb(scb->cdb[i], host->addr + TUL_SFifo); host->active_tc = &host->targets[scb->target]; host->active = scb; outb(TSC_SEL_ATN, host->addr + TUL_SCmd); } static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb) { int i; scb->status |= SCB_SELECT; scb->next_state = 0x2; outb(scb->ident, host->addr + TUL_SFifo); outb(scb->tagmsg, host->addr + TUL_SFifo); outb(scb->tagid, host->addr + TUL_SFifo); for (i = 0; i < scb->cdblen; i++) outb(scb->cdb[i], host->addr + TUL_SFifo); host->active_tc = &host->targets[scb->target]; host->active = scb; outb(TSC_SEL_ATN3, host->addr + TUL_SCmd); } /** * initio_bus_device_reset - SCSI Bus Device Reset * @host: InitIO host to reset * * Perform a device reset and abort all pending SCBs for the * victim device */ int initio_bus_device_reset(struct initio_host * host) { struct scsi_ctrl_blk *scb = host->active; struct target_control *active_tc = host->active_tc; struct scsi_ctrl_blk *tmp, *prev; u8 tar; if (host->phase != MSG_OUT) return int_initio_bad_seq(host); /* Unexpected phase */ initio_unlink_pend_scb(host, scb); initio_release_scb(host, scb); tar = scb->target; /* target */ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY); /* clr sync. 
nego & WDTR flags 07/22/98 */ /* abort all SCB with same target */ prev = tmp = host->first_busy; /* Check Busy queue */ while (tmp != NULL) { if (tmp->target == tar) { /* unlink it */ if (tmp == host->first_busy) { if ((host->first_busy = tmp->next) == NULL) host->last_busy = NULL; } else { prev->next = tmp->next; if (tmp == host->last_busy) host->last_busy = prev; } tmp->hastat = HOST_ABORTED; initio_append_done_scb(host, tmp); } /* Previous haven't change */ else { prev = tmp; } tmp = tmp->next; } outb(TARGET_RESET, host->addr + TUL_SFifo); outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd); return initio_wait_disc(host); } static int initio_msgin_accept(struct initio_host * host) { outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd); return wait_tulip(host); } static int wait_tulip(struct initio_host * host) { while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING)) cpu_relax(); host->jsint = inb(host->addr + TUL_SInt); host->phase = host->jsstatus0 & TSS_PH_MASK; host->jsstatus1 = inb(host->addr + TUL_SStatus1); if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */ return int_initio_resel(host); if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */ return int_initio_busfree(host); if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ return int_initio_scsi_rst(host); if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ if (host->flags & HCF_EXPECT_DONE_DISC) { outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ initio_unlink_busy_scb(host, host->active); host->active->hastat = 0; initio_append_done_scb(host, host->active); host->active = NULL; host->active_tc = NULL; host->flags &= ~HCF_EXPECT_DONE_DISC; outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ return -1; } if (host->flags & HCF_EXPECT_DISC) { outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ host->active = NULL; host->active_tc = NULL; host->flags &= ~HCF_EXPECT_DISC; outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ return -1; } return int_initio_busfree(host); } /* The old code really does the below. 
Can probably be removed */ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) return host->phase; return host->phase; } static int initio_wait_disc(struct initio_host * host) { while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING)) cpu_relax(); host->jsint = inb(host->addr + TUL_SInt); if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ return int_initio_scsi_rst(host); if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ host->active = NULL; return -1; } return initio_bad_seq(host); } static int initio_wait_done_disc(struct initio_host * host) { while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING)) cpu_relax(); host->jsint = inb(host->addr + TUL_SInt); if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */ return int_initio_scsi_rst(host); if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig); outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */ initio_unlink_busy_scb(host, host->active); initio_append_done_scb(host, host->active); host->active = NULL; return -1; } return initio_bad_seq(host); } /** * i91u_intr - IRQ handler * @irqno: IRQ number * @dev_id: IRQ identifier * * Take the relevant locks and then invoke the actual isr processing * code under the lock. */ static irqreturn_t i91u_intr(int irqno, void *dev_id) { struct Scsi_Host *dev = dev_id; unsigned long flags; int r; spin_lock_irqsave(dev->host_lock, flags); r = initio_isr((struct initio_host *)dev->hostdata); spin_unlock_irqrestore(dev->host_lock, flags); if (r) return IRQ_HANDLED; else return IRQ_NONE; } /** * initio_build_scb - Build the mappings and SCB * @host: InitIO host taking the command * @cblk: Firmware command block * @cmnd: SCSI midlayer command block * * Translate the abstract SCSI command into a firmware command block * suitable for feeding to the InitIO host controller. This also requires * we build the scatter gather lists and ensure they are mapped properly. 
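 * The sense buffer and the SCB's embedded scatter/gather table are both
 * DMA-mapped here so the controller can access them directly; they are
 * unmapped again in i91u_unmap_scb() once the command completes.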
*/ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd) { /* Create corresponding SCB */ struct scatterlist *sglist; struct sg_entry *sg; /* Pointer to SG list */ int i, nseg; long total_len; dma_addr_t dma_addr; /* Fill in the command headers */ cblk->post = i91uSCBPost; /* i91u's callback routine */ cblk->srb = cmnd; cblk->opcode = ExecSCSI; cblk->flags = SCF_POST; /* After SCSI done, call post routine */ cblk->target = cmnd->device->id; cblk->lun = cmnd->device->lun; cblk->ident = cmnd->device->lun | DISC_ALLOW; cblk->flags |= SCF_SENSE; /* Turn on auto request sense */ /* Map the sense buffer into bus memory */ dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer, SENSE_SIZE, DMA_FROM_DEVICE); cblk->senseptr = (u32)dma_addr; cblk->senselen = SENSE_SIZE; initio_priv(cmnd)->sense_dma_addr = dma_addr; cblk->cdblen = cmnd->cmd_len; /* Clear the returned status */ cblk->hastat = 0; cblk->tastat = 0; /* Command the command */ memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len); /* Set up tags */ if (cmnd->device->tagged_supported) { /* Tag Support */ cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ } else { cblk->tagmsg = 0; /* No tag support */ } /* todo handle map_sg error */ nseg = scsi_dma_map(cmnd); BUG_ON(nseg < 0); if (nseg) { dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0], sizeof(struct sg_entry) * TOTAL_SG_ENTRY, DMA_BIDIRECTIONAL); cblk->bufptr = (u32)dma_addr; initio_priv(cmnd)->sglist_dma_addr = dma_addr; cblk->sglen = nseg; cblk->flags |= SCF_SG; /* Turn on SG list flag */ total_len = 0; sg = &cblk->sglist[0]; scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) { sg->data = cpu_to_le32((u32)sg_dma_address(sglist)); sg->len = cpu_to_le32((u32)sg_dma_len(sglist)); total_len += sg_dma_len(sglist); ++sg; } cblk->buflen = (scsi_bufflen(cmnd) > total_len) ? total_len : scsi_bufflen(cmnd); } else { /* No data transfer required */ cblk->buflen = 0; cblk->sglen = 0; } } /** * i91u_queuecommand_lck - Queue a new command if possible * @cmd: SCSI command block from the mid layer * * Attempts to queue a new command with the host adapter. Will return * zero if successful or indicate a host busy condition if not (which * will cause the mid layer to call us again later with the command) */ static int i91u_queuecommand_lck(struct scsi_cmnd *cmd) { struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; struct scsi_ctrl_blk *cmnd; cmnd = initio_alloc_scb(host); if (!cmnd) return SCSI_MLQUEUE_HOST_BUSY; initio_build_scb(host, cmnd, cmd); initio_exec_scb(host, cmnd); return 0; } static DEF_SCSI_QCMD(i91u_queuecommand) /** * i91u_bus_reset - reset the SCSI bus * @cmnd: Command block we want to trigger the reset for * * Initiate a SCSI bus reset sequence */ static int i91u_bus_reset(struct scsi_cmnd * cmnd) { struct initio_host *host; host = (struct initio_host *) cmnd->device->host->hostdata; spin_lock_irq(cmnd->device->host->host_lock); initio_reset_scsi(host, 0); spin_unlock_irq(cmnd->device->host->host_lock); return SUCCESS; } /** * i91u_biosparam - return the "logical geometry * @sdev: SCSI device * @dev: Matching block device * @capacity: Sector size of drive * @info_array: Return space for BIOS geometry * * Map the device geometry in a manner compatible with the host * controller BIOS behaviour. * * FIXME: limited to 2^32 sector devices. 
*/ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int *info_array) { struct initio_host *host; /* Point to Host adapter control block */ struct target_control *tc; host = (struct initio_host *) sdev->host->hostdata; tc = &host->targets[sdev->id]; if (tc->heads) { info_array[0] = tc->heads; info_array[1] = tc->sectors; info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors; } else { if (tc->drv_flags & TCF_DRV_255_63) { info_array[0] = 255; info_array[1] = 63; info_array[2] = (unsigned long)capacity / 255 / 63; } else { info_array[0] = 64; info_array[1] = 32; info_array[2] = (unsigned long)capacity >> 11; } } #if defined(DEBUG_BIOSPARAM) if (i91u_debug & debug_biosparam) { printk("bios geometry: head=%d, sec=%d, cyl=%d\n", info_array[0], info_array[1], info_array[2]); printk("WARNING: check, if the bios geometry is correct.\n"); } #endif return 0; } /** * i91u_unmap_scb - Unmap a command * @pci_dev: PCI device the command is for * @cmnd: The command itself * * Unmap any PCI mapping/IOMMU resources allocated when the command * was mapped originally as part of initio_build_scb */ static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd) { /* auto sense buffer */ if (initio_priv(cmnd)->sense_dma_addr) { dma_unmap_single(&pci_dev->dev, initio_priv(cmnd)->sense_dma_addr, SENSE_SIZE, DMA_FROM_DEVICE); initio_priv(cmnd)->sense_dma_addr = 0; } /* request buffer */ if (scsi_sg_count(cmnd)) { dma_unmap_single(&pci_dev->dev, initio_priv(cmnd)->sglist_dma_addr, sizeof(struct sg_entry) * TOTAL_SG_ENTRY, DMA_BIDIRECTIONAL); scsi_dma_unmap(cmnd); } } /* * i91uSCBPost - SCSI callback * * This is callback routine be called when tulip finish one * SCSI command. */ static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem) { struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */ struct initio_host *host; struct scsi_ctrl_blk *cblk; host = (struct initio_host *) host_mem; cblk = (struct scsi_ctrl_blk *) cblk_mem; if ((cmnd = cblk->srb) == NULL) { printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n"); WARN_ON(1); initio_release_scb(host, cblk); /* Release SCB for current channel */ return; } /* * Remap the firmware error status into a mid layer one */ switch (cblk->hastat) { case 0x0: case 0xa: /* Linked command complete without error and linked normally */ case 0xb: /* Linked command complete without error interrupt generated */ cblk->hastat = 0; break; case 0x11: /* Selection time out-The initiator selection or target reselection was not complete within the SCSI Time out period */ cblk->hastat = DID_TIME_OUT; break; case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus phase sequence was requested by the target. The host adapter will generate a SCSI Reset Condition, notifying the host with a SCRD interrupt */ cblk->hastat = DID_RESET; break; case 0x1a: /* SCB Aborted. 07/21/98 */ cblk->hastat = DID_ABORT; break; case 0x12: /* Data overrun/underrun-The target attempted to transfer more data than was allocated by the Data Length field or the sum of the Scatter / Gather Data Length fields. */ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */ case 0x16: /* Invalid SCB Operation Code. 
*/ default: printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat); cblk->hastat = DID_ERROR; /* Couldn't find any better */ break; } cmnd->result = cblk->tastat | (cblk->hastat << 16); i91u_unmap_scb(host->pci_dev, cmnd); scsi_done(cmnd); /* Notify system DONE */ initio_release_scb(host, cblk); /* Release SCB for current channel */ } static const struct scsi_host_template initio_template = { .proc_name = "INI9100U", .name = "Initio INI-9X00U/UW SCSI device driver", .queuecommand = i91u_queuecommand, .eh_bus_reset_handler = i91u_bus_reset, .bios_param = i91u_biosparam, .can_queue = MAX_TARGETS * i91u_MAXQUEUE, .this_id = 1, .sg_tablesize = SG_ALL, .cmd_size = sizeof(struct initio_cmd_priv), }; static int initio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *shost; struct initio_host *host; u32 reg; u16 bios_seg; struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */; int num_scb, i, error; error = pci_enable_device(pdev); if (error) return error; pci_read_config_dword(pdev, 0x44, (u32 *) & reg); bios_seg = (u16) (reg & 0xFF); if (((reg & 0xFF00) >> 8) == 0xFF) reg = 0; bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8)); if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n"); error = -ENODEV; goto out_disable_device; } shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host)); if (!shost) { printk(KERN_WARNING "initio: Could not allocate host structure.\n"); error = -ENOMEM; goto out_disable_device; } host = (struct initio_host *)shost->hostdata; memset(host, 0, sizeof(struct initio_host)); host->addr = pci_resource_start(pdev, 0); host->bios_addr = bios_seg; if (!request_region(host->addr, 256, "i91u")) { printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr); error = -ENODEV; goto out_host_put; } if (initio_tag_enable) /* 1.01i */ num_scb = MAX_TARGETS * i91u_MAXQUEUE; else num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */ for (; num_scb >= MAX_TARGETS + 3; num_scb--) { i = num_scb * sizeof(struct scsi_ctrl_blk); scb = kzalloc(i, GFP_KERNEL); if (scb) break; } if (!scb) { printk(KERN_WARNING "initio: Cannot allocate SCB array.\n"); error = -ENOMEM; goto out_release_region; } host->pci_dev = pdev; host->semaph = 1; spin_lock_init(&host->semaph_lock); host->num_scbs = num_scb; host->scb = scb; host->next_pending = scb; host->next_avail = scb; for (i = 0, tmp = scb; i < num_scb; i++, tmp++) { tmp->tagid = i; if (i != 0) prev->next = tmp; prev = tmp; } prev->next = NULL; host->scb_end = tmp; host->first_avail = scb; host->last_avail = prev; spin_lock_init(&host->avail_lock); initio_init(host, phys_to_virt(((u32)bios_seg << 4))); host->jsstatus0 = 0; shost->io_port = host->addr; shost->n_io_port = 0xff; shost->can_queue = num_scb; /* 03/05/98 */ shost->unique_id = host->addr; shost->max_id = host->max_tar; shost->max_lun = 32; /* 10/21/97 */ shost->irq = pdev->irq; shost->this_id = host->scsi_id; /* Assign HCS index */ shost->base = host->addr; shost->sg_tablesize = TOTAL_SG_ENTRY; error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost); if (error < 0) { printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq); goto out_free_scbs; } pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) goto out_free_irq; scsi_scan_host(shost); return 0; out_free_irq: free_irq(pdev->irq, shost); out_free_scbs: kfree(host->scb); out_release_region: release_region(host->addr, 256); out_host_put: 
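	/* Error unwind: drop the host reference, then disable the PCI device */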
scsi_host_put(shost); out_disable_device: pci_disable_device(pdev); return error; } /** * initio_remove_one - control shutdown * @pdev: PCI device being released * * Release the resources assigned to this adapter after it has * finished being used. */ static void initio_remove_one(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct initio_host *s = (struct initio_host *)host->hostdata; scsi_remove_host(host); free_irq(pdev->irq, host); release_region(s->addr, 256); scsi_host_put(host); pci_disable_device(pdev); } MODULE_LICENSE("GPL"); static struct pci_device_id initio_pci_tbl[] = { {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, initio_pci_tbl); static struct pci_driver initio_pci_driver = { .name = "initio", .id_table = initio_pci_tbl, .probe = initio_probe_one, .remove = initio_remove_one, }; module_pci_driver(initio_pci_driver); MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver"); MODULE_AUTHOR("Initio Corporation"); MODULE_LICENSE("GPL");
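/*
 * Illustrative sketch only, not part of the upstream initio.c above: the
 * i91u_biosparam() translation maps a raw block count to a legacy BIOS
 * cylinders/heads/sectors geometry. When the target was not configured with
 * the extended 255/63 translation, the driver falls back to 64 heads and 32
 * sectors, so the cylinder count is simply capacity >> 11 (64 * 32 = 2048
 * sectors per cylinder). The helper below restates that arithmetic in a
 * standalone form; its name and the plain "unsigned long" capacity argument
 * are assumptions made for the example.
 */
static void i91u_example_geometry(unsigned long capacity, int use_255_63,
				  int geom[3])
{
	if (use_255_63) {
		geom[0] = 255;				/* heads */
		geom[1] = 63;				/* sectors per track */
		geom[2] = capacity / (255 * 63);	/* cylinders */
	} else {
		geom[0] = 64;
		geom[1] = 32;
		geom[2] = capacity >> 11;		/* capacity / (64 * 32) */
	}
}
/*
 * Example: a 4194304-sector (2 GiB) disk yields 255/63/261 with the extended
 * translation and 64/32/2048 with the fallback.
 */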
linux-master
drivers/scsi/initio.c
/*****************************************************************************/ /* ips.c -- driver for the Adaptec / IBM ServeRAID controller */ /* */ /* Written By: Keith Mitchell, IBM Corporation */ /* Jack Hammer, Adaptec, Inc. */ /* David Jeffery, Adaptec, Inc. */ /* */ /* Copyright (C) 2000 IBM Corporation */ /* Copyright (C) 2002,2003 Adaptec, Inc. */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* NO WARRANTY */ /* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR */ /* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT */ /* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, */ /* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is */ /* solely responsible for determining the appropriateness of using and */ /* distributing the Program and assumes all risks associated with its */ /* exercise of rights under this Agreement, including but not limited to */ /* the risks and costs of program errors, damage to or loss of data, */ /* programs or equipment, and unavailability or interruption of operations. */ /* */ /* DISCLAIMER OF LIABILITY */ /* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY */ /* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL */ /* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND */ /* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR */ /* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE */ /* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED */ /* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* */ /* Bugs/Comments/Suggestions about this driver should be mailed to: */ /* [email protected] */ /* */ /* For system support issues, contact your local IBM Customer support. */ /* Directions to find IBM Customer Support for each country can be found at: */ /* http://www.ibm.com/planetwide/ */ /* */ /*****************************************************************************/ /*****************************************************************************/ /* Change Log */ /* */ /* 0.99.02 - Breakup commands that are bigger than 8 * the stripe size */ /* 0.99.03 - Make interrupt routine handle all completed request on the */ /* adapter not just the first one */ /* - Make sure passthru commands get woken up if we run out of */ /* SCBs */ /* - Send all of the commands on the queue at once rather than */ /* one at a time since the card will support it. 
*/ /* 0.99.04 - Fix race condition in the passthru mechanism -- this required */ /* the interface to the utilities to change */ /* - Fix error recovery code */ /* 0.99.05 - Fix an oops when we get certain passthru commands */ /* 1.00.00 - Initial Public Release */ /* Functionally equivalent to 0.99.05 */ /* 3.60.00 - Bump max commands to 128 for use with firmware 3.60 */ /* - Change version to 3.60 to coincide with release numbering. */ /* 3.60.01 - Remove bogus error check in passthru routine */ /* 3.60.02 - Make DCDB direction based on lookup table */ /* - Only allow one DCDB command to a SCSI ID at a time */ /* 4.00.00 - Add support for ServeRAID 4 */ /* 4.00.01 - Add support for First Failure Data Capture */ /* 4.00.02 - Fix problem with PT DCDB with no buffer */ /* 4.00.03 - Add alternative passthru interface */ /* - Add ability to flash BIOS */ /* 4.00.04 - Rename structures/constants to be prefixed with IPS_ */ /* 4.00.05 - Remove wish_block from init routine */ /* - Use linux/spinlock.h instead of asm/spinlock.h for kernels */ /* 2.3.18 and later */ /* - Sync with other changes from the 2.3 kernels */ /* 4.00.06 - Fix timeout with initial FFDC command */ /* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig <[email protected]> */ /* 4.10.00 - Add support for ServeRAID 4M/4L */ /* 4.10.13 - Fix for dynamic unload and proc file system */ /* 4.20.03 - Rename version to coincide with new release schedules */ /* Performance fixes */ /* Fix truncation of /proc files with cat */ /* Merge in changes through kernel 2.4.0test1ac21 */ /* 4.20.13 - Fix some failure cases / reset code */ /* - Hook into the reboot_notifier to flush the controller cache */ /* 4.50.01 - Fix problem when there is a hole in logical drive numbering */ /* 4.70.09 - Use a Common ( Large Buffer ) for Flashing from the JCRM CD */ /* - Add IPSSEND Flash Support */ /* - Set Sense Data for Unknown SCSI Command */ /* - Use Slot Number from NVRAM Page 5 */ /* - Restore caller's DCDB Structure */ /* 4.70.12 - Corrective actions for bad controller ( during initialization )*/ /* 4.70.13 - Don't Send CDB's if we already know the device is not present */ /* - Don't release HA Lock in ips_next() until SC taken off queue */ /* - Unregister SCSI device in ips_release() */ /* 4.70.15 - Fix Breakup for very large ( non-SG ) requests in ips_done() */ /* 4.71.00 - Change all memory allocations to not use GFP_DMA flag */ /* Code Clean-Up for 2.4.x kernel */ /* 4.72.00 - Allow for a Scatter-Gather Element to exceed MAX_XFER Size */ /* 4.72.01 - I/O Mapped Memory release ( so "insmod ips" does not Fail ) */ /* - Don't Issue Internal FFDC Command if there are Active Commands */ /* - Close Window for getting too many IOCTL's active */ /* 4.80.00 - Make ia64 Safe */ /* 4.80.04 - Eliminate calls to strtok() if 2.4.x or greater */ /* - Adjustments to Device Queue Depth */ /* 4.80.14 - Take all semaphores off stack */ /* - Clean Up New_IOCTL path */ /* 4.80.20 - Set max_sectors in Scsi_Host structure ( if >= 2.4.7 kernel ) */ /* - 5 second delay needed after resetting an i960 adapter */ /* 4.80.26 - Clean up potential code problems ( Arjan's recommendations ) */ /* 4.90.01 - Version Matching for FirmWare, BIOS, and Driver */ /* 4.90.05 - Use New PCI Architecture to facilitate Hot Plug Development */ /* 4.90.08 - Increase Delays in Flashing ( Trombone Only - 4H ) */ /* 4.90.08 - Data Corruption if First Scatter Gather Element is > 64K */ /* 4.90.11 - Don't actually RESET unless it's physically required */ /* - Remove unused compile options */ 
/* 5.00.01 - Sarasota ( 5i ) adapters must always be scanned first */ /* - Get rid on IOCTL_NEW_COMMAND code */ /* - Add Extended DCDB Commands for Tape Support in 5I */ /* 5.10.12 - use pci_dma interfaces, update for 2.5 kernel changes */ /* 5.10.15 - remove unused code (sem, macros, etc.) */ /* 5.30.00 - use __devexit_p() */ /* 6.00.00 - Add 6x Adapters and Battery Flash */ /* 6.10.00 - Remove 1G Addressing Limitations */ /* 6.11.xx - Get VersionInfo buffer off the stack ! DDTS 60401 */ /* 6.11.xx - Make Logical Drive Info structure safe for DMA DDTS 60639 */ /* 7.10.18 - Add highmem_io flag in SCSI Templete for 2.4 kernels */ /* - Fix path/name for scsi_hosts.h include for 2.6 kernels */ /* - Fix sort order of 7k */ /* - Remove 3 unused "inline" functions */ /* 7.12.xx - Use STATIC functions wherever possible */ /* - Clean up deprecated MODULE_PARM calls */ /* 7.12.05 - Remove Version Matching per IBM request */ /*****************************************************************************/ /* * Conditional Compilation directives for this driver: * * IPS_DEBUG - Turn on debugging info * * Parameters: * * debug:<number> - Set debug level to <number> * NOTE: only works when IPS_DEBUG compile directive is used. * 1 - Normal debug messages * 2 - Verbose debug messages * 11 - Method trace (non interrupt) * 12 - Method trace (includes interrupt) * * noi2o - Don't use I2O Queues (ServeRAID 4 only) * nommap - Don't use memory mapped I/O * ioctlsize - Initial size of the IOCTL buffer */ #include <asm/io.h> #include <asm/byteorder.h> #include <asm/page.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/reboot.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/sg.h> #include "ips.h" #include <linux/module.h> #include <linux/stat.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/smp.h> #ifdef MODULE static char *ips = NULL; module_param(ips, charp, 0); #endif /* * DRIVER_VER */ #define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING #define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " " #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \ DMA_BIDIRECTIONAL : \ scb->scsi_cmd->sc_data_direction) #ifdef IPS_DEBUG #define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); #define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n"); #define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v); #else #define METHOD_TRACE(s, i) #define DEBUG(i, s) #define DEBUG_VAR(i, s, v...) 
#endif /* * Function prototypes */ static int ips_eh_abort(struct scsi_cmnd *); static int ips_eh_reset(struct scsi_cmnd *); static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *); static const char *ips_info(struct Scsi_Host *); static irqreturn_t do_ipsintr(int, void *); static int ips_hainit(ips_ha_t *); static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *); static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int); static int ips_send_cmd(ips_ha_t *, ips_scb_t *); static int ips_online(ips_ha_t *, ips_scb_t *); static int ips_inquiry(ips_ha_t *, ips_scb_t *); static int ips_rdcap(ips_ha_t *, ips_scb_t *); static int ips_msense(ips_ha_t *, ips_scb_t *); static int ips_reqsen(ips_ha_t *, ips_scb_t *); static int ips_deallocatescbs(ips_ha_t *, int); static int ips_allocatescbs(ips_ha_t *); static int ips_reset_copperhead(ips_ha_t *); static int ips_reset_copperhead_memio(ips_ha_t *); static int ips_reset_morpheus(ips_ha_t *); static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *); static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *); static int ips_issue_i2o(ips_ha_t *, ips_scb_t *); static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *); static int ips_isintr_copperhead(ips_ha_t *); static int ips_isintr_copperhead_memio(ips_ha_t *); static int ips_isintr_morpheus(ips_ha_t *); static int ips_wait(ips_ha_t *, int, int); static int ips_write_driver_status(ips_ha_t *, int); static int ips_read_adapter_status(ips_ha_t *, int); static int ips_read_subsystem_parameters(ips_ha_t *, int); static int ips_read_config(ips_ha_t *, int); static int ips_clear_adapter(ips_ha_t *, int); static int ips_readwrite_page5(ips_ha_t *, int, int); static int ips_init_copperhead(ips_ha_t *); static int ips_init_copperhead_memio(ips_ha_t *); static int ips_init_morpheus(ips_ha_t *); static int ips_isinit_copperhead(ips_ha_t *); static int ips_isinit_copperhead_memio(ips_ha_t *); static int ips_isinit_morpheus(ips_ha_t *); static int ips_erase_bios(ips_ha_t *); static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t); static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t); static int ips_erase_bios_memio(ips_ha_t *); static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t); static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t); static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static void ips_free_flash_copperhead(ips_ha_t * ha); static void ips_get_bios_version(ips_ha_t *, int); static void ips_identify_controller(ips_ha_t *); static void ips_chkstatus(ips_ha_t *, IPS_STATUS *); static void ips_enable_int_copperhead(ips_ha_t *); static void ips_enable_int_copperhead_memio(ips_ha_t *); static void ips_enable_int_morpheus(ips_ha_t *); static int ips_intr_copperhead(ips_ha_t *); static int ips_intr_morpheus(ips_ha_t *); static void ips_next(ips_ha_t *, int); static void ipsintr_blocking(ips_ha_t *, struct ips_scb *); static void ipsintr_done(ips_ha_t *, struct ips_scb *); static void ips_done(ips_ha_t *, ips_scb_t *); static void ips_free(ips_ha_t *); static void ips_init_scb(ips_ha_t *, ips_scb_t *); static void ips_freescb(ips_ha_t *, ips_scb_t *); static void ips_setup_funclist(ips_ha_t *); static void ips_statinit(ips_ha_t *); static void ips_statinit_memio(ips_ha_t *); static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t); static void 
ips_ffdc_reset(ips_ha_t *, int); static void ips_ffdc_time(ips_ha_t *); static uint32_t ips_statupd_copperhead(ips_ha_t *); static uint32_t ips_statupd_copperhead_memio(ips_ha_t *); static uint32_t ips_statupd_morpheus(ips_ha_t *); static ips_scb_t *ips_getscb(ips_ha_t *); static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *); static void ips_putq_copp_tail(ips_copp_queue_t *, ips_copp_wait_item_t *); static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *); static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *, struct scsi_cmnd *); static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, ips_copp_wait_item_t *); static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *); static int ips_is_passthru(struct scsi_cmnd *); static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int); static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *); static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *); static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data, unsigned int count); static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data, unsigned int count); static int ips_write_info(struct Scsi_Host *, char *, int); static int ips_show_info(struct seq_file *, struct Scsi_Host *); static int ips_host_info(ips_ha_t *, struct seq_file *); static int ips_abort_init(ips_ha_t * ha, int index); static int ips_init_phase2(int index); static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr); static int ips_register_scsi(int index); static int ips_poll_for_flush_complete(ips_ha_t * ha); static void ips_flush_and_reset(ips_ha_t *ha); /* * global variables */ static const char ips_name[] = "ips"; static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */ static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */ static unsigned int ips_next_controller; static unsigned int ips_num_controllers; static unsigned int ips_released_controllers; static int ips_hotplug; static int ips_cmd_timeout = 60; static int ips_reset_timeout = 60 * 5; static int ips_force_memio = 1; /* Always use Memory Mapped I/O */ static int ips_force_i2o = 1; /* Always use I2O command delivery */ static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */ static int ips_cd_boot; /* Booting from Manager CD */ static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */ static dma_addr_t ips_flashbusaddr; static long ips_FlashDataInUse; /* CD Boot - Flash Data In Use Flag */ static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */ static struct scsi_host_template ips_driver_template = { .info = ips_info, .queuecommand = ips_queue, .eh_abort_handler = ips_eh_abort, .eh_host_reset_handler = ips_eh_reset, .proc_name = "ips", .show_info = ips_show_info, .write_info = ips_write_info, .slave_configure = ips_slave_configure, .bios_param = ips_biosparam, .this_id = -1, .sg_tablesize = IPS_MAX_SG, .cmd_per_lun = 3, .no_write_same = 1, }; /* This table describes all ServeRAID Adapters */ static struct pci_device_id ips_pci_table[] = { { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 }, { 0, } }; MODULE_DEVICE_TABLE( pci, ips_pci_table ); static char 
ips_hot_plug_name[] = "ips"; static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); static void ips_remove_device(struct pci_dev *pci_dev); static struct pci_driver ips_pci_driver = { .name = ips_hot_plug_name, .id_table = ips_pci_table, .probe = ips_insert_device, .remove = ips_remove_device, }; /* * Necessary forward function protoypes */ static int ips_halt(struct notifier_block *nb, ulong event, void *buf); #define MAX_ADAPTER_NAME 15 static char ips_adapter_name[][30] = { "ServeRAID", "ServeRAID II", "ServeRAID on motherboard", "ServeRAID on motherboard", "ServeRAID 3H", "ServeRAID 3L", "ServeRAID 4H", "ServeRAID 4M", "ServeRAID 4L", "ServeRAID 4Mx", "ServeRAID 4Lx", "ServeRAID 5i", "ServeRAID 5i", "ServeRAID 6M", "ServeRAID 6i", "ServeRAID 7t", "ServeRAID 7k", "ServeRAID 7M" }; static struct notifier_block ips_notifier = { ips_halt, NULL, 0 }; /* * Direction table */ static char ips_command_direction[] = { IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, 
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK }; /****************************************************************************/ /* */ /* Routine Name: ips_setup */ /* */ /* Routine Description: */ /* */ /* setup parameters to the driver */ /* */ /****************************************************************************/ static int ips_setup(char *ips_str) { int i; char *key; char *value; static const IPS_OPTION options[] = { {"noi2o", &ips_force_i2o, 0}, {"nommap", &ips_force_memio, 0}, {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, {"cdboot", &ips_cd_boot, 0}, {"maxcmds", &MaxLiteCmds, 32}, }; /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */ /* Search for value */ while ((key = strsep(&ips_str, ",."))) { if (!*key) continue; value = strchr(key, ':'); if (value) *value++ = '\0'; /* * We now have key/value pairs. * Update the variables */ for (i = 0; i < ARRAY_SIZE(options); i++) { if (strncasecmp (key, options[i].option_name, strlen(options[i].option_name)) == 0) { if (value) *options[i].option_flag = simple_strtoul(value, NULL, 0); else *options[i].option_flag = options[i].option_value; break; } } } return (1); } __setup("ips=", ips_setup); /****************************************************************************/ /* */ /* Routine Name: ips_detect */ /* */ /* Routine Description: */ /* */ /* Detect and initialize the driver */ /* */ /* NOTE: this routine is called under the io_request_lock spinlock */ /* */ /****************************************************************************/ static int ips_detect(struct scsi_host_template * SHT) { int i; METHOD_TRACE("ips_detect", 1); #ifdef MODULE if (ips) ips_setup(ips); #endif for (i = 0; i < ips_num_controllers; i++) { if (ips_register_scsi(i)) ips_free(ips_ha[i]); ips_released_controllers++; } ips_hotplug = 1; return (ips_num_controllers); } /****************************************************************************/ /* configure the function pointers to use the functions that will work */ /* with the found version of the adapter */ /****************************************************************************/ static void ips_setup_funclist(ips_ha_t * ha) { /* * Setup Functions */ if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { /* morpheus / marco / sebring */ ha->func.isintr = ips_isintr_morpheus; ha->func.isinit = ips_isinit_morpheus; ha->func.issue = ips_issue_i2o_memio; ha->func.init = ips_init_morpheus; ha->func.statupd = ips_statupd_morpheus; ha->func.reset = ips_reset_morpheus; ha->func.intr = ips_intr_morpheus; ha->func.enableint = ips_enable_int_morpheus; } else if (IPS_USE_MEMIO(ha)) { /* 
copperhead w/MEMIO */ ha->func.isintr = ips_isintr_copperhead_memio; ha->func.isinit = ips_isinit_copperhead_memio; ha->func.init = ips_init_copperhead_memio; ha->func.statupd = ips_statupd_copperhead_memio; ha->func.statinit = ips_statinit_memio; ha->func.reset = ips_reset_copperhead_memio; ha->func.intr = ips_intr_copperhead; ha->func.erasebios = ips_erase_bios_memio; ha->func.programbios = ips_program_bios_memio; ha->func.verifybios = ips_verify_bios_memio; ha->func.enableint = ips_enable_int_copperhead_memio; if (IPS_USE_I2O_DELIVER(ha)) ha->func.issue = ips_issue_i2o_memio; else ha->func.issue = ips_issue_copperhead_memio; } else { /* copperhead */ ha->func.isintr = ips_isintr_copperhead; ha->func.isinit = ips_isinit_copperhead; ha->func.init = ips_init_copperhead; ha->func.statupd = ips_statupd_copperhead; ha->func.statinit = ips_statinit; ha->func.reset = ips_reset_copperhead; ha->func.intr = ips_intr_copperhead; ha->func.erasebios = ips_erase_bios; ha->func.programbios = ips_program_bios; ha->func.verifybios = ips_verify_bios; ha->func.enableint = ips_enable_int_copperhead; if (IPS_USE_I2O_DELIVER(ha)) ha->func.issue = ips_issue_i2o; else ha->func.issue = ips_issue_copperhead; } } /****************************************************************************/ /* */ /* Routine Name: ips_release */ /* */ /* Routine Description: */ /* */ /* Remove a driver */ /* */ /****************************************************************************/ static void ips_release(struct Scsi_Host *sh) { ips_scb_t *scb; ips_ha_t *ha; int i; METHOD_TRACE("ips_release", 1); scsi_remove_host(sh); for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ; if (i == IPS_MAX_ADAPTERS) { printk(KERN_WARNING "(%s) release, invalid Scsi_Host pointer.\n", ips_name); BUG(); } ha = IPS_HA(sh); if (!ha) return; /* flush the cache on the controller */ scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_FLUSH; scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.flush_cache.state = IPS_NORM_STATE; scb->cmd.flush_cache.reserved = 0; scb->cmd.flush_cache.reserved2 = 0; scb->cmd.flush_cache.reserved3 = 0; scb->cmd.flush_cache.reserved4 = 0; IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n"); /* send command */ if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n"); IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n"); ips_sh[i] = NULL; ips_ha[i] = NULL; /* free extra memory */ ips_free(ha); /* free IRQ */ free_irq(ha->pcidev->irq, ha); scsi_host_put(sh); ips_released_controllers++; } /****************************************************************************/ /* */ /* Routine Name: ips_halt */ /* */ /* Routine Description: */ /* */ /* Perform cleanup when the system reboots */ /* */ /****************************************************************************/ static int ips_halt(struct notifier_block *nb, ulong event, void *buf) { ips_scb_t *scb; ips_ha_t *ha; int i; if ((event != SYS_RESTART) && (event != SYS_HALT) && (event != SYS_POWER_OFF)) return (NOTIFY_DONE); for (i = 0; i < ips_next_controller; i++) { ha = (ips_ha_t *) ips_ha[i]; if (!ha) continue; if (!ha->active) continue; /* flush the cache on the controller */ scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_FLUSH; scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; 
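		/* the remaining flush-cache fields are filled in before the command is issued synchronously at shutdown */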
scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.flush_cache.state = IPS_NORM_STATE; scb->cmd.flush_cache.reserved = 0; scb->cmd.flush_cache.reserved2 = 0; scb->cmd.flush_cache.reserved3 = 0; scb->cmd.flush_cache.reserved4 = 0; IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n"); /* send command */ if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE) IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n"); else IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n"); } return (NOTIFY_OK); } /****************************************************************************/ /* */ /* Routine Name: ips_eh_abort */ /* */ /* Routine Description: */ /* */ /* Abort a command (using the new error code stuff) */ /* Note: this routine is called under the io_request_lock */ /****************************************************************************/ int ips_eh_abort(struct scsi_cmnd *SC) { ips_ha_t *ha; ips_copp_wait_item_t *item; int ret; struct Scsi_Host *host; METHOD_TRACE("ips_eh_abort", 1); if (!SC) return (FAILED); host = SC->device->host; ha = (ips_ha_t *) SC->device->host->hostdata; if (!ha) return (FAILED); if (!ha->active) return (FAILED); spin_lock(host->host_lock); /* See if the command is on the copp queue */ item = ha->copp_waitlist.head; while ((item) && (item->scsi_cmd != SC)) item = item->next; if (item) { /* Found it */ ips_removeq_copp(&ha->copp_waitlist, item); ret = (SUCCESS); /* See if the command is on the wait queue */ } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) { /* command not sent yet */ ret = (SUCCESS); } else { /* command must have already been sent */ ret = (FAILED); } spin_unlock(host->host_lock); return ret; } /****************************************************************************/ /* */ /* Routine Name: ips_eh_reset */ /* */ /* Routine Description: */ /* */ /* Reset the controller (with new eh error code) */ /* */ /* NOTE: this routine is called under the io_request_lock spinlock */ /* */ /****************************************************************************/ static int __ips_eh_reset(struct scsi_cmnd *SC) { int ret; int i; ips_ha_t *ha; ips_scb_t *scb; ips_copp_wait_item_t *item; METHOD_TRACE("ips_eh_reset", 1); #ifdef NO_IPS_RESET return (FAILED); #else if (!SC) { DEBUG(1, "Reset called with NULL scsi command"); return (FAILED); } ha = (ips_ha_t *) SC->device->host->hostdata; if (!ha) { DEBUG(1, "Reset called with NULL ha struct"); return (FAILED); } if (!ha->active) return (FAILED); /* See if the command is on the copp queue */ item = ha->copp_waitlist.head; while ((item) && (item->scsi_cmd != SC)) item = item->next; if (item) { /* Found it */ ips_removeq_copp(&ha->copp_waitlist, item); return (SUCCESS); } /* See if the command is on the wait queue */ if (ips_removeq_wait(&ha->scb_waitlist, SC)) { /* command not sent yet */ return (SUCCESS); } /* An explanation for the casual observer: */ /* Part of the function of a RAID controller is automatic error */ /* detection and recovery. As such, the only problem that physically */ /* resetting an adapter will ever fix is when, for some reason, */ /* the driver is not successfully communicating with the adapter. */ /* Therefore, we will attempt to flush this adapter. If that succeeds, */ /* then there's no real purpose in a physical reset. This will complete */ /* much faster and avoids any problems that might be caused by a */ /* physical reset ( such as having to fail all the outstanding I/O's ). 
*/ if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */ scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_FLUSH; scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.flush_cache.state = IPS_NORM_STATE; scb->cmd.flush_cache.reserved = 0; scb->cmd.flush_cache.reserved2 = 0; scb->cmd.flush_cache.reserved3 = 0; scb->cmd.flush_cache.reserved4 = 0; /* Attempt the flush command */ ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL); if (ret == IPS_SUCCESS) { IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Reset Request - Flushed Cache\n"); return (SUCCESS); } } /* Either we can't communicate with the adapter or it's an IOCTL request */ /* from a utility. A physical reset is needed at this point. */ ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */ /* * command must have already been sent * reset the controller */ IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n"); ret = (*ha->func.reset) (ha); if (!ret) { struct scsi_cmnd *scsi_cmd; IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Controller reset failed - controller now offline.\n"); /* Now fail all of the active commands */ DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { scb->scsi_cmd->result = DID_ERROR << 16; scsi_done(scb->scsi_cmd); ips_freescb(ha, scb); } /* Now fail all of the pending commands */ DEBUG_VAR(1, "(%s%d) Failing pending commands", ips_name, ha->host_num); while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { scsi_cmd->result = DID_ERROR; scsi_done(scsi_cmd); } ha->active = false; return (FAILED); } if (!ips_clear_adapter(ha, IPS_INTR_IORL)) { struct scsi_cmnd *scsi_cmd; IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Controller reset failed - controller now offline.\n"); /* Now fail all of the active commands */ DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { scb->scsi_cmd->result = DID_ERROR << 16; scsi_done(scb->scsi_cmd); ips_freescb(ha, scb); } /* Now fail all of the pending commands */ DEBUG_VAR(1, "(%s%d) Failing pending commands", ips_name, ha->host_num); while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { scsi_cmd->result = DID_ERROR << 16; scsi_done(scsi_cmd); } ha->active = false; return (FAILED); } /* FFDC */ if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { ha->last_ffdc = ktime_get_real_seconds(); ha->reset_count++; ips_ffdc_reset(ha, IPS_INTR_IORL); } /* Now fail all of the active commands */ DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num); while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { scb->scsi_cmd->result = DID_RESET << 16; scsi_done(scb->scsi_cmd); ips_freescb(ha, scb); } /* Reset DCDB active command bits */ for (i = 1; i < ha->nbus; i++) ha->dcdb_active[i - 1] = 0; /* Reset the number of active IOCTLs */ ha->num_ioctl = 0; ips_next(ha, IPS_INTR_IORL); return (SUCCESS); #endif /* NO_IPS_RESET */ } static int ips_eh_reset(struct scsi_cmnd *SC) { int rc; spin_lock_irq(SC->device->host->host_lock); rc = __ips_eh_reset(SC); spin_unlock_irq(SC->device->host->host_lock); return rc; } /****************************************************************************/ /* */ /* Routine Name: ips_queue */ /* */ /* Routine Description: */ /* */ /* Send a command to the controller */ /* */ /* NOTE: */ /* Linux obtains io_request_lock before calling this function */ 
/* */ /****************************************************************************/ static int ips_queue_lck(struct scsi_cmnd *SC) { void (*done)(struct scsi_cmnd *) = scsi_done; ips_ha_t *ha; ips_passthru_t *pt; METHOD_TRACE("ips_queue", 1); ha = (ips_ha_t *) SC->device->host->hostdata; if (!ha) goto out_error; if (!ha->active) goto out_error; if (ips_is_passthru(SC)) { if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) { SC->result = DID_BUS_BUSY << 16; done(SC); return (0); } } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) { SC->result = DID_BUS_BUSY << 16; done(SC); return (0); } DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", ips_name, ha->host_num, SC->cmnd[0], SC->device->channel, SC->device->id, SC->device->lun); /* Check for command to initiator IDs */ if ((scmd_channel(SC) > 0) && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) { SC->result = DID_NO_CONNECT << 16; done(SC); return (0); } if (ips_is_passthru(SC)) { ips_copp_wait_item_t *scratch; /* A Reset IOCTL is only sent by the boot CD in extreme cases. */ /* There can never be any system activity ( network or disk ), but check */ /* anyway just as a good practice. */ pt = (ips_passthru_t *) scsi_sglist(SC); if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) && (pt->CoppCP.cmd.reset.adapter_flag == 1)) { if (ha->scb_activelist.count != 0) { SC->result = DID_BUS_BUSY << 16; done(SC); return (0); } ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ __ips_eh_reset(SC); SC->result = DID_OK << 16; scsi_done(SC); return (0); } /* allocate space for the scribble */ scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC); if (!scratch) { SC->result = DID_ERROR << 16; done(SC); return (0); } scratch->scsi_cmd = SC; scratch->next = NULL; ips_putq_copp_tail(&ha->copp_waitlist, scratch); } else { ips_putq_wait_tail(&ha->scb_waitlist, SC); } ips_next(ha, IPS_INTR_IORL); return (0); out_error: SC->result = DID_ERROR << 16; done(SC); return (0); } static DEF_SCSI_QCMD(ips_queue) /****************************************************************************/ /* */ /* Routine Name: ips_biosparam */ /* */ /* Routine Description: */ /* */ /* Set bios geometry for the controller */ /* */ /****************************************************************************/ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata; int heads; int sectors; int cylinders; METHOD_TRACE("ips_biosparam", 1); if (!ha) /* ?!?! host adater info invalid */ return (0); if (!ha->active) return (0); if (!ips_read_adapter_status(ha, IPS_INTR_ON)) /* ?!?! 
Enquiry command failed */ return (0); if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) { heads = IPS_NORM_HEADS; sectors = IPS_NORM_SECTORS; } else { heads = IPS_COMP_HEADS; sectors = IPS_COMP_SECTORS; } cylinders = (unsigned long) capacity / (heads * sectors); DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d", heads, sectors, cylinders); geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_slave_configure */ /* */ /* Routine Description: */ /* */ /* Set queue depths on devices once scan is complete */ /* */ /****************************************************************************/ static int ips_slave_configure(struct scsi_device * SDptr) { ips_ha_t *ha; int min; ha = IPS_HA(SDptr->host); if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) { min = ha->max_cmds / 2; if (ha->enq->ucLogDriveCount <= 2) min = ha->max_cmds - 1; scsi_change_queue_depth(SDptr, min); } SDptr->skip_ms_page_8 = 1; SDptr->skip_ms_page_3f = 1; return 0; } /****************************************************************************/ /* */ /* Routine Name: do_ipsintr */ /* */ /* Routine Description: */ /* */ /* Wrapper for the interrupt handler */ /* */ /****************************************************************************/ static irqreturn_t do_ipsintr(int irq, void *dev_id) { ips_ha_t *ha; struct Scsi_Host *host; int irqstatus; METHOD_TRACE("do_ipsintr", 2); ha = (ips_ha_t *) dev_id; if (!ha) return IRQ_NONE; host = ips_sh[ha->host_num]; /* interrupt during initialization */ if (!host) { (*ha->func.intr) (ha); return IRQ_HANDLED; } spin_lock(host->host_lock); if (!ha->active) { spin_unlock(host->host_lock); return IRQ_HANDLED; } irqstatus = (*ha->func.intr) (ha); spin_unlock(host->host_lock); /* start the next command */ ips_next(ha, IPS_INTR_ON); return IRQ_RETVAL(irqstatus); } /****************************************************************************/ /* */ /* Routine Name: ips_intr_copperhead */ /* */ /* Routine Description: */ /* */ /* Polling interrupt handler */ /* */ /* ASSUMES interrupts are disabled */ /* */ /****************************************************************************/ int ips_intr_copperhead(ips_ha_t * ha) { ips_stat_t *sp; ips_scb_t *scb; IPS_STATUS cstatus; int intrstatus; METHOD_TRACE("ips_intr", 2); if (!ha) return 0; if (!ha->active) return 0; intrstatus = (*ha->func.isintr) (ha); if (!intrstatus) { /* * Unexpected/Shared interrupt */ return 0; } while (true) { sp = &ha->sp; intrstatus = (*ha->func.isintr) (ha); if (!intrstatus) break; else cstatus.value = (*ha->func.statupd) (ha); if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { /* Spurious Interrupt ? 
*/ continue; } ips_chkstatus(ha, &cstatus); scb = (ips_scb_t *) sp->scb_addr; /* * use the callback function to finish things up * NOTE: interrupts are OFF for this */ (*scb->callback) (ha, scb); } /* end while */ return 1; } /****************************************************************************/ /* */ /* Routine Name: ips_intr_morpheus */ /* */ /* Routine Description: */ /* */ /* Polling interrupt handler */ /* */ /* ASSUMES interrupts are disabled */ /* */ /****************************************************************************/ int ips_intr_morpheus(ips_ha_t * ha) { ips_stat_t *sp; ips_scb_t *scb; IPS_STATUS cstatus; int intrstatus; METHOD_TRACE("ips_intr_morpheus", 2); if (!ha) return 0; if (!ha->active) return 0; intrstatus = (*ha->func.isintr) (ha); if (!intrstatus) { /* * Unexpected/Shared interrupt */ return 0; } while (true) { sp = &ha->sp; intrstatus = (*ha->func.isintr) (ha); if (!intrstatus) break; else cstatus.value = (*ha->func.statupd) (ha); if (cstatus.value == 0xffffffff) /* No more to process */ break; if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Spurious interrupt; no ccb.\n"); continue; } ips_chkstatus(ha, &cstatus); scb = (ips_scb_t *) sp->scb_addr; /* * use the callback function to finish things up * NOTE: interrupts are OFF for this */ (*scb->callback) (ha, scb); } /* end while */ return 1; } /****************************************************************************/ /* */ /* Routine Name: ips_info */ /* */ /* Routine Description: */ /* */ /* Return info about the driver */ /* */ /****************************************************************************/ static const char * ips_info(struct Scsi_Host *SH) { static char buffer[256]; char *bp; ips_ha_t *ha; METHOD_TRACE("ips_info", 1); ha = IPS_HA(SH); if (!ha) return (NULL); bp = &buffer[0]; memset(bp, 0, sizeof (buffer)); sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ", IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT); if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) { strcat(bp, " <"); strcat(bp, ips_adapter_name[ha->ad_type - 1]); strcat(bp, ">"); } return (bp); } static int ips_write_info(struct Scsi_Host *host, char *buffer, int length) { int i; ips_ha_t *ha = NULL; /* Find our host structure */ for (i = 0; i < ips_next_controller; i++) { if (ips_sh[i]) { if (ips_sh[i] == host) { ha = (ips_ha_t *) ips_sh[i]->hostdata; break; } } } if (!ha) return (-EINVAL); return 0; } static int ips_show_info(struct seq_file *m, struct Scsi_Host *host) { int i; ips_ha_t *ha = NULL; /* Find our host structure */ for (i = 0; i < ips_next_controller; i++) { if (ips_sh[i]) { if (ips_sh[i] == host) { ha = (ips_ha_t *) ips_sh[i]->hostdata; break; } } } if (!ha) return (-EINVAL); return ips_host_info(ha, m); } /*--------------------------------------------------------------------------*/ /* Helper Functions */ /*--------------------------------------------------------------------------*/ /****************************************************************************/ /* */ /* Routine Name: ips_is_passthru */ /* */ /* Routine Description: */ /* */ /* Determine if the specified SCSI command is really a passthru command */ /* */ /****************************************************************************/ static int ips_is_passthru(struct scsi_cmnd *SC) { unsigned long flags; METHOD_TRACE("ips_is_passthru", 1); if (!SC) return (0); if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) && (SC->device->channel == 0) && (SC->device->id == IPS_ADAPTER_ID) && (SC->device->lun == 0) 
&& scsi_sglist(SC)) { struct scatterlist *sg = scsi_sglist(SC); char *buffer; /* local_irq_save() protects the KM_IRQ0 address slot. */ local_irq_save(flags); buffer = kmap_local_page(sg_page(sg)) + sg->offset; if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && buffer[2] == 'P' && buffer[3] == 'P') { kunmap_local(buffer); local_irq_restore(flags); return 1; } kunmap_local(buffer); local_irq_restore(flags); } return 0; } /****************************************************************************/ /* */ /* Routine Name: ips_alloc_passthru_buffer */ /* */ /* Routine Description: */ /* allocate a buffer large enough for the ioctl data if the ioctl buffer */ /* is too small or doesn't exist */ /****************************************************************************/ static int ips_alloc_passthru_buffer(ips_ha_t * ha, int length) { void *bigger_buf; dma_addr_t dma_busaddr; if (ha->ioctl_data && length <= ha->ioctl_len) return 0; /* there is no buffer or it's not big enough, allocate a new one */ bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr, GFP_KERNEL); if (bigger_buf) { /* free the old memory */ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len, ha->ioctl_data, ha->ioctl_busaddr); /* use the new memory */ ha->ioctl_data = (char *) bigger_buf; ha->ioctl_len = length; ha->ioctl_busaddr = dma_busaddr; } else { return -1; } return 0; } /****************************************************************************/ /* */ /* Routine Name: ips_make_passthru */ /* */ /* Routine Description: */ /* */ /* Make a passthru command out of the info in the Scsi block */ /* */ /****************************************************************************/ static int ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr) { ips_passthru_t *pt; int length = 0; int i, ret; struct scatterlist *sg = scsi_sglist(SC); METHOD_TRACE("ips_make_passthru", 1); scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) length += sg->length; if (length < sizeof (ips_passthru_t)) { /* wrong size */ DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", ips_name, ha->host_num); return (IPS_FAILURE); } if (ips_alloc_passthru_buffer(ha, length)) { /* allocation failure! If ha->ioctl_data exists, use it to return some error codes. Return a failed command to the scsi layer. */ if (ha->ioctl_data) { pt = (ips_passthru_t *) ha->ioctl_data; ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t)); pt->BasicStatus = 0x0B; pt->ExtendedStatus = 0x00; ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t)); } return IPS_FAILURE; } ha->ioctl_datasize = length; ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize); pt = (ips_passthru_t *) ha->ioctl_data; /* * Some notes about the passthru interface used * * IF the scsi op_code == 0x0d then we assume * that the data came along with/goes with the * packet we received from the sg driver. In this * case the CmdBSize field of the pt structure is * used for the size of the buffer. 
*/ switch (pt->CoppCmd) { case IPS_NUMCTRLS: memcpy(ha->ioctl_data + sizeof (ips_passthru_t), &ips_num_controllers, sizeof (int)); ips_scmd_buf_write(SC, ha->ioctl_data, sizeof (ips_passthru_t) + sizeof (int)); SC->result = DID_OK << 16; return (IPS_SUCCESS_IMM); case IPS_COPPUSRCMD: case IPS_COPPIOCCMD: if (SC->cmnd[0] == IPS_IOCTL_COMMAND) { if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) { /* wrong size */ DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", ips_name, ha->host_num); return (IPS_FAILURE); } if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD && pt->CoppCP.cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW) { ret = ips_flash_copperhead(ha, pt, scb); ips_scmd_buf_write(SC, ha->ioctl_data, sizeof (ips_passthru_t)); return ret; } if (ips_usrcmd(ha, pt, scb)) return (IPS_SUCCESS); else return (IPS_FAILURE); } break; } /* end switch */ return (IPS_FAILURE); } /****************************************************************************/ /* Routine Name: ips_flash_copperhead */ /* Routine Description: */ /* Flash the BIOS/FW on a Copperhead style controller */ /****************************************************************************/ static int ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) { int datasize; /* Trombone is the only copperhead that can do packet flash, but only * for firmware. No one said it had to make sense. */ if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) { if (ips_usrcmd(ha, pt, scb)) return IPS_SUCCESS; else return IPS_FAILURE; } pt->BasicStatus = 0x0B; pt->ExtendedStatus = 0; scb->scsi_cmd->result = DID_OK << 16; /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */ /* avoid allocating a huge buffer per adapter ( which can fail ). */ if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { pt->BasicStatus = 0; return ips_flash_bios(ha, pt, scb); } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) { if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){ ha->flash_data = ips_FlashData; ha->flash_busaddr = ips_flashbusaddr; ha->flash_len = PAGE_SIZE << 7; ha->flash_datasize = 0; } else if (!ha->flash_data) { datasize = pt->CoppCP.cmd.flashfw.total_packets * pt->CoppCP.cmd.flashfw.count; ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev, datasize, &ha->flash_busaddr, GFP_KERNEL); if (!ha->flash_data){ printk(KERN_WARNING "Unable to allocate a flash buffer\n"); return IPS_FAILURE; } ha->flash_datasize = 0; ha->flash_len = datasize; } else return IPS_FAILURE; } else { if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize > ha->flash_len) { ips_free_flash_copperhead(ha); IPS_PRINTK(KERN_WARNING, ha->pcidev, "failed size sanity check\n"); return IPS_FAILURE; } } if (!ha->flash_data) return IPS_FAILURE; pt->BasicStatus = 0; memcpy(&ha->flash_data[ha->flash_datasize], pt + 1, pt->CoppCP.cmd.flashfw.count); ha->flash_datasize += pt->CoppCP.cmd.flashfw.count; if (pt->CoppCP.cmd.flashfw.packet_num == pt->CoppCP.cmd.flashfw.total_packets - 1) { if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE) return ips_flash_bios(ha, pt, scb); else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) return ips_flash_firmware(ha, pt, scb); } return IPS_SUCCESS_IMM; } /****************************************************************************/ /* Routine Name: ips_flash_bios */ /* Routine Description: */ /* flashes the bios of a copperhead adapter */ /****************************************************************************/ static int ips_flash_bios(ips_ha_t * 
ha, ips_passthru_t * pt, ips_scb_t * scb) { if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) { if ((!ha->func.programbios) || (!ha->func.erasebios) || (!ha->func.verifybios)) goto error; if ((*ha->func.erasebios) (ha)) { DEBUG_VAR(1, "(%s%d) flash bios failed - unable to erase flash", ips_name, ha->host_num); goto error; } else if ((*ha->func.programbios) (ha, ha->flash_data + IPS_BIOS_HEADER, ha->flash_datasize - IPS_BIOS_HEADER, 0)) { DEBUG_VAR(1, "(%s%d) flash bios failed - unable to flash", ips_name, ha->host_num); goto error; } else if ((*ha->func.verifybios) (ha, ha->flash_data + IPS_BIOS_HEADER, ha->flash_datasize - IPS_BIOS_HEADER, 0)) { DEBUG_VAR(1, "(%s%d) flash bios failed - unable to verify flash", ips_name, ha->host_num); goto error; } ips_free_flash_copperhead(ha); return IPS_SUCCESS_IMM; } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE && pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) { if (!ha->func.erasebios) goto error; if ((*ha->func.erasebios) (ha)) { DEBUG_VAR(1, "(%s%d) flash bios failed - unable to erase flash", ips_name, ha->host_num); goto error; } return IPS_SUCCESS_IMM; } error: pt->BasicStatus = 0x0B; pt->ExtendedStatus = 0x00; ips_free_flash_copperhead(ha); return IPS_FAILURE; } /****************************************************************************/ /* */ /* Routine Name: ips_fill_scb_sg_single */ /* */ /* Routine Description: */ /* Fill in a single scb sg_list element from an address */ /* return a -1 if a breakup occurred */ /****************************************************************************/ static int ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr, ips_scb_t * scb, int indx, unsigned int e_len) { int ret_val = 0; if ((scb->data_len + e_len) > ha->max_xfer) { e_len = ha->max_xfer - scb->data_len; scb->breakup = indx; ++scb->sg_break; ret_val = -1; } else { scb->breakup = 0; scb->sg_break = 0; } if (IPS_USE_ENH_SGLIST(ha)) { scb->sg_list.enh_list[indx].address_lo = cpu_to_le32(lower_32_bits(busaddr)); scb->sg_list.enh_list[indx].address_hi = cpu_to_le32(upper_32_bits(busaddr)); scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len); } else { scb->sg_list.std_list[indx].address = cpu_to_le32(lower_32_bits(busaddr)); scb->sg_list.std_list[indx].length = cpu_to_le32(e_len); } ++scb->sg_len; scb->data_len += e_len; return ret_val; } /****************************************************************************/ /* Routine Name: ips_flash_firmware */ /* Routine Description: */ /* flashes the firmware of a copperhead adapter */ /****************************************************************************/ static int ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) { IPS_SG_LIST sg_list; uint32_t cmd_busaddr; if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE && pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) { memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND)); pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD; pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize); } else { pt->BasicStatus = 0x0B; pt->ExtendedStatus = 0x00; ips_free_flash_copperhead(ha); return IPS_FAILURE; } /* Save the S/G list pointer so it doesn't get clobbered */ sg_list.list = scb->sg_list.list; cmd_busaddr = scb->scb_busaddr; /* copy in the CP */ memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); /* FIX stuff that might be wrong */ scb->sg_list.list = sg_list.list; scb->scb_busaddr = cmd_busaddr; scb->bus = scb->scsi_cmd->device->channel; scb->target_id = 
scb->scsi_cmd->device->id; scb->lun = scb->scsi_cmd->device->lun; scb->sg_len = 0; scb->data_len = 0; scb->flags = 0; scb->op_code = 0; scb->callback = ipsintr_done; scb->timeout = ips_cmd_timeout; scb->data_len = ha->flash_datasize; scb->data_busaddr = dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len, IPS_DMA_DIR(scb)); scb->flags |= IPS_SCB_MAP_SINGLE; scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr); if (pt->TimeOut) scb->timeout = pt->TimeOut; scb->scsi_cmd->result = DID_OK << 16; return IPS_SUCCESS; } /****************************************************************************/ /* Routine Name: ips_free_flash_copperhead */ /* Routine Description: */ /* release the memory resources used to hold the flash image */ /****************************************************************************/ static void ips_free_flash_copperhead(ips_ha_t * ha) { if (ha->flash_data == ips_FlashData) test_and_clear_bit(0, &ips_FlashDataInUse); else if (ha->flash_data) dma_free_coherent(&ha->pcidev->dev, ha->flash_len, ha->flash_data, ha->flash_busaddr); ha->flash_data = NULL; } /****************************************************************************/ /* */ /* Routine Name: ips_usrcmd */ /* */ /* Routine Description: */ /* */ /* Process a user command and make it ready to send */ /* */ /****************************************************************************/ static int ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb) { IPS_SG_LIST sg_list; uint32_t cmd_busaddr; METHOD_TRACE("ips_usrcmd", 1); if ((!scb) || (!pt) || (!ha)) return (0); /* Save the S/G list pointer so it doesn't get clobbered */ sg_list.list = scb->sg_list.list; cmd_busaddr = scb->scb_busaddr; /* copy in the CP */ memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD)); memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE)); /* FIX stuff that might be wrong */ scb->sg_list.list = sg_list.list; scb->scb_busaddr = cmd_busaddr; scb->bus = scb->scsi_cmd->device->channel; scb->target_id = scb->scsi_cmd->device->id; scb->lun = scb->scsi_cmd->device->lun; scb->sg_len = 0; scb->data_len = 0; scb->flags = 0; scb->op_code = 0; scb->callback = ipsintr_done; scb->timeout = ips_cmd_timeout; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); /* we don't support DCDB/READ/WRITE Scatter Gather */ if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) || (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) || (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG)) return (0); if (pt->CmdBSize) { scb->data_len = pt->CmdBSize; scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t); } else { scb->data_busaddr = 0L; } if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + (unsigned long) &scb-> dcdb - (unsigned long) scb); if (pt->CmdBSize) { if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr); else scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); } /* set timeouts */ if (pt->TimeOut) { scb->timeout = pt->TimeOut; if (pt->TimeOut <= 10) scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; else if (pt->TimeOut <= 60) scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; else scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; } /* assume success */ scb->scsi_cmd->result = DID_OK << 16; /* success */ return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_cleanup_passthru */ /* */ /* Routine Description: */ /* */ /* 
Cleanup after a passthru command */ /* */ /****************************************************************************/ static void ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb) { ips_passthru_t *pt; METHOD_TRACE("ips_cleanup_passthru", 1); if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) { DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", ips_name, ha->host_num); return; } pt = (ips_passthru_t *) ha->ioctl_data; /* Copy data back to the user */ if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */ memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE)); pt->BasicStatus = scb->basic_status; pt->ExtendedStatus = scb->extended_status; pt->AdapterType = ha->ad_type; if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD && (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD || scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW)) ips_free_flash_copperhead(ha); ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize); } /****************************************************************************/ /* */ /* Routine Name: ips_host_info */ /* */ /* Routine Description: */ /* */ /* The passthru interface for the driver */ /* */ /****************************************************************************/ static int ips_host_info(ips_ha_t *ha, struct seq_file *m) { METHOD_TRACE("ips_host_info", 1); seq_puts(m, "\nIBM ServeRAID General Information:\n\n"); if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && (le16_to_cpu(ha->nvram->adapter_type) != 0)) seq_printf(m, "\tController Type : %s\n", ips_adapter_name[ha->ad_type - 1]); else seq_puts(m, "\tController Type : Unknown\n"); if (ha->io_addr) seq_printf(m, "\tIO region : 0x%x (%d bytes)\n", ha->io_addr, ha->io_len); if (ha->mem_addr) { seq_printf(m, "\tMemory region : 0x%x (%d bytes)\n", ha->mem_addr, ha->mem_len); seq_printf(m, "\tShared memory address : 0x%lx\n", (unsigned long)ha->mem_ptr); } seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq); /* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */ /* That keeps everything happy for "text" operations on the proc file. 
*/ if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) { if (ha->nvram->bios_low[3] == 0) { seq_printf(m, "\tBIOS Version : %c%c%c%c%c%c%c\n", ha->nvram->bios_high[0], ha->nvram->bios_high[1], ha->nvram->bios_high[2], ha->nvram->bios_high[3], ha->nvram->bios_low[0], ha->nvram->bios_low[1], ha->nvram->bios_low[2]); } else { seq_printf(m, "\tBIOS Version : %c%c%c%c%c%c%c%c\n", ha->nvram->bios_high[0], ha->nvram->bios_high[1], ha->nvram->bios_high[2], ha->nvram->bios_high[3], ha->nvram->bios_low[0], ha->nvram->bios_low[1], ha->nvram->bios_low[2], ha->nvram->bios_low[3]); } } if (ha->enq->CodeBlkVersion[7] == 0) { seq_printf(m, "\tFirmware Version : %c%c%c%c%c%c%c\n", ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], ha->enq->CodeBlkVersion[6]); } else { seq_printf(m, "\tFirmware Version : %c%c%c%c%c%c%c%c\n", ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); } if (ha->enq->BootBlkVersion[7] == 0) { seq_printf(m, "\tBoot Block Version : %c%c%c%c%c%c%c\n", ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], ha->enq->BootBlkVersion[6]); } else { seq_printf(m, "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); } seq_printf(m, "\tDriver Version : %s%s\n", IPS_VERSION_HIGH, IPS_VERSION_LOW); seq_printf(m, "\tDriver Build : %d\n", IPS_BUILD_IDENT); seq_printf(m, "\tMax Physical Devices : %d\n", ha->enq->ucMaxPhysicalDevices); seq_printf(m, "\tMax Active Commands : %d\n", ha->max_cmds); seq_printf(m, "\tCurrent Queued Commands : %d\n", ha->scb_waitlist.count); seq_printf(m, "\tCurrent Active Commands : %d\n", ha->scb_activelist.count - ha->num_ioctl); seq_printf(m, "\tCurrent Queued PT Commands : %d\n", ha->copp_waitlist.count); seq_printf(m, "\tCurrent Active PT Commands : %d\n", ha->num_ioctl); seq_putc(m, '\n'); return 0; } /****************************************************************************/ /* */ /* Routine Name: ips_identify_controller */ /* */ /* Routine Description: */ /* */ /* Identify this controller */ /* */ /****************************************************************************/ static void ips_identify_controller(ips_ha_t * ha) { METHOD_TRACE("ips_identify_controller", 1); switch (ha->pcidev->device) { case IPS_DEVICEID_COPPERHEAD: if (ha->pcidev->revision <= IPS_REVID_SERVERAID) { ha->ad_type = IPS_ADTYPE_SERVERAID; } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) { ha->ad_type = IPS_ADTYPE_SERVERAID2; } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) { ha->ad_type = IPS_ADTYPE_NAVAJO; } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2) && (ha->slot_num == 0)) { ha->ad_type = IPS_ADTYPE_KIOWA; } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) && (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) { if (ha->enq->ucMaxPhysicalDevices == 15) ha->ad_type = IPS_ADTYPE_SERVERAID3L; else ha->ad_type = IPS_ADTYPE_SERVERAID3; } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) && (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) { 
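/* PCI revision IDs in the Trombone32..Trombone64 range map to the ServeRAID 4H adapter type. */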
ha->ad_type = IPS_ADTYPE_SERVERAID4H; } break; case IPS_DEVICEID_MORPHEUS: switch (ha->pcidev->subsystem_device) { case IPS_SUBDEVICEID_4L: ha->ad_type = IPS_ADTYPE_SERVERAID4L; break; case IPS_SUBDEVICEID_4M: ha->ad_type = IPS_ADTYPE_SERVERAID4M; break; case IPS_SUBDEVICEID_4MX: ha->ad_type = IPS_ADTYPE_SERVERAID4MX; break; case IPS_SUBDEVICEID_4LX: ha->ad_type = IPS_ADTYPE_SERVERAID4LX; break; case IPS_SUBDEVICEID_5I2: ha->ad_type = IPS_ADTYPE_SERVERAID5I2; break; case IPS_SUBDEVICEID_5I1: ha->ad_type = IPS_ADTYPE_SERVERAID5I1; break; } break; case IPS_DEVICEID_MARCO: switch (ha->pcidev->subsystem_device) { case IPS_SUBDEVICEID_6M: ha->ad_type = IPS_ADTYPE_SERVERAID6M; break; case IPS_SUBDEVICEID_6I: ha->ad_type = IPS_ADTYPE_SERVERAID6I; break; case IPS_SUBDEVICEID_7k: ha->ad_type = IPS_ADTYPE_SERVERAID7k; break; case IPS_SUBDEVICEID_7M: ha->ad_type = IPS_ADTYPE_SERVERAID7M; break; } break; } } /****************************************************************************/ /* */ /* Routine Name: ips_get_bios_version */ /* */ /* Routine Description: */ /* */ /* Get the BIOS revision number */ /* */ /****************************************************************************/ static void ips_get_bios_version(ips_ha_t * ha, int intr) { ips_scb_t *scb; int ret; uint8_t major; uint8_t minor; uint8_t subminor; uint8_t *buffer; METHOD_TRACE("ips_get_bios_version", 1); major = 0; minor = 0; memcpy(ha->bios_version, " ?", 8); if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) { if (IPS_USE_MEMIO(ha)) { /* Memory Mapped I/O */ /* test 1st byte */ writel(0, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) return; writel(1, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) return; /* Get Major version */ writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ major = readb(ha->mem_ptr + IPS_REG_FLDP); /* Get Minor version */ writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ minor = readb(ha->mem_ptr + IPS_REG_FLDP); /* Get SubMinor version */ writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ subminor = readb(ha->mem_ptr + IPS_REG_FLDP); } else { /* Programmed I/O */ /* test 1st byte */ outl(0, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) return; outl(1, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) return; /* Get Major version */ outl(0x1FF, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ major = inb(ha->io_addr + IPS_REG_FLDP); /* Get Minor version */ outl(0x1FE, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ minor = inb(ha->io_addr + IPS_REG_FLDP); /* Get SubMinor version */ outl(0x1FD, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ subminor = inb(ha->io_addr + IPS_REG_FLDP); } } else { /* Morpheus Family - Send Command to the card */ buffer = ha->ioctl_data; memset(buffer, 0, 0x1000); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); 
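/*
 * Morpheus-family controllers report the BIOS level through a firmware
 * command rather than direct flash reads: the IPS_CMD_RW_BIOSFW command
 * built below reads 0x800 bytes of BIOS flash into ioctl_data, and the
 * major/minor/subminor bytes are then taken from that buffer at offsets
 * 0x1ff/0x1fe/0x1fd past the 0xC0-byte header.
 */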
scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_RW_BIOSFW; scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW; scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.flashfw.type = 1; scb->cmd.flashfw.direction = 0; scb->cmd.flashfw.count = cpu_to_le32(0x800); scb->cmd.flashfw.total_packets = 1; scb->cmd.flashfw.packet_num = 0; scb->data_len = 0x1000; scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr; /* issue the command */ if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { /* Error occurred */ return; } if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) { major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */ minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */ subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */ } else { return; } } ha->bios_version[0] = hex_asc_upper_hi(major); ha->bios_version[1] = '.'; ha->bios_version[2] = hex_asc_upper_lo(major); ha->bios_version[3] = hex_asc_upper_lo(subminor); ha->bios_version[4] = '.'; ha->bios_version[5] = hex_asc_upper_hi(minor); ha->bios_version[6] = hex_asc_upper_lo(minor); ha->bios_version[7] = 0; } /****************************************************************************/ /* */ /* Routine Name: ips_hainit */ /* */ /* Routine Description: */ /* */ /* Initialize the controller */ /* */ /* NOTE: Assumes to be called from with a lock */ /* */ /****************************************************************************/ static int ips_hainit(ips_ha_t * ha) { int i; METHOD_TRACE("ips_hainit", 1); if (!ha) return (0); if (ha->func.statinit) (*ha->func.statinit) (ha); if (ha->func.enableint) (*ha->func.enableint) (ha); /* Send FFDC */ ha->reset_count = 1; ha->last_ffdc = ktime_get_real_seconds(); ips_ffdc_reset(ha, IPS_INTR_IORL); if (!ips_read_config(ha, IPS_INTR_IORL)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "unable to read config from controller.\n"); return (0); } /* end if */ if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "unable to read controller status.\n"); return (0); } /* Identify this controller */ ips_identify_controller(ha); if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "unable to read subsystem parameters.\n"); return (0); } /* write nvram user page 5 */ if (!ips_write_driver_status(ha, IPS_INTR_IORL)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "unable to write driver info to controller.\n"); return (0); } /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */ if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1)) ips_clear_adapter(ha, IPS_INTR_IORL); /* set limits on SID, LUN, BUS */ ha->ntargets = IPS_MAX_TARGETS + 1; ha->nlun = 1; ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1; switch (ha->conf->logical_drive[0].ucStripeSize) { case 4: ha->max_xfer = 0x10000; break; case 5: ha->max_xfer = 0x20000; break; case 6: ha->max_xfer = 0x40000; break; case 7: default: ha->max_xfer = 0x80000; break; } /* setup max concurrent commands */ if (le32_to_cpu(ha->subsys->param[4]) & 0x1) { /* Use the new method */ ha->max_cmds = ha->enq->ucConcurrentCmdCount; } else { /* use the old method */ switch (ha->conf->logical_drive[0].ucStripeSize) { case 4: ha->max_cmds = 32; break; case 5: ha->max_cmds = 16; break; case 6: ha->max_cmds = 8; break; case 7: default: ha->max_cmds = 4; break; } } /* Limit the Active Commands on a 
Lite Adapter */ if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) || (ha->ad_type == IPS_ADTYPE_SERVERAID4L) || (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) { if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds)) ha->max_cmds = MaxLiteCmds; } /* set controller IDs */ ha->ha_id[0] = IPS_ADAPTER_ID; for (i = 1; i < ha->nbus; i++) { ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f; ha->dcdb_active[i - 1] = 0; } return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_next */ /* */ /* Routine Description: */ /* */ /* Take the next command off the queue and send it to the controller */ /* */ /****************************************************************************/ static void ips_next(ips_ha_t * ha, int intr) { ips_scb_t *scb; struct scsi_cmnd *SC; struct scsi_cmnd *p; struct scsi_cmnd *q; ips_copp_wait_item_t *item; int ret; struct Scsi_Host *host; METHOD_TRACE("ips_next", 1); if (!ha) return; host = ips_sh[ha->host_num]; /* * Block access to the queue function so * this command won't time out */ if (intr == IPS_INTR_ON) spin_lock(host->host_lock); if ((ha->subsys->param[3] & 0x300000) && (ha->scb_activelist.count == 0)) { time64_t now = ktime_get_real_seconds(); if (now - ha->last_ffdc > IPS_SECS_8HOURS) { ha->last_ffdc = now; ips_ffdc_time(ha); } } /* * Send passthru commands * These have priority over normal I/O * but shouldn't affect performance too much * since we limit the number that can be active * on the card at any one time */ while ((ha->num_ioctl < IPS_MAX_IOCTL) && (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) { item = ips_removeq_copp_head(&ha->copp_waitlist); ha->num_ioctl++; if (intr == IPS_INTR_ON) spin_unlock(host->host_lock); scb->scsi_cmd = item->scsi_cmd; kfree(item); ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); if (intr == IPS_INTR_ON) spin_lock(host->host_lock); switch (ret) { case IPS_FAILURE: if (scb->scsi_cmd) { scb->scsi_cmd->result = DID_ERROR << 16; scsi_done(scb->scsi_cmd); } ips_freescb(ha, scb); break; case IPS_SUCCESS_IMM: if (scb->scsi_cmd) { scb->scsi_cmd->result = DID_OK << 16; scsi_done(scb->scsi_cmd); } ips_freescb(ha, scb); break; default: break; } /* end case */ if (ret != IPS_SUCCESS) { ha->num_ioctl--; continue; } ret = ips_send_cmd(ha, scb); if (ret == IPS_SUCCESS) ips_putq_scb_head(&ha->scb_activelist, scb); else ha->num_ioctl--; switch (ret) { case IPS_FAILURE: if (scb->scsi_cmd) { scb->scsi_cmd->result = DID_ERROR << 16; } ips_freescb(ha, scb); break; case IPS_SUCCESS_IMM: ips_freescb(ha, scb); break; default: break; } /* end case */ } /* * Send "Normal" I/O commands */ p = ha->scb_waitlist.head; while ((p) && (scb = ips_getscb(ha))) { if ((scmd_channel(p) > 0) && (ha-> dcdb_active[scmd_channel(p) - 1] & (1 << scmd_id(p)))) { ips_freescb(ha, scb); p = (struct scsi_cmnd *) p->host_scribble; continue; } q = p; SC = ips_removeq_wait(&ha->scb_waitlist, q); if (intr == IPS_INTR_ON) spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */ SC->result = DID_OK; SC->host_scribble = NULL; scb->target_id = SC->device->id; scb->lun = SC->device->lun; scb->bus = SC->device->channel; scb->scsi_cmd = SC; scb->breakup = 0; scb->data_len = 0; scb->callback = ipsintr_done; scb->timeout = ips_cmd_timeout; memset(&scb->cmd, 0, 16); /* copy in the CDB */ memcpy(scb->cdb, SC->cmnd, SC->cmd_len); scb->sg_count = scsi_dma_map(SC); BUG_ON(scb->sg_count < 0); if (scb->sg_count) { struct scatterlist *sg; int i; scb->flags |= IPS_SCB_MAP_SG; scsi_for_each_sg(SC, sg, scb->sg_count, i) { 
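/*
 * Copy each DMA-mapped scatterlist entry into the adapter's S/G list;
 * ips_fill_scb_sg_single() returns a negative value once no more entries
 * fit, and any remainder is handled by the break-up logic in ips_done().
 */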
if (ips_fill_scb_sg_single (ha, sg_dma_address(sg), scb, i, sg_dma_len(sg)) < 0) break; } scb->dcdb.transfer_length = scb->data_len; } else { scb->data_busaddr = 0L; scb->sg_len = 0; scb->data_len = 0; scb->dcdb.transfer_length = 0; } scb->dcdb.cmd_attribute = ips_command_direction[scb->scsi_cmd->cmnd[0]]; /* Allow a WRITE BUFFER Command to Have no Data */ /* This is Used by Tape Flash Utilites */ if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) && (scb->data_len == 0)) scb->dcdb.cmd_attribute = 0; if (!(scb->dcdb.cmd_attribute & 0x3)) scb->dcdb.transfer_length = 0; if (scb->data_len >= IPS_MAX_XFER) { scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; scb->dcdb.transfer_length = 0; } if (intr == IPS_INTR_ON) spin_lock(host->host_lock); ret = ips_send_cmd(ha, scb); switch (ret) { case IPS_SUCCESS: ips_putq_scb_head(&ha->scb_activelist, scb); break; case IPS_FAILURE: if (scb->scsi_cmd) { scb->scsi_cmd->result = DID_ERROR << 16; scsi_done(scb->scsi_cmd); } if (scb->bus) ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); ips_freescb(ha, scb); break; case IPS_SUCCESS_IMM: if (scb->scsi_cmd) scsi_done(scb->scsi_cmd); if (scb->bus) ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); ips_freescb(ha, scb); break; default: break; } /* end case */ p = (struct scsi_cmnd *) p->host_scribble; } /* end while */ if (intr == IPS_INTR_ON) spin_unlock(host->host_lock); } /****************************************************************************/ /* */ /* Routine Name: ips_putq_scb_head */ /* */ /* Routine Description: */ /* */ /* Add an item to the head of the queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static void ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item) { METHOD_TRACE("ips_putq_scb_head", 1); if (!item) return; item->q_next = queue->head; queue->head = item; if (!queue->tail) queue->tail = item; queue->count++; } /****************************************************************************/ /* */ /* Routine Name: ips_removeq_scb_head */ /* */ /* Routine Description: */ /* */ /* Remove the head of the queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static ips_scb_t * ips_removeq_scb_head(ips_scb_queue_t * queue) { ips_scb_t *item; METHOD_TRACE("ips_removeq_scb_head", 1); item = queue->head; if (!item) { return (NULL); } queue->head = item->q_next; item->q_next = NULL; if (queue->tail == item) queue->tail = NULL; queue->count--; return (item); } /****************************************************************************/ /* */ /* Routine Name: ips_removeq_scb */ /* */ /* Routine Description: */ /* */ /* Remove an item from a queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static ips_scb_t * ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item) { ips_scb_t *p; METHOD_TRACE("ips_removeq_scb", 1); if (!item) return (NULL); if (item == queue->head) { return (ips_removeq_scb_head(queue)); } p = queue->head; while ((p) && (item != p->q_next)) p = p->q_next; if (p) { /* found a match */ p->q_next = item->q_next; if (!item->q_next) queue->tail = p; item->q_next = NULL; queue->count--; return (item); } return (NULL); } /****************************************************************************/ /* */ /* Routine Name: ips_putq_wait_tail */ /* */ /* Routine Description: */ /* */ 
/* Add an item to the tail of the queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item) { METHOD_TRACE("ips_putq_wait_tail", 1); if (!item) return; item->host_scribble = NULL; if (queue->tail) queue->tail->host_scribble = (char *) item; queue->tail = item; if (!queue->head) queue->head = item; queue->count++; } /****************************************************************************/ /* */ /* Routine Name: ips_removeq_wait_head */ /* */ /* Routine Description: */ /* */ /* Remove the head of the queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue) { struct scsi_cmnd *item; METHOD_TRACE("ips_removeq_wait_head", 1); item = queue->head; if (!item) { return (NULL); } queue->head = (struct scsi_cmnd *) item->host_scribble; item->host_scribble = NULL; if (queue->tail == item) queue->tail = NULL; queue->count--; return (item); } /****************************************************************************/ /* */ /* Routine Name: ips_removeq_wait */ /* */ /* Routine Description: */ /* */ /* Remove an item from a queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item) { struct scsi_cmnd *p; METHOD_TRACE("ips_removeq_wait", 1); if (!item) return (NULL); if (item == queue->head) { return (ips_removeq_wait_head(queue)); } p = queue->head; while ((p) && (item != (struct scsi_cmnd *) p->host_scribble)) p = (struct scsi_cmnd *) p->host_scribble; if (p) { /* found a match */ p->host_scribble = item->host_scribble; if (!item->host_scribble) queue->tail = p; item->host_scribble = NULL; queue->count--; return (item); } return (NULL); } /****************************************************************************/ /* */ /* Routine Name: ips_putq_copp_tail */ /* */ /* Routine Description: */ /* */ /* Add an item to the tail of the queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static void ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) { METHOD_TRACE("ips_putq_copp_tail", 1); if (!item) return; item->next = NULL; if (queue->tail) queue->tail->next = item; queue->tail = item; if (!queue->head) queue->head = item; queue->count++; } /****************************************************************************/ /* */ /* Routine Name: ips_removeq_copp_head */ /* */ /* Routine Description: */ /* */ /* Remove the head of the queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static ips_copp_wait_item_t * ips_removeq_copp_head(ips_copp_queue_t * queue) { ips_copp_wait_item_t *item; METHOD_TRACE("ips_removeq_copp_head", 1); item = queue->head; if (!item) { return (NULL); } queue->head = item->next; item->next = NULL; if (queue->tail == item) queue->tail = NULL; queue->count--; return (item); } /****************************************************************************/ /* */ /* Routine Name: ips_removeq_copp */ /* */ /* Routine Description: */ /* 
*/ /* Remove an item from a queue */ /* */ /* ASSUMED to be called from within the HA lock */ /* */ /****************************************************************************/ static ips_copp_wait_item_t * ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item) { ips_copp_wait_item_t *p; METHOD_TRACE("ips_removeq_copp", 1); if (!item) return (NULL); if (item == queue->head) { return (ips_removeq_copp_head(queue)); } p = queue->head; while ((p) && (item != p->next)) p = p->next; if (p) { /* found a match */ p->next = item->next; if (!item->next) queue->tail = p; item->next = NULL; queue->count--; return (item); } return (NULL); } /****************************************************************************/ /* */ /* Routine Name: ipsintr_blocking */ /* */ /* Routine Description: */ /* */ /* Finalize an interrupt for internal commands */ /* */ /****************************************************************************/ static void ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb) { METHOD_TRACE("ipsintr_blocking", 2); ips_freescb(ha, scb); if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) { ha->waitflag = false; return; } } /****************************************************************************/ /* */ /* Routine Name: ipsintr_done */ /* */ /* Routine Description: */ /* */ /* Finalize an interrupt for non-internal commands */ /* */ /****************************************************************************/ static void ipsintr_done(ips_ha_t * ha, ips_scb_t * scb) { METHOD_TRACE("ipsintr_done", 2); if (!scb) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Spurious interrupt; scb NULL.\n"); return; } if (scb->scsi_cmd == NULL) { /* unexpected interrupt */ IPS_PRINTK(KERN_WARNING, ha->pcidev, "Spurious interrupt; scsi_cmd not set.\n"); return; } ips_done(ha, scb); } /****************************************************************************/ /* */ /* Routine Name: ips_done */ /* */ /* Routine Description: */ /* */ /* Do housekeeping on completed commands */ /* ASSUMED to be called form within the request lock */ /****************************************************************************/ static void ips_done(ips_ha_t * ha, ips_scb_t * scb) { int ret; METHOD_TRACE("ips_done", 1); if (!scb) return; if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) { ips_cleanup_passthru(ha, scb); ha->num_ioctl--; } else { /* * Check to see if this command had too much * data and had to be broke up. If so, queue * the rest of the data and continue. 
*/ if ((scb->breakup) || (scb->sg_break)) { struct scatterlist *sg; int i, sg_dma_index, ips_sg_index = 0; /* we had a data breakup */ scb->data_len = 0; sg = scsi_sglist(scb->scsi_cmd); /* Spin forward to last dma chunk */ sg_dma_index = scb->breakup; for (i = 0; i < scb->breakup; i++) sg = sg_next(sg); /* Take care of possible partial on last chunk */ ips_fill_scb_sg_single(ha, sg_dma_address(sg), scb, ips_sg_index++, sg_dma_len(sg)); for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd); sg_dma_index++, sg = sg_next(sg)) { if (ips_fill_scb_sg_single (ha, sg_dma_address(sg), scb, ips_sg_index++, sg_dma_len(sg)) < 0) break; } scb->dcdb.transfer_length = scb->data_len; scb->dcdb.cmd_attribute |= ips_command_direction[scb->scsi_cmd->cmnd[0]]; if (!(scb->dcdb.cmd_attribute & 0x3)) scb->dcdb.transfer_length = 0; if (scb->data_len >= IPS_MAX_XFER) { scb->dcdb.cmd_attribute |= IPS_TRANSFER64K; scb->dcdb.transfer_length = 0; } ret = ips_send_cmd(ha, scb); switch (ret) { case IPS_FAILURE: if (scb->scsi_cmd) { scb->scsi_cmd->result = DID_ERROR << 16; scsi_done(scb->scsi_cmd); } ips_freescb(ha, scb); break; case IPS_SUCCESS_IMM: if (scb->scsi_cmd) { scb->scsi_cmd->result = DID_ERROR << 16; scsi_done(scb->scsi_cmd); } ips_freescb(ha, scb); break; default: break; } /* end case */ return; } } /* end if passthru */ if (scb->bus) { ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); } scsi_done(scb->scsi_cmd); ips_freescb(ha, scb); } /****************************************************************************/ /* */ /* Routine Name: ips_map_status */ /* */ /* Routine Description: */ /* */ /* Map Controller Error codes to Linux Error Codes */ /* */ /****************************************************************************/ static int ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp) { int errcode; int device_error; uint32_t transfer_len; IPS_DCDB_TABLE_TAPE *tapeDCDB; IPS_SCSI_INQ_DATA inquiryData; METHOD_TRACE("ips_map_status", 1); if (scb->bus) { DEBUG_VAR(2, "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x", ips_name, ha->host_num, scb->scsi_cmd->device->channel, scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun, scb->basic_status, scb->extended_status, scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0, scb->extended_status == IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0, scb->extended_status == IPS_ERR_CKCOND ? 
scb->dcdb.sense_info[13] : 0); } /* default driver error */ errcode = DID_ERROR; device_error = 0; switch (scb->basic_status & IPS_GSC_STATUS_MASK) { case IPS_CMD_TIMEOUT: errcode = DID_TIME_OUT; break; case IPS_INVAL_OPCO: case IPS_INVAL_CMD_BLK: case IPS_INVAL_PARM_BLK: case IPS_LD_ERROR: case IPS_CMD_CMPLT_WERROR: break; case IPS_PHYS_DRV_ERROR: switch (scb->extended_status) { case IPS_ERR_SEL_TO: if (scb->bus) errcode = DID_NO_CONNECT; break; case IPS_ERR_OU_RUN: if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)) { tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; transfer_len = tapeDCDB->transfer_length; } else { transfer_len = (uint32_t) scb->dcdb.transfer_length; } if ((scb->bus) && (transfer_len < scb->data_len)) { /* Underrun - set default to no error */ errcode = DID_OK; /* Restrict access to physical DASD */ if (scb->scsi_cmd->cmnd[0] == INQUIRY) { ips_scmd_buf_read(scb->scsi_cmd, &inquiryData, sizeof (inquiryData)); if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) { errcode = DID_TIME_OUT; break; } } } else errcode = DID_ERROR; break; case IPS_ERR_RECOVERY: /* don't fail recovered errors */ if (scb->bus) errcode = DID_OK; break; case IPS_ERR_HOST_RESET: case IPS_ERR_DEV_RESET: errcode = DID_RESET; break; case IPS_ERR_CKCOND: if (scb->bus) { if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) || (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)) { tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; memcpy_and_pad(scb->scsi_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, tapeDCDB->sense_info, sizeof(tapeDCDB->sense_info), 0); } else { memcpy_and_pad(scb->scsi_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, scb->dcdb.sense_info, sizeof(scb->dcdb.sense_info), 0); } device_error = 2; /* check condition */ } errcode = DID_OK; break; default: errcode = DID_ERROR; break; } /* end switch */ } /* end switch */ scb->scsi_cmd->result = device_error | (errcode << 16); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_send_wait */ /* */ /* Routine Description: */ /* */ /* Send a command to the controller and wait for it to return */ /* */ /* The FFDC Time Stamp use this function for the callback, but doesn't */ /* actually need to wait. 
*/ /****************************************************************************/ static int ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr) { int ret; METHOD_TRACE("ips_send_wait", 1); if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */ ha->waitflag = true; ha->cmd_in_progress = scb->cdb[0]; } scb->callback = ipsintr_blocking; ret = ips_send_cmd(ha, scb); if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM)) return (ret); if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */ ret = ips_wait(ha, timeout, intr); return (ret); } /****************************************************************************/ /* */ /* Routine Name: ips_scmd_buf_write */ /* */ /* Routine Description: */ /* Write data to struct scsi_cmnd request_buffer at proper offsets */ /****************************************************************************/ static void ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count) { unsigned long flags; local_irq_save(flags); scsi_sg_copy_from_buffer(scmd, data, count); local_irq_restore(flags); } /****************************************************************************/ /* */ /* Routine Name: ips_scmd_buf_read */ /* */ /* Routine Description: */ /* Copy data from a struct scsi_cmnd to a new, linear buffer */ /****************************************************************************/ static void ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count) { unsigned long flags; local_irq_save(flags); scsi_sg_copy_to_buffer(scmd, data, count); local_irq_restore(flags); } /****************************************************************************/ /* */ /* Routine Name: ips_send_cmd */ /* */ /* Routine Description: */ /* */ /* Map SCSI commands to ServeRAID commands for logical drives */ /* */ /****************************************************************************/ static int ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) { int ret; char *sp; int device_error; IPS_DCDB_TABLE_TAPE *tapeDCDB; int TimeOut; METHOD_TRACE("ips_send_cmd", 1); ret = IPS_SUCCESS; if (!scb->scsi_cmd) { /* internal command */ if (scb->bus > 0) { /* Controller commands can't be issued */ /* to real devices -- fail them */ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) ha->waitflag = false; return (1); } } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) { /* command to logical bus -- interpret */ ret = IPS_SUCCESS_IMM; switch (scb->scsi_cmd->cmnd[0]) { case ALLOW_MEDIUM_REMOVAL: case REZERO_UNIT: case ERASE: case WRITE_FILEMARKS: case SPACE: scb->scsi_cmd->result = DID_ERROR << 16; break; case START_STOP: scb->scsi_cmd->result = DID_OK << 16; break; case TEST_UNIT_READY: case INQUIRY: if (scb->target_id == IPS_ADAPTER_ID) { /* * Either we have a TUR * or we have a SCSI inquiry */ if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY) scb->scsi_cmd->result = DID_OK << 16; if (scb->scsi_cmd->cmnd[0] == INQUIRY) { IPS_SCSI_INQ_DATA inquiry; memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA)); inquiry.DeviceType = IPS_SCSI_INQ_TYPE_PROCESSOR; inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; inquiry.Version = IPS_SCSI_INQ_REV2; inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; inquiry.AdditionalLength = 31; inquiry.Flags[0] = IPS_SCSI_INQ_Address16; inquiry.Flags[1] = IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync; memcpy(inquiry.VendorId, "IBM ", 8); memcpy(inquiry.ProductId, "SERVERAID ", 16); memcpy(inquiry.ProductRevisionLevel, "1.00", 4); ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof 
(inquiry)); scb->scsi_cmd->result = DID_OK << 16; } } else { scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.logical_info.reserved = 0; scb->cmd.logical_info.reserved2 = 0; scb->data_len = sizeof (IPS_LD_INFO); scb->data_busaddr = ha->logical_drive_info_dma_addr; scb->flags = 0; scb->cmd.logical_info.buffer_addr = scb->data_busaddr; ret = IPS_SUCCESS; } break; case REQUEST_SENSE: ips_reqsen(ha, scb); scb->scsi_cmd->result = DID_OK << 16; break; case READ_6: case WRITE_6: if (!scb->sg_len) { scb->cmd.basic_io.op_code = (scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE; scb->cmd.basic_io.enhanced_sg = 0; scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); } else { scb->cmd.basic_io.op_code = (scb->scsi_cmd->cmnd[0] == READ_6) ? IPS_CMD_READ_SG : IPS_CMD_WRITE_SG; scb->cmd.basic_io.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->sg_busaddr); } scb->cmd.basic_io.segment_4G = 0; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.basic_io.log_drv = scb->target_id; scb->cmd.basic_io.sg_count = scb->sg_len; if (scb->cmd.basic_io.lba) le32_add_cpu(&scb->cmd.basic_io.lba, le16_to_cpu(scb->cmd.basic_io. sector_count)); else scb->cmd.basic_io.lba = (((scb->scsi_cmd-> cmnd[1] & 0x1f) << 16) | (scb->scsi_cmd-> cmnd[2] << 8) | (scb->scsi_cmd->cmnd[3])); scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE); if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) scb->cmd.basic_io.sector_count = cpu_to_le16(256); ret = IPS_SUCCESS; break; case READ_10: case WRITE_10: if (!scb->sg_len) { scb->cmd.basic_io.op_code = (scb->scsi_cmd->cmnd[0] == READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE; scb->cmd.basic_io.enhanced_sg = 0; scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->data_busaddr); } else { scb->cmd.basic_io.op_code = (scb->scsi_cmd->cmnd[0] == READ_10) ? IPS_CMD_READ_SG : IPS_CMD_WRITE_SG; scb->cmd.basic_io.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; scb->cmd.basic_io.sg_addr = cpu_to_le32(scb->sg_busaddr); } scb->cmd.basic_io.segment_4G = 0; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.basic_io.log_drv = scb->target_id; scb->cmd.basic_io.sg_count = scb->sg_len; if (scb->cmd.basic_io.lba) le32_add_cpu(&scb->cmd.basic_io.lba, le16_to_cpu(scb->cmd.basic_io. 
sector_count)); else scb->cmd.basic_io.lba = ((scb->scsi_cmd->cmnd[2] << 24) | (scb-> scsi_cmd-> cmnd[3] << 16) | (scb->scsi_cmd->cmnd[4] << 8) | scb-> scsi_cmd->cmnd[5]); scb->cmd.basic_io.sector_count = cpu_to_le16(scb->data_len / IPS_BLKSIZE); if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) { /* * This is a null condition * we don't have to do anything * so just return */ scb->scsi_cmd->result = DID_OK << 16; } else ret = IPS_SUCCESS; break; case RESERVE: case RELEASE: scb->scsi_cmd->result = DID_OK << 16; break; case MODE_SENSE: scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.basic_io.segment_4G = 0; scb->cmd.basic_io.enhanced_sg = 0; scb->data_len = sizeof (*ha->enq); scb->cmd.basic_io.sg_addr = ha->enq_busaddr; ret = IPS_SUCCESS; break; case READ_CAPACITY: scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO; scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.logical_info.reserved = 0; scb->cmd.logical_info.reserved2 = 0; scb->cmd.logical_info.reserved3 = 0; scb->data_len = sizeof (IPS_LD_INFO); scb->data_busaddr = ha->logical_drive_info_dma_addr; scb->flags = 0; scb->cmd.logical_info.buffer_addr = scb->data_busaddr; ret = IPS_SUCCESS; break; case SEND_DIAGNOSTIC: case REASSIGN_BLOCKS: case FORMAT_UNIT: case SEEK_10: case VERIFY: case READ_DEFECT_DATA: case READ_BUFFER: case WRITE_BUFFER: scb->scsi_cmd->result = DID_OK << 16; break; default: /* Set the Return Info to appear like the Command was */ /* attempted, a Check Condition occurred, and Sense */ /* Data indicating an Invalid CDB OpCode is returned. */ sp = (char *) scb->scsi_cmd->sense_buffer; sp[0] = 0x70; /* Error Code */ sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */ sp[7] = 0x0A; /* Additional Sense Length */ sp[12] = 0x20; /* ASC = Invalid OpCode */ sp[13] = 0x00; /* ASCQ */ device_error = 2; /* Indicate Check Condition */ scb->scsi_cmd->result = device_error | (DID_OK << 16); break; } /* end switch */ } /* end if */ if (ret == IPS_SUCCESS_IMM) return (ret); /* setup DCDB */ if (scb->bus > 0) { /* If we already know the Device is Not there, no need to attempt a Command */ /* This also protects an NT FailOver Controller from getting CDB's sent to it */ if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) { scb->scsi_cmd->result = DID_NO_CONNECT << 16; return (IPS_SUCCESS_IMM); } ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id); scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr + (unsigned long) &scb-> dcdb - (unsigned long) scb); scb->cmd.dcdb.reserved = 0; scb->cmd.dcdb.reserved2 = 0; scb->cmd.dcdb.reserved3 = 0; scb->cmd.dcdb.segment_4G = 0; scb->cmd.dcdb.enhanced_sg = 0; TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout; if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ if (!scb->sg_len) { scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB; } else { scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB_SG; scb->cmd.dcdb.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 
0xFF : 0; } tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; /* Use Same Data Area as Old DCDB Struct */ tapeDCDB->device_address = ((scb->bus - 1) << 4) | scb->target_id; tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED; tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */ if (TimeOut) { if (TimeOut < (10 * HZ)) tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ else if (TimeOut < (60 * HZ)) tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ else if (TimeOut < (1200 * HZ)) tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ } tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len; tapeDCDB->reserved_for_LUN = 0; tapeDCDB->transfer_length = scb->data_len; if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG) tapeDCDB->buffer_pointer = cpu_to_le32(scb->sg_busaddr); else tapeDCDB->buffer_pointer = cpu_to_le32(scb->data_busaddr); tapeDCDB->sg_count = scb->sg_len; tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info); tapeDCDB->scsi_status = 0; tapeDCDB->reserved = 0; memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len); } else { if (!scb->sg_len) { scb->cmd.dcdb.op_code = IPS_CMD_DCDB; } else { scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG; scb->cmd.dcdb.enhanced_sg = IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0; } scb->dcdb.device_address = ((scb->bus - 1) << 4) | scb->target_id; scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED; if (TimeOut) { if (TimeOut < (10 * HZ)) scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */ else if (TimeOut < (60 * HZ)) scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */ else if (TimeOut < (1200 * HZ)) scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */ } scb->dcdb.transfer_length = scb->data_len; if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K) scb->dcdb.transfer_length = 0; if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG) scb->dcdb.buffer_pointer = cpu_to_le32(scb->sg_busaddr); else scb->dcdb.buffer_pointer = cpu_to_le32(scb->data_busaddr); scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len; scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info); scb->dcdb.sg_count = scb->sg_len; scb->dcdb.reserved = 0; memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd, scb->scsi_cmd->cmd_len); scb->dcdb.scsi_status = 0; scb->dcdb.reserved2[0] = 0; scb->dcdb.reserved2[1] = 0; scb->dcdb.reserved2[2] = 0; } } return ((*ha->func.issue) (ha, scb)); } /****************************************************************************/ /* */ /* Routine Name: ips_chk_status */ /* */ /* Routine Description: */ /* */ /* Check the status of commands to logical drives */ /* Assumed to be called with the HA lock */ /****************************************************************************/ static void ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus) { ips_scb_t *scb; ips_stat_t *sp; uint8_t basic_status; uint8_t ext_status; int errcode; IPS_SCSI_INQ_DATA inquiryData; METHOD_TRACE("ips_chkstatus", 1); scb = &ha->scbs[pstatus->fields.command_id]; scb->basic_status = basic_status = pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK; scb->extended_status = ext_status = pstatus->fields.extended_status; sp = &ha->sp; sp->residue_len = 0; sp->scb_addr = (void *) scb; /* Remove the item from the active queue */ ips_removeq_scb(&ha->scb_activelist, scb); if (!scb->scsi_cmd) /* internal commands are handled in do_ipsintr */ return; DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)", ips_name, ha->host_num, scb->cdb[0], scb->cmd.basic_io.command_id, scb->bus, 
scb->target_id, scb->lun); if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) /* passthru - just returns the raw result */ return; errcode = DID_OK; if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) || ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) { if (scb->bus == 0) { if ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR) { DEBUG_VAR(1, "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", ips_name, ha->host_num, scb->cmd.basic_io.op_code, basic_status, ext_status); } switch (scb->scsi_cmd->cmnd[0]) { case ALLOW_MEDIUM_REMOVAL: case REZERO_UNIT: case ERASE: case WRITE_FILEMARKS: case SPACE: errcode = DID_ERROR; break; case START_STOP: break; case TEST_UNIT_READY: if (!ips_online(ha, scb)) { errcode = DID_TIME_OUT; } break; case INQUIRY: if (ips_online(ha, scb)) { ips_inquiry(ha, scb); } else { errcode = DID_TIME_OUT; } break; case REQUEST_SENSE: ips_reqsen(ha, scb); break; case READ_6: case WRITE_6: case READ_10: case WRITE_10: case RESERVE: case RELEASE: break; case MODE_SENSE: if (!ips_online(ha, scb) || !ips_msense(ha, scb)) { errcode = DID_ERROR; } break; case READ_CAPACITY: if (ips_online(ha, scb)) ips_rdcap(ha, scb); else { errcode = DID_TIME_OUT; } break; case SEND_DIAGNOSTIC: case REASSIGN_BLOCKS: break; case FORMAT_UNIT: errcode = DID_ERROR; break; case SEEK_10: case VERIFY: case READ_DEFECT_DATA: case READ_BUFFER: case WRITE_BUFFER: break; default: errcode = DID_ERROR; } /* end switch */ scb->scsi_cmd->result = errcode << 16; } else { /* bus == 0 */ /* restrict access to physical drives */ if (scb->scsi_cmd->cmnd[0] == INQUIRY) { ips_scmd_buf_read(scb->scsi_cmd, &inquiryData, sizeof (inquiryData)); if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) scb->scsi_cmd->result = DID_TIME_OUT << 16; } } /* else */ } else { /* recovered error / success */ if (scb->bus == 0) { DEBUG_VAR(1, "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x", ips_name, ha->host_num, scb->cmd.basic_io.op_code, basic_status, ext_status); } ips_map_status(ha, scb, sp); } /* else */ } /****************************************************************************/ /* */ /* Routine Name: ips_online */ /* */ /* Routine Description: */ /* */ /* Determine if a logical drive is online */ /* */ /****************************************************************************/ static int ips_online(ips_ha_t * ha, ips_scb_t * scb) { METHOD_TRACE("ips_online", 1); if (scb->target_id >= IPS_MAX_LD) return (0); if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) { memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO)); return (0); } if (ha->logical_drive_info->drive_info[scb->target_id].state != IPS_LD_OFFLINE && ha->logical_drive_info->drive_info[scb->target_id].state != IPS_LD_FREE && ha->logical_drive_info->drive_info[scb->target_id].state != IPS_LD_CRS && ha->logical_drive_info->drive_info[scb->target_id].state != IPS_LD_SYS) return (1); else return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_inquiry */ /* */ /* Routine Description: */ /* */ /* Simulate an inquiry command to a logical drive */ /* */ /****************************************************************************/ static int ips_inquiry(ips_ha_t * ha, ips_scb_t * scb) { IPS_SCSI_INQ_DATA inquiry; METHOD_TRACE("ips_inquiry", 1); memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA)); inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD; inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED; inquiry.Version = 
IPS_SCSI_INQ_REV2; inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2; inquiry.AdditionalLength = 31; inquiry.Flags[0] = IPS_SCSI_INQ_Address16; inquiry.Flags[1] = IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue; memcpy(inquiry.VendorId, "IBM ", 8); memcpy(inquiry.ProductId, "SERVERAID ", 16); memcpy(inquiry.ProductRevisionLevel, "1.00", 4); ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_rdcap */ /* */ /* Routine Description: */ /* */ /* Simulate a read capacity command to a logical drive */ /* */ /****************************************************************************/ static int ips_rdcap(ips_ha_t * ha, ips_scb_t * scb) { IPS_SCSI_CAPACITY cap; METHOD_TRACE("ips_rdcap", 1); if (scsi_bufflen(scb->scsi_cmd) < 8) return (0); cap.lba = cpu_to_be32(le32_to_cpu (ha->logical_drive_info-> drive_info[scb->target_id].sector_count) - 1); cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE); ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_msense */ /* */ /* Routine Description: */ /* */ /* Simulate a mode sense command to a logical drive */ /* */ /****************************************************************************/ static int ips_msense(ips_ha_t * ha, ips_scb_t * scb) { uint16_t heads; uint16_t sectors; uint32_t cylinders; IPS_SCSI_MODE_PAGE_DATA mdata; METHOD_TRACE("ips_msense", 1); if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 && (ha->enq->ucMiscFlag & 0x8) == 0) { heads = IPS_NORM_HEADS; sectors = IPS_NORM_SECTORS; } else { heads = IPS_COMP_HEADS; sectors = IPS_COMP_SECTORS; } cylinders = (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) - 1) / (heads * sectors); memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA)); mdata.hdr.BlockDescLength = 8; switch (scb->scsi_cmd->cmnd[2] & 0x3f) { case 0x03: /* page 3 */ mdata.pdata.pg3.PageCode = 3; mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3); mdata.hdr.DataLength = 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength; mdata.pdata.pg3.TracksPerZone = 0; mdata.pdata.pg3.AltSectorsPerZone = 0; mdata.pdata.pg3.AltTracksPerZone = 0; mdata.pdata.pg3.AltTracksPerVolume = 0; mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE); mdata.pdata.pg3.Interleave = cpu_to_be16(1); mdata.pdata.pg3.TrackSkew = 0; mdata.pdata.pg3.CylinderSkew = 0; mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector; break; case 0x4: mdata.pdata.pg4.PageCode = 4; mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4); mdata.hdr.DataLength = 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength; mdata.pdata.pg4.CylindersHigh = cpu_to_be16((cylinders >> 8) & 0xFFFF); mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF); mdata.pdata.pg4.Heads = heads; mdata.pdata.pg4.WritePrecompHigh = 0; mdata.pdata.pg4.WritePrecompLow = 0; mdata.pdata.pg4.ReducedWriteCurrentHigh = 0; mdata.pdata.pg4.ReducedWriteCurrentLow = 0; mdata.pdata.pg4.StepRate = cpu_to_be16(1); mdata.pdata.pg4.LandingZoneHigh = 0; mdata.pdata.pg4.LandingZoneLow = 0; mdata.pdata.pg4.flags = 0; mdata.pdata.pg4.RotationalOffset = 0; mdata.pdata.pg4.MediumRotationRate = 0; break; case 0x8: mdata.pdata.pg8.PageCode = 8; mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8); mdata.hdr.DataLength = 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength; /* 
everything else is left set to 0 */ break; default: return (0); } /* end switch */ ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_reqsen */ /* */ /* Routine Description: */ /* */ /* Simulate a request sense command to a logical drive */ /* */ /****************************************************************************/ static int ips_reqsen(ips_ha_t * ha, ips_scb_t * scb) { IPS_SCSI_REQSEN reqsen; METHOD_TRACE("ips_reqsen", 1); memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN)); reqsen.ResponseCode = IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR; reqsen.AdditionalLength = 10; reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE; reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE; ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_free */ /* */ /* Routine Description: */ /* */ /* Free any allocated space for this controller */ /* */ /****************************************************************************/ static void ips_free(ips_ha_t * ha) { METHOD_TRACE("ips_free", 1); if (ha) { if (ha->enq) { dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ), ha->enq, ha->enq_busaddr); ha->enq = NULL; } kfree(ha->conf); ha->conf = NULL; if (ha->adapt) { dma_free_coherent(&ha->pcidev->dev, sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD), ha->adapt, ha->adapt->hw_status_start); ha->adapt = NULL; } if (ha->logical_drive_info) { dma_free_coherent(&ha->pcidev->dev, sizeof (IPS_LD_INFO), ha->logical_drive_info, ha->logical_drive_info_dma_addr); ha->logical_drive_info = NULL; } kfree(ha->nvram); ha->nvram = NULL; kfree(ha->subsys); ha->subsys = NULL; if (ha->ioctl_data) { dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len, ha->ioctl_data, ha->ioctl_busaddr); ha->ioctl_data = NULL; ha->ioctl_datasize = 0; ha->ioctl_len = 0; } ips_deallocatescbs(ha, ha->max_cmds); /* free memory mapped (if applicable) */ if (ha->mem_ptr) { iounmap(ha->ioremap_ptr); ha->ioremap_ptr = NULL; ha->mem_ptr = NULL; } ha->mem_addr = 0; } } /****************************************************************************/ /* */ /* Routine Name: ips_deallocatescbs */ /* */ /* Routine Description: */ /* */ /* Free the command blocks */ /* */ /****************************************************************************/ static int ips_deallocatescbs(ips_ha_t * ha, int cmds) { if (ha->scbs) { dma_free_coherent(&ha->pcidev->dev, IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds, ha->scbs->sg_list.list, ha->scbs->sg_busaddr); dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds, ha->scbs, ha->scbs->scb_busaddr); ha->scbs = NULL; } /* end if */ return 1; } /****************************************************************************/ /* */ /* Routine Name: ips_allocatescbs */ /* */ /* Routine Description: */ /* */ /* Allocate the command blocks */ /* */ /****************************************************************************/ static int ips_allocatescbs(ips_ha_t * ha) { ips_scb_t *scb_p; IPS_SG_LIST ips_sg; int i; dma_addr_t command_dma, sg_dma; METHOD_TRACE("ips_allocatescbs", 1); /* Allocate memory for the SCBs */ ha->scbs = dma_alloc_coherent(&ha->pcidev->dev, ha->max_cmds * sizeof (ips_scb_t), &command_dma, GFP_KERNEL); if (ha->scbs == NULL) return 0; ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev, IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds, &sg_dma, GFP_KERNEL); if 
(ips_sg.list == NULL) { dma_free_coherent(&ha->pcidev->dev, ha->max_cmds * sizeof (ips_scb_t), ha->scbs, command_dma); return 0; } memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t)); for (i = 0; i < ha->max_cmds; i++) { scb_p = &ha->scbs[i]; scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i; /* set up S/G list */ if (IPS_USE_ENH_SGLIST(ha)) { scb_p->sg_list.enh_list = ips_sg.enh_list + i * IPS_MAX_SG; scb_p->sg_busaddr = sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; } else { scb_p->sg_list.std_list = ips_sg.std_list + i * IPS_MAX_SG; scb_p->sg_busaddr = sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i; } /* add to the free list */ if (i < ha->max_cmds - 1) { scb_p->q_next = ha->scb_freelist; ha->scb_freelist = scb_p; } } /* success */ return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_init_scb */ /* */ /* Routine Description: */ /* */ /* Initialize a CCB to default values */ /* */ /****************************************************************************/ static void ips_init_scb(ips_ha_t * ha, ips_scb_t * scb) { IPS_SG_LIST sg_list; uint32_t cmd_busaddr, sg_busaddr; METHOD_TRACE("ips_init_scb", 1); if (scb == NULL) return; sg_list.list = scb->sg_list.list; cmd_busaddr = scb->scb_busaddr; sg_busaddr = scb->sg_busaddr; /* zero fill */ memset(scb, 0, sizeof (ips_scb_t)); memset(ha->dummy, 0, sizeof (IPS_IO_CMD)); /* Initialize dummy command bucket */ ha->dummy->op_code = 0xFF; ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start + sizeof (IPS_ADAPTER)); ha->dummy->command_id = IPS_MAX_CMDS; /* set bus address of scb */ scb->scb_busaddr = cmd_busaddr; scb->sg_busaddr = sg_busaddr; scb->sg_list.list = sg_list.list; /* Neptune Fix */ scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE); scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start + sizeof (IPS_ADAPTER)); } /****************************************************************************/ /* */ /* Routine Name: ips_get_scb */ /* */ /* Routine Description: */ /* */ /* Initialize a CCB to default values */ /* */ /* ASSUMED to be called from within a lock */ /* */ /****************************************************************************/ static ips_scb_t * ips_getscb(ips_ha_t * ha) { ips_scb_t *scb; METHOD_TRACE("ips_getscb", 1); if ((scb = ha->scb_freelist) == NULL) { return (NULL); } ha->scb_freelist = scb->q_next; scb->flags = 0; scb->q_next = NULL; ips_init_scb(ha, scb); return (scb); } /****************************************************************************/ /* */ /* Routine Name: ips_free_scb */ /* */ /* Routine Description: */ /* */ /* Return an unused CCB back to the free list */ /* */ /* ASSUMED to be called from within a lock */ /* */ /****************************************************************************/ static void ips_freescb(ips_ha_t * ha, ips_scb_t * scb) { METHOD_TRACE("ips_freescb", 1); if (scb->flags & IPS_SCB_MAP_SG) scsi_dma_unmap(scb->scsi_cmd); else if (scb->flags & IPS_SCB_MAP_SINGLE) dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr, scb->data_len, IPS_DMA_DIR(scb)); /* check to make sure this is not our "special" scb */ if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { scb->q_next = ha->scb_freelist; ha->scb_freelist = scb; } } /****************************************************************************/ /* */ /* Routine Name: ips_isinit_copperhead */ /* */ /* Routine Description: */ /* */ /* Is controller initialized ? 
*/ /* */ /****************************************************************************/ static int ips_isinit_copperhead(ips_ha_t * ha) { uint8_t scpr; uint8_t isr; METHOD_TRACE("ips_isinit_copperhead", 1); isr = inb(ha->io_addr + IPS_REG_HISR); scpr = inb(ha->io_addr + IPS_REG_SCPR); if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) return (0); else return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_isinit_copperhead_memio */ /* */ /* Routine Description: */ /* */ /* Is controller initialized ? */ /* */ /****************************************************************************/ static int ips_isinit_copperhead_memio(ips_ha_t * ha) { uint8_t isr = 0; uint8_t scpr; METHOD_TRACE("ips_is_init_copperhead_memio", 1); isr = readb(ha->mem_ptr + IPS_REG_HISR); scpr = readb(ha->mem_ptr + IPS_REG_SCPR); if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0)) return (0); else return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_isinit_morpheus */ /* */ /* Routine Description: */ /* */ /* Is controller initialized ? */ /* */ /****************************************************************************/ static int ips_isinit_morpheus(ips_ha_t * ha) { uint32_t post; uint32_t bits; METHOD_TRACE("ips_is_init_morpheus", 1); if (ips_isintr_morpheus(ha)) ips_flush_and_reset(ha); post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR); if (post == 0) return (0); else if (bits & 0x3) return (0); else return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_flush_and_reset */ /* */ /* Routine Description: */ /* */ /* Perform cleanup ( FLUSH and RESET ) when the adapter is in an unknown */ /* state ( was trying to INIT and an interrupt was already pending ) ... */ /* */ /****************************************************************************/ static void ips_flush_and_reset(ips_ha_t *ha) { ips_scb_t *scb; int ret; int time; int done; dma_addr_t command_dma; /* Create a usable SCB */ scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), &command_dma, GFP_KERNEL); if (scb) { memset(scb, 0, sizeof(ips_scb_t)); ips_init_scb(ha, scb); scb->scb_busaddr = command_dma; scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_FLUSH; scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH; scb->cmd.flush_cache.command_id = IPS_MAX_CMDS; /* Use an ID that would otherwise not exist */ scb->cmd.flush_cache.state = IPS_NORM_STATE; scb->cmd.flush_cache.reserved = 0; scb->cmd.flush_cache.reserved2 = 0; scb->cmd.flush_cache.reserved3 = 0; scb->cmd.flush_cache.reserved4 = 0; ret = ips_send_cmd(ha, scb); /* Send the Flush Command */ if (ret == IPS_SUCCESS) { time = 60 * IPS_ONE_SEC; /* Max Wait time is 60 seconds */ done = 0; while ((time > 0) && (!done)) { done = ips_poll_for_flush_complete(ha); /* This may look evil, but it's only done during extremely rare start-up conditions !
*/ udelay(1000); time--; } } } /* Now RESET and INIT the adapter */ (*ha->func.reset) (ha); dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma); return; } /****************************************************************************/ /* */ /* Routine Name: ips_poll_for_flush_complete */ /* */ /* Routine Description: */ /* */ /* Poll for the Flush Command issued by ips_flush_and_reset() to complete */ /* All other responses are just taken off the queue and ignored */ /* */ /****************************************************************************/ static int ips_poll_for_flush_complete(ips_ha_t * ha) { IPS_STATUS cstatus; while (true) { cstatus.value = (*ha->func.statupd) (ha); if (cstatus.value == 0xffffffff) /* If No Interrupt to process */ break; /* Success is when we see the Flush Command ID */ if (cstatus.fields.command_id == IPS_MAX_CMDS) return 1; } return 0; } /****************************************************************************/ /* */ /* Routine Name: ips_enable_int_copperhead */ /* */ /* Routine Description: */ /* Turn on interrupts */ /* */ /****************************************************************************/ static void ips_enable_int_copperhead(ips_ha_t * ha) { METHOD_TRACE("ips_enable_int_copperhead", 1); outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); inb(ha->io_addr + IPS_REG_HISR); /*Ensure PCI Posting Completes*/ } /****************************************************************************/ /* */ /* Routine Name: ips_enable_int_copperhead_memio */ /* */ /* Routine Description: */ /* Turn on interrupts */ /* */ /****************************************************************************/ static void ips_enable_int_copperhead_memio(ips_ha_t * ha) { METHOD_TRACE("ips_enable_int_copperhead_memio", 1); writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); readb(ha->mem_ptr + IPS_REG_HISR); /*Ensure PCI Posting Completes*/ } /****************************************************************************/ /* */ /* Routine Name: ips_enable_int_morpheus */ /* */ /* Routine Description: */ /* Turn on interrupts */ /* */ /****************************************************************************/ static void ips_enable_int_morpheus(ips_ha_t * ha) { uint32_t Oimr; METHOD_TRACE("ips_enable_int_morpheus", 1); Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); Oimr &= ~0x08; writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); readl(ha->mem_ptr + IPS_REG_I960_OIMR); /*Ensure PCI Posting Completes*/ } /****************************************************************************/ /* */ /* Routine Name: ips_init_copperhead */ /* */ /* Routine Description: */ /* */ /* Initialize a copperhead controller */ /* */ /****************************************************************************/ static int ips_init_copperhead(ips_ha_t * ha) { uint8_t Isr; uint8_t Cbsp; uint8_t PostByte[IPS_MAX_POST_BYTES]; int i, j; METHOD_TRACE("ips_init_copperhead", 1); for (i = 0; i < IPS_MAX_POST_BYTES; i++) { for (j = 0; j < 45; j++) { Isr = inb(ha->io_addr + IPS_REG_HISR); if (Isr & IPS_BIT_GHI) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (j >= 45) /* error occurred */ return (0); PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR); outb(Isr, ha->io_addr + IPS_REG_HISR); } if (PostByte[0] < IPS_GOOD_POST_STATUS) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "reset controller fails (post status %x %x).\n", PostByte[0], PostByte[1]); return (0); } for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { for (j = 0; j < 240; j++) { Isr = inb(ha->io_addr + IPS_REG_HISR); if (Isr & IPS_BIT_GHI)
break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (j >= 240) /* error occurred */ return (0); inb(ha->io_addr + IPS_REG_ISPR); outb(Isr, ha->io_addr + IPS_REG_HISR); } for (i = 0; i < 240; i++) { Cbsp = inb(ha->io_addr + IPS_REG_CBSP); if ((Cbsp & IPS_BIT_OP) == 0) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (i >= 240) /* reset failed */ return (0); /* setup CCCR */ outl(0x1010, ha->io_addr + IPS_REG_CCCR); /* Enable busmastering */ outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) /* fix for anaconda64 */ outl(0, ha->io_addr + IPS_REG_NDAE); /* Enable interrupts */ outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_init_copperhead_memio */ /* */ /* Routine Description: */ /* */ /* Initialize a copperhead controller with memory mapped I/O */ /* */ /****************************************************************************/ static int ips_init_copperhead_memio(ips_ha_t * ha) { uint8_t Isr = 0; uint8_t Cbsp; uint8_t PostByte[IPS_MAX_POST_BYTES]; int i, j; METHOD_TRACE("ips_init_copperhead_memio", 1); for (i = 0; i < IPS_MAX_POST_BYTES; i++) { for (j = 0; j < 45; j++) { Isr = readb(ha->mem_ptr + IPS_REG_HISR); if (Isr & IPS_BIT_GHI) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (j >= 45) /* error occurred */ return (0); PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR); writeb(Isr, ha->mem_ptr + IPS_REG_HISR); } if (PostByte[0] < IPS_GOOD_POST_STATUS) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "reset controller fails (post status %x %x).\n", PostByte[0], PostByte[1]); return (0); } for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) { for (j = 0; j < 240; j++) { Isr = readb(ha->mem_ptr + IPS_REG_HISR); if (Isr & IPS_BIT_GHI) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (j >= 240) /* error occurred */ return (0); readb(ha->mem_ptr + IPS_REG_ISPR); writeb(Isr, ha->mem_ptr + IPS_REG_HISR); } for (i = 0; i < 240; i++) { Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP); if ((Cbsp & IPS_BIT_OP) == 0) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (i >= 240) /* error occurred */ return (0); /* setup CCCR */ writel(0x1010, ha->mem_ptr + IPS_REG_CCCR); /* Enable busmastering */ writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) /* fix for anaconda64 */ writel(0, ha->mem_ptr + IPS_REG_NDAE); /* Enable interrupts */ writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR); /* if we get here then everything went OK */ return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_init_morpheus */ /* */ /* Routine Description: */ /* */ /* Initialize a morpheus controller */ /* */ /****************************************************************************/ static int ips_init_morpheus(ips_ha_t * ha) { uint32_t Post; uint32_t Config; uint32_t Isr; uint32_t Oimr; int i; METHOD_TRACE("ips_init_morpheus", 1); /* Wait up to 45 secs for Post */ for (i = 0; i < 45; i++) { Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); if (Isr & IPS_BIT_I960_MSG0I) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (i >= 45) { /* error occurred */ IPS_PRINTK(KERN_WARNING, ha->pcidev, "timeout waiting for post.\n"); return (0); } Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); if (Post == 0x4F00) { /* If Flashing the Battery PIC */ IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flashing Battery PIC, Please wait ...\n"); /* Clear the interrupt bit 
*/ Isr = (uint32_t) IPS_BIT_I960_MSG0I; writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. for Completion */ Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0); if (Post != 0x4F00) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (i >= 120) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "timeout waiting for Battery PIC Flash\n"); return (0); } } /* Clear the interrupt bit */ Isr = (uint32_t) IPS_BIT_I960_MSG0I; writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); if (Post < (IPS_GOOD_POST_STATUS << 8)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "reset controller fails (post status %x).\n", Post); return (0); } /* Wait up to 240 secs for config bytes */ for (i = 0; i < 240; i++) { Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); if (Isr & IPS_BIT_I960_MSG1I) break; /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); } if (i >= 240) { /* error occurred */ IPS_PRINTK(KERN_WARNING, ha->pcidev, "timeout waiting for config.\n"); return (0); } Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1); /* Clear interrupt bit */ Isr = (uint32_t) IPS_BIT_I960_MSG1I; writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR); /* Turn on the interrupts */ Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR); Oimr &= ~0x8; writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR); /* if we get here then everything went OK */ /* Since we did a RESET, an EraseStripeLock may be needed */ if (Post == 0xEF10) { if ((Config == 0x000F) || (Config == 0x0009)) ha->requires_esl = 1; } return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_reset_copperhead */ /* */ /* Routine Description: */ /* */ /* Reset the controller */ /* */ /****************************************************************************/ static int ips_reset_copperhead(ips_ha_t * ha) { int reset_counter; METHOD_TRACE("ips_reset_copperhead", 1); DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d", ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq); reset_counter = 0; while (reset_counter < 2) { reset_counter++; outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); outb(0, ha->io_addr + IPS_REG_SCPR); /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); if ((*ha->func.init) (ha)) break; else if (reset_counter >= 2) { return (0); } } return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_reset_copperhead_memio */ /* */ /* Routine Description: */ /* */ /* Reset the controller */ /* */ /****************************************************************************/ static int ips_reset_copperhead_memio(ips_ha_t * ha) { int reset_counter; METHOD_TRACE("ips_reset_copperhead_memio", 1); DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d", ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq); reset_counter = 0; while (reset_counter < 2) { reset_counter++; writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); writeb(0, ha->mem_ptr + IPS_REG_SCPR); /* Delay for 1 Second */ MDELAY(IPS_ONE_SEC); if ((*ha->func.init) (ha)) break; else if (reset_counter >= 2) { return (0); } } return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_reset_morpheus */ /* */ /* Routine Description: */ /* */ /* Reset the controller */ /* */ /****************************************************************************/ static int ips_reset_morpheus(ips_ha_t * ha) { int reset_counter; uint8_t junk; 
METHOD_TRACE("ips_reset_morpheus", 1); DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d", ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq); reset_counter = 0; while (reset_counter < 2) { reset_counter++; writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); /* Delay for 5 Seconds */ MDELAY(5 * IPS_ONE_SEC); /* Do a PCI config read to wait for adapter */ pci_read_config_byte(ha->pcidev, 4, &junk); if ((*ha->func.init) (ha)) break; else if (reset_counter >= 2) { return (0); } } return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_statinit */ /* */ /* Routine Description: */ /* */ /* Initialize the status queues on the controller */ /* */ /****************************************************************************/ static void ips_statinit(ips_ha_t * ha) { uint32_t phys_status_start; METHOD_TRACE("ips_statinit", 1); ha->adapt->p_status_start = ha->adapt->status; ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; ha->adapt->p_status_tail = ha->adapt->status; phys_status_start = ha->adapt->hw_status_start; outl(phys_status_start, ha->io_addr + IPS_REG_SQSR); outl(phys_status_start + IPS_STATUS_Q_SIZE, ha->io_addr + IPS_REG_SQER); outl(phys_status_start + IPS_STATUS_SIZE, ha->io_addr + IPS_REG_SQHR); outl(phys_status_start, ha->io_addr + IPS_REG_SQTR); ha->adapt->hw_status_tail = phys_status_start; } /****************************************************************************/ /* */ /* Routine Name: ips_statinit_memio */ /* */ /* Routine Description: */ /* */ /* Initialize the status queues on the controller */ /* */ /****************************************************************************/ static void ips_statinit_memio(ips_ha_t * ha) { uint32_t phys_status_start; METHOD_TRACE("ips_statinit_memio", 1); ha->adapt->p_status_start = ha->adapt->status; ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS; ha->adapt->p_status_tail = ha->adapt->status; phys_status_start = ha->adapt->hw_status_start; writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR); writel(phys_status_start + IPS_STATUS_Q_SIZE, ha->mem_ptr + IPS_REG_SQER); writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR); writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR); ha->adapt->hw_status_tail = phys_status_start; } /****************************************************************************/ /* */ /* Routine Name: ips_statupd_copperhead */ /* */ /* Routine Description: */ /* */ /* Remove an element from the status queue */ /* */ /****************************************************************************/ static uint32_t ips_statupd_copperhead(ips_ha_t * ha) { METHOD_TRACE("ips_statupd_copperhead", 1); if (ha->adapt->p_status_tail != ha->adapt->p_status_end) { ha->adapt->p_status_tail++; ha->adapt->hw_status_tail += sizeof (IPS_STATUS); } else { ha->adapt->p_status_tail = ha->adapt->p_status_start; ha->adapt->hw_status_tail = ha->adapt->hw_status_start; } outl(ha->adapt->hw_status_tail, ha->io_addr + IPS_REG_SQTR); return (ha->adapt->p_status_tail->value); } /****************************************************************************/ /* */ /* Routine Name: ips_statupd_copperhead_memio */ /* */ /* Routine Description: */ /* */ /* Remove an element from the status queue */ /* */ /****************************************************************************/ static uint32_t ips_statupd_copperhead_memio(ips_ha_t * ha) { METHOD_TRACE("ips_statupd_copperhead_memio", 1); if (ha->adapt->p_status_tail != 
ha->adapt->p_status_end) { ha->adapt->p_status_tail++; ha->adapt->hw_status_tail += sizeof (IPS_STATUS); } else { ha->adapt->p_status_tail = ha->adapt->p_status_start; ha->adapt->hw_status_tail = ha->adapt->hw_status_start; } writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR); return (ha->adapt->p_status_tail->value); } /****************************************************************************/ /* */ /* Routine Name: ips_statupd_morpheus */ /* */ /* Routine Description: */ /* */ /* Remove an element from the status queue */ /* */ /****************************************************************************/ static uint32_t ips_statupd_morpheus(ips_ha_t * ha) { uint32_t val; METHOD_TRACE("ips_statupd_morpheus", 1); val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ); return (val); } /****************************************************************************/ /* */ /* Routine Name: ips_issue_copperhead */ /* */ /* Routine Description: */ /* */ /* Send a command down to the controller */ /* */ /****************************************************************************/ static int ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb) { uint32_t TimeOut; uint32_t val; METHOD_TRACE("ips_issue_copperhead", 1); if (scb->scsi_cmd) { DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", ips_name, ha->host_num, scb->cdb[0], scb->cmd.basic_io.command_id, scb->bus, scb->target_id, scb->lun); } else { DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d", ips_name, ha->host_num, scb->cmd.basic_io.command_id); } TimeOut = 0; while ((val = le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) { udelay(1000); if (++TimeOut >= IPS_SEM_TIMEOUT) { if (!(val & IPS_BIT_START_STOP)) break; IPS_PRINTK(KERN_WARNING, ha->pcidev, "ips_issue val [0x%x].\n", val); IPS_PRINTK(KERN_WARNING, ha->pcidev, "ips_issue semaphore chk timeout.\n"); return (IPS_FAILURE); } /* end if */ } /* end while */ outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR); outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR); return (IPS_SUCCESS); } /****************************************************************************/ /* */ /* Routine Name: ips_issue_copperhead_memio */ /* */ /* Routine Description: */ /* */ /* Send a command down to the controller */ /* */ /****************************************************************************/ static int ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb) { uint32_t TimeOut; uint32_t val; METHOD_TRACE("ips_issue_copperhead_memio", 1); if (scb->scsi_cmd) { DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", ips_name, ha->host_num, scb->cdb[0], scb->cmd.basic_io.command_id, scb->bus, scb->target_id, scb->lun); } else { DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", ips_name, ha->host_num, scb->cmd.basic_io.command_id); } TimeOut = 0; while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) { udelay(1000); if (++TimeOut >= IPS_SEM_TIMEOUT) { if (!(val & IPS_BIT_START_STOP)) break; IPS_PRINTK(KERN_WARNING, ha->pcidev, "ips_issue val [0x%x].\n", val); IPS_PRINTK(KERN_WARNING, ha->pcidev, "ips_issue semaphore chk timeout.\n"); return (IPS_FAILURE); } /* end if */ } /* end while */ writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR); writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR); return (IPS_SUCCESS); } /****************************************************************************/ /* */ /* Routine Name: ips_issue_i2o */ /* */ /* Routine Description: */ /* */ /* Send a command down to the controller */ /* */ 
/****************************************************************************/ static int ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb) { METHOD_TRACE("ips_issue_i2o", 1); if (scb->scsi_cmd) { DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", ips_name, ha->host_num, scb->cdb[0], scb->cmd.basic_io.command_id, scb->bus, scb->target_id, scb->lun); } else { DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", ips_name, ha->host_num, scb->cmd.basic_io.command_id); } outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ); return (IPS_SUCCESS); } /****************************************************************************/ /* */ /* Routine Name: ips_issue_i2o_memio */ /* */ /* Routine Description: */ /* */ /* Send a command down to the controller */ /* */ /****************************************************************************/ static int ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb) { METHOD_TRACE("ips_issue_i2o_memio", 1); if (scb->scsi_cmd) { DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)", ips_name, ha->host_num, scb->cdb[0], scb->cmd.basic_io.command_id, scb->bus, scb->target_id, scb->lun); } else { DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d", ips_name, ha->host_num, scb->cmd.basic_io.command_id); } writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ); return (IPS_SUCCESS); } /****************************************************************************/ /* */ /* Routine Name: ips_isintr_copperhead */ /* */ /* Routine Description: */ /* */ /* Test to see if an interrupt is for us */ /* */ /****************************************************************************/ static int ips_isintr_copperhead(ips_ha_t * ha) { uint8_t Isr; METHOD_TRACE("ips_isintr_copperhead", 2); Isr = inb(ha->io_addr + IPS_REG_HISR); if (Isr == 0xFF) /* ?!?! Nothing really there */ return (0); if (Isr & IPS_BIT_SCE) return (1); else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { /* status queue overflow or GHI */ /* just clear the interrupt */ outb(Isr, ha->io_addr + IPS_REG_HISR); } return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_isintr_copperhead_memio */ /* */ /* Routine Description: */ /* */ /* Test to see if an interrupt is for us */ /* */ /****************************************************************************/ static int ips_isintr_copperhead_memio(ips_ha_t * ha) { uint8_t Isr; METHOD_TRACE("ips_isintr_memio", 2); Isr = readb(ha->mem_ptr + IPS_REG_HISR); if (Isr == 0xFF) /* ?!?! 
Nothing really there */ return (0); if (Isr & IPS_BIT_SCE) return (1); else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) { /* status queue overflow or GHI */ /* just clear the interrupt */ writeb(Isr, ha->mem_ptr + IPS_REG_HISR); } return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_isintr_morpheus */ /* */ /* Routine Description: */ /* */ /* Test to see if an interrupt is for us */ /* */ /****************************************************************************/ static int ips_isintr_morpheus(ips_ha_t * ha) { uint32_t Isr; METHOD_TRACE("ips_isintr_morpheus", 2); Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR); if (Isr & IPS_BIT_I2O_OPQI) return (1); else return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_wait */ /* */ /* Routine Description: */ /* */ /* Wait for a command to complete */ /* */ /****************************************************************************/ static int ips_wait(ips_ha_t * ha, int time, int intr) { int ret; int done; METHOD_TRACE("ips_wait", 1); ret = IPS_FAILURE; done = false; time *= IPS_ONE_SEC; /* convert seconds */ while ((time > 0) && (!done)) { if (intr == IPS_INTR_ON) { if (!ha->waitflag) { ret = IPS_SUCCESS; done = true; break; } } else if (intr == IPS_INTR_IORL) { if (!ha->waitflag) { /* * controller generated an interrupt to * acknowledge completion of the command * and ips_intr() has serviced the interrupt. */ ret = IPS_SUCCESS; done = true; break; } /* * NOTE: we already have the io_request_lock so * even if we get an interrupt it won't get serviced * until after we finish. */ (*ha->func.intr) (ha); } /* This looks like a very evil loop, but it only does this during start-up */ udelay(1000); time--; } return (ret); } /****************************************************************************/ /* */ /* Routine Name: ips_write_driver_status */ /* */ /* Routine Description: */ /* */ /* Write OS/Driver version to Page 5 of the nvram on the controller */ /* */ /****************************************************************************/ static int ips_write_driver_status(ips_ha_t * ha, int intr) { METHOD_TRACE("ips_write_driver_status", 1); if (!ips_readwrite_page5(ha, false, intr)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "unable to read NVRAM page 5.\n"); return (0); } /* check to make sure the page has a valid */ /* signature */ if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) { DEBUG_VAR(1, "(%s%d) NVRAM page 5 has an invalid signature: %X.", ips_name, ha->host_num, ha->nvram->signature); ha->nvram->signature = IPS_NVRAM_P5_SIG; } DEBUG_VAR(2, "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.", ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type), ha->nvram->adapter_slot, ha->nvram->bios_high[0], ha->nvram->bios_high[1], ha->nvram->bios_high[2], ha->nvram->bios_high[3], ha->nvram->bios_low[0], ha->nvram->bios_low[1], ha->nvram->bios_low[2], ha->nvram->bios_low[3]); ips_get_bios_version(ha, intr); /* change values (as needed) */ ha->nvram->operating_system = IPS_OS_LINUX; ha->nvram->adapter_type = ha->ad_type; memcpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4); memcpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4); memcpy((char *) ha->nvram->bios_high, ha->bios_version, 4); memcpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4); ha->nvram->versioning = 0; /* Indicate the Driver Does Not Support Versioning */ /* now update the page */ if (!ips_readwrite_page5(ha, true, intr)) 
{ IPS_PRINTK(KERN_WARNING, ha->pcidev, "unable to write NVRAM page 5.\n"); return (0); } /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */ ha->slot_num = ha->nvram->adapter_slot; return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_read_adapter_status */ /* */ /* Routine Description: */ /* */ /* Do an Inquiry command to the adapter */ /* */ /****************************************************************************/ static int ips_read_adapter_status(ips_ha_t * ha, int intr) { ips_scb_t *scb; int ret; METHOD_TRACE("ips_read_adapter_status", 1); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_ENQUIRY; scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.basic_io.sg_count = 0; scb->cmd.basic_io.lba = 0; scb->cmd.basic_io.sector_count = 0; scb->cmd.basic_io.log_drv = 0; scb->data_len = sizeof (*ha->enq); scb->cmd.basic_io.sg_addr = ha->enq_busaddr; /* send command */ if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) return (0); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_read_subsystem_parameters */ /* */ /* Routine Description: */ /* */ /* Read subsystem parameters from the adapter */ /* */ /****************************************************************************/ static int ips_read_subsystem_parameters(ips_ha_t * ha, int intr) { ips_scb_t *scb; int ret; METHOD_TRACE("ips_read_subsystem_parameters", 1); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_GET_SUBSYS; scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.basic_io.sg_count = 0; scb->cmd.basic_io.lba = 0; scb->cmd.basic_io.sector_count = 0; scb->cmd.basic_io.log_drv = 0; scb->data_len = sizeof (*ha->subsys); scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr; /* send command */ if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) return (0); memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_read_config */ /* */ /* Routine Description: */ /* */ /* Read the configuration on the adapter */ /* */ /****************************************************************************/ static int ips_read_config(ips_ha_t * ha, int intr) { ips_scb_t *scb; int i; int ret; METHOD_TRACE("ips_read_config", 1); /* set defaults for initiator IDs */ for (i = 0; i < 4; i++) ha->conf->init_id[i] = 7; scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_READ_CONF; scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF; scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb); scb->data_len = sizeof (*ha->conf); scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr; /* send command */ if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { memset(ha->conf, 0, sizeof (IPS_CONF)); /* reset initiator IDs */ for (i = 0; i < 4; i++) ha->conf->init_id[i] = 7; /* Allow Completed 
with Errors, so JCRM can access the Adapter to fix the problems */ if ((scb->basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_CMPLT_WERROR) return (1); return (0); } memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_readwrite_page5 */ /* */ /* Routine Description: */ /* */ /* Read nvram page 5 from the adapter */ /* */ /****************************************************************************/ static int ips_readwrite_page5(ips_ha_t * ha, int write, int intr) { ips_scb_t *scb; int ret; METHOD_TRACE("ips_readwrite_page5", 1); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE; scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE; scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.nvram.page = 5; scb->cmd.nvram.write = write; scb->cmd.nvram.reserved = 0; scb->cmd.nvram.reserved2 = 0; scb->data_len = sizeof (*ha->nvram); scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr; if (write) memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram)); /* issue the command */ if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) { memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5)); return (0); } if (!write) memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram)); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_clear_adapter */ /* */ /* Routine Description: */ /* */ /* Clear the stripe lock tables */ /* */ /****************************************************************************/ static int ips_clear_adapter(ips_ha_t * ha, int intr) { ips_scb_t *scb; int ret; METHOD_TRACE("ips_clear_adapter", 1); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_reset_timeout; scb->cdb[0] = IPS_CMD_CONFIG_SYNC; scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC; scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.config_sync.channel = 0; scb->cmd.config_sync.source_target = IPS_POCL; scb->cmd.config_sync.reserved = 0; scb->cmd.config_sync.reserved2 = 0; scb->cmd.config_sync.reserved3 = 0; /* issue command */ if (((ret = ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) return (0); /* send unlock stripe command */ ips_init_scb(ha, scb); scb->cdb[0] = IPS_CMD_ERROR_TABLE; scb->timeout = ips_reset_timeout; scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE; scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.unlock_stripe.log_drv = 0; scb->cmd.unlock_stripe.control = IPS_CSL; scb->cmd.unlock_stripe.reserved = 0; scb->cmd.unlock_stripe.reserved2 = 0; scb->cmd.unlock_stripe.reserved3 = 0; /* issue command */ if (((ret = ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM) || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) return (0); return (1); } /****************************************************************************/ /* */ /* Routine Name: ips_ffdc_reset */ /* */ /* Routine Description: */ /* */ /* FFDC: write reset info */ /* */ /****************************************************************************/ static void ips_ffdc_reset(ips_ha_t * ha, int intr) { ips_scb_t *scb; METHOD_TRACE("ips_ffdc_reset", 1); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); 
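/* FFDC (First Failure Data Capture) commands let the firmware timestamp its diagnostic records: this one reports the adapter's reset count with a reset type of 0x80, and the current wall-clock time is filled in by ips_fix_ffdc_time() below. */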
scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_FFDC; scb->cmd.ffdc.op_code = IPS_CMD_FFDC; scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.ffdc.reset_count = ha->reset_count; scb->cmd.ffdc.reset_type = 0x80; /* convert time to what the card wants */ ips_fix_ffdc_time(ha, scb, ha->last_ffdc); /* issue command */ ips_send_wait(ha, scb, ips_cmd_timeout, intr); } /****************************************************************************/ /* */ /* Routine Name: ips_ffdc_time */ /* */ /* Routine Description: */ /* */ /* FFDC: write time info */ /* */ /****************************************************************************/ static void ips_ffdc_time(ips_ha_t * ha) { ips_scb_t *scb; METHOD_TRACE("ips_ffdc_time", 1); DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num); scb = &ha->scbs[ha->max_cmds - 1]; ips_init_scb(ha, scb); scb->timeout = ips_cmd_timeout; scb->cdb[0] = IPS_CMD_FFDC; scb->cmd.ffdc.op_code = IPS_CMD_FFDC; scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb); scb->cmd.ffdc.reset_count = 0; scb->cmd.ffdc.reset_type = 0; /* convert time to what the card wants */ ips_fix_ffdc_time(ha, scb, ha->last_ffdc); /* issue command */ ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC); } /****************************************************************************/ /* */ /* Routine Name: ips_fix_ffdc_time */ /* */ /* Routine Description: */ /* Adjust time_t to what the card wants */ /* */ /****************************************************************************/ static void ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time) { struct tm tm; METHOD_TRACE("ips_fix_ffdc_time", 1); time64_to_tm(current_time, 0, &tm); scb->cmd.ffdc.hour = tm.tm_hour; scb->cmd.ffdc.minute = tm.tm_min; scb->cmd.ffdc.second = tm.tm_sec; scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100; scb->cmd.ffdc.yearL = tm.tm_year % 100; scb->cmd.ffdc.month = tm.tm_mon + 1; scb->cmd.ffdc.day = tm.tm_mday; } /**************************************************************************** * BIOS Flash Routines * ****************************************************************************/ /****************************************************************************/ /* */ /* Routine Name: ips_erase_bios */ /* */ /* Routine Description: */ /* Erase the BIOS on the adapter */ /* */ /****************************************************************************/ static int ips_erase_bios(ips_ha_t * ha) { int timeout; uint8_t status = 0; METHOD_TRACE("ips_erase_bios", 1); status = 0; /* Clear the status register */ outl(0, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ outb(0x50, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* Erase Setup */ outb(0x20, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* Erase Confirm */ outb(0xD0, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* Erase Status */ outb(0x70, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ timeout = 80000; /* 80 seconds */ while (timeout > 0) { if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { outl(0, ha->io_addr + IPS_REG_FLAP); udelay(25); /* 25 us */ } status = inb(ha->io_addr + IPS_REG_FLDP); if (status & 0x80) break; MDELAY(1); timeout--; } /* check for timeout */ if (timeout <= 0) { /* timeout */ /* try to suspend the erase */ 
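/* 0xB0 is the erase-suspend opcode; the byte values written to IPS_REG_FLDP throughout these flash routines (0x20 erase setup, 0xD0 erase confirm, 0x70 read status, 0x50 clear status, 0x40 program, 0xFF read array) appear to match the classic Intel 28F-series flash command set. */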
outb(0xB0, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* wait for 10 seconds */ timeout = 10000; while (timeout > 0) { if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { outl(0, ha->io_addr + IPS_REG_FLAP); udelay(25); /* 25 us */ } status = inb(ha->io_addr + IPS_REG_FLDP); if (status & 0xC0) break; MDELAY(1); timeout--; } return (1); } /* check for valid VPP */ if (status & 0x08) /* VPP failure */ return (1); /* check for successful flash */ if (status & 0x30) /* sequence error */ return (1); /* Otherwise, we were successful */ /* clear status */ outb(0x50, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* enable reads */ outb(0xFF, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_erase_bios_memio */ /* */ /* Routine Description: */ /* Erase the BIOS on the adapter */ /* */ /****************************************************************************/ static int ips_erase_bios_memio(ips_ha_t * ha) { int timeout; uint8_t status; METHOD_TRACE("ips_erase_bios_memio", 1); status = 0; /* Clear the status register */ writel(0, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* Erase Setup */ writeb(0x20, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* Erase Confirm */ writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* Erase Status */ writeb(0x70, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ timeout = 80000; /* 80 seconds */ while (timeout > 0) { if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { writel(0, ha->mem_ptr + IPS_REG_FLAP); udelay(25); /* 25 us */ } status = readb(ha->mem_ptr + IPS_REG_FLDP); if (status & 0x80) break; MDELAY(1); timeout--; } /* check for timeout */ if (timeout <= 0) { /* timeout */ /* try to suspend the erase */ writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* wait for 10 seconds */ timeout = 10000; while (timeout > 0) { if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { writel(0, ha->mem_ptr + IPS_REG_FLAP); udelay(25); /* 25 us */ } status = readb(ha->mem_ptr + IPS_REG_FLDP); if (status & 0xC0) break; MDELAY(1); timeout--; } return (1); } /* check for valid VPP */ if (status & 0x08) /* VPP failure */ return (1); /* check for successful flash */ if (status & 0x30) /* sequence error */ return (1); /* Otherwise, we were successful */ /* clear status */ writeb(0x50, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* enable reads */ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_program_bios */ /* */ /* Routine Description: */ /* Program the BIOS on the adapter */ /* */ /****************************************************************************/ static int ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, 
uint32_t offset) { int i; int timeout; uint8_t status = 0; METHOD_TRACE("ips_program_bios", 1); status = 0; for (i = 0; i < buffersize; i++) { /* write a byte */ outl(i + offset, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ outb(0x40, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ outb(buffer[i], ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* wait up to one second */ timeout = 1000; while (timeout > 0) { if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { outl(0, ha->io_addr + IPS_REG_FLAP); udelay(25); /* 25 us */ } status = inb(ha->io_addr + IPS_REG_FLDP); if (status & 0x80) break; MDELAY(1); timeout--; } if (timeout == 0) { /* timeout error */ outl(0, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ outb(0xFF, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (1); } /* check the status */ if (status & 0x18) { /* programming error */ outl(0, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ outb(0xFF, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (1); } } /* end for */ /* Enable reading */ outl(0, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ outb(0xFF, ha->io_addr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_program_bios_memio */ /* */ /* Routine Description: */ /* Program the BIOS on the adapter */ /* */ /****************************************************************************/ static int ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, uint32_t offset) { int i; int timeout; uint8_t status = 0; METHOD_TRACE("ips_program_bios_memio", 1); status = 0; for (i = 0; i < buffersize; i++) { /* write a byte */ writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ writeb(0x40, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ /* wait up to one second */ timeout = 1000; while (timeout > 0) { if (ha->pcidev->revision == IPS_REVID_TROMBONE64) { writel(0, ha->mem_ptr + IPS_REG_FLAP); udelay(25); /* 25 us */ } status = readb(ha->mem_ptr + IPS_REG_FLDP); if (status & 0x80) break; MDELAY(1); timeout--; } if (timeout == 0) { /* timeout error */ writel(0, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (1); } /* check the status */ if (status & 0x18) { /* programming error */ writel(0, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (1); } } /* end for */ /* Enable reading */ writel(0, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); 
/* 25 us */ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_verify_bios */ /* */ /* Routine Description: */ /* Verify the BIOS on the adapter */ /* */ /****************************************************************************/ static int ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, uint32_t offset) { uint8_t checksum; int i; METHOD_TRACE("ips_verify_bios", 1); /* test 1st byte */ outl(0, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) return (1); outl(1, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) return (1); checksum = 0xff; for (i = 2; i < buffersize; i++) { outl(i + offset, ha->io_addr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP); } if (checksum != 0) /* failure */ return (1); else /* success */ return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_verify_bios_memio */ /* */ /* Routine Description: */ /* Verify the BIOS on the adapter */ /* */ /****************************************************************************/ static int ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize, uint32_t offset) { uint8_t checksum; int i; METHOD_TRACE("ips_verify_bios_memio", 1); /* test 1st byte */ writel(0, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55) return (1); writel(1, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA) return (1); checksum = 0xff; for (i = 2; i < buffersize; i++) { writel(i + offset, ha->mem_ptr + IPS_REG_FLAP); if (ha->pcidev->revision == IPS_REVID_TROMBONE64) udelay(25); /* 25 us */ checksum = (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP); } if (checksum != 0) /* failure */ return (1); else /* success */ return (0); } /****************************************************************************/ /* */ /* Routine Name: ips_abort_init */ /* */ /* Routine Description: */ /* cleanup routine for a failed adapter initialization */ /****************************************************************************/ static int ips_abort_init(ips_ha_t * ha, int index) { ha->active = 0; ips_free(ha); ips_ha[index] = NULL; ips_sh[index] = NULL; return -1; } /****************************************************************************/ /* */ /* Routine Name: ips_shift_controllers */ /* */ /* Routine Description: */ /* helper function for ordering adapters */ /****************************************************************************/ static void ips_shift_controllers(int lowindex, int highindex) { ips_ha_t *ha_sav = ips_ha[highindex]; struct Scsi_Host *sh_sav = ips_sh[highindex]; int i; for (i = highindex; i > lowindex; i--) { ips_ha[i] = ips_ha[i - 1]; ips_sh[i] = ips_sh[i - 1]; ips_ha[i]->host_num = i; } ha_sav->host_num = lowindex; ips_ha[lowindex] = ha_sav; ips_sh[lowindex] = sh_sav; } /****************************************************************************/ /* */ /* 
Routine Name: ips_order_controllers */ /* */ /* Routine Description: */ /* place controllers is the "proper" boot order */ /****************************************************************************/ static void ips_order_controllers(void) { int i, j, tmp, position = 0; IPS_NVRAM_P5 *nvram; if (!ips_ha[0]) return; nvram = ips_ha[0]->nvram; if (nvram->adapter_order[0]) { for (i = 1; i <= nvram->adapter_order[0]; i++) { for (j = position; j < ips_num_controllers; j++) { switch (ips_ha[j]->ad_type) { case IPS_ADTYPE_SERVERAID6M: case IPS_ADTYPE_SERVERAID7M: if (nvram->adapter_order[i] == 'M') { ips_shift_controllers(position, j); position++; } break; case IPS_ADTYPE_SERVERAID4L: case IPS_ADTYPE_SERVERAID4M: case IPS_ADTYPE_SERVERAID4MX: case IPS_ADTYPE_SERVERAID4LX: if (nvram->adapter_order[i] == 'N') { ips_shift_controllers(position, j); position++; } break; case IPS_ADTYPE_SERVERAID6I: case IPS_ADTYPE_SERVERAID5I2: case IPS_ADTYPE_SERVERAID5I1: case IPS_ADTYPE_SERVERAID7k: if (nvram->adapter_order[i] == 'S') { ips_shift_controllers(position, j); position++; } break; case IPS_ADTYPE_SERVERAID: case IPS_ADTYPE_SERVERAID2: case IPS_ADTYPE_NAVAJO: case IPS_ADTYPE_KIOWA: case IPS_ADTYPE_SERVERAID3L: case IPS_ADTYPE_SERVERAID3: case IPS_ADTYPE_SERVERAID4H: if (nvram->adapter_order[i] == 'A') { ips_shift_controllers(position, j); position++; } break; default: break; } } } /* if adapter_order[0], then ordering is complete */ return; } /* old bios, use older ordering */ tmp = 0; for (i = position; i < ips_num_controllers; i++) { if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 || ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) { ips_shift_controllers(position, i); position++; tmp = 1; } } /* if there were no 5I cards, then don't do any extra ordering */ if (!tmp) return; for (i = position; i < ips_num_controllers; i++) { if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L || ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M || ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX || ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) { ips_shift_controllers(position, i); position++; } } return; } /****************************************************************************/ /* */ /* Routine Name: ips_register_scsi */ /* */ /* Routine Description: */ /* perform any registration and setup with the scsi layer */ /****************************************************************************/ static int ips_register_scsi(int index) { struct Scsi_Host *sh; ips_ha_t *ha, *oldha = ips_ha[index]; sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t)); if (!sh) { IPS_PRINTK(KERN_WARNING, oldha->pcidev, "Unable to register controller with SCSI subsystem\n"); return -1; } ha = IPS_HA(sh); memcpy(ha, oldha, sizeof (ips_ha_t)); free_irq(oldha->pcidev->irq, oldha); /* Install the interrupt handler with the new ha */ if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Unable to install interrupt handler\n"); goto err_out_sh; } kfree(oldha); /* Store away needed values for later use */ sh->unique_id = (ha->io_addr) ? 
ha->io_addr : ha->mem_addr; sh->sg_tablesize = sh->hostt->sg_tablesize; sh->can_queue = sh->hostt->can_queue; sh->cmd_per_lun = sh->hostt->cmd_per_lun; sh->max_sectors = 128; sh->max_id = ha->ntargets; sh->max_lun = ha->nlun; sh->max_channel = ha->nbus - 1; sh->can_queue = ha->max_cmds - 1; if (scsi_add_host(sh, &ha->pcidev->dev)) goto err_out; ips_sh[index] = sh; ips_ha[index] = ha; scsi_scan_host(sh); return 0; err_out: free_irq(ha->pcidev->irq, ha); err_out_sh: scsi_host_put(sh); return -1; } /*---------------------------------------------------------------------------*/ /* Routine Name: ips_remove_device */ /* */ /* Routine Description: */ /* Remove one Adapter ( Hot Plugging ) */ /*---------------------------------------------------------------------------*/ static void ips_remove_device(struct pci_dev *pci_dev) { struct Scsi_Host *sh = pci_get_drvdata(pci_dev); pci_set_drvdata(pci_dev, NULL); ips_release(sh); pci_release_regions(pci_dev); pci_disable_device(pci_dev); } /****************************************************************************/ /* */ /* Routine Name: ips_module_init */ /* */ /* Routine Description: */ /* function called on module load */ /****************************************************************************/ static int __init ips_module_init(void) { #if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__) printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n"); add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); #endif if (pci_register_driver(&ips_pci_driver) < 0) return -ENODEV; ips_driver_template.module = THIS_MODULE; ips_order_controllers(); if (!ips_detect(&ips_driver_template)) { pci_unregister_driver(&ips_pci_driver); return -ENODEV; } register_reboot_notifier(&ips_notifier); return 0; } /****************************************************************************/ /* */ /* Routine Name: ips_module_exit */ /* */ /* Routine Description: */ /* function called on module unload */ /****************************************************************************/ static void __exit ips_module_exit(void) { pci_unregister_driver(&ips_pci_driver); unregister_reboot_notifier(&ips_notifier); } module_init(ips_module_init); module_exit(ips_module_exit); /*---------------------------------------------------------------------------*/ /* Routine Name: ips_insert_device */ /* */ /* Routine Description: */ /* Add One Adapter ( Hot Plug ) */ /* */ /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) { int index = -1; int rc; METHOD_TRACE("ips_insert_device", 1); rc = pci_enable_device(pci_dev); if (rc) return rc; rc = pci_request_regions(pci_dev, "ips"); if (rc) goto err_out; rc = ips_init_phase1(pci_dev, &index); if (rc == SUCCESS) rc = ips_init_phase2(index); if (ips_hotplug) if (ips_register_scsi(index)) { ips_free(ips_ha[index]); rc = -1; } if (rc == SUCCESS) ips_num_controllers++; ips_next_controller = ips_num_controllers; if (rc < 0) { rc = -ENODEV; goto err_out_regions; } pci_set_drvdata(pci_dev, ips_sh[index]); return 0; err_out_regions: pci_release_regions(pci_dev); err_out: pci_disable_device(pci_dev); return rc; } /*---------------------------------------------------------------------------*/ /* Routine Name: ips_init_phase1 */ /* */ /* Routine Description: */ /* Adapter Initialization */ /* */ /* Return Value: */ /* 0 if Successful, else 
non-zero */ /*---------------------------------------------------------------------------*/ static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr) { ips_ha_t *ha; uint32_t io_addr; uint32_t mem_addr; uint32_t io_len; uint32_t mem_len; int j; int index; dma_addr_t dma_address; char __iomem *ioremap_ptr; char __iomem *mem_ptr; uint32_t IsDead; METHOD_TRACE("ips_init_phase1", 1); index = IPS_MAX_ADAPTERS; for (j = 0; j < IPS_MAX_ADAPTERS; j++) { if (ips_ha[j] == NULL) { index = j; break; } } if (index >= IPS_MAX_ADAPTERS) return -1; /* Init MEM/IO addresses to 0 */ mem_addr = 0; io_addr = 0; mem_len = 0; io_len = 0; for (j = 0; j < 2; j++) { if (!pci_resource_start(pci_dev, j)) break; if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) { io_addr = pci_resource_start(pci_dev, j); io_len = pci_resource_len(pci_dev, j); } else { mem_addr = pci_resource_start(pci_dev, j); mem_len = pci_resource_len(pci_dev, j); } } /* setup memory mapped area (if applicable) */ if (mem_addr) { uint32_t base; uint32_t offs; base = mem_addr & PAGE_MASK; offs = mem_addr - base; ioremap_ptr = ioremap(base, PAGE_SIZE); if (!ioremap_ptr) return -1; mem_ptr = ioremap_ptr + offs; } else { ioremap_ptr = NULL; mem_ptr = NULL; } /* found a controller */ ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL); if (ha == NULL) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate temporary ha struct\n"); return -1; } ips_sh[index] = NULL; ips_ha[index] = ha; ha->active = 1; /* Store info in HA structure */ ha->io_addr = io_addr; ha->io_len = io_len; ha->mem_addr = mem_addr; ha->mem_len = mem_len; ha->mem_ptr = mem_ptr; ha->ioremap_ptr = ioremap_ptr; ha->host_num = (uint32_t) index; ha->slot_num = PCI_SLOT(pci_dev->devfn); ha->pcidev = pci_dev; /* * Set the pci_dev's dma_mask. Not all adapters support 64bit * addressing so don't enable it if the adapter can't support * it! Also, don't use 64bit addressing if dma addresses * are guaranteed to be < 4G. 
*/ if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) && !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) { (ha)->flags |= IPS_HA_ENH_SG; } else { if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) { printk(KERN_WARNING "Unable to set DMA Mask\n"); return ips_abort_init(ha, index); } } if(ips_cd_boot && !ips_FlashData){ ips_FlashData = dma_alloc_coherent(&pci_dev->dev, PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL); } ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ), &ha->enq_busaddr, GFP_KERNEL); if (!ha->enq) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate host inquiry structure\n"); return ips_abort_init(ha, index); } ha->adapt = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD), &dma_address, GFP_KERNEL); if (!ha->adapt) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate host adapt & dummy structures\n"); return ips_abort_init(ha, index); } ha->adapt->hw_status_start = dma_address; ha->dummy = (void *) (ha->adapt + 1); ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL); if (!ha->logical_drive_info) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate logical drive info structure\n"); return ips_abort_init(ha, index); } ha->logical_drive_info_dma_addr = dma_address; ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL); if (!ha->conf) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate host conf structure\n"); return ips_abort_init(ha, index); } ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL); if (!ha->nvram) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate host NVRAM structure\n"); return ips_abort_init(ha, index); } ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL); if (!ha->subsys) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate host subsystem structure\n"); return ips_abort_init(ha, index); } /* the ioctl buffer is now used during adapter initialization, so its * successful allocation is now required */ if (ips_ioctlsize < PAGE_SIZE) ips_ioctlsize = PAGE_SIZE; ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize, &ha->ioctl_busaddr, GFP_KERNEL); ha->ioctl_len = ips_ioctlsize; if (!ha->ioctl_data) { IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to allocate IOCTL data\n"); return ips_abort_init(ha, index); } /* * Setup Functions */ ips_setup_funclist(ha); if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) { /* If Morpheus appears dead, reset it */ IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1); if (IsDead == 0xDEADBEEF) { ips_reset_morpheus(ha); } } /* * Initialize the card if it isn't already */ if (!(*ha->func.isinit) (ha)) { if (!(*ha->func.init) (ha)) { /* * Initialization failed */ IPS_PRINTK(KERN_WARNING, pci_dev, "Unable to initialize controller\n"); return ips_abort_init(ha, index); } } *indexPtr = index; return SUCCESS; } /*---------------------------------------------------------------------------*/ /* Routine Name: ips_init_phase2 */ /* */ /* Routine Description: */ /* Adapter Initialization Phase 2 */ /* */ /* Return Value: */ /* 0 if Successful, else non-zero */ /*---------------------------------------------------------------------------*/ static int ips_init_phase2(int index) { ips_ha_t *ha; ha = ips_ha[index]; METHOD_TRACE("ips_init_phase2", 1); if (!ha->active) { ips_ha[index] = NULL; return -1; } /* Install the interrupt handler */ if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Unable to install interrupt handler\n"); return ips_abort_init(ha, 
index); } /* * Allocate a temporary SCB for initialization */ ha->max_cmds = 1; if (!ips_allocatescbs(ha)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Unable to allocate a CCB\n"); free_irq(ha->pcidev->irq, ha); return ips_abort_init(ha, index); } if (!ips_hainit(ha)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Unable to initialize controller\n"); free_irq(ha->pcidev->irq, ha); return ips_abort_init(ha, index); } /* Free the temporary SCB */ ips_deallocatescbs(ha, 1); /* allocate CCBs */ if (!ips_allocatescbs(ha)) { IPS_PRINTK(KERN_WARNING, ha->pcidev, "Unable to allocate CCBs\n"); free_irq(ha->pcidev->irq, ha); return ips_abort_init(ha, index); } return SUCCESS; } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING); MODULE_VERSION(IPS_VER_STRING);
linux-master
drivers/scsi/ips.c
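A side note on the memory-mapped I/O setup in ips_init_phase1() above: the driver maps only a single page, rounding the BAR address down to a page boundary and then adding the in-page offset back to locate the registers. The following user-space sketch shows just that align-and-offset arithmetic; PAGE_SHIFT/PAGE_MASK here are stand-ins for the kernel definitions and the address is hypothetical, so this is an illustration of the idea, not driver code.

#include <stdio.h>

/* Stand-ins for the kernel's page constants; assume 4 KiB pages. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        /* Hypothetical, non-page-aligned BAR address. */
        unsigned long mem_addr = 0xfebf0c40UL;

        /* Same arithmetic as ips_init_phase1(): round down to a page
         * boundary for the mapping, keep the in-page offset to find
         * the registers inside the mapped page. */
        unsigned long base = mem_addr & PAGE_MASK;
        unsigned long offs = mem_addr - base;

        printf("map one page at %#lx, registers at offset %#lx\n", base, offs);
        return 0;
}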
// SPDX-License-Identifier: GPL-2.0 #include <linux/bitops.h> #include <linux/seq_file.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_host.h> #include "scsi_debugfs.h" #define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name static const char *const scsi_cmd_flags[] = { SCSI_CMD_FLAG_NAME(TAGGED), SCSI_CMD_FLAG_NAME(INITIALIZED), SCSI_CMD_FLAG_NAME(LAST), }; #undef SCSI_CMD_FLAG_NAME static int scsi_flags_show(struct seq_file *m, const unsigned long flags, const char *const *flag_name, int flag_name_count) { bool sep = false; int i; for_each_set_bit(i, &flags, BITS_PER_LONG) { if (sep) seq_puts(m, "|"); sep = true; if (i < flag_name_count && flag_name[i]) seq_puts(m, flag_name[i]); else seq_printf(m, "%d", i); } return 0; } void scsi_show_rq(struct seq_file *m, struct request *rq) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2; struct Scsi_Host *shost = cmd->device->host; int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); int timeout_ms = jiffies_to_msecs(rq->timeout); const char *list_info = NULL; char buf[80] = "(?)"; spin_lock_irq(shost->host_lock); list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) { if (cmd == cmd2) { list_info = "on eh_abort_list"; goto unlock; } } list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) { if (cmd == cmd2) { list_info = "on eh_cmd_q"; goto unlock; } } unlock: spin_unlock_irq(shost->host_lock); __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=", buf, cmd->retries, cmd->allowed, cmd->result, list_info ? : "", list_info ? ", " : ""); scsi_flags_show(m, cmd->flags, scsi_cmd_flags, ARRAY_SIZE(scsi_cmd_flags)); seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago", timeout_ms / 1000, timeout_ms % 1000, alloc_ms / 1000, alloc_ms % 1000); }
linux-master
drivers/scsi/scsi_debugfs.c
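scsi_debugfs.c above builds its flag-name table with designated initializers indexed by each flag's bit position (SCSI_CMD_FLAG_NAME uses const_ilog2 for that), then walks the set bits and prints either the name or the raw bit index. A rough standalone equivalent is sketched below; the kernel-only const_ilog2/for_each_set_bit helpers are replaced by explicit indices and a plain loop, and the flag values are illustrative rather than the real SCMD_* bits.

#include <stdio.h>

/* Illustrative flag bits (not the real SCMD_* values). */
#define CMD_TAGGED      (1u << 0)
#define CMD_INITIALIZED (1u << 1)
#define CMD_LAST        (1u << 2)

/* Same trick as SCSI_CMD_FLAG_NAME(): index the table by bit position. */
#define FLAG_NAME(bit, name) [bit] = name
static const char *const cmd_flags[] = {
        FLAG_NAME(0, "TAGGED"),
        FLAG_NAME(1, "INITIALIZED"),
        FLAG_NAME(2, "LAST"),
};

static void show_flags(unsigned long flags)
{
        int printed = 0;

        for (unsigned int i = 0; i < 8 * sizeof(flags); i++) {
                if (!(flags & (1ul << i)))
                        continue;
                if (printed++)
                        putchar('|');
                if (i < sizeof(cmd_flags) / sizeof(cmd_flags[0]) && cmd_flags[i])
                        fputs(cmd_flags[i], stdout);
                else
                        printf("%u", i);    /* unknown bit: print its index */
        }
        putchar('\n');
}

int main(void)
{
        show_flags(CMD_TAGGED | CMD_LAST | (1u << 5));  /* prints TAGGED|LAST|5 */
        return 0;
}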
// SPDX-License-Identifier: GPL-2.0-only /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. * * Copyright (C) 1996, 2006, 2008 David S. Miller ([email protected]) * * A lot of this driver was directly stolen from Erik H. Moe's PCI * Qlogic ISP driver. Mucho kudos to him for this code. * * An even bigger kudos to John Grana at Performance Technologies * for providing me with the hardware to write this driver, you rule * John you really do. * * May, 2, 1997: Added support for QLGC,isp --jj */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/gfp.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/firmware.h> #include <linux/pgtable.h> #include <asm/byteorder.h> #include "qlogicpti.h" #include <asm/dma.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/io.h> #include <asm/irq.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_host.h> #define MAX_TARGETS 16 #define MAX_LUNS 8 /* 32 for 1.31 F/W */ #define DEFAULT_LOOP_COUNT 10000 static struct qlogicpti *qptichain = NULL; static DEFINE_SPINLOCK(qptichain_lock); #define PACKB(a, b) (((a)<<4)|(b)) static const u_char mbox_param[] = { PACKB(1, 1), /* MBOX_NO_OP */ PACKB(5, 5), /* MBOX_LOAD_RAM */ PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */ PACKB(5, 5), /* MBOX_DUMP_RAM */ PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */ PACKB(2, 3), /* MBOX_READ_RAM_WORD */ PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */ PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */ PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */ PACKB(0, 0), /* 0x0009 */ PACKB(0, 0), /* 0x000a */ PACKB(0, 0), /* 0x000b */ PACKB(0, 0), /* 0x000c */ PACKB(0, 0), /* 0x000d */ PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */ PACKB(0, 0), /* 0x000f */ PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */ PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */ PACKB(4, 4), /* MBOX_EXECUTE_IOCB */ PACKB(2, 2), /* MBOX_WAKE_UP */ PACKB(1, 6), /* MBOX_STOP_FIRMWARE */ PACKB(4, 4), /* MBOX_ABORT */ PACKB(2, 2), /* MBOX_ABORT_DEVICE */ PACKB(3, 3), /* MBOX_ABORT_TARGET */ PACKB(2, 2), /* MBOX_BUS_RESET */ PACKB(2, 3), /* MBOX_STOP_QUEUE */ PACKB(2, 3), /* MBOX_START_QUEUE */ PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */ PACKB(2, 3), /* MBOX_ABORT_QUEUE */ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */ PACKB(0, 0), /* 0x001e */ PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */ PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */ PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */ PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */ PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */ PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */ PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */ PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */ PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */ PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */ PACKB(0, 0), /* 0x002a */ PACKB(0, 0), /* 0x002b */ PACKB(0, 0), /* 0x002c */ PACKB(0, 0), /* 0x002d */ PACKB(0, 0), /* 0x002e */ PACKB(0, 0), /* 0x002f */ PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */ PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */ PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */ PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */ PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */ PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */ PACKB(2, 2), /* 
MBOX_SET_ASYNC_DATA_SETUP_TIME */ PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */ PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */ PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */ PACKB(0, 0), /* 0x003a */ PACKB(0, 0), /* 0x003b */ PACKB(0, 0), /* 0x003c */ PACKB(0, 0), /* 0x003d */ PACKB(0, 0), /* 0x003e */ PACKB(0, 0), /* 0x003f */ PACKB(0, 0), /* 0x0040 */ PACKB(0, 0), /* 0x0041 */ PACKB(0, 0) /* 0x0042 */ }; #define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param) /* queue length's _must_ be power of two: */ #define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql)) #define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \ QLOGICPTI_REQ_QUEUE_LEN) #define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN) static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti) { sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB, qpti->qregs + SBUS_CTRL); } static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti) { sbus_writew(0, qpti->qregs + SBUS_CTRL); } static inline void set_sbus_cfg1(struct qlogicpti *qpti) { u16 val; u8 bursts = qpti->bursts; #if 0 /* It appears that at least PTI cards do not support * 64-byte bursts and that setting the B64 bit actually * is a nop and the chip ends up using the smallest burst * size. -DaveM */ if (sbus_can_burst64() && (bursts & DMA_BURST64)) { val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64); } else #endif if (bursts & DMA_BURST32) { val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32); } else if (bursts & DMA_BURST16) { val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16); } else if (bursts & DMA_BURST8) { val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8); } else { val = 0; /* No sbus bursts for you... */ } sbus_writew(val, qpti->qregs + SBUS_CFG1); } static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force) { int loop_count; u16 tmp; if (mbox_param[param[0]] == 0) return 1; /* Set SBUS semaphore. */ tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); tmp |= SBUS_SEMAPHORE_LCK; sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); /* Wait for host IRQ bit to clear. */ loop_count = DEFAULT_LOOP_COUNT; while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) { barrier(); cpu_relax(); } if (!loop_count) printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n", qpti->qpti_id); /* Write mailbox command registers. */ switch (mbox_param[param[0]] >> 4) { case 6: sbus_writew(param[5], qpti->qregs + MBOX5); fallthrough; case 5: sbus_writew(param[4], qpti->qregs + MBOX4); fallthrough; case 4: sbus_writew(param[3], qpti->qregs + MBOX3); fallthrough; case 3: sbus_writew(param[2], qpti->qregs + MBOX2); fallthrough; case 2: sbus_writew(param[1], qpti->qregs + MBOX1); fallthrough; case 1: sbus_writew(param[0], qpti->qregs + MBOX0); } /* Clear RISC interrupt. */ tmp = sbus_readw(qpti->qregs + HCCTRL); tmp |= HCCTRL_CRIRQ; sbus_writew(tmp, qpti->qregs + HCCTRL); /* Clear SBUS semaphore. */ sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); /* Set HOST interrupt. */ tmp = sbus_readw(qpti->qregs + HCCTRL); tmp |= HCCTRL_SHIRQ; sbus_writew(tmp, qpti->qregs + HCCTRL); /* Wait for HOST interrupt clears. */ loop_count = DEFAULT_LOOP_COUNT; while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ)) udelay(20); if (!loop_count) printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n", qpti->qpti_id, param[0]); /* Wait for SBUS semaphore to get set. */ loop_count = DEFAULT_LOOP_COUNT; while (--loop_count && !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) { udelay(20); /* Workaround for some buggy chips. 
*/ if (sbus_readw(qpti->qregs + MBOX0) & 0x4000) break; } if (!loop_count) printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n", qpti->qpti_id, param[0]); /* Wait for MBOX busy condition to go away. */ loop_count = DEFAULT_LOOP_COUNT; while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04)) udelay(20); if (!loop_count) printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n", qpti->qpti_id, param[0]); /* Read back output parameters. */ switch (mbox_param[param[0]] & 0xf) { case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); fallthrough; case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); fallthrough; case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); fallthrough; case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); fallthrough; case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); fallthrough; case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); } /* Clear RISC interrupt. */ tmp = sbus_readw(qpti->qregs + HCCTRL); tmp |= HCCTRL_CRIRQ; sbus_writew(tmp, qpti->qregs + HCCTRL); /* Release SBUS semaphore. */ tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); tmp &= ~(SBUS_SEMAPHORE_LCK); sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); /* We're done. */ return 0; } static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti) { int i; qpti->host_param.initiator_scsi_id = qpti->scsi_id; qpti->host_param.bus_reset_delay = 3; qpti->host_param.retry_count = 0; qpti->host_param.retry_delay = 5; qpti->host_param.async_data_setup_time = 3; qpti->host_param.req_ack_active_negation = 1; qpti->host_param.data_line_active_negation = 1; qpti->host_param.data_dma_burst_enable = 1; qpti->host_param.command_dma_burst_enable = 1; qpti->host_param.tag_aging = 8; qpti->host_param.selection_timeout = 250; qpti->host_param.max_queue_depth = 256; for(i = 0; i < MAX_TARGETS; i++) { /* * disconnect, parity, arq, reneg on reset, and, oddly enough * tags...the midlayer's notion of tagged support has to match * our device settings, and since we base whether we enable a * tag on a per-cmnd basis upon what the midlayer sez, we * actually enable the capability here. */ qpti->dev_param[i].device_flags = 0xcd; qpti->dev_param[i].execution_throttle = 16; if (qpti->ultra) { qpti->dev_param[i].synchronous_period = 12; qpti->dev_param[i].synchronous_offset = 8; } else { qpti->dev_param[i].synchronous_period = 25; qpti->dev_param[i].synchronous_offset = 12; } qpti->dev_param[i].device_enable = 1; } } static int qlogicpti_reset_hardware(struct Scsi_Host *host) { struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; u_short param[6]; unsigned short risc_code_addr; int loop_count, i; unsigned long flags; risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */ spin_lock_irqsave(host->host_lock, flags); sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); /* Only reset the scsi bus if it is not free. 
*/ if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) { sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE); sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD); udelay(400); } sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); loop_count = DEFAULT_LOOP_COUNT; while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04)) udelay(20); if (!loop_count) printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n", qpti->qpti_id); sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); set_sbus_cfg1(qpti); qlogicpti_enable_irqs(qpti); if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { qpti->ultra = 1; sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), qpti->qregs + RISC_MTREG); } else { qpti->ultra = 0; sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), qpti->qregs + RISC_MTREG); } /* reset adapter and per-device default values. */ /* do it after finding out whether we're ultra mode capable */ qlogicpti_set_hostdev_defaults(qpti); /* Release the RISC processor. */ sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); /* Get RISC to start executing the firmware code. */ param[0] = MBOX_EXEC_FIRMWARE; param[1] = risc_code_addr; if (qlogicpti_mbox_command(qpti, param, 1)) { printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n", qpti->qpti_id); spin_unlock_irqrestore(host->host_lock, flags); return 1; } /* Set initiator scsi ID. */ param[0] = MBOX_SET_INIT_SCSI_ID; param[1] = qpti->host_param.initiator_scsi_id; if (qlogicpti_mbox_command(qpti, param, 1) || (param[0] != MBOX_COMMAND_COMPLETE)) { printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n", qpti->qpti_id); spin_unlock_irqrestore(host->host_lock, flags); return 1; } /* Initialize state of the queues, both hw and sw. */ qpti->req_in_ptr = qpti->res_out_ptr = 0; param[0] = MBOX_INIT_RES_QUEUE; param[1] = RES_QUEUE_LEN + 1; param[2] = (u_short) (qpti->res_dvma >> 16); param[3] = (u_short) (qpti->res_dvma & 0xffff); param[4] = param[5] = 0; if (qlogicpti_mbox_command(qpti, param, 1)) { printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n", qpti->qpti_id); spin_unlock_irqrestore(host->host_lock, flags); return 1; } param[0] = MBOX_INIT_REQ_QUEUE; param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1; param[2] = (u_short) (qpti->req_dvma >> 16); param[3] = (u_short) (qpti->req_dvma & 0xffff); param[4] = param[5] = 0; if (qlogicpti_mbox_command(qpti, param, 1)) { printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n", qpti->qpti_id); spin_unlock_irqrestore(host->host_lock, flags); return 1; } param[0] = MBOX_SET_RETRY_COUNT; param[1] = qpti->host_param.retry_count; param[2] = qpti->host_param.retry_delay; qlogicpti_mbox_command(qpti, param, 0); param[0] = MBOX_SET_TAG_AGE_LIMIT; param[1] = qpti->host_param.tag_aging; qlogicpti_mbox_command(qpti, param, 0); for (i = 0; i < MAX_TARGETS; i++) { param[0] = MBOX_GET_DEV_QUEUE_PARAMS; param[1] = (i << 8); qlogicpti_mbox_command(qpti, param, 0); } param[0] = MBOX_GET_FIRMWARE_STATUS; qlogicpti_mbox_command(qpti, param, 0); param[0] = MBOX_SET_SELECT_TIMEOUT; param[1] = qpti->host_param.selection_timeout; qlogicpti_mbox_command(qpti, param, 0); for (i = 0; i < MAX_TARGETS; i++) { param[0] = MBOX_SET_TARGET_PARAMS; param[1] = (i << 8); param[2] = (qpti->dev_param[i].device_flags << 8); /* * Since we're now loading 1.31 f/w, force narrow/async. 
*/ param[2] |= 0xc0; param[3] = 0; /* no offset, we do not have sync mode yet */ qlogicpti_mbox_command(qpti, param, 0); } /* * Always (sigh) do an initial bus reset (kicks f/w). */ param[0] = MBOX_BUS_RESET; param[1] = qpti->host_param.bus_reset_delay; qlogicpti_mbox_command(qpti, param, 0); qpti->send_marker = 1; spin_unlock_irqrestore(host->host_lock, flags); return 0; } #define PTI_RESET_LIMIT 400 static int qlogicpti_load_firmware(struct qlogicpti *qpti) { const struct firmware *fw; const char fwname[] = "qlogic/isp1000.bin"; const __le16 *fw_data; struct Scsi_Host *host = qpti->qhost; unsigned short csum = 0; unsigned short param[6]; unsigned short risc_code_addr, risc_code_length; int err; unsigned long flags; int i, timeout; err = request_firmware(&fw, fwname, &qpti->op->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); return err; } if (fw->size % 2) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); err = -EINVAL; goto outfirm; } fw_data = (const __le16 *)&fw->data[0]; risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */ risc_code_length = fw->size / 2; spin_lock_irqsave(host->host_lock, flags); /* Verify the checksum twice, one before loading it, and once * afterwards via the mailbox commands. */ for (i = 0; i < risc_code_length; i++) csum += __le16_to_cpu(fw_data[i]); if (csum) { printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!", qpti->qpti_id); err = 1; goto out; } sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); timeout = PTI_RESET_LIMIT; while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET)) udelay(20); if (!timeout) { printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id); err = 1; goto out; } sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); mdelay(1); sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL); set_sbus_cfg1(qpti); sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { qpti->ultra = 1; sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), qpti->qregs + RISC_MTREG); } else { qpti->ultra = 0; sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), qpti->qregs + RISC_MTREG); } sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); /* Pin lines are only stable while RISC is paused. */ sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE) qpti->differential = 1; else qpti->differential = 0; sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); /* This shouldn't be necessary- we've reset things so we should be running from the ROM now.. */ param[0] = MBOX_STOP_FIRMWARE; param[1] = param[2] = param[3] = param[4] = param[5] = 0; if (qlogicpti_mbox_command(qpti, param, 1)) { printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n", qpti->qpti_id); err = 1; goto out; } /* Load it up.. */ for (i = 0; i < risc_code_length; i++) { param[0] = MBOX_WRITE_RAM_WORD; param[1] = risc_code_addr + i; param[2] = __le16_to_cpu(fw_data[i]); if (qlogicpti_mbox_command(qpti, param, 1) || param[0] != MBOX_COMMAND_COMPLETE) { printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n", qpti->qpti_id); err = 1; goto out; } } /* Reset the ISP again. 
*/ sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); mdelay(1); qlogicpti_enable_irqs(qpti); sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); /* Ask ISP to verify the checksum of the new code. */ param[0] = MBOX_VERIFY_CHECKSUM; param[1] = risc_code_addr; if (qlogicpti_mbox_command(qpti, param, 1) || (param[0] != MBOX_COMMAND_COMPLETE)) { printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n", qpti->qpti_id); err = 1; goto out; } /* Start using newly downloaded firmware. */ param[0] = MBOX_EXEC_FIRMWARE; param[1] = risc_code_addr; qlogicpti_mbox_command(qpti, param, 1); param[0] = MBOX_ABOUT_FIRMWARE; if (qlogicpti_mbox_command(qpti, param, 1) || (param[0] != MBOX_COMMAND_COMPLETE)) { printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n", qpti->qpti_id); err = 1; goto out; } /* Snag the major and minor revisions from the result. */ qpti->fware_majrev = param[1]; qpti->fware_minrev = param[2]; qpti->fware_micrev = param[3]; /* Set the clock rate */ param[0] = MBOX_SET_CLOCK_RATE; param[1] = qpti->clock; if (qlogicpti_mbox_command(qpti, param, 1) || (param[0] != MBOX_COMMAND_COMPLETE)) { printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n", qpti->qpti_id); err = 1; goto out; } if (qpti->is_pti != 0) { /* Load scsi initiator ID and interrupt level into sbus static ram. */ param[0] = MBOX_WRITE_RAM_WORD; param[1] = 0xff80; param[2] = (unsigned short) qpti->scsi_id; qlogicpti_mbox_command(qpti, param, 1); param[0] = MBOX_WRITE_RAM_WORD; param[1] = 0xff00; param[2] = (unsigned short) 3; qlogicpti_mbox_command(qpti, param, 1); } out: spin_unlock_irqrestore(host->host_lock, flags); outfirm: release_firmware(fw); return err; } static int qlogicpti_verify_tmon(struct qlogicpti *qpti) { int curstat = sbus_readb(qpti->sreg); curstat &= 0xf0; if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE)) printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id); if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER)) printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id); if (curstat != qpti->swsreg) { int error = 0; if (curstat & SREG_FUSE) { error++; printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id); } if (curstat & SREG_TPOWER) { error++; printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id); } if (qpti->differential && (curstat & SREG_DSENSE) != SREG_DSENSE) { error++; printk("qlogicpti%d: You have a single ended device on a " "differential bus! 
Please fix!\n", qpti->qpti_id); } qpti->swsreg = curstat; return error; } return 0; } static irqreturn_t qpti_intr(int irq, void *dev_id); static void qpti_chain_add(struct qlogicpti *qpti) { spin_lock_irq(&qptichain_lock); if (qptichain != NULL) { struct qlogicpti *qlink = qptichain; while(qlink->next) qlink = qlink->next; qlink->next = qpti; } else { qptichain = qpti; } qpti->next = NULL; spin_unlock_irq(&qptichain_lock); } static void qpti_chain_del(struct qlogicpti *qpti) { spin_lock_irq(&qptichain_lock); if (qptichain == qpti) { qptichain = qpti->next; } else { struct qlogicpti *qlink = qptichain; while(qlink->next != qpti) qlink = qlink->next; qlink->next = qpti->next; } qpti->next = NULL; spin_unlock_irq(&qptichain_lock); } static int qpti_map_regs(struct qlogicpti *qpti) { struct platform_device *op = qpti->op; qpti->qregs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), "PTI Qlogic/ISP"); if (!qpti->qregs) { printk("PTI: Qlogic/ISP registers are unmappable\n"); return -ENODEV; } if (qpti->is_pti) { qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096), sizeof(unsigned char), "PTI Qlogic/ISP statreg"); if (!qpti->sreg) { printk("PTI: Qlogic/ISP status register is unmappable\n"); return -ENODEV; } } return 0; } static int qpti_register_irq(struct qlogicpti *qpti) { struct platform_device *op = qpti->op; qpti->qhost->irq = qpti->irq = op->archdata.irqs[0]; /* We used to try various overly-clever things to * reduce the interrupt processing overhead on * sun4c/sun4m when multiple PTI's shared the * same IRQ. It was too complex and messy to * sanely maintain. */ if (request_irq(qpti->irq, qpti_intr, IRQF_SHARED, "QlogicPTI", qpti)) goto fail; printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq); return 0; fail: printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id); return -1; } static void qpti_get_scsi_id(struct qlogicpti *qpti) { struct platform_device *op = qpti->op; struct device_node *dp; dp = op->dev.of_node; qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); if (qpti->scsi_id == -1) qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", -1); if (qpti->scsi_id == -1) qpti->scsi_id = of_getintprop_default(dp->parent, "scsi-initiator-id", 7); qpti->qhost->this_id = qpti->scsi_id; qpti->qhost->max_sectors = 64; printk("SCSI ID %d ", qpti->scsi_id); } static void qpti_get_bursts(struct qlogicpti *qpti) { struct platform_device *op = qpti->op; u8 bursts, bmask; bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff); bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff); if (bmask != 0xff) bursts &= bmask; if (bursts == 0xff || (bursts & DMA_BURST16) == 0 || (bursts & DMA_BURST32) == 0) bursts = (DMA_BURST32 - 1); qpti->bursts = bursts; } static void qpti_get_clock(struct qlogicpti *qpti) { unsigned int cfreq; /* Check for what the clock input to this card is. * Default to 40Mhz. */ cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000); qpti->clock = (cfreq + 500000)/1000000; if (qpti->clock == 0) /* bullshit */ qpti->clock = 40; } /* The request and response queues must each be aligned * on a page boundary. 
*/ static int qpti_map_queues(struct qlogicpti *qpti) { struct platform_device *op = qpti->op; #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) qpti->res_cpu = dma_alloc_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), &qpti->res_dvma, GFP_ATOMIC); if (qpti->res_cpu == NULL || qpti->res_dvma == 0) { printk("QPTI: Cannot map response queue.\n"); return -1; } qpti->req_cpu = dma_alloc_coherent(&op->dev, QSIZE(QLOGICPTI_REQ_QUEUE_LEN), &qpti->req_dvma, GFP_ATOMIC); if (qpti->req_cpu == NULL || qpti->req_dvma == 0) { dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), qpti->res_cpu, qpti->res_dvma); printk("QPTI: Cannot map request queue.\n"); return -1; } memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN)); memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN)); return 0; } static const char *qlogicpti_info(struct Scsi_Host *host) { static char buf[80]; struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p", qpti->qhost->irq, qpti->qregs); return buf; } /* I am a certified frobtronicist. */ static inline void marker_frob(struct Command_Entry *cmd) { struct Marker_Entry *marker = (struct Marker_Entry *) cmd; memset(marker, 0, sizeof(struct Marker_Entry)); marker->hdr.entry_cnt = 1; marker->hdr.entry_type = ENTRY_MARKER; marker->modifier = SYNC_ALL; marker->rsvd = 0; } static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, struct qlogicpti *qpti) { memset(cmd, 0, sizeof(struct Command_Entry)); cmd->hdr.entry_cnt = 1; cmd->hdr.entry_type = ENTRY_COMMAND; cmd->target_id = Cmnd->device->id; cmd->target_lun = Cmnd->device->lun; cmd->cdb_length = Cmnd->cmd_len; cmd->control_flags = 0; if (Cmnd->device->tagged_supported) { if (qpti->cmd_count[Cmnd->device->id] == 0) qpti->tag_ages[Cmnd->device->id] = jiffies; if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) { cmd->control_flags = CFLAG_ORDERED_TAG; qpti->tag_ages[Cmnd->device->id] = jiffies; } else cmd->control_flags = CFLAG_SIMPLE_TAG; } if ((Cmnd->cmnd[0] == WRITE_6) || (Cmnd->cmnd[0] == WRITE_10) || (Cmnd->cmnd[0] == WRITE_12)) cmd->control_flags |= CFLAG_WRITE; else cmd->control_flags |= CFLAG_READ; cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ; memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); } /* Do it to it baby. */ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr) { struct dataseg *ds; struct scatterlist *sg, *s; int i, n; if (scsi_bufflen(Cmnd)) { int sg_count; sg = scsi_sglist(Cmnd); sg_count = dma_map_sg(&qpti->op->dev, sg, scsi_sg_count(Cmnd), Cmnd->sc_data_direction); if (!sg_count) return -1; ds = cmd->dataseg; cmd->segment_cnt = sg_count; /* Fill in first four sg entries: */ n = sg_count; if (n > 4) n = 4; for_each_sg(sg, s, n, i) { ds[i].d_base = sg_dma_address(s); ds[i].d_count = sg_dma_len(s); } sg_count -= 4; sg = s; while (sg_count > 0) { struct Continuation_Entry *cont; ++cmd->hdr.entry_cnt; cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr]; in_ptr = NEXT_REQ_PTR(in_ptr); if (in_ptr == out_ptr) return -1; cont->hdr.entry_type = ENTRY_CONTINUATION; cont->hdr.entry_cnt = 0; cont->hdr.sys_def_1 = 0; cont->hdr.flags = 0; cont->reserved = 0; ds = cont->dataseg; n = sg_count; if (n > 7) n = 7; for_each_sg(sg, s, n, i) { ds[i].d_base = sg_dma_address(s); ds[i].d_count = sg_dma_len(s); } sg_count -= n; sg = s; } } else { cmd->dataseg[0].d_base = 0; cmd->dataseg[0].d_count = 0; cmd->segment_cnt = 1; /* Shouldn't this be 0? 
*/ } /* Committed, record Scsi_Cmd so we can find it later. */ cmd->handle = in_ptr; qpti->cmd_slots[in_ptr] = Cmnd; qpti->cmd_count[Cmnd->device->id]++; sbus_writew(in_ptr, qpti->qregs + MBOX4); qpti->req_in_ptr = in_ptr; return in_ptr; } static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr) { /* Temporary workaround until bug is found and fixed (one bug has been found already, but fixing it makes things even worse) -jj */ int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; host->can_queue = scsi_host_busy(host) + num_free; host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); } static int qlogicpti_slave_configure(struct scsi_device *sdev) { struct qlogicpti *qpti = shost_priv(sdev->host); int tgt = sdev->id; u_short param[6]; /* tags handled in midlayer */ /* enable sync mode? */ if (sdev->sdtr) { qpti->dev_param[tgt].device_flags |= 0x10; } else { qpti->dev_param[tgt].synchronous_offset = 0; qpti->dev_param[tgt].synchronous_period = 0; } /* are we wide capable? */ if (sdev->wdtr) qpti->dev_param[tgt].device_flags |= 0x20; param[0] = MBOX_SET_TARGET_PARAMS; param[1] = (tgt << 8); param[2] = (qpti->dev_param[tgt].device_flags << 8); if (qpti->dev_param[tgt].device_flags & 0x10) { param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) | qpti->dev_param[tgt].synchronous_period; } else { param[3] = 0; } qlogicpti_mbox_command(qpti, param, 0); return 0; } /* * The middle SCSI layer ensures that queuecommand never gets invoked * concurrently with itself or the interrupt handler (though the * interrupt handler may call this routine as part of * request-completion handling). * * "This code must fly." -davem */ static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd) { void (*done)(struct scsi_cmnd *) = scsi_done; struct Scsi_Host *host = Cmnd->device->host; struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; struct Command_Entry *cmd; u_int out_ptr; int in_ptr; in_ptr = qpti->req_in_ptr; cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; out_ptr = sbus_readw(qpti->qregs + MBOX4); in_ptr = NEXT_REQ_PTR(in_ptr); if (in_ptr == out_ptr) goto toss_command; if (qpti->send_marker) { marker_frob(cmd); qpti->send_marker = 0; if (NEXT_REQ_PTR(in_ptr) == out_ptr) { sbus_writew(in_ptr, qpti->qregs + MBOX4); qpti->req_in_ptr = in_ptr; goto toss_command; } cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; in_ptr = NEXT_REQ_PTR(in_ptr); } cmd_frob(cmd, Cmnd, qpti); if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1) goto toss_command; update_can_queue(host, in_ptr, out_ptr); return 0; toss_command: printk(KERN_EMERG "qlogicpti%d: request queue overflow\n", qpti->qpti_id); /* Unfortunately, unless you use the new EH code, which * we don't, the midlayer will ignore the return value, * which is insane. We pick up the pieces like this. 
*/ Cmnd->result = DID_BUS_BUSY; done(Cmnd); return 1; } static DEF_SCSI_QCMD(qlogicpti_queuecommand) static int qlogicpti_return_status(struct Status_Entry *sts, int id) { int host_status = DID_ERROR; switch (sts->completion_status) { case CS_COMPLETE: host_status = DID_OK; break; case CS_INCOMPLETE: if (!(sts->state_flags & SF_GOT_BUS)) host_status = DID_NO_CONNECT; else if (!(sts->state_flags & SF_GOT_TARGET)) host_status = DID_BAD_TARGET; else if (!(sts->state_flags & SF_SENT_CDB)) host_status = DID_ERROR; else if (!(sts->state_flags & SF_TRANSFERRED_DATA)) host_status = DID_ERROR; else if (!(sts->state_flags & SF_GOT_STATUS)) host_status = DID_ERROR; else if (!(sts->state_flags & SF_GOT_SENSE)) host_status = DID_ERROR; break; case CS_DMA_ERROR: case CS_TRANSPORT_ERROR: host_status = DID_ERROR; break; case CS_RESET_OCCURRED: case CS_BUS_RESET: host_status = DID_RESET; break; case CS_ABORTED: host_status = DID_ABORT; break; case CS_TIMEOUT: host_status = DID_TIME_OUT; break; case CS_DATA_OVERRUN: case CS_COMMAND_OVERRUN: case CS_STATUS_OVERRUN: case CS_BAD_MESSAGE: case CS_NO_MESSAGE_OUT: case CS_EXT_ID_FAILED: case CS_IDE_MSG_FAILED: case CS_ABORT_MSG_FAILED: case CS_NOP_MSG_FAILED: case CS_PARITY_ERROR_MSG_FAILED: case CS_DEVICE_RESET_MSG_FAILED: case CS_ID_MSG_FAILED: case CS_UNEXP_BUS_FREE: host_status = DID_ERROR; break; case CS_DATA_UNDERRUN: host_status = DID_OK; break; default: printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n", id, sts->completion_status); host_status = DID_ERROR; break; } return (sts->scsi_status & STATUS_MASK) | (host_status << 16); } static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) { struct scsi_cmnd *Cmnd, *done_queue = NULL; struct Status_Entry *sts; u_int in_ptr, out_ptr; if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT)) return NULL; in_ptr = sbus_readw(qpti->qregs + MBOX5); sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL); if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) { switch (sbus_readw(qpti->qregs + MBOX0)) { case ASYNC_SCSI_BUS_RESET: case EXECUTION_TIMEOUT_RESET: qpti->send_marker = 1; break; case INVALID_COMMAND: case HOST_INTERFACE_ERROR: case COMMAND_ERROR: case COMMAND_PARAM_ERROR: break; }; sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); } /* This looks like a network driver! */ out_ptr = qpti->res_out_ptr; while (out_ptr != in_ptr) { u_int cmd_slot; sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr]; out_ptr = NEXT_RES_PTR(out_ptr); /* We store an index in the handle, not the pointer in * some form. This avoids problems due to the fact * that the handle provided is only 32-bits. 
-DaveM */ cmd_slot = sts->handle; Cmnd = qpti->cmd_slots[cmd_slot]; qpti->cmd_slots[cmd_slot] = NULL; if (sts->completion_status == CS_RESET_OCCURRED || sts->completion_status == CS_ABORTED || (sts->status_flags & STF_BUS_RESET)) qpti->send_marker = 1; if (sts->state_flags & SF_GOT_SENSE) memcpy(Cmnd->sense_buffer, sts->req_sense_data, SCSI_SENSE_BUFFERSIZE); if (sts->hdr.entry_type == ENTRY_STATUS) Cmnd->result = qlogicpti_return_status(sts, qpti->qpti_id); else Cmnd->result = DID_ERROR << 16; if (scsi_bufflen(Cmnd)) dma_unmap_sg(&qpti->op->dev, scsi_sglist(Cmnd), scsi_sg_count(Cmnd), Cmnd->sc_data_direction); qpti->cmd_count[Cmnd->device->id]--; sbus_writew(out_ptr, qpti->qregs + MBOX5); Cmnd->host_scribble = (unsigned char *) done_queue; done_queue = Cmnd; } qpti->res_out_ptr = out_ptr; return done_queue; } static irqreturn_t qpti_intr(int irq, void *dev_id) { struct qlogicpti *qpti = dev_id; unsigned long flags; struct scsi_cmnd *dq; spin_lock_irqsave(qpti->qhost->host_lock, flags); dq = qlogicpti_intr_handler(qpti); if (dq != NULL) { do { struct scsi_cmnd *next; next = (struct scsi_cmnd *) dq->host_scribble; scsi_done(dq); dq = next; } while (dq != NULL); } spin_unlock_irqrestore(qpti->qhost->host_lock, flags); return IRQ_HANDLED; } static int qlogicpti_abort(struct scsi_cmnd *Cmnd) { u_short param[6]; struct Scsi_Host *host = Cmnd->device->host; struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; int return_status = SUCCESS; u32 cmd_cookie; int i; printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n", qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun); qlogicpti_disable_irqs(qpti); /* Find the 32-bit cookie we gave to the firmware for * this command. */ for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++) if (qpti->cmd_slots[i] == Cmnd) break; cmd_cookie = i; param[0] = MBOX_ABORT; param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun; param[2] = cmd_cookie >> 16; param[3] = cmd_cookie & 0xffff; if (qlogicpti_mbox_command(qpti, param, 0) || (param[0] != MBOX_COMMAND_COMPLETE)) { printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n", qpti->qpti_id, param[0]); return_status = FAILED; } qlogicpti_enable_irqs(qpti); return return_status; } static int qlogicpti_reset(struct scsi_cmnd *Cmnd) { u_short param[6]; struct Scsi_Host *host = Cmnd->device->host; struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; int return_status = SUCCESS; printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n", qpti->qpti_id); qlogicpti_disable_irqs(qpti); param[0] = MBOX_BUS_RESET; param[1] = qpti->host_param.bus_reset_delay; if (qlogicpti_mbox_command(qpti, param, 0) || (param[0] != MBOX_COMMAND_COMPLETE)) { printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n", qpti->qpti_id, param[0]); return_status = FAILED; } qlogicpti_enable_irqs(qpti); return return_status; } static const struct scsi_host_template qpti_template = { .module = THIS_MODULE, .name = "qlogicpti", .info = qlogicpti_info, .queuecommand = qlogicpti_queuecommand, .slave_configure = qlogicpti_slave_configure, .eh_abort_handler = qlogicpti_abort, .eh_host_reset_handler = qlogicpti_reset, .can_queue = QLOGICPTI_REQ_QUEUE_LEN, .this_id = 7, .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), }; static const struct of_device_id qpti_match[]; static int qpti_sbus_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; struct qlogicpti *qpti; static int nqptis; const char *fcode; /* Sometimes Antares cards come up not completely * 
setup, and we get a report of a zero IRQ. */ if (op->archdata.irqs[0] == 0) return -ENODEV; host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti)); if (!host) return -ENOMEM; qpti = shost_priv(host); host->max_id = MAX_TARGETS; qpti->qhost = host; qpti->op = op; qpti->qpti_id = nqptis; qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp"); if (qpti_map_regs(qpti) < 0) goto fail_unlink; if (qpti_register_irq(qpti) < 0) goto fail_unmap_regs; qpti_get_scsi_id(qpti); qpti_get_bursts(qpti); qpti_get_clock(qpti); /* Clear out scsi_cmnd array. */ memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots)); if (qpti_map_queues(qpti) < 0) goto fail_free_irq; /* Load the firmware. */ if (qlogicpti_load_firmware(qpti)) goto fail_unmap_queues; if (qpti->is_pti) { /* Check the PTI status reg. */ if (qlogicpti_verify_tmon(qpti)) goto fail_unmap_queues; } /* Reset the ISP and init res/req queues. */ if (qlogicpti_reset_hardware(host)) goto fail_unmap_queues; printk("(Firmware v%d.%d.%d)", qpti->fware_majrev, qpti->fware_minrev, qpti->fware_micrev); fcode = of_get_property(dp, "isp-fcode", NULL); if (fcode && fcode[0]) printk("(FCode %s)", fcode); qpti->differential = of_property_read_bool(dp, "differential"); printk("\nqlogicpti%d: [%s Wide, using %s interface]\n", qpti->qpti_id, (qpti->ultra ? "Ultra" : "Fast"), (qpti->differential ? "differential" : "single ended")); if (scsi_add_host(host, &op->dev)) { printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id); goto fail_unmap_queues; } dev_set_drvdata(&op->dev, qpti); qpti_chain_add(qpti); scsi_scan_host(host); nqptis++; return 0; fail_unmap_queues: #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), qpti->res_cpu, qpti->res_dvma); dma_free_coherent(&op->dev, QSIZE(QLOGICPTI_REQ_QUEUE_LEN), qpti->req_cpu, qpti->req_dvma); #undef QSIZE fail_free_irq: free_irq(qpti->irq, qpti); fail_unmap_regs: of_iounmap(&op->resource[0], qpti->qregs, resource_size(&op->resource[0])); if (qpti->is_pti) of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char)); fail_unlink: scsi_host_put(host); return -ENODEV; } static int qpti_sbus_remove(struct platform_device *op) { struct qlogicpti *qpti = dev_get_drvdata(&op->dev); qpti_chain_del(qpti); scsi_remove_host(qpti->qhost); /* Shut up the card. */ sbus_writew(0, qpti->qregs + SBUS_CTRL); /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */ free_irq(qpti->irq, qpti); #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), qpti->res_cpu, qpti->res_dvma); dma_free_coherent(&op->dev, QSIZE(QLOGICPTI_REQ_QUEUE_LEN), qpti->req_cpu, qpti->req_dvma); #undef QSIZE of_iounmap(&op->resource[0], qpti->qregs, resource_size(&op->resource[0])); if (qpti->is_pti) of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char)); scsi_host_put(qpti->qhost); return 0; } static const struct of_device_id qpti_match[] = { { .name = "ptisp", }, { .name = "PTI,ptisp", }, { .name = "QLGC,isp", }, { .name = "SUNW,isp", }, {}, }; MODULE_DEVICE_TABLE(of, qpti_match); static struct platform_driver qpti_sbus_driver = { .driver = { .name = "qpti", .of_match_table = qpti_match, }, .probe = qpti_sbus_probe, .remove = qpti_sbus_remove, }; module_platform_driver(qpti_sbus_driver); MODULE_DESCRIPTION("QlogicISP SBUS driver"); MODULE_AUTHOR("David S. Miller ([email protected])"); MODULE_LICENSE("GPL"); MODULE_VERSION("2.1"); MODULE_FIRMWARE("qlogic/isp1000.bin");
linux-master
drivers/scsi/qlogicpti.c
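The mbox_param[] table in qlogicpti.c above records, for every mailbox opcode, how many MBOX registers must be written to the card and how many are read back on completion, packed into one byte by PACKB(a, b); qlogicpti_mbox_command() then unpacks the two nibbles with ">> 4" and "& 0xf" to drive its register switch statements. A small standalone sketch of that packing, using made-up opcodes rather than the real command numbers, is shown here.

#include <stdio.h>

/* Same packing as the driver: high nibble = registers written to the
 * card, low nibble = registers read back when the command completes. */
#define PACKB(in, out)  (((in) << 4) | (out))

static const unsigned char mbox_param[] = {
        PACKB(1, 1),    /* hypothetical NO_OP: write MBOX0, read MBOX0 */
        PACKB(5, 5),    /* hypothetical LOAD_RAM: write/read MBOX0..MBOX4 */
        PACKB(2, 3),    /* hypothetical READ_WORD: write two, read three */
};

int main(void)
{
        for (unsigned int op = 0; op < sizeof(mbox_param); op++) {
                unsigned int in  = mbox_param[op] >> 4;   /* registers to load */
                unsigned int out = mbox_param[op] & 0xf;  /* registers to read */

                printf("opcode %u: write MBOX0..MBOX%u, read MBOX0..MBOX%u\n",
                       op, in - 1, out - 1);
        }
        return 0;
}

A zero entry in the real table marks an unimplemented opcode, which is why qlogicpti_mbox_command() bails out early when mbox_param[param[0]] is 0.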
// SPDX-License-Identifier: GPL-2.0-only /* * raid_class.c - implementation of a simple raid visualisation class * * Copyright (c) 2005 - James Bottomley <[email protected]> * * This class is designed to allow raid attributes to be visualised and * manipulated in a form independent of the underlying raid. Ultimately this * should work for both hardware and software raids. */ #include <linux/init.h> #include <linux/module.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/raid_class.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #define RAID_NUM_ATTRS 3 struct raid_internal { struct raid_template r; struct raid_function_template *f; /* The actual attributes */ struct device_attribute private_attrs[RAID_NUM_ATTRS]; /* The array of null terminated pointers to attributes * needed by scsi_sysfs.c */ struct device_attribute *attrs[RAID_NUM_ATTRS + 1]; }; struct raid_component { struct list_head node; struct device dev; int num; }; #define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r) #define tc_to_raid_internal(tcont) ({ \ struct raid_template *r = \ container_of(tcont, struct raid_template, raid_attrs); \ to_raid_internal(r); \ }) #define ac_to_raid_internal(acont) ({ \ struct transport_container *tc = \ container_of(acont, struct transport_container, ac); \ tc_to_raid_internal(tc); \ }) #define device_to_raid_internal(dev) ({ \ struct attribute_container *ac = \ attribute_container_classdev_to_container(dev); \ ac_to_raid_internal(ac); \ }) static int raid_match(struct attribute_container *cont, struct device *dev) { /* We have to look for every subsystem that could house * emulated RAID devices, so start with SCSI */ struct raid_internal *i = ac_to_raid_internal(cont); if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); if (i->f->cookie != sdev->host->hostt) return 0; return i->f->is_raid(dev); } /* FIXME: look at other subsystems too */ return 0; } static int raid_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct raid_data *rd; BUG_ON(dev_get_drvdata(cdev)); rd = kzalloc(sizeof(*rd), GFP_KERNEL); if (!rd) return -ENOMEM; INIT_LIST_HEAD(&rd->component_list); dev_set_drvdata(cdev, rd); return 0; } static int raid_remove(struct transport_container *tc, struct device *dev, struct device *cdev) { struct raid_data *rd = dev_get_drvdata(cdev); struct raid_component *rc, *next; dev_printk(KERN_ERR, dev, "RAID REMOVE\n"); dev_set_drvdata(cdev, NULL); list_for_each_entry_safe(rc, next, &rd->component_list, node) { list_del(&rc->node); dev_printk(KERN_ERR, rc->dev.parent, "RAID COMPONENT REMOVE\n"); device_unregister(&rc->dev); } dev_printk(KERN_ERR, dev, "RAID REMOVE DONE\n"); kfree(rd); return 0; } static DECLARE_TRANSPORT_CLASS(raid_class, "raid_devices", raid_setup, raid_remove, NULL); static const struct { enum raid_state value; char *name; } raid_states[] = { { RAID_STATE_UNKNOWN, "unknown" }, { RAID_STATE_ACTIVE, "active" }, { RAID_STATE_DEGRADED, "degraded" }, { RAID_STATE_RESYNCING, "resyncing" }, { RAID_STATE_OFFLINE, "offline" }, }; static const char *raid_state_name(enum raid_state state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(raid_states); i++) { if (raid_states[i].value == state) { name = raid_states[i].name; break; } } return name; } static struct { enum raid_level value; char *name; } raid_levels[] = { { RAID_LEVEL_UNKNOWN, "unknown" }, { RAID_LEVEL_LINEAR, "linear" }, { RAID_LEVEL_0, "raid0" }, { 
RAID_LEVEL_1, "raid1" }, { RAID_LEVEL_10, "raid10" }, { RAID_LEVEL_1E, "raid1e" }, { RAID_LEVEL_3, "raid3" }, { RAID_LEVEL_4, "raid4" }, { RAID_LEVEL_5, "raid5" }, { RAID_LEVEL_50, "raid50" }, { RAID_LEVEL_6, "raid6" }, { RAID_LEVEL_JBOD, "jbod" }, }; static const char *raid_level_name(enum raid_level level) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(raid_levels); i++) { if (raid_levels[i].value == level) { name = raid_levels[i].name; break; } } return name; } #define raid_attr_show_internal(attr, fmt, var, code) \ static ssize_t raid_show_##attr(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct raid_data *rd = dev_get_drvdata(dev); \ code \ return snprintf(buf, 20, #fmt "\n", var); \ } #define raid_attr_ro_states(attr, states, code) \ raid_attr_show_internal(attr, %s, name, \ const char *name; \ code \ name = raid_##states##_name(rd->attr); \ ) \ static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL) #define raid_attr_ro_internal(attr, code) \ raid_attr_show_internal(attr, %d, rd->attr, code) \ static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL) #define ATTR_CODE(attr) \ struct raid_internal *i = device_to_raid_internal(dev); \ if (i->f->get_##attr) \ i->f->get_##attr(dev->parent); #define raid_attr_ro(attr) raid_attr_ro_internal(attr, ) #define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr)) #define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, ) #define raid_attr_ro_state_fn(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr)) raid_attr_ro_state(level); raid_attr_ro_fn(resync); raid_attr_ro_state_fn(state); struct raid_template * raid_class_attach(struct raid_function_template *ft) { struct raid_internal *i = kzalloc(sizeof(struct raid_internal), GFP_KERNEL); int count = 0; if (unlikely(!i)) return NULL; i->f = ft; i->r.raid_attrs.ac.class = &raid_class.class; i->r.raid_attrs.ac.match = raid_match; i->r.raid_attrs.ac.attrs = &i->attrs[0]; attribute_container_register(&i->r.raid_attrs.ac); i->attrs[count++] = &dev_attr_level; i->attrs[count++] = &dev_attr_resync; i->attrs[count++] = &dev_attr_state; i->attrs[count] = NULL; BUG_ON(count > RAID_NUM_ATTRS); return &i->r; } EXPORT_SYMBOL(raid_class_attach); void raid_class_release(struct raid_template *r) { struct raid_internal *i = to_raid_internal(r); BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac)); kfree(i); } EXPORT_SYMBOL(raid_class_release); static __init int raid_init(void) { return transport_class_register(&raid_class); } static __exit void raid_exit(void) { transport_class_unregister(&raid_class); } MODULE_AUTHOR("James Bottomley"); MODULE_DESCRIPTION("RAID device class"); MODULE_LICENSE("GPL"); module_init(raid_init); module_exit(raid_exit);
linux-master
drivers/scsi/raid_class.c
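raid_class.c above recovers its raid_internal wrapper from embedded members through a chain of container_of() steps (to_raid_internal, tc_to_raid_internal, ac_to_raid_internal). The mechanism is plain pointer arithmetic on a structure offset; the standalone sketch below uses a hypothetical wrapper struct and a local container_of built on offsetof to show the idea, and is not taken from the kernel sources.

#include <stddef.h>
#include <stdio.h>

/* Minimal local version of the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for raid_template / raid_internal. */
struct demo_template {
        int dummy;
};

struct demo_internal {
        struct demo_template r;   /* embedded, like raid_internal.r */
        int private_value;
};

static struct demo_internal *to_demo_internal(struct demo_template *tmpl)
{
        return container_of(tmpl, struct demo_internal, r);
}

int main(void)
{
        struct demo_internal i = { .private_value = 42 };
        struct demo_template *tmpl = &i.r;   /* only the embedded member is handed out */

        /* Walk back from the member to the enclosing structure. */
        printf("private_value = %d\n", to_demo_internal(tmpl)->private_value);
        return 0;
}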
// SPDX-License-Identifier: GPL-2.0-or-later /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-2000 Advanced System Products, Inc. * Copyright (c) 2000-2001 ConnectCom Solutions, Inc. * Copyright (c) 2007 Matthew Wilcox <[email protected]> * Copyright (c) 2014 Hannes Reinecke <[email protected]> * All Rights Reserved. */ /* * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys) * changed its name to ConnectCom Solutions, Inc. * On June 18, 2001 Initio Corp. acquired ConnectCom's SCSI assets */ #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/isa.h> #include <linux/eisa.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <linux/dmapool.h> #include <asm/io.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #define DRV_NAME "advansys" #define ASC_VERSION "3.5" /* AdvanSys Driver Version */ /* FIXME: * * 1. Use scsi_transport_spi * 2. advansys_info is not safe against multiple simultaneous callers * 3. Add module_param to override ISA/VLB ioport array */ /* Enable driver /proc statistics. */ #define ADVANSYS_STATS /* Enable driver tracing. */ #undef ADVANSYS_DEBUG typedef unsigned char uchar; #define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0) #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_1200A 0x1100 #define PCI_DEVICE_ID_ASP_ABP940 0x1200 #define PCI_DEVICE_ID_ASP_ABP940U 0x1300 #define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 #define PCI_DEVICE_ID_38C0800_REV1 0x2500 #define PCI_DEVICE_ID_38C1600_REV1 0x2700 #define PortAddr unsigned int /* port address size */ #define inp(port) inb(port) #define outp(port, byte) outb((byte), (port)) #define inpw(port) inw(port) #define outpw(port, word) outw((word), (port)) #define ASC_MAX_SG_QUEUE 7 #define ASC_MAX_SG_LIST 255 #define ASC_CS_TYPE unsigned short #define ASC_IS_EISA (0x0002) #define ASC_IS_PCI (0x0004) #define ASC_IS_PCI_ULTRA (0x0104) #define ASC_IS_PCMCIA (0x0008) #define ASC_IS_MCA (0x0020) #define ASC_IS_VL (0x0040) #define ASC_IS_WIDESCSI_16 (0x0100) #define ASC_IS_WIDESCSI_32 (0x0200) #define ASC_IS_BIG_ENDIAN (0x8000) #define ASC_CHIP_MIN_VER_VL (0x01) #define ASC_CHIP_MAX_VER_VL (0x07) #define ASC_CHIP_MIN_VER_PCI (0x09) #define ASC_CHIP_MAX_VER_PCI (0x0F) #define ASC_CHIP_VER_PCI_BIT (0x08) #define ASC_CHIP_VER_ASYN_BUG (0x21) #define ASC_CHIP_VER_PCI 0x08 #define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02) #define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03) #define ASC_CHIP_MIN_VER_EISA (0x41) #define ASC_CHIP_MAX_VER_EISA (0x47) #define ASC_CHIP_VER_EISA_BIT (0x40) #define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3) #define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL) #define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL) #define ASC_SCSI_ID_BITS 3 #define ASC_SCSI_TIX_TYPE uchar #define ASC_ALL_DEVICE_BIT_SET 0xFF #define ASC_SCSI_BIT_ID_TYPE uchar #define ASC_MAX_TID 7 #define ASC_MAX_LUN 7 #define ASC_SCSI_WIDTH_BIT_SET 0xFF #define ASC_MAX_SENSE_LEN 32 #define ASC_MIN_SENSE_LEN 14 #define ASC_SCSI_RESET_HOLD_TIME_US 60 /* * Narrow boards only support 12-byte commands, while wide boards * extend to 16-byte 
commands. */ #define ASC_MAX_CDB_LEN 12 #define ADV_MAX_CDB_LEN 16 #define MS_SDTR_LEN 0x03 #define MS_WDTR_LEN 0x02 #define ASC_SG_LIST_PER_Q 7 #define QS_FREE 0x00 #define QS_READY 0x01 #define QS_DISC1 0x02 #define QS_DISC2 0x04 #define QS_BUSY 0x08 #define QS_ABORTED 0x40 #define QS_DONE 0x80 #define QC_NO_CALLBACK 0x01 #define QC_SG_SWAP_QUEUE 0x02 #define QC_SG_HEAD 0x04 #define QC_DATA_IN 0x08 #define QC_DATA_OUT 0x10 #define QC_URGENT 0x20 #define QC_MSG_OUT 0x40 #define QC_REQ_SENSE 0x80 #define QCSG_SG_XFER_LIST 0x02 #define QCSG_SG_XFER_MORE 0x04 #define QCSG_SG_XFER_END 0x08 #define QD_IN_PROGRESS 0x00 #define QD_NO_ERROR 0x01 #define QD_ABORTED_BY_HOST 0x02 #define QD_WITH_ERROR 0x04 #define QD_INVALID_REQUEST 0x80 #define QD_INVALID_HOST_NUM 0x81 #define QD_INVALID_DEVICE 0x82 #define QD_ERR_INTERNAL 0xFF #define QHSTA_NO_ERROR 0x00 #define QHSTA_M_SEL_TIMEOUT 0x11 #define QHSTA_M_DATA_OVER_RUN 0x12 #define QHSTA_M_DATA_UNDER_RUN 0x12 #define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 #define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14 #define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21 #define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22 #define QHSTA_D_HOST_ABORT_FAILED 0x23 #define QHSTA_D_EXE_SCSI_Q_FAILED 0x24 #define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25 #define QHSTA_D_ASPI_NO_BUF_POOL 0x26 #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_TARGET_STATUS_BUSY 0x45 #define QHSTA_M_BAD_TAG_CODE 0x46 #define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47 #define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48 #define QHSTA_D_LRAM_CMP_ERROR 0x81 #define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1 #define ASC_FLAG_SCSIQ_REQ 0x01 #define ASC_FLAG_BIOS_SCSIQ_REQ 0x02 #define ASC_FLAG_BIOS_ASYNC_IO 0x04 #define ASC_FLAG_SRB_LINEAR_ADDR 0x08 #define ASC_FLAG_WIN16 0x10 #define ASC_FLAG_WIN32 0x20 #define ASC_FLAG_DOS_VM_CALLBACK 0x80 #define ASC_TAG_FLAG_EXTRA_BYTES 0x10 #define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04 #define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08 #define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40 #define ASC_SCSIQ_CPY_BEG 4 #define ASC_SCSIQ_SGHD_CPY_BEG 2 #define ASC_SCSIQ_B_FWD 0 #define ASC_SCSIQ_B_BWD 1 #define ASC_SCSIQ_B_STATUS 2 #define ASC_SCSIQ_B_QNO 3 #define ASC_SCSIQ_B_CNTL 4 #define ASC_SCSIQ_B_SG_QUEUE_CNT 5 #define ASC_SCSIQ_D_DATA_ADDR 8 #define ASC_SCSIQ_D_DATA_CNT 12 #define ASC_SCSIQ_B_SENSE_LEN 20 #define ASC_SCSIQ_DONE_INFO_BEG 22 #define ASC_SCSIQ_D_SRBPTR 22 #define ASC_SCSIQ_B_TARGET_IX 26 #define ASC_SCSIQ_B_CDB_LEN 28 #define ASC_SCSIQ_B_TAG_CODE 29 #define ASC_SCSIQ_W_VM_ID 30 #define ASC_SCSIQ_DONE_STATUS 32 #define ASC_SCSIQ_HOST_STATUS 33 #define ASC_SCSIQ_SCSI_STATUS 34 #define ASC_SCSIQ_CDB_BEG 36 #define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56 #define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60 #define ASC_SCSIQ_B_FIRST_SG_WK_QP 48 #define ASC_SCSIQ_B_SG_WK_QP 49 #define ASC_SCSIQ_B_SG_WK_IX 50 #define ASC_SCSIQ_W_ALT_DC1 52 #define ASC_SCSIQ_B_LIST_CNT 6 #define ASC_SCSIQ_B_CUR_LIST_CNT 7 #define ASC_SGQ_B_SG_CNTL 4 #define ASC_SGQ_B_SG_HEAD_QP 5 #define ASC_SGQ_B_SG_LIST_CNT 6 #define ASC_SGQ_B_SG_CUR_LIST_CNT 7 #define ASC_SGQ_LIST_BEG 8 #define ASC_DEF_SCSI1_QNG 4 #define ASC_MAX_SCSI1_QNG 4 #define ASC_DEF_SCSI2_QNG 16 #define ASC_MAX_SCSI2_QNG 32 #define ASC_TAG_CODE_MASK 0x23 #define ASC_STOP_REQ_RISC_STOP 0x01 #define ASC_STOP_ACK_RISC_STOP 0x03 #define ASC_STOP_CLEAN_UP_BUSY_Q 0x10 #define ASC_STOP_CLEAN_UP_DISC_Q 0x20 #define ASC_STOP_HOST_REQ_RISC_HALT 0x40 #define ASC_TIDLUN_TO_IX(tid, 
lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS)) #define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid)) #define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID)) #define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID) #define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID) #define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN) #define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6)) typedef struct asc_scsiq_1 { uchar status; uchar q_no; uchar cntl; uchar sg_queue_cnt; uchar target_id; uchar target_lun; __le32 data_addr; __le32 data_cnt; __le32 sense_addr; uchar sense_len; uchar extra_bytes; } ASC_SCSIQ_1; typedef struct asc_scsiq_2 { u32 srb_tag; uchar target_ix; uchar flag; uchar cdb_len; uchar tag_code; ushort vm_id; } ASC_SCSIQ_2; typedef struct asc_scsiq_3 { uchar done_stat; uchar host_stat; uchar scsi_stat; uchar scsi_msg; } ASC_SCSIQ_3; typedef struct asc_scsiq_4 { uchar cdb[ASC_MAX_CDB_LEN]; uchar y_first_sg_list_qp; uchar y_working_sg_qp; uchar y_working_sg_ix; uchar y_res; ushort x_req_count; ushort x_reconnect_rtn; __le32 x_saved_data_addr; __le32 x_saved_data_cnt; } ASC_SCSIQ_4; typedef struct asc_q_done_info { ASC_SCSIQ_2 d2; ASC_SCSIQ_3 d3; uchar q_status; uchar q_no; uchar cntl; uchar sense_len; uchar extra_bytes; uchar res; u32 remain_bytes; } ASC_QDONE_INFO; typedef struct asc_sg_list { __le32 addr; __le32 bytes; } ASC_SG_LIST; typedef struct asc_sg_head { ushort entry_cnt; ushort queue_cnt; ushort entry_to_copy; ushort res; ASC_SG_LIST sg_list[]; } ASC_SG_HEAD; typedef struct asc_scsi_q { ASC_SCSIQ_1 q1; ASC_SCSIQ_2 q2; uchar *cdbptr; ASC_SG_HEAD *sg_head; ushort remain_sg_entry_cnt; ushort next_sg_index; } ASC_SCSI_Q; typedef struct asc_scsi_bios_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_BIOS_REQ_Q; typedef struct asc_risc_q { uchar fwd; uchar bwd; ASC_SCSIQ_1 i1; ASC_SCSIQ_2 i2; ASC_SCSIQ_3 i3; ASC_SCSIQ_4 i4; } ASC_RISC_Q; typedef struct asc_sg_list_q { uchar seq_no; uchar q_no; uchar cntl; uchar sg_head_qp; uchar sg_list_cnt; uchar sg_cur_list_cnt; } ASC_SG_LIST_Q; typedef struct asc_risc_sg_list_q { uchar fwd; uchar bwd; ASC_SG_LIST_Q sg; ASC_SG_LIST sg_list[7]; } ASC_RISC_SG_LIST_Q; #define ASCQ_ERR_Q_STATUS 0x0D #define ASCQ_ERR_CUR_QNG 0x17 #define ASCQ_ERR_SG_Q_LINKS 0x18 #define ASCQ_ERR_ISR_RE_ENTRY 0x1A #define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B #define ASCQ_ERR_ISR_ON_CRITICAL 0x1C /* * Warning code values are set in ASC_DVC_VAR 'warn_code'. */ #define ASC_WARN_NO_ERROR 0x0000 #define ASC_WARN_IO_PORT_ROTATE 0x0001 #define ASC_WARN_EEPROM_CHKSUM 0x0002 #define ASC_WARN_IRQ_MODIFIED 0x0004 #define ASC_WARN_AUTO_CONFIG 0x0008 #define ASC_WARN_CMD_QNG_CONFLICT 0x0010 #define ASC_WARN_EEPROM_RECOVER 0x0020 #define ASC_WARN_CFG_MSW_RECOVER 0x0040 /* * Error code values are set in {ASC/ADV}_DVC_VAR 'err_code'. 
*/ #define ASC_IERR_NO_CARRIER 0x0001 /* No more carrier memory */ #define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */ #define ASC_IERR_SET_PC_ADDR 0x0004 #define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */ #define ASC_IERR_ILLEGAL_CONNECTION 0x0010 /* Illegal cable connection */ #define ASC_IERR_SINGLE_END_DEVICE 0x0020 /* SE device on DIFF bus */ #define ASC_IERR_REVERSED_CABLE 0x0040 /* Narrow flat cable reversed */ #define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */ #define ASC_IERR_HVD_DEVICE 0x0100 /* HVD device on LVD port */ #define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */ #define ASC_IERR_NO_BUS_TYPE 0x0400 #define ASC_IERR_BIST_PRE_TEST 0x0800 /* BIST pre-test error */ #define ASC_IERR_BIST_RAM_TEST 0x1000 /* BIST RAM test error */ #define ASC_IERR_BAD_CHIPTYPE 0x2000 /* Invalid chip_type setting */ #define ASC_DEF_MAX_TOTAL_QNG (0xF0) #define ASC_MIN_TAG_Q_PER_DVC (0x04) #define ASC_MIN_FREE_Q (0x02) #define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q)) #define ASC_MAX_TOTAL_QNG 240 #define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16 #define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8 #define ASC_MAX_PCI_INRAM_TOTAL_QNG 20 #define ASC_MAX_INRAM_TAG_QNG 16 #define ASC_IOADR_GAP 0x10 #define ASC_SYN_MAX_OFFSET 0x0F #define ASC_DEF_SDTR_OFFSET 0x0F #define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 /* The narrow chip only supports a limited selection of transfer rates. * These are encoded in the range 0..7 or 0..15 depending whether the chip * is Ultra-capable or not. These tables let us convert from one to the other. */ static const unsigned char asc_syn_xfer_period[8] = { 25, 30, 35, 40, 50, 60, 70, 85 }; static const unsigned char asc_syn_ultra_xfer_period[16] = { 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107 }; typedef struct ext_msg { uchar msg_type; uchar msg_len; uchar msg_req; union { struct { uchar sdtr_xfer_period; uchar sdtr_req_ack_offset; } sdtr; struct { uchar wdtr_width; } wdtr; struct { uchar mdp_b3; uchar mdp_b2; uchar mdp_b1; uchar mdp_b0; } mdp; } u_ext_msg; uchar res; } EXT_MSG; #define xfer_period u_ext_msg.sdtr.sdtr_xfer_period #define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset #define wdtr_width u_ext_msg.wdtr.wdtr_width #define mdp_b3 u_ext_msg.mdp_b3 #define mdp_b2 u_ext_msg.mdp_b2 #define mdp_b1 u_ext_msg.mdp_b1 #define mdp_b0 u_ext_msg.mdp_b0 typedef struct asc_dvc_cfg { ASC_SCSI_BIT_ID_TYPE can_tagged_qng; ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled; ASC_SCSI_BIT_ID_TYPE disc_enable; ASC_SCSI_BIT_ID_TYPE sdtr_enable; uchar chip_scsi_id; uchar chip_version; ushort mcode_date; ushort mcode_version; uchar max_tag_qng[ASC_MAX_TID + 1]; uchar sdtr_period_offset[ASC_MAX_TID + 1]; uchar adapter_info[6]; } ASC_DVC_CFG; #define ASC_DEF_DVC_CNTL 0xFFFF #define ASC_DEF_CHIP_SCSI_ID 7 #define ASC_DEF_ISA_DMA_SPEED 4 #define ASC_INIT_STATE_BEG_GET_CFG 0x0001 #define ASC_INIT_STATE_END_GET_CFG 0x0002 #define ASC_INIT_STATE_BEG_SET_CFG 0x0004 #define ASC_INIT_STATE_END_SET_CFG 0x0008 #define ASC_INIT_STATE_BEG_LOAD_MC 0x0010 #define ASC_INIT_STATE_END_LOAD_MC 0x0020 #define ASC_INIT_STATE_BEG_INQUIRY 0x0040 #define ASC_INIT_STATE_END_INQUIRY 0x0080 #define ASC_INIT_RESET_SCSI_DONE 0x0100 #define ASC_INIT_STATE_WITHOUT_EEP 0x8000 #define ASC_BUG_FIX_IF_NOT_DWB 0x0001 #define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 #define ASC_MIN_TAGGED_CMD 7 #define ASC_MAX_SCSI_RESET_WAIT 30 #define ASC_OVERRUN_BSIZE 64 struct asc_dvc_var; /* Forward Declaration. 
*/ typedef struct asc_dvc_var { PortAddr iop_base; ushort err_code; ushort dvc_cntl; ushort bug_fix_cntl; ushort bus_type; ASC_SCSI_BIT_ID_TYPE init_sdtr; ASC_SCSI_BIT_ID_TYPE sdtr_done; ASC_SCSI_BIT_ID_TYPE use_tagged_qng; ASC_SCSI_BIT_ID_TYPE unit_not_ready; ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; ASC_SCSI_BIT_ID_TYPE start_motor; uchar *overrun_buf; dma_addr_t overrun_dma; uchar scsi_reset_wait; uchar chip_no; bool is_in_int; uchar max_total_qng; uchar cur_total_qng; uchar in_critical_cnt; uchar last_q_shortage; ushort init_state; uchar cur_dvc_qng[ASC_MAX_TID + 1]; uchar max_dvc_qng[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1]; const uchar *sdtr_period_tbl; ASC_DVC_CFG *cfg; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always; char redo_scam; ushort res2; uchar dos_int13_table[ASC_MAX_TID + 1]; unsigned int max_dma_count; ASC_SCSI_BIT_ID_TYPE no_scam; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer; uchar min_sdtr_index; uchar max_sdtr_index; struct asc_board *drv_ptr; unsigned int uc_break; } ASC_DVC_VAR; typedef struct asc_dvc_inq_info { uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_DVC_INQ_INFO; typedef struct asc_cap_info { u32 lba; u32 blk_size; } ASC_CAP_INFO; typedef struct asc_cap_info_array { ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_CAP_INFO_ARRAY; #define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001 #define ASC_MCNTL_NULL_TARGET (ushort)0x0002 #define ASC_CNTL_INITIATOR (ushort)0x0001 #define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002 #define ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004 #define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008 #define ASC_CNTL_NO_SCAM (ushort)0x0010 #define ASC_CNTL_INT_MULTI_Q (ushort)0x0080 #define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040 #define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100 #define ASC_CNTL_RESET_SCSI (ushort)0x0200 #define ASC_CNTL_INIT_INQUIRY (ushort)0x0400 #define ASC_CNTL_INIT_VERBOSE (ushort)0x0800 #define ASC_CNTL_SCSI_PARITY (ushort)0x1000 #define ASC_CNTL_BURST_MODE (ushort)0x2000 #define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000 #define ASC_EEP_DVC_CFG_BEG_VL 2 #define ASC_EEP_MAX_DVC_ADDR_VL 15 #define ASC_EEP_DVC_CFG_BEG 32 #define ASC_EEP_MAX_DVC_ADDR 45 #define ASC_EEP_MAX_RETRY 20 /* * These macros keep the chip SCSI id bitfields in board order. C bitfields * aren't portable between big and little-endian platforms so they are not used. 
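 *
 * The 'id_speed' byte packs the chip SCSI ID in the low nibble and the
 * ISA DMA speed in the high nibble. A minimal sketch of how the macros
 * below are used (illustrative only, not part of the original driver;
 * 'eep_config' and the example value are placeholders):
 *
 *	eep_config.id_speed = 0x47;
 *	id  = ASC_EEP_GET_CHIP_ID(&eep_config);	yields 0x07
 *	spd = ASC_EEP_GET_DMA_SPD(&eep_config);	yields 0x04
 *	ASC_EEP_SET_CHIP_ID(&eep_config, 5);	low nibble becomes 0x05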
*/ #define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f) #define ASC_EEP_GET_DMA_SPD(cfg) (((cfg)->id_speed & 0xf0) >> 4) #define ASC_EEP_SET_CHIP_ID(cfg, sid) \ ((cfg)->id_speed = ((cfg)->id_speed & 0xf0) | ((sid) & ASC_MAX_TID)) #define ASC_EEP_SET_DMA_SPD(cfg, spd) \ ((cfg)->id_speed = ((cfg)->id_speed & 0x0f) | ((spd) & 0x0f) << 4) typedef struct asceep_config { ushort cfg_lsw; ushort cfg_msw; uchar init_sdtr; uchar disc_enable; uchar use_cmd_qng; uchar start_motor; uchar max_total_qng; uchar max_tag_qng; uchar bios_scan; uchar power_up_wait; uchar no_scam; uchar id_speed; /* low order 4 bits is chip scsi id */ /* high order 4 bits is isa dma speed */ uchar dos_int13_table[ASC_MAX_TID + 1]; uchar adapter_info[6]; ushort cntl; ushort chksum; } ASCEEP_CONFIG; #define ASC_EEP_CMD_READ 0x80 #define ASC_EEP_CMD_WRITE 0x40 #define ASC_EEP_CMD_WRITE_ABLE 0x30 #define ASC_EEP_CMD_WRITE_DISABLE 0x00 #define ASCV_MSGOUT_BEG 0x0000 #define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3) #define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4) #define ASCV_BREAK_SAVED_CODE (ushort)0x0006 #define ASCV_MSGIN_BEG (ASCV_MSGOUT_BEG+8) #define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3) #define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4) #define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8) #define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8) #define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020 #define ASCV_BREAK_ADDR (ushort)0x0028 #define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A #define ASCV_BREAK_CONTROL (ushort)0x002C #define ASCV_BREAK_HIT_COUNT (ushort)0x002E #define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030 #define ASCV_MCODE_CHKSUM_W (ushort)0x0032 #define ASCV_MCODE_SIZE_W (ushort)0x0034 #define ASCV_STOP_CODE_B (ushort)0x0036 #define ASCV_DVC_ERR_CODE_B (ushort)0x0037 #define ASCV_OVERRUN_PADDR_D (ushort)0x0038 #define ASCV_OVERRUN_BSIZE_D (ushort)0x003C #define ASCV_HALTCODE_W (ushort)0x0040 #define ASCV_CHKSUM_W (ushort)0x0042 #define ASCV_MC_DATE_W (ushort)0x0044 #define ASCV_MC_VER_W (ushort)0x0046 #define ASCV_NEXTRDY_B (ushort)0x0048 #define ASCV_DONENEXT_B (ushort)0x0049 #define ASCV_USE_TAGGED_QNG_B (ushort)0x004A #define ASCV_SCSIBUSY_B (ushort)0x004B #define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C #define ASCV_CURCDB_B (ushort)0x004D #define ASCV_RCLUN_B (ushort)0x004E #define ASCV_BUSY_QHEAD_B (ushort)0x004F #define ASCV_DISC1_QHEAD_B (ushort)0x0050 #define ASCV_DISC_ENABLE_B (ushort)0x0052 #define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053 #define ASCV_HOSTSCSI_ID_B (ushort)0x0055 #define ASCV_MCODE_CNTL_B (ushort)0x0056 #define ASCV_NULL_TARGET_B (ushort)0x0057 #define ASCV_FREE_Q_HEAD_W (ushort)0x0058 #define ASCV_DONE_Q_TAIL_W (ushort)0x005A #define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1) #define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1) #define ASCV_HOST_FLAG_B (ushort)0x005D #define ASCV_TOTAL_READY_Q_B (ushort)0x0064 #define ASCV_VER_SERIAL_B (ushort)0x0065 #define ASCV_HALTCODE_SAVED_W (ushort)0x0066 #define ASCV_WTM_FLAG_B (ushort)0x0068 #define ASCV_RISC_FLAG_B (ushort)0x006A #define ASCV_REQ_SG_LIST_QP (ushort)0x006B #define ASC_HOST_FLAG_IN_ISR 0x01 #define ASC_HOST_FLAG_ACK_INT 0x02 #define ASC_RISC_FLAG_GEN_INT 0x01 #define ASC_RISC_FLAG_REQ_SG_LIST 0x02 #define IOP_CTRL (0x0F) #define IOP_STATUS (0x0E) #define IOP_INT_ACK IOP_STATUS #define IOP_REG_IFC (0x0D) #define IOP_SYN_OFFSET (0x0B) #define IOP_EXTRA_CONTROL (0x0D) #define IOP_REG_PC (0x0C) #define IOP_RAM_ADDR (0x0A) #define IOP_RAM_DATA (0x08) #define IOP_EEP_DATA (0x06) #define IOP_EEP_CMD (0x07) #define IOP_VERSION (0x03) #define 
IOP_CONFIG_HIGH (0x04) #define IOP_CONFIG_LOW (0x02) #define IOP_SIG_BYTE (0x01) #define IOP_SIG_WORD (0x00) #define IOP_REG_DC1 (0x0E) #define IOP_REG_DC0 (0x0C) #define IOP_REG_SB (0x0B) #define IOP_REG_DA1 (0x0A) #define IOP_REG_DA0 (0x08) #define IOP_REG_SC (0x09) #define IOP_DMA_SPEED (0x07) #define IOP_REG_FLAG (0x07) #define IOP_FIFO_H (0x06) #define IOP_FIFO_L (0x04) #define IOP_REG_ID (0x05) #define IOP_REG_QP (0x03) #define IOP_REG_IH (0x02) #define IOP_REG_IX (0x01) #define IOP_REG_AX (0x00) #define IFC_REG_LOCK (0x00) #define IFC_REG_UNLOCK (0x09) #define IFC_WR_EN_FILTER (0x10) #define IFC_RD_NO_EEPROM (0x10) #define IFC_SLEW_RATE (0x20) #define IFC_ACT_NEG (0x40) #define IFC_INP_FILTER (0x80) #define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK) #define SC_SEL (uchar)(0x80) #define SC_BSY (uchar)(0x40) #define SC_ACK (uchar)(0x20) #define SC_REQ (uchar)(0x10) #define SC_ATN (uchar)(0x08) #define SC_IO (uchar)(0x04) #define SC_CD (uchar)(0x02) #define SC_MSG (uchar)(0x01) #define SEC_SCSI_CTL (uchar)(0x80) #define SEC_ACTIVE_NEGATE (uchar)(0x40) #define SEC_SLEW_RATE (uchar)(0x20) #define SEC_ENABLE_FILTER (uchar)(0x10) #define ASC_HALT_EXTMSG_IN (ushort)0x8000 #define ASC_HALT_CHK_CONDITION (ushort)0x8100 #define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200 #define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300 #define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400 #define ASC_HALT_SDTR_REJECTED (ushort)0x4000 #define ASC_HALT_HOST_COPY_SG_LIST_TO_RISC ( ushort )0x2000 #define ASC_MAX_QNO 0xF8 #define ASC_DATA_SEC_BEG (ushort)0x0080 #define ASC_DATA_SEC_END (ushort)0x0080 #define ASC_CODE_SEC_BEG (ushort)0x0080 #define ASC_CODE_SEC_END (ushort)0x0080 #define ASC_QADR_BEG (0x4000) #define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64) #define ASC_QADR_END (ushort)0x7FFF #define ASC_QLAST_ADR (ushort)0x7FC0 #define ASC_QBLK_SIZE 0x40 #define ASC_BIOS_DATA_QBEG 0xF8 #define ASC_MIN_ACTIVE_QNO 0x01 #define ASC_QLINK_END 0xFF #define ASC_EEPROM_WORDS 0x10 #define ASC_MAX_MGS_LEN 0x10 #define ASC_BIOS_ADDR_DEF 0xDC00 #define ASC_BIOS_SIZE 0x3800 #define ASC_BIOS_RAM_OFF 0x3800 #define ASC_BIOS_RAM_SIZE 0x800 #define ASC_BIOS_MIN_ADDR 0xC000 #define ASC_BIOS_MAX_ADDR 0xEC00 #define ASC_BIOS_BANK_SIZE 0x0400 #define ASC_MCODE_START_ADDR 0x0080 #define ASC_CFG0_HOST_INT_ON 0x0020 #define ASC_CFG0_BIOS_ON 0x0040 #define ASC_CFG0_VERA_BURST_ON 0x0080 #define ASC_CFG0_SCSI_PARITY_ON 0x0800 #define ASC_CFG1_SCSI_TARGET_ON 0x0080 #define ASC_CFG1_LRAM_8BITS_ON 0x0800 #define ASC_CFG_MSW_CLR_MASK 0x3080 #define CSW_TEST1 (ASC_CS_TYPE)0x8000 #define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000 #define CSW_RESERVED1 (ASC_CS_TYPE)0x2000 #define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000 #define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800 #define CSW_TEST2 (ASC_CS_TYPE)0x0400 #define CSW_TEST3 (ASC_CS_TYPE)0x0200 #define CSW_RESERVED2 (ASC_CS_TYPE)0x0100 #define CSW_DMA_DONE (ASC_CS_TYPE)0x0080 #define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040 #define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020 #define CSW_HALTED (ASC_CS_TYPE)0x0010 #define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008 #define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004 #define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002 #define CSW_INT_PENDING (ASC_CS_TYPE)0x0001 #define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000 #define CIW_INT_ACK (ASC_CS_TYPE)0x0100 #define CIW_TEST1 (ASC_CS_TYPE)0x0200 #define CIW_TEST2 (ASC_CS_TYPE)0x0400 #define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800 #define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000 #define CC_CHIP_RESET (uchar)0x80 #define CC_SCSI_RESET (uchar)0x40 #define 
CC_HALT (uchar)0x20 #define CC_SINGLE_STEP (uchar)0x10 #define CC_DMA_ABLE (uchar)0x08 #define CC_TEST (uchar)0x04 #define CC_BANK_ONE (uchar)0x02 #define CC_DIAG (uchar)0x01 #define ASC_1000_ID0W 0x04C1 #define ASC_1000_ID0W_FIX 0x00C1 #define ASC_1000_ID1B 0x25 #define ASC_EISA_REV_IOP_MASK (0x0C83) #define ASC_EISA_CFG_IOP_MASK (0x0C86) #define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000) #define INS_HALTINT (ushort)0x6281 #define INS_HALT (ushort)0x6280 #define INS_SINT (ushort)0x6200 #define INS_RFLAG_WTM (ushort)0x7380 #define ASC_MC_SAVE_CODE_WSIZE 0x500 #define ASC_MC_SAVE_DATA_WSIZE 0x40 typedef struct asc_mc_saved { ushort data[ASC_MC_SAVE_DATA_WSIZE]; ushort code[ASC_MC_SAVE_CODE_WSIZE]; } ASC_MC_SAVED; #define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B) #define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val) #define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W) #define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W) #define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val) #define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val) #define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B) #define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B) #define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val) #define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val) #define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id), (data)) #define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id)) #define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id), data) #define AscGetMCodeInitSDTRAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id)) #define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE) #define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD) #define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION) #define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW) #define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH) #define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data) #define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data) #define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD) #define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data) #define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA) #define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data) #define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR)) #define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr) #define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA) #define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data) #define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC) #define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data) #define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS) #define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val) #define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL) #define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val) #define AscGetChipSyn(port) 
(uchar)inp((port)+IOP_SYN_OFFSET)
#define AscSetChipSyn(port, data)        outp((port)+IOP_SYN_OFFSET, data)
#define AscSetPCAddr(port, data)         outpw((port)+IOP_REG_PC, data)
#define AscGetPCAddr(port)               (ushort)inpw((port)+IOP_REG_PC)
#define AscIsIntPending(port)            (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH))
#define AscGetChipScsiID(port)           ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID)
#define AscGetExtraControl(port)         (uchar)inp((port)+IOP_EXTRA_CONTROL)
#define AscSetExtraControl(port, data)   outp((port)+IOP_EXTRA_CONTROL, data)
#define AscReadChipAX(port)              (ushort)inpw((port)+IOP_REG_AX)
#define AscWriteChipAX(port, data)       outpw((port)+IOP_REG_AX, data)
#define AscReadChipIX(port)              (uchar)inp((port)+IOP_REG_IX)
#define AscWriteChipIX(port, data)       outp((port)+IOP_REG_IX, data)
#define AscReadChipIH(port)              (ushort)inpw((port)+IOP_REG_IH)
#define AscWriteChipIH(port, data)       outpw((port)+IOP_REG_IH, data)
#define AscReadChipQP(port)              (uchar)inp((port)+IOP_REG_QP)
#define AscWriteChipQP(port, data)       outp((port)+IOP_REG_QP, data)
#define AscReadChipFIFO_L(port)          (ushort)inpw((port)+IOP_FIFO_L)
#define AscWriteChipFIFO_L(port, data)   outpw((port)+IOP_FIFO_L, data)
#define AscReadChipFIFO_H(port)          (ushort)inpw((port)+IOP_FIFO_H)
#define AscWriteChipFIFO_H(port, data)   outpw((port)+IOP_FIFO_H, data)
#define AscReadChipDmaSpeed(port)        (uchar)inp((port)+IOP_DMA_SPEED)
#define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data)
#define AscReadChipDA0(port)             (ushort)inpw((port)+IOP_REG_DA0)
#define AscWriteChipDA0(port, data)      outpw((port)+IOP_REG_DA0, data)
#define AscReadChipDA1(port)             (ushort)inpw((port)+IOP_REG_DA1)
#define AscWriteChipDA1(port, data)      outpw((port)+IOP_REG_DA1, data)
#define AscReadChipDC0(port)             (ushort)inpw((port)+IOP_REG_DC0)
#define AscWriteChipDC0(port, data)      outpw((port)+IOP_REG_DC0, data)
#define AscReadChipDC1(port)             (ushort)inpw((port)+IOP_REG_DC1)
#define AscWriteChipDC1(port, data)      outpw((port)+IOP_REG_DC1, data)
#define AscReadChipDvcID(port)           (uchar)inp((port)+IOP_REG_ID)
#define AscWriteChipDvcID(port, data)    outp((port)+IOP_REG_ID, data)
#define AdvPortAddr void __iomem *	/* Virtual memory address size */
/*
 * Define Adv Library required memory access macros.
 */
#define ADV_MEM_READB(addr) readb(addr)
#define ADV_MEM_READW(addr) readw(addr)
#define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr)
#define ADV_MEM_WRITEW(addr, word) writew(word, addr)
#define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr)
/*
 * Define total number of simultaneous maximum element scatter-gather
 * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the
 * maximum number of outstanding commands per wide host adapter. Each
 * command uses one or more ADV_SG_BLOCK each with 15 scatter-gather
 * elements. Allow each command to have at least one ADV_SG_BLOCK structure.
 * This allows about 15 commands to have the maximum 17 ADV_SG_BLOCK
 * structures or 255 scatter-gather elements.
 */
#define ADV_TOT_SG_BLOCK ASC_DEF_MAX_HOST_QNG
/*
 * Define maximum number of scatter-gather elements per request.
 */
#define ADV_MAX_SG_LIST 255
#define NO_OF_SG_PER_BLOCK 15
#define ADV_EEP_DVC_CFG_BEGIN (0x00)
#define ADV_EEP_DVC_CFG_END (0x15)
#define ADV_EEP_DVC_CTL_BEGIN (0x16)	/* location of OEM name */
#define ADV_EEP_MAX_WORD_ADDR (0x1E)
#define ADV_EEP_DELAY_MS 100
#define ADV_EEPROM_BIG_ENDIAN 0x8000	/* EEPROM Bit 15 */
#define ADV_EEPROM_BIOS_ENABLE 0x4000	/* EEPROM Bit 14 */
/*
 * For the ASC3550 Bit 13 is Termination Polarity control bit.
* For later ICs Bit 13 controls whether the CIS (Card Information * Service Section) is loaded from EEPROM. */ #define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */ #define ADV_EEPROM_CIS_LD 0x2000 /* EEPROM Bit 13 */ /* * ASC38C1600 Bit 11 * * If EEPROM Bit 11 is 0 for Function 0, then Function 0 will specify * INT A in the PCI Configuration Space Int Pin field. If it is 1, then * Function 0 will specify INT B. * * If EEPROM Bit 11 is 0 for Function 1, then Function 1 will specify * INT B in the PCI Configuration Space Int Pin field. If it is 1, then * Function 1 will specify INT A. */ #define ADV_EEPROM_INTAB 0x0800 /* EEPROM Bit 11 */ typedef struct adveep_3550_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Term Polarity Control */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_able; /* 04 Synchronous DTR able */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar reserved1; /* reserved byte (not used) */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort ultra_able; /* 13 ULTRA speed able */ ushort reserved2; /* 14 reserved */ uchar max_host_qng; /* 15 maximum host queuing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort bug_fix; /* 17 control bit for bug fix */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. 
driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort num_of_err; /* 36 number of error */ } ADVEEP_3550_CONFIG; typedef struct adveep_38C0800_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. 
driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ ushort reserved63; /* 63 reserved */ } ADVEEP_38C0800_CONFIG; typedef struct adveep_38C1600_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 11 set - Func. 0 INTB, Func. 1 INTA */ /* clear - Func. 0 INTA, Func. 1 INTB */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 Basic Integrity Checking disabled */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 AIPP (Asyn. Info. Ph. Prot.) dis. 
*/ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ ushort reserved63; /* 63 reserved */ } ADVEEP_38C1600_CONFIG; /* * EEPROM Commands */ #define ASC_EEP_CMD_DONE 0x0200 /* bios_ctrl */ #define BIOS_CTRL_BIOS 0x0001 #define BIOS_CTRL_EXTENDED_XLAT 0x0002 #define BIOS_CTRL_GT_2_DISK 0x0004 #define BIOS_CTRL_BIOS_REMOVABLE 0x0008 #define BIOS_CTRL_BOOTABLE_CD 0x0010 #define BIOS_CTRL_MULTIPLE_LUN 0x0040 #define BIOS_CTRL_DISPLAY_MSG 0x0080 #define BIOS_CTRL_NO_SCAM 0x0100 #define BIOS_CTRL_RESET_SCSI_BUS 0x0200 #define BIOS_CTRL_INIT_VERBOSE 0x0800 #define BIOS_CTRL_SCSI_PARITY 0x1000 #define BIOS_CTRL_AIPP_DIS 0x2000 #define ADV_3550_MEMSIZE 0x2000 /* 8 KB Internal Memory */ #define ADV_38C0800_MEMSIZE 0x4000 /* 16 KB Internal Memory */ /* * XXX - Since ASC38C1600 Rev.3 has a local RAM failure issue, there is * a special 16K Adv Library and Microcode version. After the issue is * resolved, should restore 32K support. * * #define ADV_38C1600_MEMSIZE 0x8000L * 32 KB Internal Memory * */ #define ADV_38C1600_MEMSIZE 0x4000 /* 16 KB Internal Memory */ /* * Byte I/O register address from base of 'iop_base'. 
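 *
 * Usage sketch (illustrative only, not part of the original driver;
 * 'chip_rev' is a placeholder): byte registers are accessed by adding
 * an IOPB_* offset to the memory-mapped 'iop_base' through the
 * AdvReadByteRegister()/AdvWriteByteRegister() macros defined further
 * below, e.g.
 *
 *	chip_rev = AdvReadByteRegister(iop_base, IOPB_CHIP_TYPE_REV);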
*/ #define IOPB_INTR_STATUS_REG 0x00 #define IOPB_CHIP_ID_1 0x01 #define IOPB_INTR_ENABLES 0x02 #define IOPB_CHIP_TYPE_REV 0x03 #define IOPB_RES_ADDR_4 0x04 #define IOPB_RES_ADDR_5 0x05 #define IOPB_RAM_DATA 0x06 #define IOPB_RES_ADDR_7 0x07 #define IOPB_FLAG_REG 0x08 #define IOPB_RES_ADDR_9 0x09 #define IOPB_RISC_CSR 0x0A #define IOPB_RES_ADDR_B 0x0B #define IOPB_RES_ADDR_C 0x0C #define IOPB_RES_ADDR_D 0x0D #define IOPB_SOFT_OVER_WR 0x0E #define IOPB_RES_ADDR_F 0x0F #define IOPB_MEM_CFG 0x10 #define IOPB_RES_ADDR_11 0x11 #define IOPB_GPIO_DATA 0x12 #define IOPB_RES_ADDR_13 0x13 #define IOPB_FLASH_PAGE 0x14 #define IOPB_RES_ADDR_15 0x15 #define IOPB_GPIO_CNTL 0x16 #define IOPB_RES_ADDR_17 0x17 #define IOPB_FLASH_DATA 0x18 #define IOPB_RES_ADDR_19 0x19 #define IOPB_RES_ADDR_1A 0x1A #define IOPB_RES_ADDR_1B 0x1B #define IOPB_RES_ADDR_1C 0x1C #define IOPB_RES_ADDR_1D 0x1D #define IOPB_RES_ADDR_1E 0x1E #define IOPB_RES_ADDR_1F 0x1F #define IOPB_DMA_CFG0 0x20 #define IOPB_DMA_CFG1 0x21 #define IOPB_TICKLE 0x22 #define IOPB_DMA_REG_WR 0x23 #define IOPB_SDMA_STATUS 0x24 #define IOPB_SCSI_BYTE_CNT 0x25 #define IOPB_HOST_BYTE_CNT 0x26 #define IOPB_BYTE_LEFT_TO_XFER 0x27 #define IOPB_BYTE_TO_XFER_0 0x28 #define IOPB_BYTE_TO_XFER_1 0x29 #define IOPB_BYTE_TO_XFER_2 0x2A #define IOPB_BYTE_TO_XFER_3 0x2B #define IOPB_ACC_GRP 0x2C #define IOPB_RES_ADDR_2D 0x2D #define IOPB_DEV_ID 0x2E #define IOPB_RES_ADDR_2F 0x2F #define IOPB_SCSI_DATA 0x30 #define IOPB_RES_ADDR_31 0x31 #define IOPB_RES_ADDR_32 0x32 #define IOPB_SCSI_DATA_HSHK 0x33 #define IOPB_SCSI_CTRL 0x34 #define IOPB_RES_ADDR_35 0x35 #define IOPB_RES_ADDR_36 0x36 #define IOPB_RES_ADDR_37 0x37 #define IOPB_RAM_BIST 0x38 #define IOPB_PLL_TEST 0x39 #define IOPB_PCI_INT_CFG 0x3A #define IOPB_RES_ADDR_3B 0x3B #define IOPB_RFIFO_CNT 0x3C #define IOPB_RES_ADDR_3D 0x3D #define IOPB_RES_ADDR_3E 0x3E #define IOPB_RES_ADDR_3F 0x3F /* * Word I/O register address from base of 'iop_base'. */ #define IOPW_CHIP_ID_0 0x00 /* CID0 */ #define IOPW_CTRL_REG 0x02 /* CC */ #define IOPW_RAM_ADDR 0x04 /* LA */ #define IOPW_RAM_DATA 0x06 /* LD */ #define IOPW_RES_ADDR_08 0x08 #define IOPW_RISC_CSR 0x0A /* CSR */ #define IOPW_SCSI_CFG0 0x0C /* CFG0 */ #define IOPW_SCSI_CFG1 0x0E /* CFG1 */ #define IOPW_RES_ADDR_10 0x10 #define IOPW_SEL_MASK 0x12 /* SM */ #define IOPW_RES_ADDR_14 0x14 #define IOPW_FLASH_ADDR 0x16 /* FA */ #define IOPW_RES_ADDR_18 0x18 #define IOPW_EE_CMD 0x1A /* EC */ #define IOPW_EE_DATA 0x1C /* ED */ #define IOPW_SFIFO_CNT 0x1E /* SFC */ #define IOPW_RES_ADDR_20 0x20 #define IOPW_Q_BASE 0x22 /* QB */ #define IOPW_QP 0x24 /* QP */ #define IOPW_IX 0x26 /* IX */ #define IOPW_SP 0x28 /* SP */ #define IOPW_PC 0x2A /* PC */ #define IOPW_RES_ADDR_2C 0x2C #define IOPW_RES_ADDR_2E 0x2E #define IOPW_SCSI_DATA 0x30 /* SD */ #define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */ #define IOPW_SCSI_CTRL 0x34 /* SC */ #define IOPW_HSHK_CFG 0x36 /* HCFG */ #define IOPW_SXFR_STATUS 0x36 /* SXS */ #define IOPW_SXFR_CNTL 0x38 /* SXL */ #define IOPW_SXFR_CNTH 0x3A /* SXH */ #define IOPW_RES_ADDR_3C 0x3C #define IOPW_RFIFO_DATA 0x3E /* RFD */ /* * Doubleword I/O register address from base of 'iop_base'. 
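 *
 * Usage sketch (illustrative only, not part of the original driver):
 * doubleword registers are written by adding an IOPDW_* offset to
 * 'iop_base' through the AdvWriteDWordRegister() macro defined further
 * below, e.g. clearing one of the RISC DMA address registers:
 *
 *	AdvWriteDWordRegister(iop_base, IOPDW_RDMA_ADDR0, 0);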
*/ #define IOPDW_RES_ADDR_0 0x00 #define IOPDW_RAM_DATA 0x04 #define IOPDW_RES_ADDR_8 0x08 #define IOPDW_RES_ADDR_C 0x0C #define IOPDW_RES_ADDR_10 0x10 #define IOPDW_COMMA 0x14 #define IOPDW_COMMB 0x18 #define IOPDW_RES_ADDR_1C 0x1C #define IOPDW_SDMA_ADDR0 0x20 #define IOPDW_SDMA_ADDR1 0x24 #define IOPDW_SDMA_COUNT 0x28 #define IOPDW_SDMA_ERROR 0x2C #define IOPDW_RDMA_ADDR0 0x30 #define IOPDW_RDMA_ADDR1 0x34 #define IOPDW_RDMA_COUNT 0x38 #define IOPDW_RDMA_ERROR 0x3C #define ADV_CHIP_ID_BYTE 0x25 #define ADV_CHIP_ID_WORD 0x04C1 #define ADV_INTR_ENABLE_HOST_INTR 0x01 #define ADV_INTR_ENABLE_SEL_INTR 0x02 #define ADV_INTR_ENABLE_DPR_INTR 0x04 #define ADV_INTR_ENABLE_RTA_INTR 0x08 #define ADV_INTR_ENABLE_RMA_INTR 0x10 #define ADV_INTR_ENABLE_RST_INTR 0x20 #define ADV_INTR_ENABLE_DPE_INTR 0x40 #define ADV_INTR_ENABLE_GLOBAL_INTR 0x80 #define ADV_INTR_STATUS_INTRA 0x01 #define ADV_INTR_STATUS_INTRB 0x02 #define ADV_INTR_STATUS_INTRC 0x04 #define ADV_RISC_CSR_STOP (0x0000) #define ADV_RISC_TEST_COND (0x2000) #define ADV_RISC_CSR_RUN (0x4000) #define ADV_RISC_CSR_SINGLE_STEP (0x8000) #define ADV_CTRL_REG_HOST_INTR 0x0100 #define ADV_CTRL_REG_SEL_INTR 0x0200 #define ADV_CTRL_REG_DPR_INTR 0x0400 #define ADV_CTRL_REG_RTA_INTR 0x0800 #define ADV_CTRL_REG_RMA_INTR 0x1000 #define ADV_CTRL_REG_RES_BIT14 0x2000 #define ADV_CTRL_REG_DPE_INTR 0x4000 #define ADV_CTRL_REG_POWER_DONE 0x8000 #define ADV_CTRL_REG_ANY_INTR 0xFF00 #define ADV_CTRL_REG_CMD_RESET 0x00C6 #define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5 #define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4 #define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3 #define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2 #define ADV_TICKLE_NOP 0x00 #define ADV_TICKLE_A 0x01 #define ADV_TICKLE_B 0x02 #define ADV_TICKLE_C 0x03 #define AdvIsIntPending(port) \ (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR) /* * SCSI_CFG0 Register bit definitions */ #define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. */ #define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */ #define EVEN_PARITY 0x1000 /* Select Even Parity */ #define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */ #define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */ #define PRIM_MODE 0x0100 /* Primitive SCSI mode */ #define SCAM_EN 0x0080 /* Enable SCAM selection */ #define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */ #define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */ #define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */ #define OUR_ID 0x000F /* SCSI ID */ /* * SCSI_CFG1 Register bit definitions */ #define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */ #define TERM_POL 0x2000 /* Terminator Polarity Ctrl. 
MIO:13, EEP:13 */ #define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */ #define FILTER_SEL 0x0C00 /* Filter Period Selection */ #define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */ #define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */ #define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */ #define ACTIVE_DBL 0x0200 /* Disable Active Negation */ #define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */ #define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */ #define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */ #define TERM_CTL 0x0030 /* External SCSI Termination Bits */ #define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */ #define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */ #define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */ /* * Addendum for ASC-38C0800 Chip * * The ASC-38C1600 Chip uses the same definitions except that the * bus mode override bits [12:10] have been moved to byte register * offset 0xE (IOPB_SOFT_OVER_WR) bits [12:10]. The [12:10] bits in * SCSI_CFG1 are read-only and always available. Bit 14 (DIS_TERM_DRV) * is not needed. The [12:10] bits in IOPB_SOFT_OVER_WR are write-only. * Also each ASC-38C1600 function or channel uses only cable bits [5:4] * and [1:0]. Bits [14], [7:6], [3:2] are unused. */ #define DIS_TERM_DRV 0x4000 /* 1: Read c_det[3:0], 0: cannot read */ #define HVD_LVD_SE 0x1C00 /* Device Detect Bits */ #define HVD 0x1000 /* HVD Device Detect */ #define LVD 0x0800 /* LVD Device Detect */ #define SE 0x0400 /* SE Device Detect */ #define TERM_LVD 0x00C0 /* LVD Termination Bits */ #define TERM_LVD_HI 0x0080 /* Enable LVD Upper Termination */ #define TERM_LVD_LO 0x0040 /* Enable LVD Lower Termination */ #define TERM_SE 0x0030 /* SE Termination Bits */ #define TERM_SE_HI 0x0020 /* Enable SE Upper Termination */ #define TERM_SE_LO 0x0010 /* Enable SE Lower Termination */ #define C_DET_LVD 0x000C /* LVD Cable Detect Bits */ #define C_DET3 0x0008 /* Cable Detect for LVD External Wide */ #define C_DET2 0x0004 /* Cable Detect for LVD Internal Wide */ #define C_DET_SE 0x0003 /* SE Cable Detect Bits */ #define C_DET1 0x0002 /* Cable Detect for SE Internal Wide */ #define C_DET0 0x0001 /* Cable Detect for SE Internal Narrow */ #define CABLE_ILLEGAL_A 0x7 /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */ #define CABLE_ILLEGAL_B 0xB /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */ /* * MEM_CFG Register bit definitions */ #define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */ #define FAST_EE_CLK 0x20 /* Diagnostic Bit */ #define RAM_SZ 0x1C /* Specify size of RAM to RISC */ #define RAM_SZ_2KB 0x00 /* 2 KB */ #define RAM_SZ_4KB 0x04 /* 4 KB */ #define RAM_SZ_8KB 0x08 /* 8 KB */ #define RAM_SZ_16KB 0x0C /* 16 KB */ #define RAM_SZ_32KB 0x10 /* 32 KB */ #define RAM_SZ_64KB 0x14 /* 64 KB */ /* * DMA_CFG0 Register bit definitions * * This register is only accessible to the host. 
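 *
 * Sketch of how the fields below combine (illustrative only, not part of
 * the original driver): the power-up defaults noted in the comments, an
 * 80 byte FIFO threshold, start on threshold and Memory Read Multiple,
 * correspond to the value
 *
 *	FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM
 *
 * written to the IOPB_DMA_CFG0 byte register.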
*/ #define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */ #define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */ #define FIFO_THRESH_16B 0x00 /* 16 bytes */ #define FIFO_THRESH_32B 0x20 /* 32 bytes */ #define FIFO_THRESH_48B 0x30 /* 48 bytes */ #define FIFO_THRESH_64B 0x40 /* 64 bytes */ #define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */ #define FIFO_THRESH_96B 0x60 /* 96 bytes */ #define FIFO_THRESH_112B 0x70 /* 112 bytes */ #define START_CTL 0x0C /* DMA start conditions */ #define START_CTL_TH 0x00 /* Wait threshold level (default) */ #define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */ #define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */ #define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */ #define READ_CMD 0x03 /* Memory Read Method */ #define READ_CMD_MR 0x00 /* Memory Read */ #define READ_CMD_MRL 0x02 /* Memory Read Long */ #define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */ /* * ASC-38C0800 RAM BIST Register bit definitions */ #define RAM_TEST_MODE 0x80 #define PRE_TEST_MODE 0x40 #define NORMAL_MODE 0x00 #define RAM_TEST_DONE 0x10 #define RAM_TEST_STATUS 0x0F #define RAM_TEST_HOST_ERROR 0x08 #define RAM_TEST_INTRAM_ERROR 0x04 #define RAM_TEST_RISC_ERROR 0x02 #define RAM_TEST_SCSI_ERROR 0x01 #define RAM_TEST_SUCCESS 0x00 #define PRE_TEST_VALUE 0x05 #define NORMAL_VALUE 0x00 /* * ASC38C1600 Definitions * * IOPB_PCI_INT_CFG Bit Field Definitions */ #define INTAB_LD 0x80 /* Value loaded from EEPROM Bit 11. */ /* * Bit 1 can be set to change the interrupt for the Function to operate in * Totem Pole mode. By default Bit 1 is 0 and the interrupt operates in * Open Drain mode. Both functions of the ASC38C1600 must be set to the same * mode, otherwise the operating mode is undefined. */ #define TOTEMPOLE 0x02 /* * Bit 0 can be used to change the Int Pin for the Function. The value is * 0 by default for both Functions with Function 0 using INT A and Function * B using INT B. For Function 0 if set, INT B is used. For Function 1 if set, * INT A is used. * * EEPROM Word 0 Bit 11 for each Function may change the initial Int Pin * value specified in the PCI Configuration Space. */ #define INTAB 0x01 /* * Adv Library Status Definitions */ #define ADV_TRUE 1 #define ADV_FALSE 0 #define ADV_SUCCESS 1 #define ADV_BUSY 0 #define ADV_ERROR (-1) /* * ADV_DVC_VAR 'warn_code' values */ #define ASC_WARN_BUSRESET_ERROR 0x0001 /* SCSI Bus Reset error */ #define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */ #define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */ #define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */ #define ADV_MAX_TID 15 /* max. target identifier */ #define ADV_MAX_LUN 7 /* max. logical unit number */ /* * Fixed locations of microcode operating variables. 
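 *
 * Usage sketch (illustrative only, not part of the original driver;
 * 'mcode_version' is a placeholder): these LRAM addresses are accessed
 * with the AdvReadWordLram()/AdvWriteWordLram() macros defined further
 * below, e.g. fetching the microcode version word:
 *
 *	AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, mcode_version);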
*/ #define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */ #define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */ #define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */ #define ASC_MC_VERSION_DATE 0x0038 /* microcode version */ #define ASC_MC_VERSION_NUM 0x003A /* microcode number */ #define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */ #define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */ #define ASC_MC_BIOS_SIGNATURE 0x0058 /* BIOS Signature 0x55AA */ #define ASC_MC_BIOS_VERSION 0x005A /* BIOS Version (2 bytes) */ #define ASC_MC_SDTR_SPEED1 0x0090 /* SDTR Speed for TID 0-3 */ #define ASC_MC_SDTR_SPEED2 0x0092 /* SDTR Speed for TID 4-7 */ #define ASC_MC_SDTR_SPEED3 0x0094 /* SDTR Speed for TID 8-11 */ #define ASC_MC_SDTR_SPEED4 0x0096 /* SDTR Speed for TID 12-15 */ #define ASC_MC_CHIP_TYPE 0x009A #define ASC_MC_INTRB_CODE 0x009B #define ASC_MC_WDTR_ABLE 0x009C #define ASC_MC_SDTR_ABLE 0x009E #define ASC_MC_TAGQNG_ABLE 0x00A0 #define ASC_MC_DISC_ENABLE 0x00A2 #define ASC_MC_IDLE_CMD_STATUS 0x00A4 #define ASC_MC_IDLE_CMD 0x00A6 #define ASC_MC_IDLE_CMD_PARAMETER 0x00A8 #define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC #define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE #define ASC_MC_DEFAULT_MEM_CFG 0x00B0 #define ASC_MC_DEFAULT_SEL_MASK 0x00B2 #define ASC_MC_SDTR_DONE 0x00B6 #define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0 #define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0 #define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100 #define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */ #define ASC_MC_WDTR_DONE 0x0124 #define ASC_MC_CAM_MODE_MASK 0x015E /* CAM mode TID bitmask. */ #define ASC_MC_ICQ 0x0160 #define ASC_MC_IRQ 0x0164 #define ASC_MC_PPR_ABLE 0x017A /* * BIOS LRAM variable absolute offsets. */ #define BIOS_CODESEG 0x54 #define BIOS_CODELEN 0x56 #define BIOS_SIGNATURE 0x58 #define BIOS_VERSION 0x5A /* * Microcode Control Flags * * Flags set by the Adv Library in RISC variable 'control_flag' (0x122) * and handled by the microcode. */ #define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */ #define CONTROL_FLAG_ENABLE_AIPP 0x0002 /* Enabled AIPP checking. */ /* * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format */ #define HSHK_CFG_WIDE_XFR 0x8000 #define HSHK_CFG_RATE 0x0F00 #define HSHK_CFG_OFFSET 0x001F #define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */ #define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */ #define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */ #define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */ #define ASC_QC_DATA_CHECK 0x01 /* Require ASC_QC_DATA_OUT set or clear. */ #define ASC_QC_DATA_OUT 0x02 /* Data out DMA transfer. */ #define ASC_QC_START_MOTOR 0x04 /* Send auto-start motor before request. */ #define ASC_QC_NO_OVERRUN 0x08 /* Don't report overrun. */ #define ASC_QC_FREEZE_TIDQ 0x10 /* Freeze TID queue after request. XXX TBD */ #define ASC_QSC_NO_DISC 0x01 /* Don't allow disconnect for request. */ #define ASC_QSC_NO_TAGMSG 0x02 /* Don't allow tag queuing for request. */ #define ASC_QSC_NO_SYNC 0x04 /* Don't use Synch. transfer on request. */ #define ASC_QSC_NO_WIDE 0x08 /* Don't use Wide transfer on request. */ #define ASC_QSC_REDO_DTR 0x10 /* Renegotiate WDTR/SDTR before request. */ /* * Note: If a Tag Message is to be sent and neither ASC_QSC_HEAD_TAG or * ASC_QSC_ORDERED_TAG is set, then a Simple Tag Message (0x20) is used. */ #define ASC_QSC_HEAD_TAG 0x40 /* Use Head Tag Message (0x21). 
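 *
 * Illustrative sketch (not part of the original driver; 'scsiqp' is a
 * placeholder): the tag type is selected per request through these
 * ASC_QSC_* flags, e.g.
 *
 *	scsiqp->scsi_cntl = 0;				Simple Tag (0x20)
 *	scsiqp->scsi_cntl = ASC_QSC_ORDERED_TAG;	Ordered Tag (0x22)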
*/ #define ASC_QSC_ORDERED_TAG 0x80 /* Use Ordered Tag Message (0x22). */ /* * All fields here are accessed by the board microcode and need to be * little-endian. */ typedef struct adv_carr_t { __le32 carr_va; /* Carrier Virtual Address */ __le32 carr_pa; /* Carrier Physical Address */ __le32 areq_vpa; /* ADV_SCSI_REQ_Q Virtual or Physical Address */ /* * next_vpa [31:4] Carrier Virtual or Physical Next Pointer * * next_vpa [3:1] Reserved Bits * next_vpa [0] Done Flag set in Response Queue. */ __le32 next_vpa; } ADV_CARR_T; /* * Mask used to eliminate low 4 bits of carrier 'next_vpa' field. */ #define ADV_NEXT_VPA_MASK 0xFFFFFFF0 #define ADV_RQ_DONE 0x00000001 #define ADV_RQ_GOOD 0x00000002 #define ADV_CQ_STOPPER 0x00000000 #define ADV_GET_CARRP(carrp) ((carrp) & ADV_NEXT_VPA_MASK) /* * Each carrier is 64 bytes, and we need three additional * carrier for icq, irq, and the termination carrier. */ #define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 3) #define ADV_CARRIER_BUFSIZE \ (ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) #define ADV_CHIP_ASC3550 0x01 /* Ultra-Wide IC */ #define ADV_CHIP_ASC38C0800 0x02 /* Ultra2-Wide/LVD IC */ #define ADV_CHIP_ASC38C1600 0x03 /* Ultra3-Wide/LVD2 IC */ /* * Adapter temporary configuration structure * * This structure can be discarded after initialization. Don't add * fields here needed after initialization. * * Field naming convention: * * *_enable indicates the field enables or disables a feature. The * value of the field is never reset. */ typedef struct adv_dvc_cfg { ushort disc_enable; /* enable disconnection */ uchar chip_version; /* chip version */ uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */ ushort control_flag; /* Microcode Control Flag */ ushort mcode_date; /* Microcode date */ ushort mcode_version; /* Microcode version */ ushort serial1; /* EEPROM serial number word 1 */ ushort serial2; /* EEPROM serial number word 2 */ ushort serial3; /* EEPROM serial number word 3 */ } ADV_DVC_CFG; struct adv_dvc_var; struct adv_scsi_req_q; typedef struct adv_sg_block { uchar reserved1; uchar reserved2; uchar reserved3; uchar sg_cnt; /* Valid entries in block. */ __le32 sg_ptr; /* Pointer to next sg block. */ struct { __le32 sg_addr; /* SG element address. */ __le32 sg_count; /* SG element count. */ } sg_list[NO_OF_SG_PER_BLOCK]; } ADV_SG_BLOCK; /* * ADV_SCSI_REQ_Q - microcode request structure * * All fields in this structure up to byte 60 are used by the microcode. * The microcode makes assumptions about the size and ordering of fields * in this structure. Do not change the structure definition here without * coordinating the change with the microcode. * * All fields accessed by microcode must be maintained in little_endian * order. */ typedef struct adv_scsi_req_q { uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */ uchar target_cmd; uchar target_id; /* Device target identifier. */ uchar target_lun; /* Device target logical unit number. */ __le32 data_addr; /* Data buffer physical address. */ __le32 data_cnt; /* Data count. Ucode sets to residual. */ __le32 sense_addr; __le32 carr_pa; uchar mflag; uchar sense_len; uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */ uchar scsi_cntl; uchar done_status; /* Completion status. */ uchar scsi_status; /* SCSI status byte. */ uchar host_status; /* Ucode host status. */ uchar sg_working_ix; uchar cdb[12]; /* SCSI CDB bytes 0-11. */ __le32 sg_real_addr; /* SG list physical address. */ __le32 scsiq_rptr; uchar cdb16[4]; /* SCSI CDB bytes 12-15. 
*/ __le32 scsiq_ptr; __le32 carr_va; /* * End of microcode structure - 60 bytes. The rest of the structure * is used by the Adv Library and ignored by the microcode. */ u32 srb_tag; ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */ } ADV_SCSI_REQ_Q; /* * The following two structures are used to process Wide Board requests. * * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library * and microcode with the ADV_SCSI_REQ_Q field 'srb_tag' set to the * SCSI request tag. The adv_req_t structure 'cmndp' field in turn points * to the Mid-Level SCSI request structure. * * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux * up to 255 scatter-gather elements may be used per request or * ADV_SCSI_REQ_Q. * * Both structures must be 32 byte aligned. */ typedef struct adv_sgblk { ADV_SG_BLOCK sg_block; /* Sgblock structure. */ dma_addr_t sg_addr; /* Physical address */ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */ } adv_sgblk_t; typedef struct adv_req { ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */ uchar align[24]; /* Request structure padding. */ struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */ dma_addr_t req_addr; adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */ } adv_req_t __aligned(32); /* * Adapter operation variable structure. * * One structure is required per host adapter. * * Field naming convention: * * *_able indicates both whether a feature should be enabled or disabled * and whether a device is capable of the feature. At initialization * this field may be set, but later if a device is found to be incapable * of the feature, the field is cleared. */ typedef struct adv_dvc_var { AdvPortAddr iop_base; /* I/O port address */ ushort err_code; /* fatal error code */ ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */ ushort wdtr_able; /* try WDTR for a device */ ushort sdtr_able; /* try SDTR for a device */ ushort ultra_able; /* try SDTR Ultra speed for a device */ ushort sdtr_speed1; /* EEPROM SDTR Speed for TID 0-3 */ ushort sdtr_speed2; /* EEPROM SDTR Speed for TID 4-7 */ ushort sdtr_speed3; /* EEPROM SDTR Speed for TID 8-11 */ ushort sdtr_speed4; /* EEPROM SDTR Speed for TID 12-15 */ ushort tagqng_able; /* try tagged queuing with a device */ ushort ppr_able; /* PPR message capable per TID bitmask. */ uchar max_dvc_qng; /* maximum number of tagged commands per device */ ushort start_motor; /* start motor command allowed */ uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */ uchar chip_no; /* should be assigned by caller */ uchar max_host_qng; /* maximum number of Q'ed command allowed */ ushort no_scam; /* scam_tolerant of EEPROM */ struct asc_board *drv_ptr; /* driver pointer to private structure */ uchar chip_scsi_id; /* chip SCSI target ID */ uchar chip_type; uchar bist_err_code; ADV_CARR_T *carrier; ADV_CARR_T *carr_freelist; /* Carrier free list. */ dma_addr_t carrier_addr; ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */ ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. */ ushort carr_pending_cnt; /* Count of pending carriers. */ /* * Note: The following fields will not be used after initialization. The * driver may discard the buffer after initialization is done. 
*/ ADV_DVC_CFG *cfg; /* temporary configuration structure */ } ADV_DVC_VAR; /* * Microcode idle loop commands */ #define IDLE_CMD_COMPLETED 0 #define IDLE_CMD_STOP_CHIP 0x0001 #define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002 #define IDLE_CMD_SEND_INT 0x0004 #define IDLE_CMD_ABORT 0x0008 #define IDLE_CMD_DEVICE_RESET 0x0010 #define IDLE_CMD_SCSI_RESET_START 0x0020 /* Assert SCSI Bus Reset */ #define IDLE_CMD_SCSI_RESET_END 0x0040 /* Deassert SCSI Bus Reset */ #define IDLE_CMD_SCSIREQ 0x0080 #define IDLE_CMD_STATUS_SUCCESS 0x0001 #define IDLE_CMD_STATUS_FAILURE 0x0002 /* * AdvSendIdleCmd() flag definitions. */ #define ADV_NOWAIT 0x01 /* * Wait loop time out values. */ #define SCSI_WAIT_100_MSEC 100UL /* 100 milliseconds */ #define SCSI_US_PER_MSEC 1000 /* microseconds per millisecond */ #define SCSI_MAX_RETRY 10 /* retry count */ #define ADV_ASYNC_RDMA_FAILURE 0x01 /* Fatal RDMA failure. */ #define ADV_ASYNC_SCSI_BUS_RESET_DET 0x02 /* Detected SCSI Bus Reset. */ #define ADV_ASYNC_CARRIER_READY_FAILURE 0x03 /* Carrier Ready failure. */ #define ADV_RDMA_IN_CARR_AND_Q_INVALID 0x04 /* RDMAed-in data invalid. */ #define ADV_HOST_SCSI_BUS_RESET 0x80 /* Host Initiated SCSI Bus Reset. */ /* Read byte from a register. */ #define AdvReadByteRegister(iop_base, reg_off) \ (ADV_MEM_READB((iop_base) + (reg_off))) /* Write byte to a register. */ #define AdvWriteByteRegister(iop_base, reg_off, byte) \ (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte))) /* Read word (2 bytes) from a register. */ #define AdvReadWordRegister(iop_base, reg_off) \ (ADV_MEM_READW((iop_base) + (reg_off))) /* Write word (2 bytes) to a register. */ #define AdvWriteWordRegister(iop_base, reg_off, word) \ (ADV_MEM_WRITEW((iop_base) + (reg_off), (word))) /* Write dword (4 bytes) to a register. */ #define AdvWriteDWordRegister(iop_base, reg_off, dword) \ (ADV_MEM_WRITEDW((iop_base) + (reg_off), (dword))) /* Read byte from LRAM. */ #define AdvReadByteLram(iop_base, addr, byte) \ do { \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \ } while (0) /* Write byte to LRAM. */ #define AdvWriteByteLram(iop_base, addr, byte) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte))) /* Read word (2 bytes) from LRAM. */ #define AdvReadWordLram(iop_base, addr, word) \ do { \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \ (word) = (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)); \ } while (0) /* Write word (2 bytes) to LRAM. */ #define AdvWriteWordLram(iop_base, addr, word) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) /* Write little-endian double word (4 bytes) to LRAM */ /* Because of unspecified C language ordering don't use auto-increment. */ #define AdvWriteDWordLramNoSwap(iop_base, addr, dword) \ ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ cpu_to_le16((ushort) ((dword) & 0xFFFF)))), \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \ cpu_to_le16((ushort) ((dword >> 16) & 0xFFFF))))) /* Read word (2 bytes) from LRAM assuming that the address is already set. */ #define AdvReadWordAutoIncLram(iop_base) \ (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)) /* Write word (2 bytes) to LRAM assuming that the address is already set. */ #define AdvWriteWordAutoIncLram(iop_base, word) \ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word))) /* * Define macro to check for Condor signature. 
 *
 * Evaluate to ADV_TRUE if a Condor chip is found at the specified port
 * address 'iop_base'. Otherwise evaluate to ADV_FALSE.
 */
#define AdvFindSignature(iop_base) \
	(((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \
	ADV_CHIP_ID_BYTE) && \
	 (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \
	ADV_CHIP_ID_WORD)) ? ADV_TRUE : ADV_FALSE)

/*
 * Define macro to return the version number of the chip at 'iop_base'.
 *
 * The second parameter 'bus_type' is currently unused.
 */
#define AdvGetChipVersion(iop_base, bus_type) \
	AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)

/*
 * Abort an SRB in the chip's RISC Memory. The 'srb_tag' argument must
 * match the ADV_SCSI_REQ_Q 'srb_tag' field.
 *
 * If the request has not yet been sent to the device it will simply be
 * aborted from RISC memory. If the request is disconnected it will be
 * aborted on reselection by sending an Abort Message to the target ID.
 *
 * Return value:
 *      ADV_TRUE(1) - Queue was successfully aborted.
 *      ADV_FALSE(0) - Queue was not found on the active queue list.
 */
#define AdvAbortQueue(asc_dvc, srb_tag) \
	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
		       (ADV_DCNT) (srb_tag))

/*
 * Send a Bus Device Reset Message to the specified target ID.
 *
 * All outstanding commands will be purged if sending the
 * Bus Device Reset Message is successful.
 *
 * Return Value:
 *      ADV_TRUE(1) - All requests on the target are purged.
 *      ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests
 *                     are not purged.
 */
#define AdvResetDevice(asc_dvc, target_id) \
	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
		       (ADV_DCNT) (target_id))

/*
 * SCSI Wide Type definition.
 */
#define ADV_SCSI_BIT_ID_TYPE ushort

/*
 * AdvInitScsiTarget() 'cntl_flag' options.
 */
#define ADV_SCAN_LUN		0x01
#define ADV_CAPINFO_NOLUN	0x02

/*
 * Convert target id to target id bit mask.
 */
#define ADV_TID_TO_TIDMASK(tid)   (0x01 << ((tid) & ADV_MAX_TID))

/*
 * ADV_SCSI_REQ_Q 'done_status' and 'host_status' return values.
 */
#define QD_NO_STATUS         0x00	/* Request not completed yet. */
#define QD_NO_ERROR          0x01
#define QD_ABORTED_BY_HOST   0x02
#define QD_WITH_ERROR        0x04

#define QHSTA_NO_ERROR              0x00
#define QHSTA_M_SEL_TIMEOUT         0x11
#define QHSTA_M_DATA_OVER_RUN       0x12
#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
#define QHSTA_M_QUEUE_ABORTED       0x15
#define QHSTA_M_SXFR_SDMA_ERR       0x16	/* SXFR_STATUS SCSI DMA Error */
#define QHSTA_M_SXFR_SXFR_PERR      0x17	/* SXFR_STATUS SCSI Bus Parity Error */
#define QHSTA_M_RDMA_PERR           0x18	/* RISC PCI DMA parity error */
#define QHSTA_M_SXFR_OFF_UFLW       0x19	/* SXFR_STATUS Offset Underflow */
#define QHSTA_M_SXFR_OFF_OFLW       0x20	/* SXFR_STATUS Offset Overflow */
#define QHSTA_M_SXFR_WD_TMO         0x21	/* SXFR_STATUS Watchdog Timeout */
#define QHSTA_M_SXFR_DESELECTED     0x22	/* SXFR_STATUS Deselected */
/* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */
#define QHSTA_M_SXFR_XFR_OFLW       0x12	/* SXFR_STATUS Transfer Overflow */
#define QHSTA_M_SXFR_XFR_PH_ERR     0x24	/* SXFR_STATUS Transfer Phase Error */
#define QHSTA_M_SXFR_UNKNOWN_ERROR  0x25	/* SXFR_STATUS Unknown Error */
#define QHSTA_M_SCSI_BUS_RESET      0x30	/* Request aborted from SBR */
#define QHSTA_M_SCSI_BUS_RESET_UNSOL 0x31	/* Request aborted from unsol.
SBR */ #define QHSTA_M_BUS_DEVICE_RESET 0x32 /* Request aborted from BDR */ #define QHSTA_M_DIRECTION_ERR 0x35 /* Data Phase mismatch */ #define QHSTA_M_DIRECTION_ERR_HUNG 0x36 /* Data Phase mismatch and bus hang */ #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */ #define QHSTA_M_FROZEN_TIDQ 0x46 /* TID Queue frozen. */ #define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */ /* Return the address that is aligned at the next doubleword >= to 'addr'. */ #define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F) /* * Total contiguous memory needed for driver SG blocks. * * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum * number of scatter-gather elements the driver supports in a * single request. */ #define ADV_SG_LIST_MAX_BYTE_SIZE \ (sizeof(ADV_SG_BLOCK) * \ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)) /* struct asc_board flags */ #define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */ #define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0) #define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */ #define ASC_INFO_SIZE 128 /* advansys_info() line size */ /* Asc Library return codes */ #define ASC_TRUE 1 #define ASC_FALSE 0 #define ASC_NOERROR 1 #define ASC_BUSY 0 #define ASC_ERROR (-1) #define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1) #ifndef ADVANSYS_STATS #define ASC_STATS_ADD(shost, counter, count) #else /* ADVANSYS_STATS */ #define ASC_STATS_ADD(shost, counter, count) \ (((struct asc_board *) shost_priv(shost))->asc_stats.counter += (count)) #endif /* ADVANSYS_STATS */ /* If the result wraps when calculating tenths, return 0. */ #define ASC_TENTHS(num, den) \ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den))))) /* * Display a message to the console. */ #define ASC_PRINT(s) \ { \ printk("advansys: "); \ printk(s); \ } #define ASC_PRINT1(s, a1) \ { \ printk("advansys: "); \ printk((s), (a1)); \ } #define ASC_PRINT2(s, a1, a2) \ { \ printk("advansys: "); \ printk((s), (a1), (a2)); \ } #define ASC_PRINT3(s, a1, a2, a3) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3)); \ } #define ASC_PRINT4(s, a1, a2, a3, a4) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3), (a4)); \ } #ifndef ADVANSYS_DEBUG #define ASC_DBG(lvl, s...) #define ASC_DBG_PRT_SCSI_HOST(lvl, s) #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) #define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_HEX(lvl, name, start, length) #define ASC_DBG_PRT_CDB(lvl, cdb, len) #define ASC_DBG_PRT_SENSE(lvl, sense, len) #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) #else /* ADVANSYS_DEBUG */ /* * Debugging Message Levels: * 0: Errors Only * 1: High-Level Tracing * 2-N: Verbose Tracing */ #define ASC_DBG(lvl, format, arg...) 
{ \ if (asc_dbglvl >= (lvl)) \ printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \ __func__ , ## arg); \ } #define ASC_DBG_PRT_SCSI_HOST(lvl, s) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_scsi_host(s); \ } \ } #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_asc_scsi_q(scsiqp); \ } \ } #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_asc_qdone_info(qdone); \ } \ } #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_adv_scsi_req_q(scsiqp); \ } \ } #define ASC_DBG_PRT_HEX(lvl, name, start, length) \ { \ if (asc_dbglvl >= (lvl)) { \ asc_prt_hex((name), (start), (length)); \ } \ } #define ASC_DBG_PRT_CDB(lvl, cdb, len) \ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len)); #define ASC_DBG_PRT_SENSE(lvl, sense, len) \ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len)); #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len)); #endif /* ADVANSYS_DEBUG */ #ifdef ADVANSYS_STATS /* Per board statistics structure */ struct asc_stats { /* Driver Entrypoint Statistics */ unsigned int queuecommand; /* # calls to advansys_queuecommand() */ unsigned int reset; /* # calls to advansys_eh_bus_reset() */ unsigned int biosparam; /* # calls to advansys_biosparam() */ unsigned int interrupt; /* # advansys_interrupt() calls */ unsigned int callback; /* # calls to asc/adv_isr_callback() */ unsigned int done; /* # calls to request's scsi_done function */ unsigned int build_error; /* # asc/adv_build_req() ASC_ERROR returns. */ unsigned int adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */ unsigned int adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */ unsigned int exe_noerror; /* # ASC_NOERROR returns. */ unsigned int exe_busy; /* # ASC_BUSY returns. */ unsigned int exe_error; /* # ASC_ERROR returns. */ unsigned int exe_unknown; /* # unknown returns. */ /* Data Transfer Statistics */ unsigned int xfer_cnt; /* # I/O requests received */ unsigned int xfer_elem; /* # scatter-gather elements */ unsigned int xfer_sect; /* # 512-byte blocks */ }; #endif /* ADVANSYS_STATS */ /* * Structure allocated for each board. * * This structure is allocated by scsi_host_alloc() at the end * of the 'Scsi_Host' structure starting at the 'hostdata' * field. It is guaranteed to be allocated from DMA-able memory. */ struct asc_board { struct device *dev; struct Scsi_Host *shost; uint flags; /* Board flags */ unsigned int irq; union { ASC_DVC_VAR asc_dvc_var; /* Narrow board */ ADV_DVC_VAR adv_dvc_var; /* Wide board */ } dvc_var; union { ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */ ADV_DVC_CFG adv_dvc_cfg; /* Wide board */ } dvc_cfg; ushort asc_n_io_port; /* Number I/O ports. */ ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */ ushort reqcnt[ADV_MAX_TID + 1]; /* Starvation request count */ ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */ ushort queue_full_cnt[ADV_MAX_TID + 1]; /* Queue full count */ union { ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */ ADVEEP_3550_CONFIG adv_3550_eep; /* 3550 EEPROM config. */ ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */ ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */ } eep_config; /* /proc/scsi/advansys/[0...] */ #ifdef ADVANSYS_STATS struct asc_stats asc_stats; /* Board statistics */ #endif /* ADVANSYS_STATS */ /* * The following fields are used only for Narrow Boards. 
*/ uchar sdtr_data[ASC_MAX_TID + 1]; /* SDTR information */ /* * The following fields are used only for Wide Boards. */ void __iomem *ioremap_addr; /* I/O Memory remap address. */ ushort ioport; /* I/O Port address. */ adv_req_t *adv_reqp; /* Request structures. */ dma_addr_t adv_reqp_addr; size_t adv_reqp_size; struct dma_pool *adv_sgblk_pool; /* Scatter-gather structures. */ ushort bios_signature; /* BIOS Signature. */ ushort bios_version; /* BIOS Version. */ ushort bios_codeseg; /* BIOS Code Segment. */ ushort bios_codelen; /* BIOS Code Segment Length. */ }; #define asc_dvc_to_board(asc_dvc) container_of(asc_dvc, struct asc_board, \ dvc_var.asc_dvc_var) #define adv_dvc_to_board(adv_dvc) container_of(adv_dvc, struct asc_board, \ dvc_var.adv_dvc_var) #define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev) struct advansys_cmd { dma_addr_t dma_handle; }; static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd) { return scsi_cmd_priv(cmd); } #ifdef ADVANSYS_DEBUG static int asc_dbglvl = 3; /* * asc_prt_asc_dvc_var() */ static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h) { printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h); printk(" iop_base 0x%x, err_code 0x%x, dvc_cntl 0x%x, bug_fix_cntl " "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl); printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type, (unsigned)h->init_sdtr); printk(" sdtr_done 0x%x, use_tagged_qng 0x%x, unit_not_ready 0x%x, " "chip_no 0x%x,\n", (unsigned)h->sdtr_done, (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready, (unsigned)h->chip_no); printk(" queue_full_or_busy 0x%x, start_motor 0x%x, scsi_reset_wait " "%u,\n", (unsigned)h->queue_full_or_busy, (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); printk(" is_in_int %u, max_total_qng %u, cur_total_qng %u, " "in_critical_cnt %u,\n", (unsigned)h->is_in_int, (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng, (unsigned)h->in_critical_cnt); printk(" last_q_shortage %u, init_state 0x%x, no_scam 0x%x, " "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage, (unsigned)h->init_state, (unsigned)h->no_scam, (unsigned)h->pci_fix_asyn_xfer); printk(" cfg 0x%lx\n", (ulong)h->cfg); } /* * asc_prt_asc_dvc_cfg() */ static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h) { printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" can_tagged_qng 0x%x, cmd_qng_enabled 0x%x,\n", h->can_tagged_qng, h->cmd_qng_enabled); printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n", h->disc_enable, h->sdtr_enable); printk(" chip_scsi_id %d, chip_version %d,\n", h->chip_scsi_id, h->chip_version); printk(" mcode_date 0x%x, mcode_version %d\n", h->mcode_date, h->mcode_version); } /* * asc_prt_adv_dvc_var() * * Display an ADV_DVC_VAR structure. 
*/ static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h) { printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h); printk(" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n", (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able); printk(" sdtr_able 0x%x, wdtr_able 0x%x\n", (unsigned)h->sdtr_able, (unsigned)h->wdtr_able); printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n", (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%p\n", (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng, h->carr_freelist); printk(" icq_sp 0x%p, irq_sp 0x%p\n", h->icq_sp, h->irq_sp); printk(" no_scam 0x%x, tagqng_able 0x%x\n", (unsigned)h->no_scam, (unsigned)h->tagqng_able); printk(" chip_scsi_id 0x%x, cfg 0x%lx\n", (unsigned)h->chip_scsi_id, (ulong)h->cfg); } /* * asc_prt_adv_dvc_cfg() * * Display an ADV_DVC_CFG structure. */ static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h) { printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" disc_enable 0x%x, termination 0x%x\n", h->disc_enable, h->termination); printk(" chip_version 0x%x, mcode_date 0x%x\n", h->chip_version, h->mcode_date); printk(" mcode_version 0x%x, control_flag 0x%x\n", h->mcode_version, h->control_flag); } /* * asc_prt_scsi_host() */ static void asc_prt_scsi_host(struct Scsi_Host *s) { struct asc_board *boardp = shost_priv(s); printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); printk(" host_busy %d, host_no %d,\n", scsi_host_busy(s), s->host_no); printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", (ulong)s->base, (ulong)s->io_port, boardp->irq); printk(" dma_channel %d, this_id %d, can_queue %d,\n", s->dma_channel, s->this_id, s->can_queue); printk(" cmd_per_lun %d, sg_tablesize %d\n", s->cmd_per_lun, s->sg_tablesize); if (ASC_NARROW_BOARD(boardp)) { asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var); asc_prt_asc_dvc_cfg(&boardp->dvc_cfg.asc_dvc_cfg); } else { asc_prt_adv_dvc_var(&boardp->dvc_var.adv_dvc_var); asc_prt_adv_dvc_cfg(&boardp->dvc_cfg.adv_dvc_cfg); } } /* * asc_prt_hex() * * Print hexadecimal output in 4 byte groupings 32 bytes * or 8 double-words per line. */ static void asc_prt_hex(char *f, uchar *s, int l) { int i; int j; int k; int m; printk("%s: (%d bytes)\n", f, l); for (i = 0; i < l; i += 32) { /* Display a maximum of 8 double-words per line. 
*/ if ((k = (l - i) / 4) >= 8) { k = 8; m = 0; } else { m = (l - i) % 4; } for (j = 0; j < k; j++) { printk(" %2.2X%2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); } switch (m) { case 0: default: break; case 1: printk(" %2.2X", (unsigned)s[i + (j * 4)]); break; case 2: printk(" %2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1]); break; case 3: printk(" %2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); break; } printk("\n"); } } /* * asc_prt_asc_scsi_q() */ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) { ASC_SG_HEAD *sgp; int i; printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); printk (" target_ix 0x%x, target_lun %u, srb_tag 0x%x, tag_code 0x%x,\n", q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag, q->q2.tag_code); printk (" data_addr 0x%lx, data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->q1.data_addr), (ulong)le32_to_cpu(q->q1.data_cnt), (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len); printk(" cdbptr 0x%lx, cdb_len %u, sg_head 0x%lx, sg_queue_cnt %u\n", (ulong)q->cdbptr, q->q2.cdb_len, (ulong)q->sg_head, q->q1.sg_queue_cnt); if (q->sg_head) { sgp = q->sg_head; printk("ASC_SG_HEAD at addr 0x%lx\n", (ulong)sgp); printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt); for (i = 0; i < sgp->entry_cnt; i++) { printk(" [%u]: addr 0x%lx, bytes %lu\n", i, (ulong)le32_to_cpu(sgp->sg_list[i].addr), (ulong)le32_to_cpu(sgp->sg_list[i].bytes)); } } } /* * asc_prt_asc_qdone_info() */ static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) { printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); printk(" srb_tag 0x%x, target_ix %u, cdb_len %u, tag_code %u,\n", q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len, q->d2.tag_code); printk (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n", q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg); } /* * asc_prt_adv_sgblock() * * Display an ADV_SG_BLOCK structure. */ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) { int i; printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", (ulong)b, sgblockno); printk(" sg_cnt %u, sg_ptr 0x%x\n", b->sg_cnt, (u32)le32_to_cpu(b->sg_ptr)); BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK); if (b->sg_ptr != 0) BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK); for (i = 0; i < b->sg_cnt; i++) { printk(" [%u]: sg_addr 0x%x, sg_count 0x%x\n", i, (u32)le32_to_cpu(b->sg_list[i].sg_addr), (u32)le32_to_cpu(b->sg_list[i].sg_count)); } } /* * asc_prt_adv_scsi_req_q() * * Display an ADV_SCSI_REQ_Q structure. 
*/ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) { int sg_blk_cnt; struct adv_sg_block *sg_ptr; adv_sgblk_t *sgblkp; printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); printk(" target_id %u, target_lun %u, srb_tag 0x%x\n", q->target_id, q->target_lun, q->srb_tag); printk(" cntl 0x%x, data_addr 0x%lx\n", q->cntl, (ulong)le32_to_cpu(q->data_addr)); printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->data_cnt), (ulong)le32_to_cpu(q->sense_addr), q->sense_len); printk (" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n", q->cdb_len, q->done_status, q->host_status, q->scsi_status); printk(" sg_working_ix 0x%x, target_cmd %u\n", q->sg_working_ix, q->target_cmd); printk(" scsiq_rptr 0x%lx, sg_real_addr 0x%lx, sg_list_ptr 0x%lx\n", (ulong)le32_to_cpu(q->scsiq_rptr), (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr); /* Display the request's ADV_SG_BLOCK structures. */ if (q->sg_list_ptr != NULL) { sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block); sg_blk_cnt = 0; while (sgblkp) { sg_ptr = &sgblkp->sg_block; asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr); if (sg_ptr->sg_ptr == 0) { break; } sgblkp = sgblkp->next_sgblkp; sg_blk_cnt++; } } } #endif /* ADVANSYS_DEBUG */ /* * advansys_info() * * Return suitable for printing on the console with the argument * adapter's configuration information. * * Note: The information line should not exceed ASC_INFO_SIZE bytes, * otherwise the static 'info' array will be overrun. */ static const char *advansys_info(struct Scsi_Host *shost) { static char info[ASC_INFO_SIZE]; struct asc_board *boardp = shost_priv(shost); ASC_DVC_VAR *asc_dvc_varp; ADV_DVC_VAR *adv_dvc_varp; char *busname; char *widename = NULL; if (ASC_NARROW_BOARD(boardp)) { asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; ASC_DBG(1, "begin\n"); if (asc_dvc_varp->bus_type & ASC_IS_VL) { busname = "VL"; } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) { busname = "EISA"; } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) { if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { busname = "PCI Ultra"; } else { busname = "PCI"; } } else { busname = "?"; shost_printk(KERN_ERR, shost, "unknown bus " "type %d\n", asc_dvc_varp->bus_type); } sprintf(info, "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X", ASC_VERSION, busname, (ulong)shost->io_port, (ulong)shost->io_port + ASC_IOADR_GAP - 1, boardp->irq); } else { /* * Wide Adapter Information * * Memory-mapped I/O is used instead of I/O space to access * the adapter, but display the I/O Port range. The Memory * I/O address is displayed through the driver /proc file. */ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { widename = "Ultra-Wide"; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { widename = "Ultra2-Wide"; } else { widename = "Ultra3-Wide"; } sprintf(info, "AdvanSys SCSI %s: PCI %s: PCIMEM 0x%lX-0x%lX, IRQ 0x%X", ASC_VERSION, widename, (ulong)adv_dvc_varp->iop_base, (ulong)adv_dvc_varp->iop_base + boardp->asc_n_io_port - 1, boardp->irq); } BUG_ON(strlen(info) >= ASC_INFO_SIZE); ASC_DBG(1, "end\n"); return info; } #ifdef CONFIG_PROC_FS /* * asc_prt_board_devices() * * Print driver information for devices attached to the board. 
*/ static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); int chip_scsi_id; int i; seq_printf(m, "\nDevice Information for AdvanSys SCSI Host %d:\n", shost->host_no); if (ASC_NARROW_BOARD(boardp)) { chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id; } else { chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; } seq_puts(m, "Target IDs Detected:"); for (i = 0; i <= ADV_MAX_TID; i++) { if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) seq_printf(m, " %X,", i); } seq_printf(m, " (%X=Host Adapter)\n", chip_scsi_id); } /* * Display Wide Board BIOS Information. */ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); ushort major, minor, letter; seq_puts(m, "\nROM BIOS Version: "); /* * If the BIOS saved a valid signature, then fill in * the BIOS code segment base address. */ if (boardp->bios_signature != 0x55AA) { seq_puts(m, "Disabled or Pre-3.1\n" "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n" "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n"); } else { major = (boardp->bios_version >> 12) & 0xF; minor = (boardp->bios_version >> 8) & 0xF; letter = (boardp->bios_version & 0xFF); seq_printf(m, "%d.%d%c\n", major, minor, letter >= 26 ? '?' : letter + 'A'); /* * Current available ROM BIOS release is 3.1I for UW * and 3.2I for U2W. This code doesn't differentiate * UW and U2W boards. */ if (major < 3 || (major <= 3 && minor < 1) || (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) { seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n" "ftp://ftp.connectcom.net/pub\n"); } } } /* * Add serial number to information bar if signature AAh * is found in at bit 15-9 (7 bits) of word 1. * * Serial Number consists fo 12 alpha-numeric digits. * * 1 - Product type (A,B,C,D..) Word0: 15-13 (3 bits) * 2 - MFG Location (A,B,C,D..) Word0: 12-10 (3 bits) * 3-4 - Product ID (0-99) Word0: 9-0 (10 bits) * 5 - Product revision (A-J) Word0: " " * * Signature Word1: 15-9 (7 bits) * 6 - Year (0-9) Word1: 8-6 (3 bits) & Word2: 15 (1 bit) * 7-8 - Week of the year (1-52) Word1: 5-0 (6 bits) * * 9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits) * * Note 1: Only production cards will have a serial number. * * Note 2: Signature is most significant 7 bits (0xFE). * * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE. */ static int asc_get_eeprom_string(ushort *serialnum, uchar *cp) { ushort w, num; if ((serialnum[1] & 0xFE00) != ((ushort)0xAA << 8)) { return ASC_FALSE; } else { /* * First word - 6 digits. */ w = serialnum[0]; /* Product type - 1st digit. */ if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') { /* Product type is P=Prototype */ *cp += 0x8; } cp++; /* Manufacturing location - 2nd digit. */ *cp++ = 'A' + ((w & 0x1C00) >> 10); /* Product ID - 3rd, 4th digits. */ num = w & 0x3FF; *cp++ = '0' + (num / 100); num %= 100; *cp++ = '0' + (num / 10); /* Product revision - 5th digit. */ *cp++ = 'A' + (num % 10); /* * Second word */ w = serialnum[1]; /* * Year - 6th digit. * * If bit 15 of third word is set, then the * last digit of the year is greater than 7. */ if (serialnum[2] & 0x8000) { *cp++ = '8' + ((w & 0x1C0) >> 6); } else { *cp++ = '0' + ((w & 0x1C0) >> 6); } /* Week of year - 7th, 8th digits. */ num = w & 0x003F; *cp++ = '0' + num / 10; num %= 10; *cp++ = '0' + num; /* * Third word */ w = serialnum[2] & 0x7FFF; /* Serial number - 9th digit. 
*/ *cp++ = 'A' + (w / 1000); /* 10th, 11th, 12th digits. */ num = w % 1000; *cp++ = '0' + num / 100; num %= 100; *cp++ = '0' + num / 10; num %= 10; *cp++ = '0' + num; *cp = '\0'; /* Null Terminate the string. */ return ASC_TRUE; } } /* * asc_prt_asc_board_eeprom() * * Print board EEPROM configuration. */ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); ASCEEP_CONFIG *ep; int i; uchar serialstr[13]; ep = &boardp->eep_config.asc_eep; seq_printf(m, "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", shost->host_no); if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr) == ASC_TRUE) seq_printf(m, " Serial Number: %s\n", serialstr); else if (ep->adapter_info[5] == 0xBB) seq_puts(m, " Default Settings Used for EEPROM-less Adapter.\n"); else seq_puts(m, " Serial Number Signature Not Present.\n"); seq_printf(m, " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng, ep->max_tag_qng); seq_printf(m, " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam); seq_puts(m, " Target ID: "); for (i = 0; i <= ASC_MAX_TID; i++) seq_printf(m, " %d", i); seq_puts(m, "\n Disconnects: "); for (i = 0; i <= ASC_MAX_TID; i++) seq_printf(m, " %c", (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_puts(m, "\n Command Queuing: "); for (i = 0; i <= ASC_MAX_TID; i++) seq_printf(m, " %c", (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_puts(m, "\n Start Motor: "); for (i = 0; i <= ASC_MAX_TID; i++) seq_printf(m, " %c", (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_puts(m, "\n Synchronous Transfer:"); for (i = 0; i <= ASC_MAX_TID; i++) seq_printf(m, " %c", (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_putc(m, '\n'); } /* * asc_prt_adv_board_eeprom() * * Print board EEPROM configuration. 
*/ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); ADV_DVC_VAR *adv_dvc_varp; int i; char *termstr; uchar serialstr[13]; ADVEEP_3550_CONFIG *ep_3550 = NULL; ADVEEP_38C0800_CONFIG *ep_38C0800 = NULL; ADVEEP_38C1600_CONFIG *ep_38C1600 = NULL; ushort word; ushort *wordp; ushort sdtr_speed = 0; adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { ep_3550 = &boardp->eep_config.adv_3550_eep; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { ep_38C0800 = &boardp->eep_config.adv_38C0800_eep; } else { ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; } seq_printf(m, "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", shost->host_no); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { wordp = &ep_3550->serial_number_word1; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { wordp = &ep_38C0800->serial_number_word1; } else { wordp = &ep_38C1600->serial_number_word1; } if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) seq_printf(m, " Serial Number: %s\n", serialstr); else seq_puts(m, " Serial Number Signature Not Present.\n"); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) seq_printf(m, " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", ep_3550->adapter_scsi_id, ep_3550->max_host_qng, ep_3550->max_dvc_qng); else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) seq_printf(m, " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", ep_38C0800->adapter_scsi_id, ep_38C0800->max_host_qng, ep_38C0800->max_dvc_qng); else seq_printf(m, " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", ep_38C1600->adapter_scsi_id, ep_38C1600->max_host_qng, ep_38C1600->max_dvc_qng); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->termination; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { word = ep_38C0800->termination_lvd; } else { word = ep_38C1600->termination_lvd; } switch (word) { case 1: termstr = "Low Off/High Off"; break; case 2: termstr = "Low Off/High On"; break; case 3: termstr = "Low On/High On"; break; default: case 0: termstr = "Automatic"; break; } if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) seq_printf(m, " termination: %u (%s), bios_ctrl: 0x%x\n", ep_3550->termination, termstr, ep_3550->bios_ctrl); else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) seq_printf(m, " termination: %u (%s), bios_ctrl: 0x%x\n", ep_38C0800->termination_lvd, termstr, ep_38C0800->bios_ctrl); else seq_printf(m, " termination: %u (%s), bios_ctrl: 0x%x\n", ep_38C1600->termination_lvd, termstr, ep_38C1600->bios_ctrl); seq_puts(m, " Target ID: "); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %X", i); seq_putc(m, '\n'); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->disc_enable; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { word = ep_38C0800->disc_enable; } else { word = ep_38C1600->disc_enable; } seq_puts(m, " Disconnects: "); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %c", (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_putc(m, '\n'); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->tagqng_able; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { word = ep_38C0800->tagqng_able; } else { word = ep_38C1600->tagqng_able; } seq_puts(m, " Command Queuing: "); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %c", (word & ADV_TID_TO_TIDMASK(i)) ? 
'Y' : 'N'); seq_putc(m, '\n'); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->start_motor; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { word = ep_38C0800->start_motor; } else { word = ep_38C1600->start_motor; } seq_puts(m, " Start Motor: "); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %c", (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_putc(m, '\n'); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { seq_puts(m, " Synchronous Transfer:"); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %c", (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_putc(m, '\n'); } if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { seq_puts(m, " Ultra Transfer: "); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %c", (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_putc(m, '\n'); } if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->wdtr_able; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { word = ep_38C0800->wdtr_able; } else { word = ep_38C1600->wdtr_able; } seq_puts(m, " Wide Transfer: "); for (i = 0; i <= ADV_MAX_TID; i++) seq_printf(m, " %c", (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); seq_putc(m, '\n'); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 || adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) { seq_puts(m, " Synchronous Transfer Speed (Mhz):\n "); for (i = 0; i <= ADV_MAX_TID; i++) { char *speed_str; if (i == 0) { sdtr_speed = adv_dvc_varp->sdtr_speed1; } else if (i == 4) { sdtr_speed = adv_dvc_varp->sdtr_speed2; } else if (i == 8) { sdtr_speed = adv_dvc_varp->sdtr_speed3; } else if (i == 12) { sdtr_speed = adv_dvc_varp->sdtr_speed4; } switch (sdtr_speed & ADV_MAX_TID) { case 0: speed_str = "Off"; break; case 1: speed_str = " 5"; break; case 2: speed_str = " 10"; break; case 3: speed_str = " 20"; break; case 4: speed_str = " 40"; break; case 5: speed_str = " 80"; break; default: speed_str = "Unk"; break; } seq_printf(m, "%X:%s ", i, speed_str); if (i == 7) seq_puts(m, "\n "); sdtr_speed >>= 4; } seq_putc(m, '\n'); } } /* * asc_prt_driver_conf() */ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); seq_printf(m, "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n", shost->host_no); seq_printf(m, " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n", scsi_host_busy(shost), shost->max_id, shost->max_lun, shost->max_channel); seq_printf(m, " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n", shost->unique_id, shost->can_queue, shost->this_id, shost->sg_tablesize, shost->cmd_per_lun); seq_printf(m, " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n", boardp->flags, shost->last_reset, jiffies, boardp->asc_n_io_port); seq_printf(m, " io_port 0x%lx\n", shost->io_port); } /* * asc_prt_asc_board_info() * * Print dynamic board configuration information. */ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); int chip_scsi_id; ASC_DVC_VAR *v; ASC_DVC_CFG *c; int i; int renegotiate = 0; v = &boardp->dvc_var.asc_dvc_var; c = &boardp->dvc_cfg.asc_dvc_cfg; chip_scsi_id = c->chip_scsi_id; seq_printf(m, "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", shost->host_no); seq_printf(m, " chip_version %u, mcode_date 0x%x, " "mcode_version 0x%x, err_code %u\n", c->chip_version, c->mcode_date, c->mcode_version, v->err_code); /* Current number of commands waiting for the host. 
*/ seq_printf(m, " Total Command Pending: %d\n", v->cur_total_qng); seq_puts(m, " Command Queuing:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%c", i, (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } /* Current number of commands waiting for a device. */ seq_puts(m, "\n Command Queue Pending:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]); } /* Current limit on number of commands that can be sent to a device. */ seq_puts(m, "\n Command Queue Limit:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]); } /* Indicate whether the device has returned queue full status. */ seq_puts(m, "\n Command Queue Full:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) seq_printf(m, " %X:Y-%d", i, boardp->queue_full_cnt[i]); else seq_printf(m, " %X:N", i); } seq_puts(m, "\n Synchronous Transfer:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%c", i, (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } seq_putc(m, '\n'); for (i = 0; i <= ASC_MAX_TID; i++) { uchar syn_period_ix; if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) || ((v->init_sdtr & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:", i); if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) { seq_puts(m, " Asynchronous"); } else { syn_period_ix = (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - 1); seq_printf(m, " Transfer Period Factor: %d (%d.%d Mhz),", v->sdtr_period_tbl[syn_period_ix], 250 / v->sdtr_period_tbl[syn_period_ix], ASC_TENTHS(250, v->sdtr_period_tbl[syn_period_ix])); seq_printf(m, " REQ/ACK Offset: %d", boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET); } if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { seq_puts(m, "*\n"); renegotiate = 1; } else { seq_putc(m, '\n'); } } if (renegotiate) { seq_puts(m, " * = Re-negotiation pending before next command.\n"); } } /* * asc_prt_adv_board_info() * * Print dynamic board configuration information. 
*/ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); int i; ADV_DVC_VAR *v; ADV_DVC_CFG *c; AdvPortAddr iop_base; ushort chip_scsi_id; ushort lramword; uchar lrambyte; ushort tagqng_able; ushort sdtr_able, wdtr_able; ushort wdtr_done, sdtr_done; ushort period = 0; int renegotiate = 0; v = &boardp->dvc_var.adv_dvc_var; c = &boardp->dvc_cfg.adv_dvc_cfg; iop_base = v->iop_base; chip_scsi_id = v->chip_scsi_id; seq_printf(m, "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", shost->host_no); seq_printf(m, " iop_base 0x%p, cable_detect: %X, err_code %u\n", v->iop_base, AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT, v->err_code); seq_printf(m, " chip_version %u, mcode_date 0x%x, " "mcode_version 0x%x\n", c->chip_version, c->mcode_date, c->mcode_version); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); seq_puts(m, " Queuing Enabled:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%c", i, (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } seq_puts(m, "\n Queue Limit:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i, lrambyte); seq_printf(m, " %X:%d", i, lrambyte); } seq_puts(m, "\n Command Pending:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i, lrambyte); seq_printf(m, " %X:%d", i, lrambyte); } seq_putc(m, '\n'); AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); seq_puts(m, " Wide Enabled:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%c", i, (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } seq_putc(m, '\n'); AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done); seq_puts(m, " Transfer Bit Width:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } AdvReadWordLram(iop_base, ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i), lramword); seq_printf(m, " %X:%d", i, (lramword & 0x8000) ? 16 : 8); if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) && (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { seq_putc(m, '*'); renegotiate = 1; } } seq_putc(m, '\n'); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); seq_puts(m, " Synchronous Enabled:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:%c", i, (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } seq_putc(m, '\n'); AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done); for (i = 0; i <= ADV_MAX_TID; i++) { AdvReadWordLram(iop_base, ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i), lramword); lramword &= ~0x8000; if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) || ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } seq_printf(m, " %X:", i); if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. 
*/ seq_puts(m, " Asynchronous"); } else { seq_puts(m, " Transfer Period Factor: "); if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */ seq_puts(m, "9 (80.0 Mhz),"); } else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */ seq_puts(m, "10 (40.0 Mhz),"); } else { /* 20 Mhz or below. */ period = (((lramword >> 8) * 25) + 50) / 4; if (period == 0) { /* Should never happen. */ seq_printf(m, "%d (? Mhz), ", period); } else { seq_printf(m, "%d (%d.%d Mhz),", period, 250 / period, ASC_TENTHS(250, period)); } } seq_printf(m, " REQ/ACK Offset: %d", lramword & 0x1F); } if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { seq_puts(m, "*\n"); renegotiate = 1; } else { seq_putc(m, '\n'); } } if (renegotiate) { seq_puts(m, " * = Re-negotiation pending before next command.\n"); } } #ifdef ADVANSYS_STATS /* * asc_prt_board_stats() */ static void asc_prt_board_stats(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); struct asc_stats *s = &boardp->asc_stats; seq_printf(m, "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", shost->host_no); seq_printf(m, " queuecommand %u, reset %u, biosparam %u, interrupt %u\n", s->queuecommand, s->reset, s->biosparam, s->interrupt); seq_printf(m, " callback %u, done %u, build_error %u, build_noreq %u, build_nosg %u\n", s->callback, s->done, s->build_error, s->adv_build_noreq, s->adv_build_nosg); seq_printf(m, " exe_noerror %u, exe_busy %u, exe_error %u, exe_unknown %u\n", s->exe_noerror, s->exe_busy, s->exe_error, s->exe_unknown); /* * Display data transfer statistics. */ if (s->xfer_cnt > 0) { seq_printf(m, " xfer_cnt %u, xfer_elem %u, ", s->xfer_cnt, s->xfer_elem); seq_printf(m, "xfer_bytes %u.%01u kb\n", s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); /* Scatter gather transfer statistics */ seq_printf(m, " avg_num_elem %u.%01u, ", s->xfer_elem / s->xfer_cnt, ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); seq_printf(m, "avg_elem_size %u.%01u kb, ", (s->xfer_sect / 2) / s->xfer_elem, ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); seq_printf(m, "avg_xfer_size %u.%01u kb\n", (s->xfer_sect / 2) / s->xfer_cnt, ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); } } #endif /* ADVANSYS_STATS */ /* * advansys_show_info() - /proc/scsi/advansys/{0,1,2,3,...} * * m: seq_file to print into * shost: Scsi_Host * * Return the number of bytes read from or written to a * /proc/scsi/advansys/[0...] file. */ static int advansys_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); ASC_DBG(1, "begin\n"); /* * User read of /proc/scsi/advansys/[0...] file. */ /* * Get board configuration information. * * advansys_info() returns the board string from its own static buffer. */ /* Copy board information. */ seq_printf(m, "%s\n", (char *)advansys_info(shost)); /* * Display Wide Board BIOS Information. */ if (!ASC_NARROW_BOARD(boardp)) asc_prt_adv_bios(m, shost); /* * Display driver information for each device attached to the board. */ asc_prt_board_devices(m, shost); /* * Display EEPROM configuration for the board. */ if (ASC_NARROW_BOARD(boardp)) asc_prt_asc_board_eeprom(m, shost); else asc_prt_adv_board_eeprom(m, shost); /* * Display driver configuration and information for the board. */ asc_prt_driver_conf(m, shost); #ifdef ADVANSYS_STATS /* * Display driver statistics for the board. */ asc_prt_board_stats(m, shost); #endif /* ADVANSYS_STATS */ /* * Display Asc Library dynamic configuration information * for the board. 
*/ if (ASC_NARROW_BOARD(boardp)) asc_prt_asc_board_info(m, shost); else asc_prt_adv_board_info(m, shost); return 0; } #endif /* CONFIG_PROC_FS */ static void asc_scsi_done(struct scsi_cmnd *scp) { scsi_dma_unmap(scp); ASC_STATS(scp->device->host, done); scsi_done(scp); } static void AscSetBank(PortAddr iop_base, uchar bank) { uchar val; val = AscGetChipControl(iop_base) & (~ (CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET | CC_CHIP_RESET)); if (bank == 1) { val |= CC_BANK_ONE; } else if (bank == 2) { val |= CC_DIAG | CC_BANK_ONE; } else { val &= ~CC_BANK_ONE; } AscSetChipControl(iop_base, val); } static void AscSetChipIH(PortAddr iop_base, ushort ins_code) { AscSetBank(iop_base, 1); AscWriteChipIH(iop_base, ins_code); AscSetBank(iop_base, 0); } static int AscStartChip(PortAddr iop_base) { AscSetChipControl(iop_base, 0); if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { return (0); } return (1); } static bool AscStopChip(PortAddr iop_base) { uchar cc_val; cc_val = AscGetChipControl(iop_base) & (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG)); AscSetChipControl(iop_base, (uchar)(cc_val | CC_HALT)); AscSetChipIH(iop_base, INS_HALT); AscSetChipIH(iop_base, INS_RFLAG_WTM); if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) { return false; } return true; } static bool AscIsChipHalted(PortAddr iop_base) { if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { if ((AscGetChipControl(iop_base) & CC_HALT) != 0) { return true; } } return false; } static bool AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc) { PortAddr iop_base; int i = 10; iop_base = asc_dvc->iop_base; while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) { mdelay(100); } AscStopChip(iop_base); AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT); udelay(60); AscSetChipIH(iop_base, INS_RFLAG_WTM); AscSetChipIH(iop_base, INS_HALT); AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT); AscSetChipControl(iop_base, CC_HALT); mdelay(200); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); return (AscIsChipHalted(iop_base)); } static int AscFindSignature(PortAddr iop_base) { ushort sig_word; ASC_DBG(1, "AscGetChipSignatureByte(0x%x) 0x%x\n", iop_base, AscGetChipSignatureByte(iop_base)); if (AscGetChipSignatureByte(iop_base) == (uchar)ASC_1000_ID1B) { ASC_DBG(1, "AscGetChipSignatureWord(0x%x) 0x%x\n", iop_base, AscGetChipSignatureWord(iop_base)); sig_word = AscGetChipSignatureWord(iop_base); if ((sig_word == (ushort)ASC_1000_ID0W) || (sig_word == (ushort)ASC_1000_ID0W_FIX)) { return (1); } } return (0); } static void AscEnableInterrupt(PortAddr iop_base) { ushort cfg; cfg = AscGetChipCfgLsw(iop_base); AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON); } static void AscDisableInterrupt(PortAddr iop_base) { ushort cfg; cfg = AscGetChipCfgLsw(iop_base); AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON)); } static uchar AscReadLramByte(PortAddr iop_base, ushort addr) { unsigned char byte_data; unsigned short word_data; if (isodd_word(addr)) { AscSetChipLramAddr(iop_base, addr - 1); word_data = AscGetChipLramData(iop_base); byte_data = (word_data >> 8) & 0xFF; } else { AscSetChipLramAddr(iop_base, addr); word_data = AscGetChipLramData(iop_base); byte_data = word_data & 0xFF; } return byte_data; } static ushort AscReadLramWord(PortAddr iop_base, ushort addr) { ushort word_data; AscSetChipLramAddr(iop_base, addr); word_data = AscGetChipLramData(iop_base); return (word_data); } static void AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words) { int 
i; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < words; i++) { AscSetChipLramData(iop_base, set_wval); } } static void AscWriteLramWord(PortAddr iop_base, ushort addr, ushort word_val) { AscSetChipLramAddr(iop_base, addr); AscSetChipLramData(iop_base, word_val); } static void AscWriteLramByte(PortAddr iop_base, ushort addr, uchar byte_val) { ushort word_data; if (isodd_word(addr)) { addr--; word_data = AscReadLramWord(iop_base, addr); word_data &= 0x00FF; word_data |= (((ushort)byte_val << 8) & 0xFF00); } else { word_data = AscReadLramWord(iop_base, addr); word_data &= 0xFF00; word_data |= ((ushort)byte_val & 0x00FF); } AscWriteLramWord(iop_base, addr, word_data); } /* * Copy 2 bytes to LRAM. * * The source data is assumed to be in little-endian order in memory * and is maintained in little-endian order when written to LRAM. */ static void AscMemWordCopyPtrToLram(PortAddr iop_base, ushort s_addr, const uchar *s_buffer, int words) { int i; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 2 * words; i += 2) { /* * On a little-endian system the second argument below * produces a little-endian ushort which is written to * LRAM in little-endian order. On a big-endian system * the second argument produces a big-endian ushort which * is "transparently" byte-swapped by outpw() and written * in little-endian order to LRAM. */ outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]); } } /* * Copy 4 bytes to LRAM. * * The source data is assumed to be in little-endian order in memory * and is maintained in little-endian order when written to LRAM. */ static void AscMemDWordCopyPtrToLram(PortAddr iop_base, ushort s_addr, uchar *s_buffer, int dwords) { int i; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 4 * dwords; i += 4) { outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]); /* LSW */ outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 3] << 8) | s_buffer[i + 2]); /* MSW */ } } /* * Copy 2 bytes from LRAM. * * The source data is assumed to be in little-endian order in LRAM * and is maintained in little-endian order when written to memory. 
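 *
 * For example (illustrative values only), an LRAM word read back as
 * 0x1234 is stored to the destination buffer as the byte 0x34 followed
 * by the byte 0x12.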
*/ static void AscMemWordCopyPtrFromLram(PortAddr iop_base, ushort s_addr, uchar *d_buffer, int words) { int i; ushort word; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 2 * words; i += 2) { word = inpw(iop_base + IOP_RAM_DATA); d_buffer[i] = word & 0xff; d_buffer[i + 1] = (word >> 8) & 0xff; } } static u32 AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words) { u32 sum = 0; int i; for (i = 0; i < words; i++, s_addr += 2) { sum += AscReadLramWord(iop_base, s_addr); } return (sum); } static void AscInitLram(ASC_DVC_VAR *asc_dvc) { uchar i; ushort s_addr; PortAddr iop_base; iop_base = asc_dvc->iop_base; AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0, (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) * 64) >> 1)); i = ASC_MIN_ACTIVE_QNO; s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE; AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD), (uchar)(i + 1)); AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD), (uchar)(asc_dvc->max_total_qng)); AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO), (uchar)i); i++; s_addr += ASC_QBLK_SIZE; for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) { AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD), (uchar)(i + 1)); AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD), (uchar)(i - 1)); AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO), (uchar)i); } AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD), (uchar)ASC_QLINK_END); AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD), (uchar)(asc_dvc->max_total_qng - 1)); AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO), (uchar)asc_dvc->max_total_qng); i++; s_addr += ASC_QBLK_SIZE; for (; i <= (uchar)(asc_dvc->max_total_qng + 3); i++, s_addr += ASC_QBLK_SIZE) { AscWriteLramByte(iop_base, (ushort)(s_addr + (ushort)ASC_SCSIQ_B_FWD), i); AscWriteLramByte(iop_base, (ushort)(s_addr + (ushort)ASC_SCSIQ_B_BWD), i); AscWriteLramByte(iop_base, (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO), i); } } static u32 AscLoadMicroCode(PortAddr iop_base, ushort s_addr, const uchar *mcode_buf, ushort mcode_size) { u32 chksum; ushort mcode_word_size; ushort mcode_chksum; /* Write the microcode buffer starting at LRAM address 0. 
*/ mcode_word_size = (ushort)(mcode_size >> 1); AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size); AscMemWordCopyPtrToLram(iop_base, s_addr, mcode_buf, mcode_word_size); chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size); ASC_DBG(1, "chksum 0x%lx\n", (ulong)chksum); mcode_chksum = (ushort)AscMemSumLramWord(iop_base, (ushort)ASC_CODE_SEC_BEG, (ushort)((mcode_size - s_addr - (ushort) ASC_CODE_SEC_BEG) / 2)); ASC_DBG(1, "mcode_chksum 0x%lx\n", (ulong)mcode_chksum); AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum); AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size); return chksum; } static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc) { PortAddr iop_base; int i; ushort lram_addr; iop_base = asc_dvc->iop_base; AscPutRiscVarFreeQHead(iop_base, 1); AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng); AscPutVarFreeQHead(iop_base, 1); AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng); AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B, (uchar)((int)asc_dvc->max_total_qng + 1)); AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B, (uchar)((int)asc_dvc->max_total_qng + 2)); AscWriteLramByte(iop_base, (ushort)ASCV_TOTAL_READY_Q_B, asc_dvc->max_total_qng); AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0); AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0); AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0); AscPutQDoneInProgress(iop_base, 0); lram_addr = ASC_QADR_BEG; for (i = 0; i < 32; i++, lram_addr += 2) { AscWriteLramWord(iop_base, lram_addr, 0); } } static int AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) { int i; int warn_code; PortAddr iop_base; __le32 phy_addr; __le32 phy_size; struct asc_board *board = asc_dvc_to_board(asc_dvc); iop_base = asc_dvc->iop_base; warn_code = 0; for (i = 0; i <= ASC_MAX_TID; i++) { AscPutMCodeInitSDTRAtID(iop_base, i, asc_dvc->cfg->sdtr_period_offset[i]); } AscInitQLinkVar(asc_dvc); AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B, asc_dvc->cfg->disc_enable); AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B, ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id)); /* Ensure overrun buffer is aligned on an 8 byte boundary. 
*/ BUG_ON((unsigned long)asc_dvc->overrun_buf & 7); asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) { warn_code = -ENOMEM; goto err_dma_map; } phy_addr = cpu_to_le32(asc_dvc->overrun_dma); AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D, (uchar *)&phy_addr, 1); phy_size = cpu_to_le32(ASC_OVERRUN_BSIZE); AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_BSIZE_D, (uchar *)&phy_size, 1); asc_dvc->cfg->mcode_date = AscReadLramWord(iop_base, (ushort)ASCV_MC_DATE_W); asc_dvc->cfg->mcode_version = AscReadLramWord(iop_base, (ushort)ASCV_MC_VER_W); AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; warn_code = -EINVAL; goto err_mcode_start; } if (AscStartChip(iop_base) != 1) { asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; warn_code = -EIO; goto err_mcode_start; } return warn_code; err_mcode_start: dma_unmap_single(board->dev, asc_dvc->overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); err_dma_map: asc_dvc->overrun_dma = 0; return warn_code; } static int AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/mcode.bin"; int err; unsigned long chksum; int warn_code; PortAddr iop_base; iop_base = asc_dvc->iop_base; warn_code = 0; if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) && !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) { AscResetChipAndScsiBus(asc_dvc); mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ } asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC; if (asc_dvc->err_code != 0) return ASC_ERROR; if (!AscFindSignature(asc_dvc->iop_base)) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return warn_code; } AscDisableInterrupt(iop_base); AscInitLram(asc_dvc); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; ASC_DBG(1, "_asc_mcode_chksum 0x%lx\n", (ulong)chksum); if (AscLoadMicroCode(iop_base, 0, &fw->data[4], fw->size - 4) != chksum) { asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM; release_firmware(fw); return warn_code; } release_firmware(fw); warn_code |= AscInitMicroCodeVar(asc_dvc); if (!asc_dvc->overrun_dma) return warn_code; asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC; AscEnableInterrupt(iop_base); return warn_code; } /* * Load the Microcode * * Write the microcode image to RISC memory starting at address 0. * * The microcode is stored compressed in the following format: * * 254 word (508 byte) table indexed by byte code followed * by the following byte codes: * * 1-Byte Code: * 00: Emit word 0 in table. * 01: Emit word 1 in table. * . * FD: Emit word 253 in table. * * Multi-Byte Code: * FE WW WW: (3 byte code) Word to emit is the next word WW WW. * FF BB WW WW: (4 byte code) Emit BB count times next word WW WW. 
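 *
 * For example (hypothetical byte values, not taken from a real firmware
 * image), the sequence:
 *
 *	02 FE 34 12 FF 03 CD AB
 *
 * would emit table word 2, then the literal word 0x1234, then the word
 * 0xABCD three times (the word following an FE or FF opcode is stored
 * least significant byte first).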
* * Returns 0 or an error if the checksum doesn't match */ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf, int size, int memsize, int chksum) { int i, j, end, len = 0; u32 sum; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0); for (i = 253 * 2; i < size; i++) { if (buf[i] == 0xff) { unsigned short word = (buf[i + 3] << 8) | buf[i + 2]; for (j = 0; j < buf[i + 1]; j++) { AdvWriteWordAutoIncLram(iop_base, word); len += 2; } i += 3; } else if (buf[i] == 0xfe) { unsigned short word = (buf[i + 2] << 8) | buf[i + 1]; AdvWriteWordAutoIncLram(iop_base, word); i += 2; len += 2; } else { unsigned int off = buf[i] * 2; unsigned short word = (buf[off + 1] << 8) | buf[off]; AdvWriteWordAutoIncLram(iop_base, word); len += 2; } } end = len; while (len < memsize) { AdvWriteWordAutoIncLram(iop_base, 0); len += 2; } /* Verify the microcode checksum. */ sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0); for (len = 0; len < end; len += 2) { sum += AdvReadWordAutoIncLram(iop_base); } if (sum != chksum) return ASC_IERR_MCODE_CHKSUM; return 0; } static void AdvBuildCarrierFreelist(struct adv_dvc_var *adv_dvc) { off_t carr_offset = 0, next_offset; dma_addr_t carr_paddr; int carr_num = ADV_CARRIER_BUFSIZE / sizeof(ADV_CARR_T), i; for (i = 0; i < carr_num; i++) { carr_offset = i * sizeof(ADV_CARR_T); /* Get physical address of the carrier 'carrp'. */ carr_paddr = adv_dvc->carrier_addr + carr_offset; adv_dvc->carrier[i].carr_pa = cpu_to_le32(carr_paddr); adv_dvc->carrier[i].carr_va = cpu_to_le32(carr_offset); adv_dvc->carrier[i].areq_vpa = 0; next_offset = carr_offset + sizeof(ADV_CARR_T); if (i == carr_num) next_offset = ~0; adv_dvc->carrier[i].next_vpa = cpu_to_le32(next_offset); } /* * We cannot have a carrier with 'carr_va' of '0', as * a reference to this carrier would be interpreted as * list termination. * So start at carrier 1 with the freelist. */ adv_dvc->carr_freelist = &adv_dvc->carrier[1]; } static ADV_CARR_T *adv_get_carrier(struct adv_dvc_var *adv_dvc, u32 offset) { int index; BUG_ON(offset > ADV_CARRIER_BUFSIZE); index = offset / sizeof(ADV_CARR_T); return &adv_dvc->carrier[index]; } static ADV_CARR_T *adv_get_next_carrier(struct adv_dvc_var *adv_dvc) { ADV_CARR_T *carrp = adv_dvc->carr_freelist; u32 next_vpa = le32_to_cpu(carrp->next_vpa); if (next_vpa == 0 || next_vpa == ~0) { ASC_DBG(1, "invalid vpa offset 0x%x\n", next_vpa); return NULL; } adv_dvc->carr_freelist = adv_get_carrier(adv_dvc, next_vpa); /* * insert stopper carrier to terminate list */ carrp->next_vpa = cpu_to_le32(ADV_CQ_STOPPER); return carrp; } /* * 'offset' is the index in the request pointer array */ static adv_req_t * adv_get_reqp(struct adv_dvc_var *adv_dvc, u32 offset) { struct asc_board *boardp = adv_dvc->drv_ptr; BUG_ON(offset > adv_dvc->max_host_qng); return &boardp->adv_reqp[offset]; } /* * Send an idle command to the chip and wait for completion. * * Command completion is polled for once per microsecond. * * The function can be called from anywhere including an interrupt handler. * But the function is not re-entrant, so it uses the DvcEnter/LeaveCritical() * functions to prevent reentrancy. 
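 *
 * For example, AdvResetSB() below asserts and then deasserts the SCSI
 * Bus Reset signal with a pair of idle commands:
 *
 *	AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START, 0L);
 *	udelay(ASC_SCSI_RESET_HOLD_TIME_US);
 *	AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L);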
* * Return Values: * ADV_TRUE - command completed successfully * ADV_FALSE - command failed * ADV_ERROR - command timed out */ static int AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc, ushort idle_cmd, u32 idle_cmd_parameter) { int result, i, j; AdvPortAddr iop_base; iop_base = asc_dvc->iop_base; /* * Clear the idle command status which is set by the microcode * to a non-zero value to indicate when the command is completed. * The non-zero result is one of the IDLE_CMD_STATUS_* values */ AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, (ushort)0); /* * Write the idle command value after the idle command parameter * has been written to avoid a race condition. If the order is not * followed, the microcode may process the idle command before the * parameters have been written to LRAM. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IDLE_CMD_PARAMETER, cpu_to_le32(idle_cmd_parameter)); AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd); /* * Tickle the RISC to tell it to process the idle command. */ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_B); if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { /* * Clear the tickle value. In the ASC-3550 the RISC flag * command 'clr_tickle_b' does not work unless the host * value is cleared. */ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP); } /* Wait for up to 100 millisecond for the idle command to timeout. */ for (i = 0; i < SCSI_WAIT_100_MSEC; i++) { /* Poll once each microsecond for command completion. */ for (j = 0; j < SCSI_US_PER_MSEC; j++) { AdvReadWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, result); if (result != 0) return result; udelay(1); } } BUG(); /* The idle command should never timeout. */ return ADV_ERROR; } /* * Reset SCSI Bus and purge all outstanding requests. * * Return Value: * ADV_TRUE(1) - All requests are purged and SCSI Bus is reset. * ADV_FALSE(0) - Microcode command failed. * ADV_ERROR(-1) - Microcode command timed-out. Microcode or IC * may be hung which requires driver recovery. */ static int AdvResetSB(ADV_DVC_VAR *asc_dvc) { int status; /* * Send the SCSI Bus Reset idle start idle command which asserts * the SCSI Bus Reset signal. */ status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START, 0L); if (status != ADV_TRUE) { return status; } /* * Delay for the specified SCSI Bus Reset hold time. * * The hold time delay is done on the host because the RISC has no * microsecond accurate timer. */ udelay(ASC_SCSI_RESET_HOLD_TIME_US); /* * Send the SCSI Bus Reset end idle command which de-asserts * the SCSI Bus Reset signal and purges any pending requests. */ status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L); if (status != ADV_TRUE) { return status; } mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ return status; } /* * Initialize the ASC-3550. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/3550.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; int word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ ushort wdtr_able = 0, sdtr_able, tagqng_able; uchar max_cmd[ADV_MAX_TID + 1]; /* If there is already an error, don't continue. 
*/ if (asc_dvc->err_code != 0) return ADV_ERROR; /* * The caller must set 'chip_type' to ADV_CHIP_ASC3550. */ if (asc_dvc->chip_type != ADV_CHIP_ASC3550) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { ushort bios_version, major, minor; bios_version = bios_mem[(ASC_MC_BIOS_VERSION - ASC_MC_BIOSMEM) / 2]; major = (bios_version >> 12) & 0xF; minor = (bios_version >> 8) & 0xF; if (major < 3 || (major == 3 && minor == 1)) { /* BIOS 3.1 and earlier location of 'wdtr_able' variable. */ AdvReadWordLram(iop_base, 0x120, wdtr_able); } else { AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); } } AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_3550_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read and save microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC3550. */ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC3550); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-3550, setting the START_CTL_EMFU [3:2] bits sets a FIFO * threshold of 128 bytes. 
This register is only accessible to the host. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, START_CTL_EMFU | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 based on the ULTRA EEPROM per TID * bitmask. These values determine the maximum SDTR speed negotiated * with a device. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. * * 4-bit speed SDTR speed name * =========== =============== * 0000b (0x0) SDTR disabled * 0001b (0x1) 5 Mhz * 0010b (0x2) 10 Mhz * 0011b (0x3) 20 Mhz (Ultra) * 0100b (0x4) 40 Mhz (LVD/Ultra2) * 0101b (0x5) 80 Mhz (LVD2/Ultra3) * 0110b (0x6) Undefined * . * 1111b (0xF) Undefined */ word = 0; for (tid = 0; tid <= ADV_MAX_TID; tid++) { if (ADV_TID_TO_TIDMASK(tid) & asc_dvc->ultra_able) { /* Set Ultra speed for TID 'tid'. */ word |= (0x3 << (4 * (tid % 4))); } else { /* Set Fast speed for TID 'tid'. */ word |= (0x2 << (4 * (tid % 4))); } if (tid == 3) { /* Check if done with sdtr_speed1. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, word); word = 0; } else if (tid == 7) { /* Check if done with sdtr_speed2. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, word); word = 0; } else if (tid == 11) { /* Check if done with sdtr_speed3. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, word); word = 0; } else if (tid == 15) { /* Check if done with sdtr_speed4. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, word); /* End of loop. */ } } /* * Set microcode operating variable for the disconnect per TID bitmask. */ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Determine SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ /* Read current SCSI_CFG1 Register value. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If all three connectors are in use, return an error. */ if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 || (scsi_cfg1 & CABLE_ILLEGAL_B) == 0) { asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION; return ADV_ERROR; } /* * If the internal narrow cable is reversed all of the SCSI_CTRL * register signals will be set. Check for and return an error if * this condition is found. 
*/ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * If this is a differential board and a single-ended device * is attached to one of the connectors, return an error. */ if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0) { asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE; return ADV_ERROR; } /* * If automatic termination control is enabled, then set the * termination value based on a table listed in a_condor.h. * * If manual termination was specified with an EEPROM setting * then 'termination' was set-up in AdvInitFrom3550EEPROM() and * is ready to be 'ored' into SCSI_CFG1. */ if (asc_dvc->cfg->termination == 0) { /* * The software always controls termination by setting TERM_CTL_SEL. * If TERM_CTL_SEL were set to 0, the hardware would set termination. */ asc_dvc->cfg->termination |= TERM_CTL_SEL; switch (scsi_cfg1 & CABLE_DETECT) { /* TERM_CTL_H: on, TERM_CTL_L: on */ case 0x3: case 0x7: case 0xB: case 0xD: case 0xE: case 0xF: asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L); break; /* TERM_CTL_H: on, TERM_CTL_L: off */ case 0x1: case 0x5: case 0x9: case 0xA: case 0xC: asc_dvc->cfg->termination |= TERM_CTL_H; break; /* TERM_CTL_H: off, TERM_CTL_L: off */ case 0x2: case 0x6: break; } } /* * Clear any set TERM_CTL_H and TERM_CTL_L bits. */ scsi_cfg1 &= ~TERM_CTL; /* * Invert the TERM_CTL_H and TERM_CTL_L bits and then * set 'scsi_cfg1'. The TERM_POL bit does not need to be * referenced, because the hardware internally inverts * the Termination High and Low bits if TERM_POL is set. */ scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL)); /* * Set SCSI_CFG1 Microcode Default Value * * Set filter value and possibly modified termination control * bits in the Microcode SCSI_CFG1 Register Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, FLTR_DISABLE | scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-3550 has 8KB internal memory. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_8KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); if (!asc_dvc->icq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } /* * Set RISC ICQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); if (!asc_dvc->irq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } /* * Set RISC IRQ physical address start value. 
*/ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * BIOS Handshake Configuration Table and do not perform * a SCSI Bus Reset. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Initialize the ASC-38C0800. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/38C0800.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; int word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar byte; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ ushort wdtr_able, sdtr_able, tagqng_able; uchar max_cmd[ADV_MAX_TID + 1]; /* If there is already an error, don't continue. */ if (asc_dvc->err_code != 0) return ADV_ERROR; /* * The caller must set 'chip_type' to ADV_CHIP_ASC38C0800. */ if (asc_dvc->chip_type != ADV_CHIP_ASC38C0800) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } /* * RAM BIST (RAM Built-In Self Test) * * Address : I/O base + offset 0x38h register (byte). 
* Function: Bit 7-6(RW) : RAM mode * Normal Mode : 0x00 * Pre-test Mode : 0x40 * RAM Test Mode : 0x80 * Bit 5 : unused * Bit 4(RO) : Done bit * Bit 3-0(RO) : Status * Host Error : 0x08 * Int_RAM Error : 0x04 * RISC Error : 0x02 * SCSI Error : 0x01 * No Error : 0x00 * * Note: RAM BIST code should be put right here, before loading the * microcode and after saving the RISC memory BIOS region. */ /* * LRAM Pre-test * * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset * to NORMAL_MODE, return an error too. */ for (i = 0; i < 2; i++) { AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); mdelay(10); /* Wait for 10ms before reading back. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & 0x0F) != PRE_TEST_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); mdelay(10); /* Wait for 10ms before reading back. */ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) != NORMAL_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } } /* * LRAM Test - It takes about 1.5 ms to run through the test. * * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. * If Done bit not set or Status not 0, save register byte, set the * err_code, and return an error. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); mdelay(10); /* Wait for 10ms before checking status. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { /* Get here if Done bit not set or Status not 0. */ asc_dvc->bist_err_code = byte; /* for BIOS display message */ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; return ADV_ERROR; } /* We need to reset back to normal mode after LRAM test passes. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_38C0800_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC38C0800. 
*/ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C0800); /* * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current * cable detection and then we are able to read C_DET[3:0]. * * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 * Microcode Default Value' section below. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, scsi_cfg1 | DIS_TERM_DRV); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-38C0800, set FIFO_THRESH_80B [6:4] bits and START_CTL_TH [3:2] * bits for the default FIFO threshold. * * Note: ASC-38C0800 FIFO threshold has been changed to 256 bytes. * * For DMA Errata #4 set the BC_THRESH_ENB bit. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, BC_THRESH_ENB | FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for DISC and SDTR_SPEED1, * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM * configuration values. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. */ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Determine SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ /* Read current SCSI_CFG1 Register value. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If the internal narrow cable is reversed all of the SCSI_CTRL * register signals will be set. Check for and return an error if * this condition is found. 
*/ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * All kind of combinations of devices attached to one of four * connectors are acceptable except HVD device attached. For example, * LVD device can be attached to SE connector while SE device attached * to LVD connector. If LVD device attached to SE connector, it only * runs up to Ultra speed. * * If an HVD device is attached to one of LVD connectors, return an * error. However, there is no way to detect HVD device attached to * SE connectors. */ if (scsi_cfg1 & HVD) { asc_dvc->err_code = ASC_IERR_HVD_DEVICE; return ADV_ERROR; } /* * If either SE or LVD automatic termination control is enabled, then * set the termination value based on a table listed in a_condor.h. * * If manual termination was specified with an EEPROM setting then * 'termination' was set-up in AdvInitFrom38C0800EEPROM() and is ready * to be 'ored' into SCSI_CFG1. */ if ((asc_dvc->cfg->termination & TERM_SE) == 0) { /* SE automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_SE) { /* TERM_SE_HI: on, TERM_SE_LO: on */ case 0x1: case 0x2: case 0x3: asc_dvc->cfg->termination |= TERM_SE; break; /* TERM_SE_HI: on, TERM_SE_LO: off */ case 0x0: asc_dvc->cfg->termination |= TERM_SE_HI; break; } } if ((asc_dvc->cfg->termination & TERM_LVD) == 0) { /* LVD automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_LVD) { /* TERM_LVD_HI: on, TERM_LVD_LO: on */ case 0x4: case 0x8: case 0xC: asc_dvc->cfg->termination |= TERM_LVD; break; /* TERM_LVD_HI: off, TERM_LVD_LO: off */ case 0x0: break; } } /* * Clear any set TERM_SE and TERM_LVD bits. */ scsi_cfg1 &= (~TERM_SE & ~TERM_LVD); /* * Invert the TERM_SE and TERM_LVD bits and then set 'scsi_cfg1'. */ scsi_cfg1 |= (~asc_dvc->cfg->termination & 0xF0); /* * Clear BIG_ENDIAN, DIS_TERM_DRV, Terminator Polarity and HVD/LVD/SE * bits and set possibly modified termination control bits in the * Microcode SCSI_CFG1 Register Value. */ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL & ~HVD_LVD_SE); /* * Set SCSI_CFG1 Microcode Default Value * * Set possibly modified termination control and reset DIS_TERM_DRV * bits in the Microcode SCSI_CFG1 Register Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-38C0800 has 16KB internal memory. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_16KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); if (!asc_dvc->icq_sp) { ASC_DBG(0, "Failed to get ICQ carrier\n"); asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } /* * Set RISC ICQ physical address start value. * carr_pa is LE, must be native before write */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). 
*/ asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); if (!asc_dvc->irq_sp) { ASC_DBG(0, "Failed to get IRQ carrier\n"); asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } /* * Set RISC IRQ physical address start value. * * carr_pa is LE, must be native before write * */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * BIOS Handshake Configuration Table and do not perform * a SCSI Bus Reset. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Initialize the ASC-38C1600. * * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/38C1600.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; long word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar byte; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ ushort wdtr_able, sdtr_able, ppr_able, tagqng_able; uchar max_cmd[ASC_MAX_TID + 1]; /* If there is already an error, don't continue. */ if (asc_dvc->err_code != 0) { return ADV_ERROR; } /* * The caller must set 'chip_type' to ADV_CHIP_ASC38C1600. */ if (asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ASC_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } /* * RAM BIST (Built-In Self Test) * * Address : I/O base + offset 0x38h register (byte). 
* Function: Bit 7-6(RW) : RAM mode * Normal Mode : 0x00 * Pre-test Mode : 0x40 * RAM Test Mode : 0x80 * Bit 5 : unused * Bit 4(RO) : Done bit * Bit 3-0(RO) : Status * Host Error : 0x08 * Int_RAM Error : 0x04 * RISC Error : 0x02 * SCSI Error : 0x01 * No Error : 0x00 * * Note: RAM BIST code should be put right here, before loading the * microcode and after saving the RISC memory BIOS region. */ /* * LRAM Pre-test * * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset * to NORMAL_MODE, return an error too. */ for (i = 0; i < 2; i++) { AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); mdelay(10); /* Wait for 10ms before reading back. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & 0x0F) != PRE_TEST_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); mdelay(10); /* Wait for 10ms before reading back. */ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) != NORMAL_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } } /* * LRAM Test - It takes about 1.5 ms to run through the test. * * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. * If Done bit not set or Status not 0, save register byte, set the * err_code, and return an error. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); mdelay(10); /* Wait for 10ms before checking status. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { /* Get here if Done bit not set or Status not 0. */ asc_dvc->bist_err_code = byte; /* for BIOS display message */ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; return ADV_ERROR; } /* We need to reset back to normal mode after LRAM test passes. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_38C1600_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC38C1600. 
*/ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C1600); /* * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current * cable detection and then we are able to read C_DET[3:0]. * * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 * Microcode Default Value' section below. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, scsi_cfg1 | DIS_TERM_DRV); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * If the BIOS control flag AIPP (Asynchronous Information * Phase Protection) disable bit is not set, then set the firmware * 'control_flag' CONTROL_FLAG_ENABLE_AIPP bit to enable * AIPP checking and encoding. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_AIPP_DIS) == 0) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_ENABLE_AIPP; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-38C1600 use DMA_CFG0 default values: FIFO_THRESH_80B [6:4], * and START_CTL_TH [3:2]. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for DISC and SDTR_SPEED1, * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM * configuration values. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. */ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Calculate SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. * * Each ASC-38C1600 function has only two cable detect bits. * The bus mode override bits are in IOPB_SOFT_OVER_WR. 
	 */
	scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);

	/*
	 * If the cable is reversed all of the SCSI_CTRL register signals
	 * will be set. Check for and return an error if this condition is
	 * found.
	 */
	if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
		asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
		return ADV_ERROR;
	}

	/*
	 * Each ASC-38C1600 function has two connectors. Only an HVD device
	 * cannot be connected to either connector. An LVD device or SE
	 * device may be connected to either connector. If an SE device is
	 * connected, then at most Ultra speed (20 MHz) can be used on both
	 * connectors.
	 *
	 * If an HVD device is attached, return an error.
	 */
	if (scsi_cfg1 & HVD) {
		asc_dvc->err_code |= ASC_IERR_HVD_DEVICE;
		return ADV_ERROR;
	}

	/*
	 * Each function in the ASC-38C1600 uses only the SE cable detect and
	 * termination because there are two connectors for each function.
	 * Each function may use either LVD or SE mode. The corresponding SE
	 * automatic termination control EEPROM bits are used for each
	 * function. Each function has its own EEPROM. If SE automatic
	 * control is enabled for the function, then set the termination
	 * value based on a table listed in a_condor.h.
	 *
	 * If manual termination is specified in the EEPROM for the function,
	 * then 'termination' was set-up in AscInitFrom38C1600EEPROM() and is
	 * ready to be 'ored' into SCSI_CFG1.
	 */
	if ((asc_dvc->cfg->termination & TERM_SE) == 0) {
		struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc);

		/* SE automatic termination control is enabled. */
		switch (scsi_cfg1 & C_DET_SE) {
			/* TERM_SE_HI: on, TERM_SE_LO: on */
		case 0x1:
		case 0x2:
		case 0x3:
			asc_dvc->cfg->termination |= TERM_SE;
			break;

		case 0x0:
			if (PCI_FUNC(pdev->devfn) == 0) {
				/* Function 0 - TERM_SE_HI: off, TERM_SE_LO: off */
			} else {
				/* Function 1 - TERM_SE_HI: on, TERM_SE_LO: off */
				asc_dvc->cfg->termination |= TERM_SE_HI;
			}
			break;
		}
	}

	/*
	 * Clear any set TERM_SE bits.
	 */
	scsi_cfg1 &= ~TERM_SE;

	/*
	 * Invert the TERM_SE bits and then set 'scsi_cfg1'.
	 */
	scsi_cfg1 |= (~asc_dvc->cfg->termination & TERM_SE);

	/*
	 * Clear Big Endian and Terminator Polarity bits and set possibly
	 * modified termination control bits in the Microcode SCSI_CFG1
	 * Register Value.
	 *
	 * Big Endian bit is not used even on big endian machines.
	 */
	scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL);

	/*
	 * Set SCSI_CFG1 Microcode Default Value
	 *
	 * Set possibly modified termination control bits in the Microcode
	 * SCSI_CFG1 Register Value.
	 *
	 * The microcode will set the SCSI_CFG1 register using this value
	 * after it is started below.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1);

	/*
	 * Set MEM_CFG Microcode Default Value
	 *
	 * The microcode will set the MEM_CFG register using this value
	 * after it is started below.
	 *
	 * MEM_CFG may be accessed as a word or byte, but only bits 0-7
	 * are defined.
	 *
	 * ASC-38C1600 has 32KB internal memory.
	 *
	 * XXX - Since ASC38C1600 Rev.3 has a Local RAM failure issue, a
	 * special 16K Adv Library and Microcode version is used for now.
	 * After the issue is resolved, we should turn back to the 32K
	 * support. Both a_condor.h and mcode.sas files also need to be
	 * updated.
	 *
	 * AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
	 *  BIOS_EN | RAM_SZ_32KB);
	 */
	AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
			 BIOS_EN | RAM_SZ_16KB);

	/*
	 * Set SEL_MASK Microcode Default Value
	 *
	 * The microcode will set the SEL_MASK register using this value
	 * after it is started below.
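	 *
	 * For example, a (hypothetical) chip SCSI ID of 7 would yield a
	 * SEL_MASK of ADV_TID_TO_TIDMASK(7) == 0x0080.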
*/ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); if (!asc_dvc->icq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } /* * Set RISC ICQ physical address start value. Initialize the * COMMA register to the same value otherwise the RISC will * prematurely detect a command is available. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); AdvWriteDWordRegister(iop_base, IOPDW_COMMA, le32_to_cpu(asc_dvc->icq_sp->carr_pa)); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); if (!asc_dvc->irq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } /* * Set RISC IRQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * per TID microcode operating variables. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ASC_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Reset chip and SCSI Bus. * * Return Value: * ADV_TRUE(1) - Chip re-initialization and SCSI Bus Reset successful. * ADV_FALSE(0) - Chip re-initialization and SCSI Bus Reset failure. */ static int AdvResetChipAndSB(ADV_DVC_VAR *asc_dvc) { int status; ushort wdtr_able, sdtr_able, tagqng_able; ushort ppr_able = 0; uchar tid, max_cmd[ADV_MAX_TID + 1]; AdvPortAddr iop_base; ushort bios_sig; iop_base = asc_dvc->iop_base; /* * Save current per TID negotiated values. */ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); } AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } /* * Force the AdvInitAsc3550/38C0800Driver() function to * perform a SCSI Bus Reset by clearing the BIOS signature word. * The initialization functions assumes a SCSI Bus Reset is not * needed if the BIOS signature word is present. */ AdvReadWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig); AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, 0); /* * Stop chip and reset it. 
*/ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_STOP); AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET); mdelay(100); AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_WR_IO_REG); /* * Reset Adv Library error code, if any, and try * re-initializing the chip. */ asc_dvc->err_code = 0; if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { status = AdvInitAsc38C1600Driver(asc_dvc); } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { status = AdvInitAsc38C0800Driver(asc_dvc); } else { status = AdvInitAsc3550Driver(asc_dvc); } /* Translate initialization return value to status value. */ if (status == 0) { status = ADV_TRUE; } else { status = ADV_FALSE; } /* * Restore the BIOS signature word. */ AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig); /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); } AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } return status; } /* * adv_async_callback() - Adv Library asynchronous event callback function. */ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code) { switch (code) { case ADV_ASYNC_SCSI_BUS_RESET_DET: /* * The firmware detected a SCSI Bus reset. */ ASC_DBG(0, "ADV_ASYNC_SCSI_BUS_RESET_DET\n"); break; case ADV_ASYNC_RDMA_FAILURE: /* * Handle RDMA failure by resetting the SCSI Bus and * possibly the chip if it is unresponsive. Log the error * with a unique code. */ ASC_DBG(0, "ADV_ASYNC_RDMA_FAILURE\n"); AdvResetChipAndSB(adv_dvc_varp); break; case ADV_HOST_SCSI_BUS_RESET: /* * Host generated SCSI bus reset occurred. */ ASC_DBG(0, "ADV_HOST_SCSI_BUS_RESET\n"); break; default: ASC_DBG(0, "unknown code 0x%x\n", code); break; } } /* * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR(). * * Callback function for the Wide SCSI Adv Library. */ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) { struct asc_board *boardp = adv_dvc_varp->drv_ptr; adv_req_t *reqp; adv_sgblk_t *sgblkp; struct scsi_cmnd *scp; u32 resid_cnt; dma_addr_t sense_addr; ASC_DBG(1, "adv_dvc_varp 0x%p, scsiqp 0x%p\n", adv_dvc_varp, scsiqp); ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); /* * Get the adv_req_t structure for the command that has been * completed. The adv_req_t structure actually contains the * completed ADV_SCSI_REQ_Q structure. */ scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag); ASC_DBG(1, "scp 0x%p\n", scp); if (scp == NULL) { ASC_PRINT ("adv_isr_callback: scp is NULL; adv_req_t dropped.\n"); return; } ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); reqp = (adv_req_t *)scp->host_scribble; ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp); if (reqp == NULL) { ASC_PRINT("adv_isr_callback: reqp is NULL\n"); return; } /* * Remove backreferences to avoid duplicate * command completions. */ scp->host_scribble = NULL; reqp->cmndp = NULL; ASC_STATS(boardp->shost, callback); ASC_DBG(1, "shost 0x%p\n", boardp->shost); sense_addr = le32_to_cpu(scsiqp->sense_addr); dma_unmap_single(boardp->dev, sense_addr, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* * 'done_status' contains the command's ending status. */ scp->result = 0; switch (scsiqp->done_status) { case QD_NO_ERROR: ASC_DBG(2, "QD_NO_ERROR\n"); /* * Check for an underrun condition. 
		 *
		 * If there was no error and an underrun condition, then
		 * return the number of underrun bytes.
		 */
		resid_cnt = le32_to_cpu(scsiqp->data_cnt);
		if (scsi_bufflen(scp) != 0 && resid_cnt != 0 &&
		    resid_cnt <= scsi_bufflen(scp)) {
			ASC_DBG(1, "underrun condition %lu bytes\n",
				(ulong)resid_cnt);
			scsi_set_resid(scp, resid_cnt);
		}
		break;

	case QD_WITH_ERROR:
		ASC_DBG(2, "QD_WITH_ERROR\n");
		switch (scsiqp->host_status) {
		case QHSTA_NO_ERROR:
			set_status_byte(scp, scsiqp->scsi_status);
			if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
				ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
				ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
						  SCSI_SENSE_BUFFERSIZE);
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
			set_host_byte(scp, DID_BAD_TARGET);
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
		set_status_byte(scp, scsiqp->scsi_status);
		set_host_byte(scp, DID_ABORT);
		break;

	default:
		ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
		set_status_byte(scp, scsiqp->scsi_status);
		set_host_byte(scp, DID_ERROR);
		break;
	}

	/*
	 * If the 'init_tidmask' bit isn't already set for the target and the
	 * current request finished normally, then set the bit for the target
	 * to indicate that a device is present.
	 */
	if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
	    scsiqp->done_status == QD_NO_ERROR &&
	    scsiqp->host_status == QHSTA_NO_ERROR) {
		boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
	}

	asc_scsi_done(scp);

	/*
	 * Free all 'adv_sgblk_t' structures allocated for the request.
	 */
	while ((sgblkp = reqp->sgblkp) != NULL) {
		/* Remove 'sgblkp' from the request list. */
		reqp->sgblkp = sgblkp->next_sgblkp;
		dma_pool_free(boardp->adv_sgblk_pool, sgblkp,
			      sgblkp->sg_addr);
	}

	ASC_DBG(1, "done\n");
}

/*
 * Adv Library Interrupt Service Routine
 *
 * This function is called by a driver's interrupt service routine.
 * The function disables and re-enables interrupts.
 *
 * When a microcode idle command is completed, the ADV_DVC_VAR
 * 'idle_cmd_done' field is set to ADV_TRUE.
 *
 * Note: AdvISR() can be called when interrupts are disabled or even
 * when there is no hardware interrupt condition present. It will
 * always check for completed idle commands and microcode requests.
 * This is an important feature that shouldn't be changed because it
 * allows commands to be completed from polling mode loops.
 *
 * Return:
 *   ADV_TRUE(1) - interrupt was pending
 *   ADV_FALSE(0) - no interrupt was pending
 */
static int AdvISR(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	uchar int_stat;
	ADV_CARR_T *free_carrp;
	__le32 irq_next_vpa;
	ADV_SCSI_REQ_Q *scsiq;
	adv_req_t *reqp;

	iop_base = asc_dvc->iop_base;

	/* Reading the register clears the interrupt. */
	int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG);

	if ((int_stat & (ADV_INTR_STATUS_INTRA | ADV_INTR_STATUS_INTRB |
			 ADV_INTR_STATUS_INTRC)) == 0) {
		return ADV_FALSE;
	}

	/*
	 * Notify the driver of an asynchronous microcode condition by
	 * calling the adv_async_callback function. The function
	 * is passed the microcode ASC_MC_INTRB_CODE byte value.
*/ if (int_stat & ADV_INTR_STATUS_INTRB) { uchar intrb_code; AdvReadByteLram(iop_base, ASC_MC_INTRB_CODE, intrb_code); if (asc_dvc->chip_type == ADV_CHIP_ASC3550 || asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { if (intrb_code == ADV_ASYNC_CARRIER_READY_FAILURE && asc_dvc->carr_pending_cnt != 0) { AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A); if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP); } } } adv_async_callback(asc_dvc, intrb_code); } /* * Check if the IRQ stopper carrier contains a completed request. */ while (((irq_next_vpa = le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ADV_RQ_DONE) != 0) { /* * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure. * The RISC will have set 'areq_vpa' to a virtual address. * * The firmware will have copied the ADV_SCSI_REQ_Q.scsiq_ptr * field to the carrier ADV_CARR_T.areq_vpa field. The conversion * below complements the conversion of ADV_SCSI_REQ_Q.scsiq_ptr' * in AdvExeScsiQueue(). */ u32 pa_offset = le32_to_cpu(asc_dvc->irq_sp->areq_vpa); ASC_DBG(1, "irq_sp %p areq_vpa %u\n", asc_dvc->irq_sp, pa_offset); reqp = adv_get_reqp(asc_dvc, pa_offset); scsiq = &reqp->scsi_req_q; /* * Request finished with good status and the queue was not * DMAed to host memory by the firmware. Set all status fields * to indicate good status. */ if ((irq_next_vpa & ADV_RQ_GOOD) != 0) { scsiq->done_status = QD_NO_ERROR; scsiq->host_status = scsiq->scsi_status = 0; scsiq->data_cnt = 0L; } /* * Advance the stopper pointer to the next carrier * ignoring the lower four bits. Free the previous * stopper carrier. */ free_carrp = asc_dvc->irq_sp; asc_dvc->irq_sp = adv_get_carrier(asc_dvc, ADV_GET_CARRP(irq_next_vpa)); free_carrp->next_vpa = asc_dvc->carr_freelist->carr_va; asc_dvc->carr_freelist = free_carrp; asc_dvc->carr_pending_cnt--; /* * Clear request microcode control flag. */ scsiq->cntl = 0; /* * Notify the driver of the completed request by passing * the ADV_SCSI_REQ_Q pointer to its callback function. */ adv_isr_callback(asc_dvc, scsiq); /* * Note: After the driver callback function is called, 'scsiq' * can no longer be referenced. * * Fall through and continue processing other completed * requests... 
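		 *
		 * The loop exits once the new stopper carrier's 'next_vpa'
		 * no longer has the ADV_RQ_DONE bit set.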
*/ } return ADV_TRUE; } static int AscSetLibErrorCode(ASC_DVC_VAR *asc_dvc, ushort err_code) { if (asc_dvc->err_code == 0) { asc_dvc->err_code = err_code; AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W, err_code); } return err_code; } static void AscAckInterrupt(PortAddr iop_base) { uchar host_flag; uchar risc_flag; ushort loop; loop = 0; do { risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B); if (loop++ > 0x7FFF) { break; } } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0); host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT); AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, (uchar)(host_flag | ASC_HOST_FLAG_ACK_INT)); AscSetChipStatus(iop_base, CIW_INT_ACK); loop = 0; while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) { AscSetChipStatus(iop_base, CIW_INT_ACK); if (loop++ > 3) { break; } } AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); } static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time) { const uchar *period_table; int max_index; int min_index; int i; period_table = asc_dvc->sdtr_period_tbl; max_index = (int)asc_dvc->max_sdtr_index; min_index = (int)asc_dvc->min_sdtr_index; if ((syn_time <= period_table[max_index])) { for (i = min_index; i < (max_index - 1); i++) { if (syn_time <= period_table[i]) { return (uchar)i; } } return (uchar)max_index; } else { return (uchar)(max_index + 1); } } static uchar AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset) { PortAddr iop_base = asc_dvc->iop_base; uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period); EXT_MSG sdtr_buf = { .msg_type = EXTENDED_MESSAGE, .msg_len = MS_SDTR_LEN, .msg_req = EXTENDED_SDTR, .xfer_period = sdtr_period, .req_ack_offset = sdtr_offset, }; sdtr_offset &= ASC_SYN_MAX_OFFSET; if (sdtr_period_index <= asc_dvc->max_sdtr_index) { AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&sdtr_buf, sizeof(EXT_MSG) >> 1); return ((sdtr_period_index << 4) | sdtr_offset); } else { sdtr_buf.req_ack_offset = 0; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&sdtr_buf, sizeof(EXT_MSG) >> 1); return 0; } } static uchar AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset) { uchar byte; uchar sdtr_period_ix; sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period); if (sdtr_period_ix > asc_dvc->max_sdtr_index) return 0xFF; byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET); return byte; } static bool AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) { ASC_SCSI_BIT_ID_TYPE org_id; int i; bool sta = true; AscSetBank(iop_base, 1); org_id = AscReadChipDvcID(iop_base); for (i = 0; i <= ASC_MAX_TID; i++) { if (org_id == (0x01 << i)) break; } org_id = (ASC_SCSI_BIT_ID_TYPE) i; AscWriteChipDvcID(iop_base, id); if (AscReadChipDvcID(iop_base) == (0x01 << id)) { AscSetBank(iop_base, 0); AscSetChipSyn(iop_base, sdtr_data); if (AscGetChipSyn(iop_base) != sdtr_data) { sta = false; } } else { sta = false; } AscSetBank(iop_base, 1); AscWriteChipDvcID(iop_base, org_id); AscSetBank(iop_base, 0); return (sta); } static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no) { AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data); } static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) { EXT_MSG ext_msg; EXT_MSG out_msg; ushort halt_q_addr; bool sdtr_accept; ushort int_halt_code; ASC_SCSI_BIT_ID_TYPE scsi_busy; ASC_SCSI_BIT_ID_TYPE target_id; PortAddr iop_base; uchar tag_code; uchar q_status; uchar halt_qp; uchar 
sdtr_data; uchar target_ix; uchar q_cntl, tid_no; uchar cur_dvc_qng; uchar asyn_sdtr; uchar scsi_status; struct asc_board *boardp; BUG_ON(!asc_dvc->drv_ptr); boardp = asc_dvc->drv_ptr; iop_base = asc_dvc->iop_base; int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W); halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B); halt_q_addr = ASC_QNO_TO_QADDR(halt_qp); target_ix = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_TARGET_IX)); q_cntl = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL)); tid_no = ASC_TIX_TO_TID(target_ix); target_id = (uchar)ASC_TID_TO_TARGET_ID(tid_no); if (asc_dvc->pci_fix_asyn_xfer & target_id) { asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB; } else { asyn_sdtr = 0; } if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, 0, tid_no); boardp->sdtr_data[tid_no] = 0; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } else if (int_halt_code == ASC_HALT_EXTMSG_IN) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGIN_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_SDTR && ext_msg.msg_len == MS_SDTR_LEN) { sdtr_accept = true; if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) { sdtr_accept = false; ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET; } if ((ext_msg.xfer_period < asc_dvc->sdtr_period_tbl[asc_dvc->min_sdtr_index]) || (ext_msg.xfer_period > asc_dvc->sdtr_period_tbl[asc_dvc-> max_sdtr_index])) { sdtr_accept = false; ext_msg.xfer_period = asc_dvc->sdtr_period_tbl[asc_dvc-> min_sdtr_index]; } if (sdtr_accept) { sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg.req_ack_offset); if (sdtr_data == 0xFF) { q_cntl |= QC_MSG_OUT; asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } } if (ext_msg.req_ack_offset == 0) { q_cntl &= ~QC_MSG_OUT; asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); } else { if (sdtr_accept && (q_cntl & QC_MSG_OUT)) { q_cntl &= ~QC_MSG_OUT; asc_dvc->sdtr_done |= target_id; asc_dvc->init_sdtr |= target_id; asc_dvc->pci_fix_asyn_xfer &= ~target_id; sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg. req_ack_offset); AscSetChipSDTR(iop_base, sdtr_data, tid_no); boardp->sdtr_data[tid_no] = sdtr_data; } else { q_cntl |= QC_MSG_OUT; AscMsgOutSDTR(asc_dvc, ext_msg.xfer_period, ext_msg.req_ack_offset); asc_dvc->pci_fix_asyn_xfer &= ~target_id; sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg. 
req_ack_offset); AscSetChipSDTR(iop_base, sdtr_data, tid_no); boardp->sdtr_data[tid_no] = sdtr_data; asc_dvc->sdtr_done |= target_id; asc_dvc->init_sdtr |= target_id; } } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } else if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_WDTR && ext_msg.msg_len == MS_WDTR_LEN) { ext_msg.wdtr_width = 0; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); q_cntl |= QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } else { ext_msg.msg_type = MESSAGE_REJECT; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); q_cntl |= QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } } else if (int_halt_code == ASC_HALT_CHK_CONDITION) { q_cntl |= QC_REQ_SENSE; if ((asc_dvc->init_sdtr & target_id) != 0) { asc_dvc->sdtr_done &= ~target_id; sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); q_cntl |= QC_MSG_OUT; AscMsgOutSDTR(asc_dvc, asc_dvc-> sdtr_period_tbl[(sdtr_data >> 4) & (uchar)(asc_dvc-> max_sdtr_index - 1)], (uchar)(sdtr_data & (uchar) ASC_SYN_MAX_OFFSET)); } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); tag_code = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE)); tag_code &= 0xDC; if ((asc_dvc->pci_fix_asyn_xfer & target_id) && !(asc_dvc->pci_fix_asyn_xfer_always & target_id) ) { tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX); } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_TAG_CODE), tag_code); q_status = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS)); q_status |= (QS_READY | QS_BUSY); AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_STATUS), q_status); scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); scsi_busy &= ~target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&out_msg, sizeof(EXT_MSG) >> 1); if ((out_msg.msg_type == EXTENDED_MESSAGE) && (out_msg.msg_len == MS_SDTR_LEN) && (out_msg.msg_req == EXTENDED_SDTR)) { asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } q_cntl &= ~QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) { scsi_status = AscReadLramByte(iop_base, (ushort)((ushort)halt_q_addr + (ushort) ASC_SCSIQ_SCSI_STATUS)); cur_dvc_qng = AscReadLramByte(iop_base, (ushort)((ushort)ASC_QADR_BEG + (ushort)target_ix)); if ((cur_dvc_qng > 0) && (asc_dvc->cur_dvc_qng[tid_no] > 0)) { scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); scsi_busy |= target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); asc_dvc->queue_full_or_busy |= target_id; if (scsi_status == SAM_STAT_TASK_SET_FULL) { if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) { cur_dvc_qng -= 1; asc_dvc->max_dvc_qng[tid_no] = 
cur_dvc_qng; AscWriteLramByte(iop_base, (ushort)((ushort) ASCV_MAX_DVC_QNG_BEG + (ushort) tid_no), cur_dvc_qng); /* * Set the device queue depth to the * number of active requests when the * QUEUE FULL condition was encountered. */ boardp->queue_full |= target_id; boardp->queue_full_cnt[tid_no] = cur_dvc_qng; } } } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return; } return; } /* * void * DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) * * Calling/Exit State: * none * * Description: * Input an ASC_QDONE_INFO structure from the chip */ static void DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) { int i; ushort word; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 2 * words; i += 2) { if (i == 10) { continue; } word = inpw(iop_base + IOP_RAM_DATA); inbuf[i] = word & 0xff; inbuf[i + 1] = (word >> 8) & 0xff; } ASC_DBG_PRT_HEX(2, "DvcGetQinfo", inbuf, 2 * words); } static uchar _AscCopyLramScsiDoneQ(PortAddr iop_base, ushort q_addr, ASC_QDONE_INFO *scsiq, unsigned int max_dma_count) { ushort _val; uchar sg_queue_cnt; DvcGetQinfo(iop_base, q_addr + ASC_SCSIQ_DONE_INFO_BEG, (uchar *)scsiq, (sizeof(ASC_SCSIQ_2) + sizeof(ASC_SCSIQ_3)) / 2); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS)); scsiq->q_status = (uchar)_val; scsiq->q_no = (uchar)(_val >> 8); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_CNTL)); scsiq->cntl = (uchar)_val; sg_queue_cnt = (uchar)(_val >> 8); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_SENSE_LEN)); scsiq->sense_len = (uchar)_val; scsiq->extra_bytes = (uchar)(_val >> 8); /* * Read high word of remain bytes from alternate location. */ scsiq->remain_bytes = (((u32)AscReadLramWord(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_W_ALT_DC1))) << 16); /* * Read low word of remain bytes from original location. */ scsiq->remain_bytes += AscReadLramWord(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT)); scsiq->remain_bytes &= max_dma_count; return sg_queue_cnt; } /* * asc_isr_callback() - Second Level Interrupt Handler called by AscISR(). * * Interrupt callback function for the Narrow SCSI Asc Library. */ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep) { struct asc_board *boardp = asc_dvc_varp->drv_ptr; u32 srb_tag; struct scsi_cmnd *scp; ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep); ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep); /* * Decrease the srb_tag by 1 to find the SCSI command */ srb_tag = qdonep->d2.srb_tag - 1; scp = scsi_host_find_tag(boardp->shost, srb_tag); if (!scp) return; ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); ASC_STATS(boardp->shost, callback); dma_unmap_single(boardp->dev, advansys_cmd(scp)->dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* * 'qdonep' contains the command's ending status. */ scp->result = 0; switch (qdonep->d3.done_stat) { case QD_NO_ERROR: ASC_DBG(2, "QD_NO_ERROR\n"); /* * Check for an underrun condition. * * If there was no error and an underrun condition, then * return the number of underrun bytes. 
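 *
 * 'remain_bytes' is only treated as a residual when it is non-zero
 * and no larger than the request length; it is then reported to the
 * SCSI mid-layer with scsi_set_resid().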
*/ if (scsi_bufflen(scp) != 0 && qdonep->remain_bytes != 0 && qdonep->remain_bytes <= scsi_bufflen(scp)) { ASC_DBG(1, "underrun condition %u bytes\n", (unsigned)qdonep->remain_bytes); scsi_set_resid(scp, qdonep->remain_bytes); } break; case QD_WITH_ERROR: ASC_DBG(2, "QD_WITH_ERROR\n"); switch (qdonep->d3.host_stat) { case QHSTA_NO_ERROR: set_status_byte(scp, qdonep->d3.scsi_stat); if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) { ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); ASC_DBG_PRT_SENSE(2, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE); } break; default: /* QHSTA error occurred */ ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat); set_host_byte(scp, DID_BAD_TARGET); break; } break; case QD_ABORTED_BY_HOST: ASC_DBG(1, "QD_ABORTED_BY_HOST\n"); set_status_byte(scp, qdonep->d3.scsi_stat); set_host_byte(scp, DID_ABORT); break; default: ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat); set_status_byte(scp, qdonep->d3.scsi_stat); set_host_byte(scp, DID_ERROR); break; } /* * If the 'init_tidmask' bit isn't already set for the target and the * current request finished normally, then set the bit for the target * to indicate that a device is present. */ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 && qdonep->d3.done_stat == QD_NO_ERROR && qdonep->d3.host_stat == QHSTA_NO_ERROR) { boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id); } asc_scsi_done(scp); } static int AscIsrQDone(ASC_DVC_VAR *asc_dvc) { uchar next_qp; uchar n_q_used; uchar sg_list_qp; uchar sg_queue_cnt; uchar q_cnt; uchar done_q_tail; uchar tid_no; ASC_SCSI_BIT_ID_TYPE scsi_busy; ASC_SCSI_BIT_ID_TYPE target_id; PortAddr iop_base; ushort q_addr; ushort sg_q_addr; uchar cur_target_qng; ASC_QDONE_INFO scsiq_buf; ASC_QDONE_INFO *scsiq; bool false_overrun; iop_base = asc_dvc->iop_base; n_q_used = 1; scsiq = (ASC_QDONE_INFO *)&scsiq_buf; done_q_tail = (uchar)AscGetVarDoneQTail(iop_base); q_addr = ASC_QNO_TO_QADDR(done_q_tail); next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_FWD)); if (next_qp != ASC_QLINK_END) { AscPutVarDoneQTail(iop_base, next_qp); q_addr = ASC_QNO_TO_QADDR(next_qp); sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count); AscWriteLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS), (uchar)(scsiq-> q_status & (uchar)~(QS_READY | QS_ABORTED))); tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix); target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix); if ((scsiq->cntl & QC_SG_HEAD) != 0) { sg_q_addr = q_addr; sg_list_qp = next_qp; for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) { sg_list_qp = AscReadLramByte(iop_base, (ushort)(sg_q_addr + (ushort) ASC_SCSIQ_B_FWD)); sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp); if (sg_list_qp == ASC_QLINK_END) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_Q_LINKS); scsiq->d3.done_stat = QD_WITH_ERROR; scsiq->d3.host_stat = QHSTA_D_QDONE_SG_LIST_CORRUPTED; goto FATAL_ERR_QDONE; } AscWriteLramByte(iop_base, (ushort)(sg_q_addr + (ushort) ASC_SCSIQ_B_STATUS), QS_FREE); } n_q_used = sg_queue_cnt + 1; AscPutVarDoneQTail(iop_base, sg_list_qp); } if (asc_dvc->queue_full_or_busy & target_id) { cur_target_qng = AscReadLramByte(iop_base, (ushort)((ushort) ASC_QADR_BEG + (ushort) scsiq->d2. 
target_ix)); if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) { scsi_busy = AscReadLramByte(iop_base, (ushort) ASCV_SCSIBUSY_B); scsi_busy &= ~target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); asc_dvc->queue_full_or_busy &= ~target_id; } } if (asc_dvc->cur_total_qng >= n_q_used) { asc_dvc->cur_total_qng -= n_q_used; if (asc_dvc->cur_dvc_qng[tid_no] != 0) { asc_dvc->cur_dvc_qng[tid_no]--; } } else { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG); scsiq->d3.done_stat = QD_WITH_ERROR; goto FATAL_ERR_QDONE; } if ((scsiq->d2.srb_tag == 0UL) || ((scsiq->q_status & QS_ABORTED) != 0)) { return (0x11); } else if (scsiq->q_status == QS_DONE) { /* * This is also curious. * false_overrun will _always_ be set to 'false' */ false_overrun = false; if (scsiq->extra_bytes != 0) { scsiq->remain_bytes += scsiq->extra_bytes; } if (scsiq->d3.done_stat == QD_WITH_ERROR) { if (scsiq->d3.host_stat == QHSTA_M_DATA_OVER_RUN) { if ((scsiq-> cntl & (QC_DATA_IN | QC_DATA_OUT)) == 0) { scsiq->d3.done_stat = QD_NO_ERROR; scsiq->d3.host_stat = QHSTA_NO_ERROR; } else if (false_overrun) { scsiq->d3.done_stat = QD_NO_ERROR; scsiq->d3.host_stat = QHSTA_NO_ERROR; } } else if (scsiq->d3.host_stat == QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) { AscStopChip(iop_base); AscSetChipControl(iop_base, (uchar)(CC_SCSI_RESET | CC_HALT)); udelay(60); AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); AscSetChipControl(iop_base, 0); } } if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { asc_isr_callback(asc_dvc, scsiq); } else { if ((AscReadLramByte(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_CDB_BEG)) == START_STOP)) { asc_dvc->unit_not_ready &= ~target_id; if (scsiq->d3.done_stat != QD_NO_ERROR) { asc_dvc->start_motor &= ~target_id; } } } return (1); } else { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS); FATAL_ERR_QDONE: if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { asc_isr_callback(asc_dvc, scsiq); } return (0x80); } } return (0); } static int AscISR(ASC_DVC_VAR *asc_dvc) { ASC_CS_TYPE chipstat; PortAddr iop_base; ushort saved_ram_addr; uchar ctrl_reg; uchar saved_ctrl_reg; int int_pending; int status; uchar host_flag; iop_base = asc_dvc->iop_base; int_pending = ASC_FALSE; if (AscIsIntPending(iop_base) == 0) return int_pending; if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) { return ASC_ERROR; } if (asc_dvc->in_critical_cnt != 0) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL); return ASC_ERROR; } if (asc_dvc->is_in_int) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY); return ASC_ERROR; } asc_dvc->is_in_int = true; ctrl_reg = AscGetChipControl(iop_base); saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET | CC_SINGLE_STEP | CC_DIAG | CC_TEST)); chipstat = AscGetChipStatus(iop_base); if (chipstat & CSW_SCSI_RESET_LATCH) { if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) { int i = 10; int_pending = ASC_TRUE; asc_dvc->sdtr_done = 0; saved_ctrl_reg &= (uchar)(~CC_HALT); while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) { mdelay(100); } AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT)); AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); chipstat = AscGetChipStatus(iop_base); } } saved_ram_addr = AscGetChipLramAddr(iop_base); host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B) & (uchar)(~ASC_HOST_FLAG_IN_ISR); AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR)); if ((chipstat & 
CSW_INT_PENDING) || (int_pending)) { AscAckInterrupt(iop_base); int_pending = ASC_TRUE; if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) { AscIsrChipHalted(asc_dvc); saved_ctrl_reg &= (uchar)(~CC_HALT); } else { if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) { while (((status = AscIsrQDone(asc_dvc)) & 0x01) != 0) { } } else { do { if ((status = AscIsrQDone(asc_dvc)) == 1) { break; } } while (status == 0x11); } if ((status & 0x80) != 0) int_pending = ASC_ERROR; } } AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); AscSetChipLramAddr(iop_base, saved_ram_addr); AscSetChipControl(iop_base, saved_ctrl_reg); asc_dvc->is_in_int = false; return int_pending; } /* * advansys_reset() * * Reset the host associated with the command 'scp'. * * This function runs its own thread. Interrupts must be blocked but * sleeping is allowed and no locking other than for host structures is * required. Returns SUCCESS or FAILED. */ static int advansys_reset(struct scsi_cmnd *scp) { struct Scsi_Host *shost = scp->device->host; struct asc_board *boardp = shost_priv(shost); unsigned long flags; int status; int ret = SUCCESS; ASC_DBG(1, "0x%p\n", scp); ASC_STATS(shost, reset); scmd_printk(KERN_INFO, scp, "SCSI host reset started...\n"); if (ASC_NARROW_BOARD(boardp)) { ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; /* Reset the chip and SCSI bus. */ ASC_DBG(1, "before AscInitAsc1000Driver()\n"); status = AscInitAsc1000Driver(asc_dvc); /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ if (asc_dvc->err_code || !asc_dvc->overrun_dma) { scmd_printk(KERN_INFO, scp, "SCSI host reset error: " "0x%x, status: 0x%x\n", asc_dvc->err_code, status); ret = FAILED; } else if (status) { scmd_printk(KERN_INFO, scp, "SCSI host reset warning: " "0x%x\n", status); } else { scmd_printk(KERN_INFO, scp, "SCSI host reset " "successful\n"); } ASC_DBG(1, "after AscInitAsc1000Driver()\n"); } else { /* * If the suggest reset bus flags are set, then reset the bus. * Otherwise only reset the device. */ ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; /* * Reset the chip and SCSI bus. */ ASC_DBG(1, "before AdvResetChipAndSB()\n"); switch (AdvResetChipAndSB(adv_dvc)) { case ASC_TRUE: scmd_printk(KERN_INFO, scp, "SCSI host reset " "successful\n"); break; case ASC_FALSE: default: scmd_printk(KERN_INFO, scp, "SCSI host reset error\n"); ret = FAILED; break; } spin_lock_irqsave(shost->host_lock, flags); AdvISR(adv_dvc); spin_unlock_irqrestore(shost->host_lock, flags); } ASC_DBG(1, "ret %d\n", ret); return ret; } /* * advansys_biosparam() * * Translate disk drive geometry if the "BIOS greater than 1 GB" * support is enabled for a drive. * * ip (information pointer) is an int array with the following definition: * ip[0]: heads * ip[1]: sectors * ip[2]: cylinders */ static int advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int ip[]) { struct asc_board *boardp = shost_priv(sdev->host); ASC_DBG(1, "begin\n"); ASC_STATS(sdev->host, biosparam); if (ASC_NARROW_BOARD(boardp)) { if ((boardp->dvc_var.asc_dvc_var.dvc_cntl & ASC_CNTL_BIOS_GT_1GB) && capacity > 0x200000) { ip[0] = 255; ip[1] = 63; } else { ip[0] = 64; ip[1] = 32; } } else { if ((boardp->dvc_var.adv_dvc_var.bios_ctrl & BIOS_CTRL_EXTENDED_XLAT) && capacity > 0x200000) { ip[0] = 255; ip[1] = 63; } else { ip[0] = 64; ip[1] = 32; } } ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); ASC_DBG(1, "end\n"); return 0; } /* * First-level interrupt handler. * * 'dev_id' is a pointer to the interrupting adapter's Scsi_Host. 
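 *
 * The handler takes the host lock and dispatches to AscISR() for
 * narrow boards or AdvISR() for wide boards, returning IRQ_HANDLED
 * only when the board actually had work pending.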
*/ static irqreturn_t advansys_interrupt(int irq, void *dev_id) { struct Scsi_Host *shost = dev_id; struct asc_board *boardp = shost_priv(shost); irqreturn_t result = IRQ_NONE; unsigned long flags; ASC_DBG(2, "boardp 0x%p\n", boardp); spin_lock_irqsave(shost->host_lock, flags); if (ASC_NARROW_BOARD(boardp)) { if (AscIsIntPending(shost->io_port)) { result = IRQ_HANDLED; ASC_STATS(shost, interrupt); ASC_DBG(1, "before AscISR()\n"); AscISR(&boardp->dvc_var.asc_dvc_var); } } else { ASC_DBG(1, "before AdvISR()\n"); if (AdvISR(&boardp->dvc_var.adv_dvc_var)) { result = IRQ_HANDLED; ASC_STATS(shost, interrupt); } } spin_unlock_irqrestore(shost->host_lock, flags); ASC_DBG(1, "end\n"); return result; } static bool AscHostReqRiscHalt(PortAddr iop_base) { int count = 0; bool sta = false; uchar saved_stop_code; if (AscIsChipHalted(iop_base)) return true; saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B); AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP); do { if (AscIsChipHalted(iop_base)) { sta = true; break; } mdelay(100); } while (count++ < 20); AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code); return sta; } static bool AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data) { bool sta = false; if (AscHostReqRiscHalt(iop_base)) { sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); AscStartChip(iop_base); } return sta; } static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev) { char type = sdev->type; ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id; if (!(asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN)) return; if (asc_dvc->init_sdtr & tid_bits) return; if ((type == TYPE_ROM) && (strncmp(sdev->vendor, "HP ", 3) == 0)) asc_dvc->pci_fix_asyn_xfer_always |= tid_bits; asc_dvc->pci_fix_asyn_xfer |= tid_bits; if ((type == TYPE_PROCESSOR) || (type == TYPE_SCANNER) || (type == TYPE_ROM) || (type == TYPE_TAPE)) asc_dvc->pci_fix_asyn_xfer &= ~tid_bits; if (asc_dvc->pci_fix_asyn_xfer & tid_bits) AscSetRunChipSynRegAtID(asc_dvc->iop_base, sdev->id, ASYN_SDTR_DATA_FIX_PCI_REV_AB); } static void advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc) { ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id; ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng; if (sdev->lun == 0) { ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr; if ((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) { asc_dvc->init_sdtr |= tid_bit; } else { asc_dvc->init_sdtr &= ~tid_bit; } if (orig_init_sdtr != asc_dvc->init_sdtr) AscAsyncFix(asc_dvc, sdev); } if (sdev->tagged_supported) { if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) { if (sdev->lun == 0) { asc_dvc->cfg->can_tagged_qng |= tid_bit; asc_dvc->use_tagged_qng |= tid_bit; } scsi_change_queue_depth(sdev, asc_dvc->max_dvc_qng[sdev->id]); } } else { if (sdev->lun == 0) { asc_dvc->cfg->can_tagged_qng &= ~tid_bit; asc_dvc->use_tagged_qng &= ~tid_bit; } } if ((sdev->lun == 0) && (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) { AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B, asc_dvc->cfg->disc_enable); AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B, asc_dvc->use_tagged_qng); AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B, asc_dvc->cfg->can_tagged_qng); asc_dvc->max_dvc_qng[sdev->id] = asc_dvc->cfg->max_tag_qng[sdev->id]; AscWriteLramByte(asc_dvc->iop_base, (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id), asc_dvc->max_dvc_qng[sdev->id]); } } /* * Wide Transfers * * If the EEPROM enabled WDTR for the device and the 
device supports wide * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and * write the new value to the microcode. */ static void advansys_wide_enable_wdtr(AdvPortAddr iop_base, unsigned short tidmask) { unsigned short cfg_word; AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word); if ((cfg_word & tidmask) != 0) return; cfg_word |= tidmask; AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word); /* * Clear the microcode SDTR and WDTR negotiation done indicators for * the target to cause it to negotiate with the new setting set above. * WDTR when accepted causes the target to enter asynchronous mode, so * SDTR must be negotiated. */ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); cfg_word &= ~tidmask; AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word); cfg_word &= ~tidmask; AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word); } /* * Synchronous Transfers * * If the EEPROM enabled SDTR for the device and the device * supports synchronous transfers, then turn on the device's * 'sdtr_able' bit. Write the new value to the microcode. */ static void advansys_wide_enable_sdtr(AdvPortAddr iop_base, unsigned short tidmask) { unsigned short cfg_word; AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word); if ((cfg_word & tidmask) != 0) return; cfg_word |= tidmask; AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word); /* * Clear the microcode "SDTR negotiation" done indicator for the * target to cause it to negotiate with the new setting set above. */ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); cfg_word &= ~tidmask; AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word); } /* * PPR (Parallel Protocol Request) Capable * * If the device supports DT mode, then it must be PPR capable. * The PPR message will be used in place of the SDTR and WDTR * messages to negotiate synchronous speed and offset, transfer * width, and protocol options. */ static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc, AdvPortAddr iop_base, unsigned short tidmask) { AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able); adv_dvc->ppr_able |= tidmask; AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able); } static void advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc) { AdvPortAddr iop_base = adv_dvc->iop_base; unsigned short tidmask = 1 << sdev->id; if (sdev->lun == 0) { /* * Handle WDTR, SDTR, and Tag Queuing. If the feature * is enabled in the EEPROM and the device supports the * feature, then enable it in the microcode. */ if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr) advansys_wide_enable_wdtr(iop_base, tidmask); if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr) advansys_wide_enable_sdtr(iop_base, tidmask); if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr) advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask); /* * Tag Queuing is disabled for the BIOS which runs in polled * mode and would see no benefit from Tag Queuing. Also by * disabling Tag Queuing in the BIOS devices with Tag Queuing * bugs will at least work with the BIOS. 
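 *
 * When the target does support tagged queuing, its bit is set in
 * ASC_MC_TAGQNG_ABLE and the per-target command limit is written to
 * ASC_MC_NUMBER_OF_MAX_CMD below.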
*/ if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) { unsigned short cfg_word; AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word); cfg_word |= tidmask; AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word); AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + sdev->id, adv_dvc->max_dvc_qng); } } if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) scsi_change_queue_depth(sdev, adv_dvc->max_dvc_qng); } /* * Set the number of commands to queue per device for the * specified host adapter. */ static int advansys_slave_configure(struct scsi_device *sdev) { struct asc_board *boardp = shost_priv(sdev->host); if (ASC_NARROW_BOARD(boardp)) advansys_narrow_slave_configure(sdev, &boardp->dvc_var.asc_dvc_var); else advansys_wide_slave_configure(sdev, &boardp->dvc_var.adv_dvc_var); return 0; } static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp) { struct asc_board *board = shost_priv(scp->device->host); struct advansys_cmd *acmd = advansys_cmd(scp); acmd->dma_handle = dma_map_single(board->dev, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(board->dev, acmd->dma_handle)) { ASC_DBG(1, "failed to map sense buffer\n"); return 0; } return cpu_to_le32(acmd->dma_handle); } static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, struct asc_scsi_q *asc_scsi_q) { struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var; int use_sg; u32 srb_tag; memset(asc_scsi_q, 0, sizeof(*asc_scsi_q)); /* * Set the srb_tag to the command tag + 1, as * srb_tag '0' is used internally by the chip. */ srb_tag = scsi_cmd_to_rq(scp)->tag + 1; asc_scsi_q->q2.srb_tag = srb_tag; /* * Build the ASC_SCSI_Q request. */ asc_scsi_q->cdbptr = &scp->cmnd[0]; asc_scsi_q->q2.cdb_len = scp->cmd_len; asc_scsi_q->q1.target_id = ASC_TID_TO_TARGET_ID(scp->device->id); asc_scsi_q->q1.target_lun = scp->device->lun; asc_scsi_q->q2.target_ix = ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun); asc_scsi_q->q1.sense_addr = asc_get_sense_buffer_dma(scp); asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE; if (!asc_scsi_q->q1.sense_addr) return ASC_BUSY; /* * If there are any outstanding requests for the current target, * then every 255th request send an ORDERED request. This heuristic * tries to retain the benefit of request sorting while preventing * request starvation. 255 is the max number of tags or pending commands * a device may have outstanding. * * The request count is incremented below for every successfully * started request. * */ if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) && (boardp->reqcnt[scp->device->id] % 255) == 0) { asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG; } else { asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG; } /* Build ASC_SCSI_Q */ use_sg = scsi_dma_map(scp); if (use_sg < 0) { ASC_DBG(1, "failed to map sglist\n"); return ASC_BUSY; } else if (use_sg > 0) { int sgcnt; struct scatterlist *slp; struct asc_sg_head *asc_sg_head; if (use_sg > scp->device->host->sg_tablesize) { scmd_printk(KERN_ERR, scp, "use_sg %d > " "sg_tablesize %d\n", use_sg, scp->device->host->sg_tablesize); scsi_dma_unmap(scp); set_host_byte(scp, DID_ERROR); return ASC_ERROR; } asc_sg_head = kzalloc(struct_size(asc_sg_head, sg_list, use_sg), GFP_ATOMIC); if (!asc_sg_head) { scsi_dma_unmap(scp); set_host_byte(scp, DID_SOFT_ERROR); return ASC_ERROR; } asc_scsi_q->q1.cntl |= QC_SG_HEAD; asc_scsi_q->sg_head = asc_sg_head; asc_scsi_q->q1.data_cnt = 0; asc_scsi_q->q1.data_addr = 0; /* This is a byte value, otherwise it would need to be swapped. 
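 * 'entry_cnt' and 'sg_queue_cnt' are both set to the number of mapped
 * scatter-gather elements before the list is copied below.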
*/ asc_sg_head->entry_cnt = asc_scsi_q->q1.sg_queue_cnt = use_sg; ASC_STATS_ADD(scp->device->host, xfer_elem, asc_sg_head->entry_cnt); /* * Convert scatter-gather list into ASC_SG_HEAD list. */ scsi_for_each_sg(scp, slp, use_sg, sgcnt) { asc_sg_head->sg_list[sgcnt].addr = cpu_to_le32(sg_dma_address(slp)); asc_sg_head->sg_list[sgcnt].bytes = cpu_to_le32(sg_dma_len(slp)); ASC_STATS_ADD(scp->device->host, xfer_sect, DIV_ROUND_UP(sg_dma_len(slp), 512)); } } ASC_STATS(scp->device->host, xfer_cnt); ASC_DBG_PRT_ASC_SCSI_Q(2, asc_scsi_q); ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len); return ASC_NOERROR; } /* * Build scatter-gather list for Adv Library (Wide Board). * * Additional ADV_SG_BLOCK structures will need to be allocated * if the total number of scatter-gather elements exceeds * NO_OF_SG_PER_BLOCK (15). The ADV_SG_BLOCK structures are * assumed to be physically contiguous. * * Return: * ADV_SUCCESS(1) - SG List successfully created * ADV_ERROR(-1) - SG List creation failed */ static int adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, ADV_SCSI_REQ_Q *scsiqp, struct scsi_cmnd *scp, int use_sg) { adv_sgblk_t *sgblkp, *prev_sgblkp; struct scatterlist *slp; int sg_elem_cnt; ADV_SG_BLOCK *sg_block, *prev_sg_block; dma_addr_t sgblk_paddr; int i; slp = scsi_sglist(scp); sg_elem_cnt = use_sg; prev_sgblkp = NULL; prev_sg_block = NULL; reqp->sgblkp = NULL; for (;;) { /* * Allocate a 'adv_sgblk_t' structure from the board free * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK * (15) scatter-gather elements. */ sgblkp = dma_pool_alloc(boardp->adv_sgblk_pool, GFP_ATOMIC, &sgblk_paddr); if (!sgblkp) { ASC_DBG(1, "no free adv_sgblk_t\n"); ASC_STATS(scp->device->host, adv_build_nosg); /* * Allocation failed. Free 'adv_sgblk_t' structures * already allocated for the request. */ while ((sgblkp = reqp->sgblkp) != NULL) { /* Remove 'sgblkp' from the request list. */ reqp->sgblkp = sgblkp->next_sgblkp; sgblkp->next_sgblkp = NULL; dma_pool_free(boardp->adv_sgblk_pool, sgblkp, sgblkp->sg_addr); } return ASC_BUSY; } /* Complete 'adv_sgblk_t' board allocation. */ sgblkp->sg_addr = sgblk_paddr; sgblkp->next_sgblkp = NULL; sg_block = &sgblkp->sg_block; /* * Check if this is the first 'adv_sgblk_t' for the * request. */ if (reqp->sgblkp == NULL) { /* Request's first scatter-gather block. */ reqp->sgblkp = sgblkp; /* * Set ADV_SCSI_REQ_T ADV_SG_BLOCK virtual and physical * address pointers. */ scsiqp->sg_list_ptr = sg_block; scsiqp->sg_real_addr = cpu_to_le32(sgblk_paddr); } else { /* Request's second or later scatter-gather block. */ prev_sgblkp->next_sgblkp = sgblkp; /* * Point the previous ADV_SG_BLOCK structure to * the newly allocated ADV_SG_BLOCK structure. */ prev_sg_block->sg_ptr = cpu_to_le32(sgblk_paddr); } for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) { sg_block->sg_list[i].sg_addr = cpu_to_le32(sg_dma_address(slp)); sg_block->sg_list[i].sg_count = cpu_to_le32(sg_dma_len(slp)); ASC_STATS_ADD(scp->device->host, xfer_sect, DIV_ROUND_UP(sg_dma_len(slp), 512)); if (--sg_elem_cnt == 0) { /* * Last ADV_SG_BLOCK and scatter-gather entry. */ sg_block->sg_cnt = i + 1; sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */ return ADV_SUCCESS; } slp = sg_next(slp); } sg_block->sg_cnt = NO_OF_SG_PER_BLOCK; prev_sg_block = sg_block; prev_sgblkp = sgblkp; } } /* * Build a request structure for the Adv Library (Wide Board). * * If an adv_req_t can not be allocated to issue the request, * then return ASC_BUSY. If an error occurs, then return ASC_ERROR. 
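 *
 * The adv_req_t is taken from the board's per-tag array, indexed by
 * the command's driver tag, so at most one request can be outstanding
 * per tag.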
* * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the * microcode for DMA addresses or math operations are byte swapped * to little-endian order. */ static int adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, adv_req_t **adv_reqpp) { u32 srb_tag = scsi_cmd_to_rq(scp)->tag; adv_req_t *reqp; ADV_SCSI_REQ_Q *scsiqp; int ret; int use_sg; dma_addr_t sense_addr; /* * Allocate an adv_req_t structure from the board to execute * the command. */ reqp = &boardp->adv_reqp[srb_tag]; if (reqp->cmndp && reqp->cmndp != scp ) { ASC_DBG(1, "no free adv_req_t\n"); ASC_STATS(scp->device->host, adv_build_noreq); return ASC_BUSY; } reqp->req_addr = boardp->adv_reqp_addr + (srb_tag * sizeof(adv_req_t)); scsiqp = &reqp->scsi_req_q; /* * Initialize the structure. */ scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0; /* * Set the srb_tag to the command tag. */ scsiqp->srb_tag = srb_tag; /* * Set 'host_scribble' to point to the adv_req_t structure. */ reqp->cmndp = scp; scp->host_scribble = (void *)reqp; /* * Build the ADV_SCSI_REQ_Q request. */ /* Set CDB length and copy it to the request structure. */ scsiqp->cdb_len = scp->cmd_len; /* Copy first 12 CDB bytes to cdb[]. */ memcpy(scsiqp->cdb, scp->cmnd, scp->cmd_len < 12 ? scp->cmd_len : 12); /* Copy last 4 CDB bytes, if present, to cdb16[]. */ if (scp->cmd_len > 12) { int cdb16_len = scp->cmd_len - 12; memcpy(scsiqp->cdb16, &scp->cmnd[12], cdb16_len); } scsiqp->target_id = scp->device->id; scsiqp->target_lun = scp->device->lun; sense_addr = dma_map_single(boardp->dev, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(boardp->dev, sense_addr)) { ASC_DBG(1, "failed to map sense buffer\n"); ASC_STATS(scp->device->host, adv_build_noreq); return ASC_BUSY; } scsiqp->sense_addr = cpu_to_le32(sense_addr); scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE; /* Build ADV_SCSI_REQ_Q */ use_sg = scsi_dma_map(scp); if (use_sg < 0) { ASC_DBG(1, "failed to map SG list\n"); ASC_STATS(scp->device->host, adv_build_noreq); return ASC_BUSY; } else if (use_sg == 0) { /* Zero-length transfer */ reqp->sgblkp = NULL; scsiqp->data_cnt = 0; scsiqp->data_addr = 0; scsiqp->sg_list_ptr = NULL; scsiqp->sg_real_addr = 0; } else { if (use_sg > ADV_MAX_SG_LIST) { scmd_printk(KERN_ERR, scp, "use_sg %d > " "ADV_MAX_SG_LIST %d\n", use_sg, scp->device->host->sg_tablesize); scsi_dma_unmap(scp); set_host_byte(scp, DID_ERROR); reqp->cmndp = NULL; scp->host_scribble = NULL; return ASC_ERROR; } scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp)); ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg); if (ret != ADV_SUCCESS) { scsi_dma_unmap(scp); set_host_byte(scp, DID_ERROR); reqp->cmndp = NULL; scp->host_scribble = NULL; return ret; } ASC_STATS_ADD(scp->device->host, xfer_elem, use_sg); } ASC_STATS(scp->device->host, xfer_cnt); ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len); *adv_reqpp = reqp; return ASC_NOERROR; } static int AscSgListToQueue(int sg_list) { int n_sg_list_qs; n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q); if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0) n_sg_list_qs++; return n_sg_list_qs + 1; } static uint AscGetNumOfFreeQueue(ASC_DVC_VAR *asc_dvc, uchar target_ix, uchar n_qs) { uint cur_used_qs; uint cur_free_qs; ASC_SCSI_BIT_ID_TYPE target_id; uchar tid_no; target_id = ASC_TIX_TO_TARGET_ID(target_ix); tid_no = ASC_TIX_TO_TID(target_ix); if ((asc_dvc->unit_not_ready & target_id) || (asc_dvc->queue_full_or_busy & target_id)) { return 0; } if (n_qs == 1) { cur_used_qs = (uint) 
asc_dvc->cur_total_qng + (uint) asc_dvc->last_q_shortage + (uint) ASC_MIN_FREE_Q; } else { cur_used_qs = (uint) asc_dvc->cur_total_qng + (uint) ASC_MIN_FREE_Q; } if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) { cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs; if (asc_dvc->cur_dvc_qng[tid_no] >= asc_dvc->max_dvc_qng[tid_no]) { return 0; } return cur_free_qs; } if (n_qs > 1) { if ((n_qs > asc_dvc->last_q_shortage) && (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) { asc_dvc->last_q_shortage = n_qs; } } return 0; } static uchar AscAllocFreeQueue(PortAddr iop_base, uchar free_q_head) { ushort q_addr; uchar next_qp; uchar q_status; q_addr = ASC_QNO_TO_QADDR(free_q_head); q_status = (uchar)AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_STATUS)); next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD)); if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END)) return next_qp; return ASC_QLINK_END; } static uchar AscAllocMultipleFreeQueue(PortAddr iop_base, uchar free_q_head, uchar n_free_q) { uchar i; for (i = 0; i < n_free_q; i++) { free_q_head = AscAllocFreeQueue(iop_base, free_q_head); if (free_q_head == ASC_QLINK_END) break; } return free_q_head; } /* * void * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words) * * Calling/Exit State: * none * * Description: * Output an ASC_SCSI_Q structure to the chip */ static void DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words) { int i; ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", outbuf, 2 * words); AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 2 * words; i += 2) { if (i == 4 || i == 20) { continue; } outpw(iop_base + IOP_RAM_DATA, ((ushort)outbuf[i + 1] << 8) | outbuf[i]); } } static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) { ushort q_addr; uchar tid_no; uchar sdtr_data; uchar syn_period_ix; uchar syn_offset; PortAddr iop_base; iop_base = asc_dvc->iop_base; if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) && ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) { tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix); sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); syn_period_ix = (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1); syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET; AscMsgOutSDTR(asc_dvc, asc_dvc->sdtr_period_tbl[syn_period_ix], syn_offset); scsiq->q1.cntl |= QC_MSG_OUT; } q_addr = ASC_QNO_TO_QADDR(q_no); if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) { scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG; } scsiq->q1.status = QS_FREE; AscMemWordCopyPtrToLram(iop_base, q_addr + ASC_SCSIQ_CDB_BEG, (uchar *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1); DvcPutScsiQ(iop_base, q_addr + ASC_SCSIQ_CPY_BEG, (uchar *)&scsiq->q1.cntl, ((sizeof(ASC_SCSIQ_1) + sizeof(ASC_SCSIQ_2)) / 2) - 1); AscWriteLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS), (ushort)(((ushort)scsiq->q1. 
q_no << 8) | (ushort)QS_READY)); return 1; } static int AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) { int sta; int i; ASC_SG_HEAD *sg_head; ASC_SG_LIST_Q scsi_sg_q; __le32 saved_data_addr; __le32 saved_data_cnt; PortAddr iop_base; ushort sg_list_dwords; ushort sg_index; ushort sg_entry_cnt; ushort q_addr; uchar next_qp; iop_base = asc_dvc->iop_base; sg_head = scsiq->sg_head; saved_data_addr = scsiq->q1.data_addr; saved_data_cnt = scsiq->q1.data_cnt; scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes); /* * Set sg_entry_cnt to be the number of SG elements that * will fit in the allocated SG queues. It is minus 1, because * the first SG element is handled above. */ sg_entry_cnt = sg_head->entry_cnt - 1; if (sg_entry_cnt != 0) { scsiq->q1.cntl |= QC_SG_HEAD; q_addr = ASC_QNO_TO_QADDR(q_no); sg_index = 1; scsiq->q1.sg_queue_cnt = sg_head->queue_cnt; scsi_sg_q.sg_head_qp = q_no; scsi_sg_q.cntl = QCSG_SG_XFER_LIST; for (i = 0; i < sg_head->queue_cnt; i++) { scsi_sg_q.seq_no = i + 1; if (sg_entry_cnt > ASC_SG_LIST_PER_Q) { sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2); sg_entry_cnt -= ASC_SG_LIST_PER_Q; if (i == 0) { scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q; scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q; } else { scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1; scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q - 1; } } else { scsi_sg_q.cntl |= QCSG_SG_XFER_END; sg_list_dwords = sg_entry_cnt << 1; if (i == 0) { scsi_sg_q.sg_list_cnt = sg_entry_cnt; scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt; } else { scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1; scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1; } sg_entry_cnt = 0; } next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD)); scsi_sg_q.q_no = next_qp; q_addr = ASC_QNO_TO_QADDR(next_qp); AscMemWordCopyPtrToLram(iop_base, q_addr + ASC_SCSIQ_SGHD_CPY_BEG, (uchar *)&scsi_sg_q, sizeof(ASC_SG_LIST_Q) >> 1); AscMemDWordCopyPtrToLram(iop_base, q_addr + ASC_SGQ_LIST_BEG, (uchar *)&sg_head-> sg_list[sg_index], sg_list_dwords); sg_index += ASC_SG_LIST_PER_Q; scsiq->next_sg_index = sg_index; } } else { scsiq->q1.cntl &= ~QC_SG_HEAD; } sta = AscPutReadyQueue(asc_dvc, scsiq, q_no); scsiq->q1.data_addr = saved_data_addr; scsiq->q1.data_cnt = saved_data_cnt; return (sta); } static int AscSendScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar n_q_required) { PortAddr iop_base; uchar free_q_head; uchar next_qp; uchar tid_no; uchar target_ix; int sta; iop_base = asc_dvc->iop_base; target_ix = scsiq->q2.target_ix; tid_no = ASC_TIX_TO_TID(target_ix); sta = 0; free_q_head = (uchar)AscGetVarFreeQHead(iop_base); if (n_q_required > 1) { next_qp = AscAllocMultipleFreeQueue(iop_base, free_q_head, (uchar)n_q_required); if (next_qp != ASC_QLINK_END) { asc_dvc->last_q_shortage = 0; scsiq->sg_head->queue_cnt = n_q_required - 1; scsiq->q1.q_no = free_q_head; sta = AscPutReadySgListQueue(asc_dvc, scsiq, free_q_head); } } else if (n_q_required == 1) { next_qp = AscAllocFreeQueue(iop_base, free_q_head); if (next_qp != ASC_QLINK_END) { scsiq->q1.q_no = free_q_head; sta = AscPutReadyQueue(asc_dvc, scsiq, free_q_head); } } if (sta == 1) { AscPutVarFreeQHead(iop_base, next_qp); asc_dvc->cur_total_qng += n_q_required; asc_dvc->cur_dvc_qng[tid_no]++; } return sta; } #define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16 static uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] = { INQUIRY, REQUEST_SENSE, READ_CAPACITY, READ_TOC, MODE_SELECT, MODE_SENSE, MODE_SELECT_10, 
MODE_SENSE_10, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) { PortAddr iop_base; int sta; int n_q_required; bool disable_syn_offset_one_fix; int i; u32 addr; ushort sg_entry_cnt = 0; ushort sg_entry_cnt_minus_one = 0; uchar target_ix; uchar tid_no; uchar sdtr_data; uchar extra_bytes; uchar scsi_cmd; uchar disable_cmd; ASC_SG_HEAD *sg_head; unsigned long data_cnt; iop_base = asc_dvc->iop_base; sg_head = scsiq->sg_head; if (asc_dvc->err_code != 0) return ASC_ERROR; scsiq->q1.q_no = 0; if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) { scsiq->q1.extra_bytes = 0; } sta = 0; target_ix = scsiq->q2.target_ix; tid_no = ASC_TIX_TO_TID(target_ix); n_q_required = 1; if (scsiq->cdbptr[0] == REQUEST_SENSE) { if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) { asc_dvc->sdtr_done &= ~scsiq->q1.target_id; sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); AscMsgOutSDTR(asc_dvc, asc_dvc-> sdtr_period_tbl[(sdtr_data >> 4) & (uchar)(asc_dvc-> max_sdtr_index - 1)], (uchar)(sdtr_data & (uchar) ASC_SYN_MAX_OFFSET)); scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT); } } if (asc_dvc->in_critical_cnt != 0) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY); return ASC_ERROR; } asc_dvc->in_critical_cnt++; if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { if ((sg_entry_cnt = sg_head->entry_cnt) == 0) { asc_dvc->in_critical_cnt--; return ASC_ERROR; } if (sg_entry_cnt > ASC_MAX_SG_LIST) { asc_dvc->in_critical_cnt--; return ASC_ERROR; } if (sg_entry_cnt == 1) { scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes); scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE); } sg_entry_cnt_minus_one = sg_entry_cnt - 1; } scsi_cmd = scsiq->cdbptr[0]; disable_syn_offset_one_fix = false; if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) && !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) { if (scsiq->q1.cntl & QC_SG_HEAD) { data_cnt = 0; for (i = 0; i < sg_entry_cnt; i++) { data_cnt += le32_to_cpu(sg_head->sg_list[i]. bytes); } } else { data_cnt = le32_to_cpu(scsiq->q1.data_cnt); } if (data_cnt != 0UL) { if (data_cnt < 512UL) { disable_syn_offset_one_fix = true; } else { for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST; i++) { disable_cmd = _syn_offset_one_disable_cmd[i]; if (disable_cmd == 0xFF) { break; } if (scsi_cmd == disable_cmd) { disable_syn_offset_one_fix = true; break; } } } } } if (disable_syn_offset_one_fix) { scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG; scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX | ASC_TAG_FLAG_DISABLE_DISCONNECT); } else { scsiq->q2.tag_code &= 0x27; } if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { if (asc_dvc->bug_fix_cntl) { if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) { if ((scsi_cmd == READ_6) || (scsi_cmd == READ_10)) { addr = le32_to_cpu(sg_head-> sg_list [sg_entry_cnt_minus_one]. addr) + le32_to_cpu(sg_head-> sg_list [sg_entry_cnt_minus_one]. bytes); extra_bytes = (uchar)((ushort)addr & 0x0003); if ((extra_bytes != 0) && ((scsiq->q2. tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0)) { scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES; scsiq->q1.extra_bytes = extra_bytes; data_cnt = le32_to_cpu(sg_head-> sg_list [sg_entry_cnt_minus_one]. bytes); data_cnt -= extra_bytes; sg_head-> sg_list [sg_entry_cnt_minus_one]. 
bytes = cpu_to_le32(data_cnt); } } } } sg_head->entry_to_copy = sg_head->entry_cnt; n_q_required = AscSgListToQueue(sg_entry_cnt); if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >= (uint) n_q_required) || ((scsiq->q1.cntl & QC_URGENT) != 0)) { if ((sta = AscSendScsiQueue(asc_dvc, scsiq, n_q_required)) == 1) { asc_dvc->in_critical_cnt--; return (sta); } } } else { if (asc_dvc->bug_fix_cntl) { if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) { if ((scsi_cmd == READ_6) || (scsi_cmd == READ_10)) { addr = le32_to_cpu(scsiq->q1.data_addr) + le32_to_cpu(scsiq->q1.data_cnt); extra_bytes = (uchar)((ushort)addr & 0x0003); if ((extra_bytes != 0) && ((scsiq->q2. tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0)) { data_cnt = le32_to_cpu(scsiq->q1. data_cnt); if (((ushort)data_cnt & 0x01FF) == 0) { scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES; data_cnt -= extra_bytes; scsiq->q1.data_cnt = cpu_to_le32 (data_cnt); scsiq->q1.extra_bytes = extra_bytes; } } } } } n_q_required = 1; if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) || ((scsiq->q1.cntl & QC_URGENT) != 0)) { if ((sta = AscSendScsiQueue(asc_dvc, scsiq, n_q_required)) == 1) { asc_dvc->in_critical_cnt--; return (sta); } } } asc_dvc->in_critical_cnt--; return (sta); } /* * AdvExeScsiQueue() - Send a request to the RISC microcode program. * * Allocate a carrier structure, point the carrier to the ADV_SCSI_REQ_Q, * add the carrier to the ICQ (Initiator Command Queue), and tickle the * RISC to notify it a new command is ready to be executed. * * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be * set to SCSI_MAX_RETRY. * * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the microcode * for DMA addresses or math operations are byte swapped to little-endian * order. * * Return: * ADV_SUCCESS(1) - The request was successfully queued. * ADV_BUSY(0) - Resource unavailable; Retry again after pending * request completes. * ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure * host IC error. */ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp) { AdvPortAddr iop_base; ADV_CARR_T *new_carrp; ADV_SCSI_REQ_Q *scsiq = &reqp->scsi_req_q; /* * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID. */ if (scsiq->target_id > ADV_MAX_TID) { scsiq->host_status = QHSTA_M_INVALID_DEVICE; scsiq->done_status = QD_WITH_ERROR; return ADV_ERROR; } iop_base = asc_dvc->iop_base; /* * Allocate a carrier ensuring at least one carrier always * remains on the freelist and initialize fields. */ new_carrp = adv_get_next_carrier(asc_dvc); if (!new_carrp) { ASC_DBG(1, "No free carriers\n"); return ADV_BUSY; } asc_dvc->carr_pending_cnt++; /* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */ scsiq->scsiq_ptr = cpu_to_le32(scsiq->srb_tag); scsiq->scsiq_rptr = cpu_to_le32(reqp->req_addr); scsiq->carr_va = asc_dvc->icq_sp->carr_va; scsiq->carr_pa = asc_dvc->icq_sp->carr_pa; /* * Use the current stopper to send the ADV_SCSI_REQ_Q command to * the microcode. The newly allocated stopper will become the new * stopper. */ asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr; /* * Set the 'next_vpa' pointer for the old stopper to be the * physical address of the new stopper. The RISC can only * follow physical addresses. */ asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa; /* * Set the host adapter stopper pointer to point to the new carrier. 
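 * The old stopper now carries this request; the newly allocated
 * carrier becomes the list terminator that the RISC will stop on.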
*/ asc_dvc->icq_sp = new_carrp; if (asc_dvc->chip_type == ADV_CHIP_ASC3550 || asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { /* * Tickle the RISC to tell it to read its Command Queue Head pointer. */ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A); if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { /* * Clear the tickle value. In the ASC-3550 the RISC flag * command 'clr_tickle_a' does not work unless the host * value is cleared. */ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP); } } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { /* * Notify the RISC a carrier is ready by writing the physical * address of the new carrier stopper to the COMMA register. */ AdvWriteDWordRegister(iop_base, IOPDW_COMMA, le32_to_cpu(new_carrp->carr_pa)); } return ADV_SUCCESS; } /* * Execute a single 'struct scsi_cmnd'. */ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) { int ret, err_code; struct asc_board *boardp = shost_priv(scp->device->host); ASC_DBG(1, "scp 0x%p\n", scp); if (ASC_NARROW_BOARD(boardp)) { ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; struct asc_scsi_q asc_scsi_q; ret = asc_build_req(boardp, scp, &asc_scsi_q); if (ret != ASC_NOERROR) { ASC_STATS(scp->device->host, build_error); return ret; } ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q); kfree(asc_scsi_q.sg_head); err_code = asc_dvc->err_code; } else { ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; adv_req_t *adv_reqp; switch (adv_build_req(boardp, scp, &adv_reqp)) { case ASC_NOERROR: ASC_DBG(3, "adv_build_req ASC_NOERROR\n"); break; case ASC_BUSY: ASC_DBG(1, "adv_build_req ASC_BUSY\n"); /* * The asc_stats fields 'adv_build_noreq' and * 'adv_build_nosg' count wide board busy conditions. * They are updated in adv_build_req and * adv_get_sglist, respectively. */ return ASC_BUSY; case ASC_ERROR: default: ASC_DBG(1, "adv_build_req ASC_ERROR\n"); ASC_STATS(scp->device->host, build_error); return ASC_ERROR; } ret = AdvExeScsiQueue(adv_dvc, adv_reqp); err_code = adv_dvc->err_code; } switch (ret) { case ASC_NOERROR: ASC_STATS(scp->device->host, exe_noerror); /* * Increment monotonically increasing per device * successful request counter. Wrapping doesn't matter. */ boardp->reqcnt[scp->device->id]++; ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n"); break; case ASC_BUSY: ASC_DBG(1, "ExeScsiQueue() ASC_BUSY\n"); ASC_STATS(scp->device->host, exe_busy); break; case ASC_ERROR: scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, " "err_code 0x%x\n", err_code); ASC_STATS(scp->device->host, exe_error); set_host_byte(scp, DID_ERROR); break; default: scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, " "err_code 0x%x\n", err_code); ASC_STATS(scp->device->host, exe_unknown); set_host_byte(scp, DID_ERROR); break; } ASC_DBG(1, "end\n"); return ret; } /* * advansys_queuecommand() - interrupt-driven I/O entrypoint. * * This function always returns 0. Command return status is saved * in the 'scp' result field. 
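 *
 * The one exception is the ASC_BUSY case, where SCSI_MLQUEUE_HOST_BUSY
 * is returned so the SCSI mid-layer requeues and retries the command
 * later.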
*/ static int advansys_queuecommand_lck(struct scsi_cmnd *scp) { struct Scsi_Host *shost = scp->device->host; int asc_res, result = 0; ASC_STATS(shost, queuecommand); asc_res = asc_execute_scsi_cmnd(scp); switch (asc_res) { case ASC_NOERROR: break; case ASC_BUSY: result = SCSI_MLQUEUE_HOST_BUSY; break; case ASC_ERROR: default: asc_scsi_done(scp); break; } return result; } static DEF_SCSI_QCMD(advansys_queuecommand) static ushort AscGetEisaChipCfg(PortAddr iop_base) { PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | (PortAddr) (ASC_EISA_CFG_IOP_MASK); return inpw(eisa_cfg_iop); } /* * Return the BIOS address of the adapter at the specified * I/O port and with the specified bus type. */ static unsigned short AscGetChipBiosAddress(PortAddr iop_base, unsigned short bus_type) { unsigned short cfg_lsw; unsigned short bios_addr; /* * The PCI BIOS is re-located by the motherboard BIOS. Because * of this the driver can not determine where a PCI BIOS is * loaded and executes. */ if (bus_type & ASC_IS_PCI) return 0; if ((bus_type & ASC_IS_EISA) != 0) { cfg_lsw = AscGetEisaChipCfg(iop_base); cfg_lsw &= 0x000F; bios_addr = ASC_BIOS_MIN_ADDR + cfg_lsw * ASC_BIOS_BANK_SIZE; return bios_addr; } cfg_lsw = AscGetChipCfgLsw(iop_base); bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE; return bios_addr; } static uchar AscSetChipScsiID(PortAddr iop_base, uchar new_host_id) { ushort cfg_lsw; if (AscGetChipScsiID(iop_base) == new_host_id) { return (new_host_id); } cfg_lsw = AscGetChipCfgLsw(iop_base); cfg_lsw &= 0xF8FF; cfg_lsw |= (ushort)((new_host_id & ASC_MAX_TID) << 8); AscSetChipCfgLsw(iop_base, cfg_lsw); return (AscGetChipScsiID(iop_base)); } static unsigned char AscGetChipScsiCtrl(PortAddr iop_base) { unsigned char sc; AscSetBank(iop_base, 1); sc = inp(iop_base + IOP_REG_SC); AscSetBank(iop_base, 0); return sc; } static unsigned char AscGetChipVersion(PortAddr iop_base, unsigned short bus_type) { if (bus_type & ASC_IS_EISA) { PortAddr eisa_iop; unsigned char revision; eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | (PortAddr) ASC_EISA_REV_IOP_MASK; revision = inp(eisa_iop); return ASC_CHIP_MIN_VER_EISA - 1 + revision; } return AscGetChipVerNo(iop_base); } static int AscStopQueueExe(PortAddr iop_base) { int count = 0; if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) { AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, ASC_STOP_REQ_RISC_STOP); do { if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) & ASC_STOP_ACK_RISC_STOP) { return (1); } mdelay(100); } while (count++ < 20); } return (0); } static unsigned int AscGetMaxDmaCount(ushort bus_type) { if (bus_type & (ASC_IS_EISA | ASC_IS_VL)) return ASC_MAX_VL_DMA_COUNT; return ASC_MAX_PCI_DMA_COUNT; } static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) { int i; PortAddr iop_base; uchar chip_version; iop_base = asc_dvc->iop_base; asc_dvc->err_code = 0; if ((asc_dvc->bus_type & (ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) { asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE; } AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, 0); asc_dvc->bug_fix_cntl = 0; asc_dvc->pci_fix_asyn_xfer = 0; asc_dvc->pci_fix_asyn_xfer_always = 0; /* asc_dvc->init_state initialized in AscInitGetConfig(). 
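 * The remaining ASC_DVC_VAR fields are reset to driver defaults below,
 * including selection of the Ultra SDTR period table for Ultra-capable
 * PCI chips.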
*/ asc_dvc->sdtr_done = 0; asc_dvc->cur_total_qng = 0; asc_dvc->is_in_int = false; asc_dvc->in_critical_cnt = 0; asc_dvc->last_q_shortage = 0; asc_dvc->use_tagged_qng = 0; asc_dvc->no_scam = 0; asc_dvc->unit_not_ready = 0; asc_dvc->queue_full_or_busy = 0; asc_dvc->redo_scam = 0; asc_dvc->res2 = 0; asc_dvc->min_sdtr_index = 0; asc_dvc->cfg->can_tagged_qng = 0; asc_dvc->cfg->cmd_qng_enabled = 0; asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL; asc_dvc->init_sdtr = 0; asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG; asc_dvc->scsi_reset_wait = 3; asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET; asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type); asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET; asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET; asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID; chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type); asc_dvc->cfg->chip_version = chip_version; asc_dvc->sdtr_period_tbl = asc_syn_xfer_period; asc_dvc->max_sdtr_index = 7; if ((asc_dvc->bus_type & ASC_IS_PCI) && (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) { asc_dvc->bus_type = ASC_IS_PCI_ULTRA; asc_dvc->sdtr_period_tbl = asc_syn_ultra_xfer_period; asc_dvc->max_sdtr_index = 15; if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150) { AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE)); } else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) { AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_ENABLE_FILTER)); } } if (asc_dvc->bus_type == ASC_IS_PCI) { AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE)); } for (i = 0; i <= ASC_MAX_TID; i++) { asc_dvc->cur_dvc_qng[i] = 0; asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG; asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *)0L; asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L; asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG; } } static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg) { int retry; for (retry = 0; retry < ASC_EEP_MAX_RETRY; retry++) { unsigned char read_back; AscSetChipEEPCmd(iop_base, cmd_reg); mdelay(1); read_back = AscGetChipEEPCmd(iop_base); if (read_back == cmd_reg) return 1; } return 0; } static void AscWaitEEPRead(void) { mdelay(1); } static ushort AscReadEEPWord(PortAddr iop_base, uchar addr) { ushort read_wval; uchar cmd_reg; AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE); AscWaitEEPRead(); cmd_reg = addr | ASC_EEP_CMD_READ; AscWriteEEPCmdReg(iop_base, cmd_reg); AscWaitEEPRead(); read_wval = AscGetChipEEPData(iop_base); AscWaitEEPRead(); return read_wval; } static ushort AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type) { ushort wval; ushort sum; ushort *wbuf; int cfg_beg; int cfg_end; int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2; int s_addr; wbuf = (ushort *)cfg_buf; sum = 0; /* Read two config words; Byte-swapping done by AscReadEEPWord(). */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr); sum += *wbuf; } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { wval = AscReadEEPWord(iop_base, (uchar)s_addr); if (s_addr <= uchar_end_in_config) { /* * Swap all char fields - must unswap bytes already swapped * by AscReadEEPWord(). */ *wbuf = le16_to_cpu(wval); } else { /* Don't swap word field at the end - cntl field. */ *wbuf = wval; } sum += wval; /* Checksum treats all EEPROM data as words. 
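 * The 16-bit sum accumulated here is returned to the caller, which
 * compares it with the checksum word stored at the end of the EEPROM.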
*/ } /* * Read the checksum word which will be compared against 'sum' * by the caller. Word field already swapped. */ *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr); return sum; } static int AscTestExternalLram(ASC_DVC_VAR *asc_dvc) { PortAddr iop_base; ushort q_addr; ushort saved_word; int sta; iop_base = asc_dvc->iop_base; sta = 0; q_addr = ASC_QNO_TO_QADDR(241); saved_word = AscReadLramWord(iop_base, q_addr); AscSetChipLramAddr(iop_base, q_addr); AscSetChipLramData(iop_base, 0x55AA); mdelay(10); AscSetChipLramAddr(iop_base, q_addr); if (AscGetChipLramData(iop_base) == 0x55AA) { sta = 1; AscWriteLramWord(iop_base, q_addr, saved_word); } return (sta); } static void AscWaitEEPWrite(void) { mdelay(20); } static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg) { ushort read_back; int retry; retry = 0; while (true) { AscSetChipEEPData(iop_base, data_reg); mdelay(1); read_back = AscGetChipEEPData(iop_base); if (read_back == data_reg) { return (1); } if (retry++ > ASC_EEP_MAX_RETRY) { return (0); } } } static ushort AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val) { ushort read_wval; read_wval = AscReadEEPWord(iop_base, addr); if (read_wval != word_val) { AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE); AscWaitEEPRead(); AscWriteEEPDataReg(iop_base, word_val); AscWaitEEPRead(); AscWriteEEPCmdReg(iop_base, (uchar)((uchar)ASC_EEP_CMD_WRITE | addr)); AscWaitEEPWrite(); AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE); AscWaitEEPRead(); return (AscReadEEPWord(iop_base, addr)); } return (read_wval); } static int AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type) { int n_error; ushort *wbuf; ushort word; ushort sum; int s_addr; int cfg_beg; int cfg_end; int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2; wbuf = (ushort *)cfg_buf; n_error = 0; sum = 0; /* Write two config words; AscWriteEEPWord() will swap bytes. */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { sum += *wbuf; if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { n_error++; } } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { if (s_addr <= uchar_end_in_config) { /* * This is a char field. Swap char fields before they are * swapped again by AscWriteEEPWord(). */ word = cpu_to_le16(*wbuf); if (word != AscWriteEEPWord(iop_base, (uchar)s_addr, word)) { n_error++; } } else { /* Don't swap word field at the end - cntl field. */ if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) { n_error++; } } sum += *wbuf; /* Checksum calculated from word values. */ } /* Write checksum word. It will be swapped by AscWriteEEPWord(). */ *wbuf = sum; if (sum != AscWriteEEPWord(iop_base, (uchar)s_addr, sum)) { n_error++; } /* Read EEPROM back again. */ wbuf = (ushort *)cfg_buf; /* * Read two config words; Byte-swapping done by AscReadEEPWord(). */ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { if (*wbuf != AscReadEEPWord(iop_base, (uchar)s_addr)) { n_error++; } } if (bus_type & ASC_IS_VL) { cfg_beg = ASC_EEP_DVC_CFG_BEG_VL; cfg_end = ASC_EEP_MAX_DVC_ADDR_VL; } else { cfg_beg = ASC_EEP_DVC_CFG_BEG; cfg_end = ASC_EEP_MAX_DVC_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { if (s_addr <= uchar_end_in_config) { /* * Swap all char fields. Must unswap bytes already swapped * by AscReadEEPWord(). 
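 * The write pass above pushed these same words through cpu_to_le16()
 * before handing them to AscWriteEEPWord(), so applying le16_to_cpu()
 * on the way back makes the read-back value directly comparable with
 * the unswapped copy still held in cfg_buf (le16_to_cpu(cpu_to_le16(x))
 * is x on every host).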
*/ word = le16_to_cpu(AscReadEEPWord (iop_base, (uchar)s_addr)); } else { /* Don't swap word field at the end - cntl field. */ word = AscReadEEPWord(iop_base, (uchar)s_addr); } if (*wbuf != word) { n_error++; } } /* Read checksum; Byte swapping not needed. */ if (AscReadEEPWord(iop_base, (uchar)s_addr) != sum) { n_error++; } return n_error; } static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type) { int retry; int n_error; retry = 0; while (true) { if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf, bus_type)) == 0) { break; } if (++retry > ASC_EEP_MAX_RETRY) { break; } } return n_error; } static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc) { ASCEEP_CONFIG eep_config_buf; ASCEEP_CONFIG *eep_config; PortAddr iop_base; ushort chksum; ushort warn_code; ushort cfg_msw, cfg_lsw; int i; int write_eep = 0; iop_base = asc_dvc->iop_base; warn_code = 0; AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE); AscStopQueueExe(iop_base); if ((AscStopChip(iop_base)) || (AscGetChipScsiCtrl(iop_base) != 0)) { asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE; AscResetChipAndScsiBus(asc_dvc); mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ } if (!AscIsChipHalted(iop_base)) { asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; return (warn_code); } AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; return (warn_code); } eep_config = (ASCEEP_CONFIG *)&eep_config_buf; cfg_msw = AscGetChipCfgMsw(iop_base); cfg_lsw = AscGetChipCfgLsw(iop_base); if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; warn_code |= ASC_WARN_CFG_MSW_RECOVER; AscSetChipCfgMsw(iop_base, cfg_msw); } chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type); ASC_DBG(1, "chksum 0x%x\n", chksum); if (chksum == 0) { chksum = 0xaa55; } if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { warn_code |= ASC_WARN_AUTO_CONFIG; if (asc_dvc->cfg->chip_version == 3) { if (eep_config->cfg_lsw != cfg_lsw) { warn_code |= ASC_WARN_EEPROM_RECOVER; eep_config->cfg_lsw = AscGetChipCfgLsw(iop_base); } if (eep_config->cfg_msw != cfg_msw) { warn_code |= ASC_WARN_EEPROM_RECOVER; eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); } } } eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON; ASC_DBG(1, "eep_config->chksum 0x%x\n", eep_config->chksum); if (chksum != eep_config->chksum) { if (AscGetChipVersion(iop_base, asc_dvc->bus_type) == ASC_CHIP_VER_PCI_ULTRA_3050) { ASC_DBG(1, "chksum error ignored; EEPROM-less board\n"); eep_config->init_sdtr = 0xFF; eep_config->disc_enable = 0xFF; eep_config->start_motor = 0xFF; eep_config->use_cmd_qng = 0; eep_config->max_total_qng = 0xF0; eep_config->max_tag_qng = 0x20; eep_config->cntl = 0xBFFF; ASC_EEP_SET_CHIP_ID(eep_config, 7); eep_config->no_scam = 0; eep_config->adapter_info[0] = 0; eep_config->adapter_info[1] = 0; eep_config->adapter_info[2] = 0; eep_config->adapter_info[3] = 0; eep_config->adapter_info[4] = 0; /* Indicate EEPROM-less board. 
*/ eep_config->adapter_info[5] = 0xBB; } else { ASC_PRINT ("AscInitFromEEP: EEPROM checksum error; Will try to re-write EEPROM.\n"); write_eep = 1; warn_code |= ASC_WARN_EEPROM_CHKSUM; } } asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr; asc_dvc->cfg->disc_enable = eep_config->disc_enable; asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng; asc_dvc->start_motor = eep_config->start_motor; asc_dvc->dvc_cntl = eep_config->cntl; asc_dvc->no_scam = eep_config->no_scam; asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0]; asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1]; asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2]; asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3]; asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4]; asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5]; if (!AscTestExternalLram(asc_dvc)) { if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA)) { eep_config->max_total_qng = ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG; } else { eep_config->cfg_msw |= 0x0800; cfg_msw |= 0x0800; AscSetChipCfgMsw(iop_base, cfg_msw); eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG; } } else { } if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) { eep_config->max_total_qng = ASC_MIN_TOTAL_QNG; } if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) { eep_config->max_total_qng = ASC_MAX_TOTAL_QNG; } if (eep_config->max_tag_qng > eep_config->max_total_qng) { eep_config->max_tag_qng = eep_config->max_total_qng; } if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) { eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC; } asc_dvc->max_total_qng = eep_config->max_total_qng; if ((eep_config->use_cmd_qng & eep_config->disc_enable) != eep_config->use_cmd_qng) { eep_config->disc_enable = eep_config->use_cmd_qng; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } ASC_EEP_SET_CHIP_ID(eep_config, ASC_EEP_GET_CHIP_ID(eep_config) & ASC_MAX_TID); asc_dvc->cfg->chip_scsi_id = ASC_EEP_GET_CHIP_ID(eep_config); if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) && !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) { asc_dvc->min_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX; } for (i = 0; i <= ASC_MAX_TID; i++) { asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i]; asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng; asc_dvc->cfg->sdtr_period_offset[i] = (uchar)(ASC_DEF_SDTR_OFFSET | (asc_dvc->min_sdtr_index << 4)); } eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); if (write_eep) { if ((i = AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type)) != 0) { ASC_PRINT1 ("AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i); } else { ASC_PRINT ("AscInitFromEEP: Successfully re-wrote EEPROM.\n"); } } return (warn_code); } static int AscInitGetConfig(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; unsigned short warn_code = 0; asc_dvc->init_state = ASC_INIT_STATE_BEG_GET_CFG; if (asc_dvc->err_code != 0) return asc_dvc->err_code; if (AscFindSignature(asc_dvc->iop_base)) { AscInitAscDvcVar(asc_dvc); warn_code = AscInitFromEEP(asc_dvc); asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG; if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT; } else { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; } switch (warn_code) { case 0: /* No error */ break; case ASC_WARN_IO_PORT_ROTATE: shost_printk(KERN_WARNING, 
shost, "I/O port address " "modified\n"); break; case ASC_WARN_AUTO_CONFIG: shost_printk(KERN_WARNING, shost, "I/O port increment switch " "enabled\n"); break; case ASC_WARN_EEPROM_CHKSUM: shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); break; case ASC_WARN_IRQ_MODIFIED: shost_printk(KERN_WARNING, shost, "IRQ modified\n"); break; case ASC_WARN_CMD_QNG_CONFLICT: shost_printk(KERN_WARNING, shost, "tag queuing enabled w/o " "disconnects\n"); break; default: shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", warn_code); break; } if (asc_dvc->err_code != 0) shost_printk(KERN_ERR, shost, "error 0x%x at init_state " "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); return asc_dvc->err_code; } static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; PortAddr iop_base = asc_dvc->iop_base; unsigned short cfg_msw; unsigned short warn_code = 0; asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG; if (asc_dvc->err_code != 0) return asc_dvc->err_code; if (!AscFindSignature(asc_dvc->iop_base)) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return asc_dvc->err_code; } cfg_msw = AscGetChipCfgMsw(iop_base); if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) { cfg_msw &= ~ASC_CFG_MSW_CLR_MASK; warn_code |= ASC_WARN_CFG_MSW_RECOVER; AscSetChipCfgMsw(iop_base, cfg_msw); } if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) != asc_dvc->cfg->cmd_qng_enabled) { asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) { warn_code |= ASC_WARN_AUTO_CONFIG; } #ifdef CONFIG_PCI if (asc_dvc->bus_type & ASC_IS_PCI) { cfg_msw &= 0xFFC0; AscSetChipCfgMsw(iop_base, cfg_msw); if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { } else { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; } } } else #endif /* CONFIG_PCI */ if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) != asc_dvc->cfg->chip_scsi_id) { asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID; } asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG; switch (warn_code) { case 0: /* No error. */ break; case ASC_WARN_IO_PORT_ROTATE: shost_printk(KERN_WARNING, shost, "I/O port address " "modified\n"); break; case ASC_WARN_AUTO_CONFIG: shost_printk(KERN_WARNING, shost, "I/O port increment switch " "enabled\n"); break; case ASC_WARN_EEPROM_CHKSUM: shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n"); break; case ASC_WARN_IRQ_MODIFIED: shost_printk(KERN_WARNING, shost, "IRQ modified\n"); break; case ASC_WARN_CMD_QNG_CONFLICT: shost_printk(KERN_WARNING, shost, "tag queuing w/o " "disconnects\n"); break; default: shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n", warn_code); break; } if (asc_dvc->err_code != 0) shost_printk(KERN_ERR, shost, "error 0x%x at init_state " "0x%x\n", asc_dvc->err_code, asc_dvc->init_state); return asc_dvc->err_code; } /* * EEPROM Configuration. * * All drivers should use this structure to set the default EEPROM * configuration. The BIOS now uses this structure when it is built. * Additional structure information can be found in a_condor.h where * the structure is defined. * * The *_Field_IsChar structs are needed to correct for endianness. * These values are read from the board 16 bits at a time directly * into the structs. 
Because some fields are char, the values will be * in the wrong order. The *_Field_IsChar tells when to flip the * bytes. Data read and written to PCI memory is automatically swapped * on big-endian platforms so char fields read as words are actually being * unswapped on big-endian platforms. */ #ifdef CONFIG_PCI static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = { ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */ 0x0000, /* cfg_msw */ 0xFFFF, /* disc_enable */ 0xFFFF, /* wdtr_able */ 0xFFFF, /* sdtr_able */ 0xFFFF, /* start_motor */ 0xFFFF, /* tagqng_able */ 0xFFFF, /* bios_scan */ 0, /* scam_tolerant */ 7, /* adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* termination */ 0, /* reserved1 */ 0xFFE7, /* bios_ctrl */ 0xFFFF, /* ultra_able */ 0, /* reserved2 */ ASC_DEF_MAX_HOST_QNG, /* max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* dvc_cntl */ 0, /* bug_fix */ 0, /* serial_number_word1 */ 0, /* serial_number_word2 */ 0, /* serial_number_word3 */ 0, /* check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* oem_name[16] */ 0, /* dvc_err_code */ 0, /* adv_err_code */ 0, /* adv_err_addr */ 0, /* saved_dvc_err_code */ 0, /* saved_adv_err_code */ 0, /* saved_adv_err_addr */ 0 /* num_of_err */ }; static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar = { 0, /* cfg_lsw */ 0, /* cfg_msw */ 0, /* -disc_enable */ 0, /* wdtr_able */ 0, /* sdtr_able */ 0, /* start_motor */ 0, /* tagqng_able */ 0, /* bios_scan */ 0, /* scam_tolerant */ 1, /* adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* termination */ 1, /* reserved1 */ 0, /* bios_ctrl */ 0, /* ultra_able */ 0, /* reserved2 */ 1, /* max_host_qng */ 1, /* max_dvc_qng */ 0, /* dvc_cntl */ 0, /* bug_fix */ 0, /* serial_number_word1 */ 0, /* serial_number_word2 */ 0, /* serial_number_word3 */ 0, /* check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* oem_name[16] */ 0, /* dvc_err_code */ 0, /* adv_err_code */ 0, /* adv_err_addr */ 0, /* saved_dvc_err_code */ 0, /* saved_adv_err_code */ 0, /* saved_adv_err_addr */ 0 /* num_of_err */ }; static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config = { ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ 0x0000, /* 01 cfg_msw */ 0xFFFF, /* 02 disc_enable */ 0xFFFF, /* 03 wdtr_able */ 0x4444, /* 04 sdtr_speed1 */ 0xFFFF, /* 05 start_motor */ 0xFFFF, /* 06 tagqng_able */ 0xFFFF, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 7, /* 09 adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* 10 scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* 11 termination_se */ 0, /* termination_lvd */ 0xFFE7, /* 12 bios_ctrl */ 0x4444, /* 13 sdtr_speed2 */ 0x4444, /* 14 sdtr_speed3 */ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0x4444, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, 
/* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ PCI_VENDOR_ID_ASP, /* 58 subsysvid */ PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar = { 0, /* 00 cfg_lsw */ 0, /* 01 cfg_msw */ 0, /* 02 disc_enable */ 0, /* 03 wdtr_able */ 0, /* 04 sdtr_speed1 */ 0, /* 05 start_motor */ 0, /* 06 tagqng_able */ 0, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 1, /* 09 adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* 10 scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* 11 termination_se */ 1, /* termination_lvd */ 0, /* 12 bios_ctrl */ 0, /* 13 sdtr_speed2 */ 0, /* 14 sdtr_speed3 */ 1, /* 15 max_host_qng */ 1, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ 0, /* 58 subsysvid */ 0, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config = { ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */ 0x0000, /* 01 cfg_msw */ 0xFFFF, /* 02 disc_enable */ 0xFFFF, /* 03 wdtr_able */ 0x5555, /* 04 sdtr_speed1 */ 0xFFFF, /* 05 start_motor */ 0xFFFF, /* 06 tagqng_able */ 0xFFFF, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 7, /* 09 adapter_scsi_id */ 0, /* bios_boot_delay */ 3, /* 10 scsi_reset_delay */ 0, /* bios_id_lun */ 0, /* 11 termination_se */ 0, /* termination_lvd */ 0xFFE7, /* 12 bios_ctrl */ 0x5555, /* 13 sdtr_speed2 */ 0x5555, /* 14 sdtr_speed3 */ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0x5555, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ PCI_VENDOR_ID_ASP, /* 58 subsysvid */ PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; static ADVEEP_38C1600_CONFIG 
ADVEEP_38C1600_Config_Field_IsChar = { 0, /* 00 cfg_lsw */ 0, /* 01 cfg_msw */ 0, /* 02 disc_enable */ 0, /* 03 wdtr_able */ 0, /* 04 sdtr_speed1 */ 0, /* 05 start_motor */ 0, /* 06 tagqng_able */ 0, /* 07 bios_scan */ 0, /* 08 scam_tolerant */ 1, /* 09 adapter_scsi_id */ 1, /* bios_boot_delay */ 1, /* 10 scsi_reset_delay */ 1, /* bios_id_lun */ 1, /* 11 termination_se */ 1, /* termination_lvd */ 0, /* 12 bios_ctrl */ 0, /* 13 sdtr_speed2 */ 0, /* 14 sdtr_speed3 */ 1, /* 15 max_host_qng */ 1, /* max_dvc_qng */ 0, /* 16 dvc_cntl */ 0, /* 17 sdtr_speed4 */ 0, /* 18 serial_number_word1 */ 0, /* 19 serial_number_word2 */ 0, /* 20 serial_number_word3 */ 0, /* 21 check_sum */ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , /* 22-29 oem_name[16] */ 0, /* 30 dvc_err_code */ 0, /* 31 adv_err_code */ 0, /* 32 adv_err_addr */ 0, /* 33 saved_dvc_err_code */ 0, /* 34 saved_adv_err_code */ 0, /* 35 saved_adv_err_addr */ 0, /* 36 reserved */ 0, /* 37 reserved */ 0, /* 38 reserved */ 0, /* 39 reserved */ 0, /* 40 reserved */ 0, /* 41 reserved */ 0, /* 42 reserved */ 0, /* 43 reserved */ 0, /* 44 reserved */ 0, /* 45 reserved */ 0, /* 46 reserved */ 0, /* 47 reserved */ 0, /* 48 reserved */ 0, /* 49 reserved */ 0, /* 50 reserved */ 0, /* 51 reserved */ 0, /* 52 reserved */ 0, /* 53 reserved */ 0, /* 54 reserved */ 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ 0, /* 58 subsysvid */ 0, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ 0 /* 63 reserved */ }; /* * Wait for EEPROM command to complete */ static void AdvWaitEEPCmd(AdvPortAddr iop_base) { int eep_delay_ms; for (eep_delay_ms = 0; eep_delay_ms < ADV_EEP_DELAY_MS; eep_delay_ms++) { if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) { break; } mdelay(1); } if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) == 0) BUG(); } /* * Read the EEPROM from specified location */ static ushort AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr) { AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_READ | eep_word_addr); AdvWaitEEPCmd(iop_base); return AdvReadWordRegister(iop_base, IOPW_EE_DATA); } /* * Write the EEPROM from 'cfg_buf'. */ static void AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) { ushort *wbuf; ushort addr, chksum; ushort *charfields; wbuf = (ushort *)cfg_buf; charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar; chksum = 0; AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE); AdvWaitEEPCmd(iop_base); /* * Write EEPROM from word 0 to word 20. */ for (addr = ADV_EEP_DVC_CFG_BEGIN; addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) { ushort word; if (*charfields++) { word = cpu_to_le16(*wbuf); } else { word = *wbuf; } chksum += *wbuf; /* Checksum is calculated from word values. */ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); mdelay(ADV_EEP_DELAY_MS); } /* * Write EEPROM checksum at word 21. */ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); wbuf++; charfields++; /* * Write EEPROM OEM name at words 22 to 29. 
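 * oem_name[16] occupies those eight words, and since every one of them is
 * a char field the same cpu_to_le16() treatment used above is applied
 * again before each word is written back.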
*/ for (addr = ADV_EEP_DVC_CTL_BEGIN; addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) { ushort word; if (*charfields++) { word = cpu_to_le16(*wbuf); } else { word = *wbuf; } AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); } AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE); AdvWaitEEPCmd(iop_base); } /* * Write the EEPROM from 'cfg_buf'. */ static void AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf) { ushort *wbuf; ushort *charfields; ushort addr, chksum; wbuf = (ushort *)cfg_buf; charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar; chksum = 0; AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE); AdvWaitEEPCmd(iop_base); /* * Write EEPROM from word 0 to word 20. */ for (addr = ADV_EEP_DVC_CFG_BEGIN; addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) { ushort word; if (*charfields++) { word = cpu_to_le16(*wbuf); } else { word = *wbuf; } chksum += *wbuf; /* Checksum is calculated from word values. */ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); mdelay(ADV_EEP_DELAY_MS); } /* * Write EEPROM checksum at word 21. */ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); wbuf++; charfields++; /* * Write EEPROM OEM name at words 22 to 29. */ for (addr = ADV_EEP_DVC_CTL_BEGIN; addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) { ushort word; if (*charfields++) { word = cpu_to_le16(*wbuf); } else { word = *wbuf; } AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); } AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE); AdvWaitEEPCmd(iop_base); } /* * Write the EEPROM from 'cfg_buf'. */ static void AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf) { ushort *wbuf; ushort *charfields; ushort addr, chksum; wbuf = (ushort *)cfg_buf; charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar; chksum = 0; AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE); AdvWaitEEPCmd(iop_base); /* * Write EEPROM from word 0 to word 20. */ for (addr = ADV_EEP_DVC_CFG_BEGIN; addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) { ushort word; if (*charfields++) { word = cpu_to_le16(*wbuf); } else { word = *wbuf; } chksum += *wbuf; /* Checksum is calculated from word values. */ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); mdelay(ADV_EEP_DELAY_MS); } /* * Write EEPROM checksum at word 21. */ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); wbuf++; charfields++; /* * Write EEPROM OEM name at words 22 to 29. */ for (addr = ADV_EEP_DVC_CTL_BEGIN; addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) { ushort word; if (*charfields++) { word = cpu_to_le16(*wbuf); } else { word = *wbuf; } AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word); AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr); AdvWaitEEPCmd(iop_base); } AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE); AdvWaitEEPCmd(iop_base); } /* * Read EEPROM configuration into the specified buffer. 
* * Return a checksum based on the EEPROM configuration read. */ static ushort AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) { ushort wval, chksum; ushort *wbuf; int eep_addr; ushort *charfields; charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar; wbuf = (ushort *)cfg_buf; chksum = 0; for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { wval = AdvReadEEPWord(iop_base, eep_addr); chksum += wval; /* Checksum is calculated from word values. */ if (*charfields++) { *wbuf = le16_to_cpu(wval); } else { *wbuf = wval; } } /* Read checksum word. */ *wbuf = AdvReadEEPWord(iop_base, eep_addr); wbuf++; charfields++; /* Read rest of EEPROM not covered by the checksum. */ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { *wbuf = AdvReadEEPWord(iop_base, eep_addr); if (*charfields++) { *wbuf = le16_to_cpu(*wbuf); } } return chksum; } /* * Read EEPROM configuration into the specified buffer. * * Return a checksum based on the EEPROM configuration read. */ static ushort AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf) { ushort wval, chksum; ushort *wbuf; int eep_addr; ushort *charfields; charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar; wbuf = (ushort *)cfg_buf; chksum = 0; for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { wval = AdvReadEEPWord(iop_base, eep_addr); chksum += wval; /* Checksum is calculated from word values. */ if (*charfields++) { *wbuf = le16_to_cpu(wval); } else { *wbuf = wval; } } /* Read checksum word. */ *wbuf = AdvReadEEPWord(iop_base, eep_addr); wbuf++; charfields++; /* Read rest of EEPROM not covered by the checksum. */ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { *wbuf = AdvReadEEPWord(iop_base, eep_addr); if (*charfields++) { *wbuf = le16_to_cpu(*wbuf); } } return chksum; } /* * Read EEPROM configuration into the specified buffer. * * Return a checksum based on the EEPROM configuration read. */ static ushort AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf) { ushort wval, chksum; ushort *wbuf; int eep_addr; ushort *charfields; charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar; wbuf = (ushort *)cfg_buf; chksum = 0; for (eep_addr = ADV_EEP_DVC_CFG_BEGIN; eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) { wval = AdvReadEEPWord(iop_base, eep_addr); chksum += wval; /* Checksum is calculated from word values. */ if (*charfields++) { *wbuf = le16_to_cpu(wval); } else { *wbuf = wval; } } /* Read checksum word. */ *wbuf = AdvReadEEPWord(iop_base, eep_addr); wbuf++; charfields++; /* Read rest of EEPROM not covered by the checksum. */ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN; eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) { *wbuf = AdvReadEEPWord(iop_base, eep_addr); if (*charfields++) { *wbuf = le16_to_cpu(*wbuf); } } return chksum; } /* * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while * all of this is done. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Note: Chip is stopped on entry. 
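 *
 * A sketch of the expected call pattern (the real caller is
 * AdvInitGetConfig() further below, which treats all three wide chip
 * types the same way):
 *
 *	status = AdvInitFrom3550EEP(asc_dvc);
 *	warn_code |= status;
 *
 * with asc_dvc->err_code checked separately for the fatal case.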
*/ static int AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; ushort warn_code; ADVEEP_3550_CONFIG eep_config; iop_base = asc_dvc->iop_base; warn_code = 0; /* * Read the board's EEPROM configuration. * * Set default values if a bad checksum is found. */ if (AdvGet3550EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { warn_code |= ASC_WARN_EEPROM_CHKSUM; /* * Set EEPROM default values. */ memcpy(&eep_config, &Default_3550_EEPROM_Config, sizeof(ADVEEP_3550_CONFIG)); /* * Assume the 6 byte board serial number that was read from * EEPROM is correct even if the EEPROM checksum failed. */ eep_config.serial_number_word3 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); eep_config.serial_number_word2 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); eep_config.serial_number_word1 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); AdvSet3550EEPConfig(iop_base, &eep_config); } /* * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the * EEPROM configuration that was read. * * This is the mapping of EEPROM fields to Adv Library fields. */ asc_dvc->wdtr_able = eep_config.wdtr_able; asc_dvc->sdtr_able = eep_config.sdtr_able; asc_dvc->ultra_able = eep_config.ultra_able; asc_dvc->tagqng_able = eep_config.tagqng_able; asc_dvc->cfg->disc_enable = eep_config.disc_enable; asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID); asc_dvc->start_motor = eep_config.start_motor; asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; asc_dvc->bios_ctrl = eep_config.bios_ctrl; asc_dvc->no_scam = eep_config.scam_tolerant; asc_dvc->cfg->serial1 = eep_config.serial_number_word1; asc_dvc->cfg->serial2 = eep_config.serial_number_word2; asc_dvc->cfg->serial3 = eep_config.serial_number_word3; /* * Set the host maximum queuing (max. 253, min. 16) and the per device * maximum queuing (max. 63, min. 4). */ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_host_qng == 0) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else { eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; } } if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_dvc_qng == 0) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else { eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; } } /* * If 'max_dvc_qng' is greater than 'max_host_qng', then * set 'max_dvc_qng' to 'max_host_qng'. */ if (eep_config.max_dvc_qng > eep_config.max_host_qng) { eep_config.max_dvc_qng = eep_config.max_host_qng; } /* * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng' * values based on possibly adjusted EEPROM values. */ asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; /* * If the EEPROM 'termination' field is set to automatic (0), then set * the ADV_DVC_CFG 'termination' field to automatic also. * * If the termination is specified with a non-zero 'termination' * value check that a legal value is set and set the ADV_DVC_CFG * 'termination' field appropriately. */ if (eep_config.termination == 0) { asc_dvc->cfg->termination = 0; /* auto termination */ } else { /* Enable manual control with low off / high off. 
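 * For reference, the full EEPROM 'termination' encoding handled here:
 *
 *	0 - automatic termination          (TERM_CTL bits all clear)
 *	1 - manual, low off / high off     (TERM_CTL_SEL)
 *	2 - manual, low off / high on      (TERM_CTL_SEL | TERM_CTL_H)
 *	3 - manual, low on  / high on      (TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L)
 *	anything else - treated as invalid: fall back to automatic and set
 *	ASC_WARN_EEPROM_TERMINATION.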
*/ if (eep_config.termination == 1) { asc_dvc->cfg->termination = TERM_CTL_SEL; /* Enable manual control with low off / high on. */ } else if (eep_config.termination == 2) { asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H; /* Enable manual control with low on / high on. */ } else if (eep_config.termination == 3) { asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L; } else { /* * The EEPROM 'termination' field contains a bad value. Use * automatic termination instead. */ asc_dvc->cfg->termination = 0; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } return warn_code; } /* * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while * all of this is done. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Note: Chip is stopped on entry. */ static int AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; ushort warn_code; ADVEEP_38C0800_CONFIG eep_config; uchar tid, termination; ushort sdtr_speed = 0; iop_base = asc_dvc->iop_base; warn_code = 0; /* * Read the board's EEPROM configuration. * * Set default values if a bad checksum is found. */ if (AdvGet38C0800EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { warn_code |= ASC_WARN_EEPROM_CHKSUM; /* * Set EEPROM default values. */ memcpy(&eep_config, &Default_38C0800_EEPROM_Config, sizeof(ADVEEP_38C0800_CONFIG)); /* * Assume the 6 byte board serial number that was read from * EEPROM is correct even if the EEPROM checksum failed. */ eep_config.serial_number_word3 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); eep_config.serial_number_word2 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); eep_config.serial_number_word1 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); AdvSet38C0800EEPConfig(iop_base, &eep_config); } /* * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the * EEPROM configuration that was read. * * This is the mapping of EEPROM fields to Adv Library fields. */ asc_dvc->wdtr_able = eep_config.wdtr_able; asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1; asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2; asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3; asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4; asc_dvc->tagqng_able = eep_config.tagqng_able; asc_dvc->cfg->disc_enable = eep_config.disc_enable; asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID); asc_dvc->start_motor = eep_config.start_motor; asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; asc_dvc->bios_ctrl = eep_config.bios_ctrl; asc_dvc->no_scam = eep_config.scam_tolerant; asc_dvc->cfg->serial1 = eep_config.serial_number_word1; asc_dvc->cfg->serial2 = eep_config.serial_number_word2; asc_dvc->cfg->serial3 = eep_config.serial_number_word3; /* * For every Target ID if any of its 'sdtr_speed[1234]' bits * are set, then set an 'sdtr_able' bit for it. */ asc_dvc->sdtr_able = 0; for (tid = 0; tid <= ADV_MAX_TID; tid++) { if (tid == 0) { sdtr_speed = asc_dvc->sdtr_speed1; } else if (tid == 4) { sdtr_speed = asc_dvc->sdtr_speed2; } else if (tid == 8) { sdtr_speed = asc_dvc->sdtr_speed3; } else if (tid == 12) { sdtr_speed = asc_dvc->sdtr_speed4; } if (sdtr_speed & ADV_MAX_TID) { asc_dvc->sdtr_able |= (1 << tid); } sdtr_speed >>= 4; } /* * Set the host maximum queuing (max. 253, min. 16) and the per device * maximum queuing (max. 
63, min. 4). */ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_host_qng == 0) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else { eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; } } if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_dvc_qng == 0) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else { eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; } } /* * If 'max_dvc_qng' is greater than 'max_host_qng', then * set 'max_dvc_qng' to 'max_host_qng'. */ if (eep_config.max_dvc_qng > eep_config.max_host_qng) { eep_config.max_dvc_qng = eep_config.max_host_qng; } /* * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng' * values based on possibly adjusted EEPROM values. */ asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; /* * If the EEPROM 'termination' field is set to automatic (0), then set * the ADV_DVC_CFG 'termination' field to automatic also. * * If the termination is specified with a non-zero 'termination' * value check that a legal value is set and set the ADV_DVC_CFG * 'termination' field appropriately. */ if (eep_config.termination_se == 0) { termination = 0; /* auto termination for SE */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_se == 1) { termination = 0; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_se == 2) { termination = TERM_SE_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_se == 3) { termination = TERM_SE; } else { /* * The EEPROM 'termination_se' field contains a bad value. * Use automatic termination instead. */ termination = 0; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } if (eep_config.termination_lvd == 0) { asc_dvc->cfg->termination = termination; /* auto termination for LVD */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_lvd == 1) { asc_dvc->cfg->termination = termination; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_lvd == 2) { asc_dvc->cfg->termination = termination | TERM_LVD_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_lvd == 3) { asc_dvc->cfg->termination = termination | TERM_LVD; } else { /* * The EEPROM 'termination_lvd' field contains a bad value. * Use automatic termination instead. */ asc_dvc->cfg->termination = termination; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } return warn_code; } /* * Read the board's EEPROM configuration. Set fields in ASC_DVC_VAR and * ASC_DVC_CFG based on the EEPROM settings. The chip is stopped while * all of this is done. * * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Note: Chip is stopped on entry. */ static int AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; ushort warn_code; ADVEEP_38C1600_CONFIG eep_config; uchar tid, termination; ushort sdtr_speed = 0; iop_base = asc_dvc->iop_base; warn_code = 0; /* * Read the board's EEPROM configuration. * * Set default values if a bad checksum is found. 
*/ if (AdvGet38C1600EEPConfig(iop_base, &eep_config) != eep_config.check_sum) { struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc); warn_code |= ASC_WARN_EEPROM_CHKSUM; /* * Set EEPROM default values. */ memcpy(&eep_config, &Default_38C1600_EEPROM_Config, sizeof(ADVEEP_38C1600_CONFIG)); if (PCI_FUNC(pdev->devfn) != 0) { u8 ints; /* * Disable Bit 14 (BIOS_ENABLE) to fix SPARC Ultra 60 * and old Mac system booting problem. The Expansion * ROM must be disabled in Function 1 for these systems */ eep_config.cfg_lsw &= ~ADV_EEPROM_BIOS_ENABLE; /* * Clear the INTAB (bit 11) if the GPIO 0 input * indicates the Function 1 interrupt line is wired * to INTB. * * Set/Clear Bit 11 (INTAB) from the GPIO bit 0 input: * 1 - Function 1 interrupt line wired to INT A. * 0 - Function 1 interrupt line wired to INT B. * * Note: Function 0 is always wired to INTA. * Put all 5 GPIO bits in input mode and then read * their input values. */ AdvWriteByteRegister(iop_base, IOPB_GPIO_CNTL, 0); ints = AdvReadByteRegister(iop_base, IOPB_GPIO_DATA); if ((ints & 0x01) == 0) eep_config.cfg_lsw &= ~ADV_EEPROM_INTAB; } /* * Assume the 6 byte board serial number that was read from * EEPROM is correct even if the EEPROM checksum failed. */ eep_config.serial_number_word3 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1); eep_config.serial_number_word2 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2); eep_config.serial_number_word1 = AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3); AdvSet38C1600EEPConfig(iop_base, &eep_config); } /* * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the * EEPROM configuration that was read. * * This is the mapping of EEPROM fields to Adv Library fields. */ asc_dvc->wdtr_able = eep_config.wdtr_able; asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1; asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2; asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3; asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4; asc_dvc->ppr_able = 0; asc_dvc->tagqng_able = eep_config.tagqng_able; asc_dvc->cfg->disc_enable = eep_config.disc_enable; asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ASC_MAX_TID); asc_dvc->start_motor = eep_config.start_motor; asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay; asc_dvc->bios_ctrl = eep_config.bios_ctrl; asc_dvc->no_scam = eep_config.scam_tolerant; /* * For every Target ID if any of its 'sdtr_speed[1234]' bits * are set, then set an 'sdtr_able' bit for it. */ asc_dvc->sdtr_able = 0; for (tid = 0; tid <= ASC_MAX_TID; tid++) { if (tid == 0) { sdtr_speed = asc_dvc->sdtr_speed1; } else if (tid == 4) { sdtr_speed = asc_dvc->sdtr_speed2; } else if (tid == 8) { sdtr_speed = asc_dvc->sdtr_speed3; } else if (tid == 12) { sdtr_speed = asc_dvc->sdtr_speed4; } if (sdtr_speed & ASC_MAX_TID) { asc_dvc->sdtr_able |= (1 << tid); } sdtr_speed >>= 4; } /* * Set the host maximum queuing (max. 253, min. 16) and the per device * maximum queuing (max. 63, min. 4). */ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. 
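 * Net effect of this block and the one after it: a value of 0 selects
 * the default maximum, anything else is clamped into the legal
 * [ASC_DEF_MIN_*_QNG, ASC_DEF_MAX_*_QNG] range.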
*/ if (eep_config.max_host_qng == 0) { eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG; } else { eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG; } } if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) { /* If the value is zero, assume it is uninitialized. */ if (eep_config.max_dvc_qng == 0) { eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG; } else { eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG; } } /* * If 'max_dvc_qng' is greater than 'max_host_qng', then * set 'max_dvc_qng' to 'max_host_qng'. */ if (eep_config.max_dvc_qng > eep_config.max_host_qng) { eep_config.max_dvc_qng = eep_config.max_host_qng; } /* * Set ASC_DVC_VAR 'max_host_qng' and ASC_DVC_VAR 'max_dvc_qng' * values based on possibly adjusted EEPROM values. */ asc_dvc->max_host_qng = eep_config.max_host_qng; asc_dvc->max_dvc_qng = eep_config.max_dvc_qng; /* * If the EEPROM 'termination' field is set to automatic (0), then set * the ASC_DVC_CFG 'termination' field to automatic also. * * If the termination is specified with a non-zero 'termination' * value check that a legal value is set and set the ASC_DVC_CFG * 'termination' field appropriately. */ if (eep_config.termination_se == 0) { termination = 0; /* auto termination for SE */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_se == 1) { termination = 0; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_se == 2) { termination = TERM_SE_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_se == 3) { termination = TERM_SE; } else { /* * The EEPROM 'termination_se' field contains a bad value. * Use automatic termination instead. */ termination = 0; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } if (eep_config.termination_lvd == 0) { asc_dvc->cfg->termination = termination; /* auto termination for LVD */ } else { /* Enable manual control with low off / high off. */ if (eep_config.termination_lvd == 1) { asc_dvc->cfg->termination = termination; /* Enable manual control with low off / high on. */ } else if (eep_config.termination_lvd == 2) { asc_dvc->cfg->termination = termination | TERM_LVD_HI; /* Enable manual control with low on / high on. */ } else if (eep_config.termination_lvd == 3) { asc_dvc->cfg->termination = termination | TERM_LVD; } else { /* * The EEPROM 'termination_lvd' field contains a bad value. * Use automatic termination instead. */ asc_dvc->cfg->termination = termination; warn_code |= ASC_WARN_EEPROM_TERMINATION; } } return warn_code; } /* * Initialize the ADV_DVC_VAR structure. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. */ static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var; unsigned short warn_code = 0; AdvPortAddr iop_base = asc_dvc->iop_base; u16 cmd; int status; asc_dvc->err_code = 0; /* * Save the state of the PCI Configuration Command Register * "Parity Error Response Control" Bit. If the bit is clear (0), * in AdvInitAsc3550/38C0800Driver() tell the microcode to ignore * DMA parity errors. 
*/ asc_dvc->cfg->control_flag = 0; pci_read_config_word(pdev, PCI_COMMAND, &cmd); if ((cmd & PCI_COMMAND_PARITY) == 0) asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR; asc_dvc->cfg->chip_version = AdvGetChipVersion(iop_base, asc_dvc->bus_type); ASC_DBG(1, "iopb_chip_id_1: 0x%x 0x%x\n", (ushort)AdvReadByteRegister(iop_base, IOPB_CHIP_ID_1), (ushort)ADV_CHIP_ID_BYTE); ASC_DBG(1, "iopw_chip_id_0: 0x%x 0x%x\n", (ushort)AdvReadWordRegister(iop_base, IOPW_CHIP_ID_0), (ushort)ADV_CHIP_ID_WORD); /* * Reset the chip to start and allow register writes. */ if (AdvFindSignature(iop_base) == 0) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return ADV_ERROR; } else { /* * The caller must set 'chip_type' to a valid setting. */ if (asc_dvc->chip_type != ADV_CHIP_ASC3550 && asc_dvc->chip_type != ADV_CHIP_ASC38C0800 && asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { asc_dvc->err_code |= ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } /* * Reset Chip. */ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET); mdelay(100); AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_WR_IO_REG); if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) { status = AdvInitFrom38C1600EEP(asc_dvc); } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { status = AdvInitFrom38C0800EEP(asc_dvc); } else { status = AdvInitFrom3550EEP(asc_dvc); } warn_code |= status; } if (warn_code != 0) shost_printk(KERN_WARNING, shost, "warning: 0x%x\n", warn_code); if (asc_dvc->err_code) shost_printk(KERN_ERR, shost, "error code 0x%x\n", asc_dvc->err_code); return asc_dvc->err_code; } #endif static const struct scsi_host_template advansys_template = { .proc_name = DRV_NAME, #ifdef CONFIG_PROC_FS .show_info = advansys_show_info, #endif .name = DRV_NAME, .info = advansys_info, .queuecommand = advansys_queuecommand, .eh_host_reset_handler = advansys_reset, .bios_param = advansys_biosparam, .slave_configure = advansys_slave_configure, .cmd_size = sizeof(struct advansys_cmd), }; static int advansys_wide_init_chip(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; size_t sgblk_pool_size; int warn_code, err_code; /* * Allocate buffer carrier structures. The total size * is about 8 KB, so allocate all at once. */ adv_dvc->carrier = dma_alloc_coherent(board->dev, ADV_CARRIER_BUFSIZE, &adv_dvc->carrier_addr, GFP_KERNEL); ASC_DBG(1, "carrier 0x%p\n", adv_dvc->carrier); if (!adv_dvc->carrier) goto kmalloc_failed; /* * Allocate up to 'max_host_qng' request structures for the Wide * board. The total size is about 16 KB, so allocate all at once. * If the allocation fails decrement and try again. */ board->adv_reqp_size = adv_dvc->max_host_qng * sizeof(adv_req_t); if (board->adv_reqp_size & 0x1f) { ASC_DBG(1, "unaligned reqp %lu bytes\n", sizeof(adv_req_t)); board->adv_reqp_size = ADV_32BALIGN(board->adv_reqp_size); } board->adv_reqp = dma_alloc_coherent(board->dev, board->adv_reqp_size, &board->adv_reqp_addr, GFP_KERNEL); if (!board->adv_reqp) goto kmalloc_failed; ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", board->adv_reqp, adv_dvc->max_host_qng, board->adv_reqp_size); /* * Allocate up to ADV_TOT_SG_BLOCK request structures for * the Wide board. Each structure is about 136 bytes. 
*/ sgblk_pool_size = sizeof(adv_sgblk_t) * ADV_TOT_SG_BLOCK; board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev, sgblk_pool_size, 32, 0); ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", ADV_TOT_SG_BLOCK, sizeof(adv_sgblk_t), sgblk_pool_size); if (!board->adv_sgblk_pool) goto kmalloc_failed; if (adv_dvc->chip_type == ADV_CHIP_ASC3550) { ASC_DBG(2, "AdvInitAsc3550Driver()\n"); warn_code = AdvInitAsc3550Driver(adv_dvc); } else if (adv_dvc->chip_type == ADV_CHIP_ASC38C0800) { ASC_DBG(2, "AdvInitAsc38C0800Driver()\n"); warn_code = AdvInitAsc38C0800Driver(adv_dvc); } else { ASC_DBG(2, "AdvInitAsc38C1600Driver()\n"); warn_code = AdvInitAsc38C1600Driver(adv_dvc); } err_code = adv_dvc->err_code; if (warn_code || err_code) { shost_printk(KERN_WARNING, shost, "error: warn 0x%x, error " "0x%x\n", warn_code, err_code); } goto exit; kmalloc_failed: shost_printk(KERN_ERR, shost, "error: kmalloc() failed\n"); err_code = ADV_ERROR; exit: return err_code; } static void advansys_wide_free_mem(struct asc_board *board) { struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; if (adv_dvc->carrier) { dma_free_coherent(board->dev, ADV_CARRIER_BUFSIZE, adv_dvc->carrier, adv_dvc->carrier_addr); adv_dvc->carrier = NULL; } if (board->adv_reqp) { dma_free_coherent(board->dev, board->adv_reqp_size, board->adv_reqp, board->adv_reqp_addr); board->adv_reqp = NULL; } if (board->adv_sgblk_pool) { dma_pool_destroy(board->adv_sgblk_pool); board->adv_sgblk_pool = NULL; } } static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, int bus_type) { struct pci_dev *pdev; struct asc_board *boardp = shost_priv(shost); ASC_DVC_VAR *asc_dvc_varp = NULL; ADV_DVC_VAR *adv_dvc_varp = NULL; int share_irq, warn_code, ret; pdev = (bus_type == ASC_IS_PCI) ? to_pci_dev(boardp->dev) : NULL; if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(1, "narrow board\n"); asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; asc_dvc_varp->bus_type = bus_type; asc_dvc_varp->drv_ptr = boardp; asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg; asc_dvc_varp->iop_base = iop; } else { #ifdef CONFIG_PCI adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; adv_dvc_varp->drv_ptr = boardp; adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW) { ASC_DBG(1, "wide board ASC-3550\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; } else if (pdev->device == PCI_DEVICE_ID_38C0800_REV1) { ASC_DBG(1, "wide board ASC-38C0800\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; } else { ASC_DBG(1, "wide board ASC-38C1600\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C1600; } boardp->asc_n_io_port = pci_resource_len(pdev, 1); boardp->ioremap_addr = pci_ioremap_bar(pdev, 1); if (!boardp->ioremap_addr) { shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) " "returned NULL\n", (long)pci_resource_start(pdev, 1), boardp->asc_n_io_port); ret = -ENODEV; goto err_shost; } adv_dvc_varp->iop_base = (AdvPortAddr)boardp->ioremap_addr; ASC_DBG(1, "iop_base: 0x%p\n", adv_dvc_varp->iop_base); /* * Even though it isn't used to access wide boards, other * than for the debug line below, save I/O Port address so * that it can be reported. */ boardp->ioport = iop; ASC_DBG(1, "iopb_chip_id_1 0x%x, iopw_chip_id_0 0x%x\n", (ushort)inp(iop + 1), (ushort)inpw(iop)); #endif /* CONFIG_PCI */ } if (ASC_NARROW_BOARD(boardp)) { /* * Set the board bus type and PCI IRQ before * calling AscInitGetConfig(). 
*/ switch (asc_dvc_varp->bus_type) { #ifdef CONFIG_ISA case ASC_IS_VL: share_irq = 0; break; case ASC_IS_EISA: share_irq = IRQF_SHARED; break; #endif /* CONFIG_ISA */ #ifdef CONFIG_PCI case ASC_IS_PCI: share_irq = IRQF_SHARED; break; #endif /* CONFIG_PCI */ default: shost_printk(KERN_ERR, shost, "unknown adapter type: " "%d\n", asc_dvc_varp->bus_type); share_irq = 0; break; } /* * NOTE: AscInitGetConfig() may change the board's * bus_type value. The bus_type value should no * longer be used. If the bus_type field must be * referenced only use the bit-wise AND operator "&". */ ASC_DBG(2, "AscInitGetConfig()\n"); ret = AscInitGetConfig(shost) ? -ENODEV : 0; } else { #ifdef CONFIG_PCI /* * For Wide boards set PCI information before calling * AdvInitGetConfig(). */ share_irq = IRQF_SHARED; ASC_DBG(2, "AdvInitGetConfig()\n"); ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0; #else share_irq = 0; ret = -ENODEV; #endif /* CONFIG_PCI */ } if (ret) goto err_unmap; /* * Save the EEPROM configuration so that it can be displayed * from /proc/scsi/advansys/[0...]. */ if (ASC_NARROW_BOARD(boardp)) { ASCEEP_CONFIG *ep; /* * Set the adapter's target id bit in the 'init_tidmask' field. */ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id); /* * Save EEPROM settings for the board. */ ep = &boardp->eep_config.asc_eep; ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable; ep->disc_enable = asc_dvc_varp->cfg->disc_enable; ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled; ASC_EEP_SET_DMA_SPD(ep, ASC_DEF_ISA_DMA_SPEED); ep->start_motor = asc_dvc_varp->start_motor; ep->cntl = asc_dvc_varp->dvc_cntl; ep->no_scam = asc_dvc_varp->no_scam; ep->max_total_qng = asc_dvc_varp->max_total_qng; ASC_EEP_SET_CHIP_ID(ep, asc_dvc_varp->cfg->chip_scsi_id); /* 'max_tag_qng' is set to the same value for every device. */ ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0]; ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0]; ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1]; ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2]; ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3]; ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4]; ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5]; /* * Modify board configuration. */ ASC_DBG(2, "AscInitSetConfig()\n"); ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0; if (ret) goto err_unmap; } else { ADVEEP_3550_CONFIG *ep_3550; ADVEEP_38C0800_CONFIG *ep_38C0800; ADVEEP_38C1600_CONFIG *ep_38C1600; /* * Save Wide EEP Configuration Information. 
*/ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { ep_3550 = &boardp->eep_config.adv_3550_eep; ep_3550->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_3550->max_host_qng = adv_dvc_varp->max_host_qng; ep_3550->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_3550->termination = adv_dvc_varp->cfg->termination; ep_3550->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_3550->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_3550->wdtr_able = adv_dvc_varp->wdtr_able; ep_3550->sdtr_able = adv_dvc_varp->sdtr_able; ep_3550->ultra_able = adv_dvc_varp->ultra_able; ep_3550->tagqng_able = adv_dvc_varp->tagqng_able; ep_3550->start_motor = adv_dvc_varp->start_motor; ep_3550->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_3550->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_3550->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_3550->serial_number_word3 = adv_dvc_varp->cfg->serial3; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { ep_38C0800 = &boardp->eep_config.adv_38C0800_eep; ep_38C0800->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_38C0800->max_host_qng = adv_dvc_varp->max_host_qng; ep_38C0800->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_38C0800->termination_lvd = adv_dvc_varp->cfg->termination; ep_38C0800->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_38C0800->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_38C0800->wdtr_able = adv_dvc_varp->wdtr_able; ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C0800->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; ep_38C0800->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; ep_38C0800->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; ep_38C0800->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C0800->start_motor = adv_dvc_varp->start_motor; ep_38C0800->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_38C0800->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_38C0800->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_38C0800->serial_number_word3 = adv_dvc_varp->cfg->serial3; } else { ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; ep_38C1600->adapter_scsi_id = adv_dvc_varp->chip_scsi_id; ep_38C1600->max_host_qng = adv_dvc_varp->max_host_qng; ep_38C1600->max_dvc_qng = adv_dvc_varp->max_dvc_qng; ep_38C1600->termination_lvd = adv_dvc_varp->cfg->termination; ep_38C1600->disc_enable = adv_dvc_varp->cfg->disc_enable; ep_38C1600->bios_ctrl = adv_dvc_varp->bios_ctrl; ep_38C1600->wdtr_able = adv_dvc_varp->wdtr_able; ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C1600->sdtr_speed1 = adv_dvc_varp->sdtr_speed1; ep_38C1600->sdtr_speed2 = adv_dvc_varp->sdtr_speed2; ep_38C1600->sdtr_speed3 = adv_dvc_varp->sdtr_speed3; ep_38C1600->sdtr_speed4 = adv_dvc_varp->sdtr_speed4; ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able; ep_38C1600->start_motor = adv_dvc_varp->start_motor; ep_38C1600->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait; ep_38C1600->serial_number_word1 = adv_dvc_varp->cfg->serial1; ep_38C1600->serial_number_word2 = adv_dvc_varp->cfg->serial2; ep_38C1600->serial_number_word3 = adv_dvc_varp->cfg->serial3; } /* * Set the adapter's target id bit in the 'init_tidmask' field. */ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id); } /* * Channels are numbered beginning with 0. For AdvanSys one host * structure supports one channel. Multi-channel boards have a * separate host structure for each channel. 
*/ shost->max_channel = 0; if (ASC_NARROW_BOARD(boardp)) { shost->max_id = ASC_MAX_TID + 1; shost->max_lun = ASC_MAX_LUN + 1; shost->max_cmd_len = ASC_MAX_CDB_LEN; shost->io_port = asc_dvc_varp->iop_base; boardp->asc_n_io_port = ASC_IOADR_GAP; shost->this_id = asc_dvc_varp->cfg->chip_scsi_id; /* Set maximum number of queues the adapter can handle. */ shost->can_queue = asc_dvc_varp->max_total_qng; } else { shost->max_id = ADV_MAX_TID + 1; shost->max_lun = ADV_MAX_LUN + 1; shost->max_cmd_len = ADV_MAX_CDB_LEN; /* * Save the I/O Port address and length even though * I/O ports are not used to access Wide boards. * Instead the Wide boards are accessed with * PCI Memory Mapped I/O. */ shost->io_port = iop; shost->this_id = adv_dvc_varp->chip_scsi_id; /* Set maximum number of queues the adapter can handle. */ shost->can_queue = adv_dvc_varp->max_host_qng; } /* * Set the maximum number of scatter-gather elements the * adapter can handle. */ if (ASC_NARROW_BOARD(boardp)) { /* * Allow two commands with 'sg_tablesize' scatter-gather * elements to be executed simultaneously. This value is * the theoretical hardware limit. It may be decreased * below. */ shost->sg_tablesize = (((asc_dvc_varp->max_total_qng - 2) / 2) * ASC_SG_LIST_PER_Q) + 1; } else { shost->sg_tablesize = ADV_MAX_SG_LIST; } /* * The value of 'sg_tablesize' can not exceed the SCSI * mid-level driver definition of SG_ALL. SG_ALL also * must not be exceeded, because it is used to define the * size of the scatter-gather table in 'struct asc_sg_head'. */ if (shost->sg_tablesize > SG_ALL) { shost->sg_tablesize = SG_ALL; } ASC_DBG(1, "sg_tablesize: %d\n", shost->sg_tablesize); /* BIOS start address. */ if (ASC_NARROW_BOARD(boardp)) { shost->base = AscGetChipBiosAddress(asc_dvc_varp->iop_base, asc_dvc_varp->bus_type); } else { /* * Fill-in BIOS board variables. The Wide BIOS saves * information in LRAM that is used by the driver. */ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_SIGNATURE, boardp->bios_signature); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_VERSION, boardp->bios_version); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODESEG, boardp->bios_codeseg); AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODELEN, boardp->bios_codelen); ASC_DBG(1, "bios_signature 0x%x, bios_version 0x%x\n", boardp->bios_signature, boardp->bios_version); ASC_DBG(1, "bios_codeseg 0x%x, bios_codelen 0x%x\n", boardp->bios_codeseg, boardp->bios_codelen); /* * If the BIOS saved a valid signature, then fill in * the BIOS code segment base address. */ if (boardp->bios_signature == 0x55AA) { /* * Convert x86 realmode code segment to a linear * address by shifting left 4. */ shost->base = ((ulong)boardp->bios_codeseg << 4); } else { shost->base = 0; } } /* * Register Board Resources - I/O Port, DMA, IRQ */ /* Register DMA Channel for Narrow boards. */ shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */ /* Register IRQ Number. */ ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost); ret = request_irq(boardp->irq, advansys_interrupt, share_irq, DRV_NAME, shost); if (ret) { if (ret == -EBUSY) { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "already in use\n", boardp->irq); } else if (ret == -EINVAL) { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "not valid\n", boardp->irq); } else { shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x " "failed with %d\n", boardp->irq, ret); } goto err_unmap; } /* * Initialize board RISC chip and enable interrupts. 
*/ if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(2, "AscInitAsc1000Driver()\n"); asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); if (!asc_dvc_varp->overrun_buf) { ret = -ENOMEM; goto err_free_irq; } warn_code = AscInitAsc1000Driver(asc_dvc_varp); if (warn_code || asc_dvc_varp->err_code) { shost_printk(KERN_ERR, shost, "error: init_state 0x%x, " "warn 0x%x, error 0x%x\n", asc_dvc_varp->init_state, warn_code, asc_dvc_varp->err_code); if (!asc_dvc_varp->overrun_dma) { ret = -ENODEV; goto err_free_mem; } } } else { if (advansys_wide_init_chip(shost)) { ret = -ENODEV; goto err_free_mem; } } ASC_DBG_PRT_SCSI_HOST(2, shost); ret = scsi_add_host(shost, boardp->dev); if (ret) goto err_free_mem; scsi_scan_host(shost); return 0; err_free_mem: if (ASC_NARROW_BOARD(boardp)) { if (asc_dvc_varp->overrun_dma) dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(asc_dvc_varp->overrun_buf); } else advansys_wide_free_mem(boardp); err_free_irq: free_irq(boardp->irq, shost); err_unmap: if (boardp->ioremap_addr) iounmap(boardp->ioremap_addr); #ifdef CONFIG_PCI err_shost: #endif return ret; } /* * advansys_release() * * Release resources allocated for a single AdvanSys adapter. */ static int advansys_release(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DBG(1, "begin\n"); scsi_remove_host(shost); free_irq(board->irq, shost); if (ASC_NARROW_BOARD(board)) { dma_unmap_single(board->dev, board->dvc_var.asc_dvc_var.overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(board->dvc_var.asc_dvc_var.overrun_buf); } else { iounmap(board->ioremap_addr); advansys_wide_free_mem(board); } scsi_host_put(shost); ASC_DBG(1, "end\n"); return 0; } #define ASC_IOADR_TABLE_MAX_IX 11 static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = { 0x100, 0x0110, 0x120, 0x0130, 0x140, 0x0150, 0x0190, 0x0210, 0x0230, 0x0250, 0x0330 }; static void advansys_vlb_remove(struct device *dev, unsigned int id) { int ioport = _asc_def_iop_base[id]; advansys_release(dev_get_drvdata(dev)); release_region(ioport, ASC_IOADR_GAP); } /* * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as: * 000: invalid * 001: 10 * 010: 11 * 011: 12 * 100: invalid * 101: 14 * 110: 15 * 111: invalid */ static unsigned int advansys_vlb_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9; if ((chip_irq < 10) || (chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int advansys_vlb_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; /* * I don't think this condition can actually happen, but the old * driver did it, and the chances of finding a VLB setup in 2007 * to do testing with is slight to none. 
*/ if (AscGetChipVersion(iop_base, ASC_IS_VL) > ASC_CHIP_MAX_VER_VL) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_vlb_irq_no(iop_base); board->dev = dev; board->shost = shost; err = advansys_board_found(shost, iop_base, ASC_IS_VL); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return -ENODEV; } static struct isa_driver advansys_vlb_driver = { .probe = advansys_vlb_probe, .remove = advansys_vlb_remove, .driver = { .owner = THIS_MODULE, .name = "advansys_vlb", }, }; static struct eisa_device_id advansys_eisa_table[] = { { "ABP7401" }, { "ABP7501" }, { "" } }; MODULE_DEVICE_TABLE(eisa, advansys_eisa_table); /* * EISA is a little more tricky than PCI; each EISA device may have two * channels, and this driver is written to make each channel its own Scsi_Host */ struct eisa_scsi_data { struct Scsi_Host *host[2]; }; /* * The EISA IRQ number is found in bits 8 to 10 of the CfgLsw. It decodes as: * 000: 10 * 001: 11 * 010: 12 * 011: invalid * 100: 14 * 101: 15 * 110: invalid * 111: invalid */ static unsigned int advansys_eisa_irq_no(struct eisa_device *edev) { unsigned short cfg_lsw = inw(edev->base_addr + 0xc86); unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10; if ((chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int advansys_eisa_probe(struct device *dev) { int i, ioport, irq = 0; int err; struct eisa_device *edev = to_eisa_device(dev); struct eisa_scsi_data *data; err = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto fail; ioport = edev->base_addr + 0xc30; err = -ENODEV; for (i = 0; i < 2; i++, ioport += 0x20) { struct asc_board *board; struct Scsi_Host *shost; if (!request_region(ioport, ASC_IOADR_GAP, DRV_NAME)) { printk(KERN_WARNING "Region %x-%x busy\n", ioport, ioport + ASC_IOADR_GAP - 1); continue; } if (!AscFindSignature(ioport)) { release_region(ioport, ASC_IOADR_GAP); continue; } /* * I don't know why we need to do this for EISA chips, but * not for any others. It looks to be equivalent to * AscGetChipCfgMsw, but I may have overlooked something, * so I'm not converting it until I get an EISA board to * test with. 
*/ inw(ioport + 4); if (!irq) irq = advansys_eisa_irq_no(edev); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = irq; board->dev = dev; board->shost = shost; err = advansys_board_found(shost, ioport, ASC_IS_EISA); if (!err) { data->host[i] = shost; continue; } scsi_host_put(shost); release_region: release_region(ioport, ASC_IOADR_GAP); break; } if (err) goto free_data; dev_set_drvdata(dev, data); return 0; free_data: kfree(data->host[0]); kfree(data->host[1]); kfree(data); fail: return err; } static int advansys_eisa_remove(struct device *dev) { int i; struct eisa_scsi_data *data = dev_get_drvdata(dev); for (i = 0; i < 2; i++) { int ioport; struct Scsi_Host *shost = data->host[i]; if (!shost) continue; ioport = shost->io_port; advansys_release(shost); release_region(ioport, ASC_IOADR_GAP); } kfree(data); return 0; } static struct eisa_driver advansys_eisa_driver = { .id_table = advansys_eisa_table, .driver = { .name = DRV_NAME, .probe = advansys_eisa_probe, .remove = advansys_eisa_remove, } }; /* PCI Devices supported by this driver */ static struct pci_device_id advansys_pci_tbl[] = { {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {} }; MODULE_DEVICE_TABLE(pci, advansys_pci_tbl); static void advansys_set_latency(struct pci_dev *pdev) { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0); } else { u8 latency; pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency); if (latency < 0x20) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20); } } static int advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err, ioport; struct Scsi_Host *shost; struct asc_board *board; err = pci_enable_device(pdev); if (err) goto fail; err = pci_request_regions(pdev, DRV_NAME); if (err) goto disable_device; pci_set_master(pdev); advansys_set_latency(pdev); err = -ENODEV; if (pci_resource_len(pdev, 0) == 0) goto release_region; ioport = pci_resource_start(pdev, 0); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = pdev->irq; board->dev = &pdev->dev; board->shost = shost; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW || pdev->device == PCI_DEVICE_ID_38C0800_REV1 || pdev->device == PCI_DEVICE_ID_38C1600_REV1) { board->flags |= ASC_IS_WIDE_BOARD; } err = advansys_board_found(shost, ioport, ASC_IS_PCI); if (err) goto free_host; pci_set_drvdata(pdev, shost); return 0; free_host: scsi_host_put(shost); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } static void advansys_pci_remove(struct pci_dev *pdev) { advansys_release(pci_get_drvdata(pdev)); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver advansys_pci_driver = { .name = DRV_NAME, .id_table = advansys_pci_tbl, .probe = advansys_pci_probe, .remove = advansys_pci_remove, }; static int __init advansys_init(void) { int error; error = 
isa_register_driver(&advansys_vlb_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto fail; error = eisa_driver_register(&advansys_eisa_driver); if (error) goto unregister_vlb; error = pci_register_driver(&advansys_pci_driver); if (error) goto unregister_eisa; return 0; unregister_eisa: eisa_driver_unregister(&advansys_eisa_driver); unregister_vlb: isa_unregister_driver(&advansys_vlb_driver); fail: return error; } static void __exit advansys_exit(void) { pci_unregister_driver(&advansys_pci_driver); eisa_driver_unregister(&advansys_eisa_driver); isa_unregister_driver(&advansys_vlb_driver); } module_init(advansys_init); module_exit(advansys_exit); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("advansys/mcode.bin"); MODULE_FIRMWARE("advansys/3550.bin"); MODULE_FIRMWARE("advansys/38C0800.bin"); MODULE_FIRMWARE("advansys/38C1600.bin");
linux-master
drivers/scsi/advansys.c
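A minimal user-space sketch (not part of the driver above) of two small decoding steps that advansys.c performs: the VLB IRQ field held in bits 2..4 of CfgLsw, and the conversion of a real-mode BIOS code segment to a linear address by shifting left 4. The sample cfg_lsw value and the main() harness are illustrative assumptions, not values read from hardware.

/*
 * Standalone sketch of two decodings from advansys.c, checkable in
 * user space. The CfgLsw value is a made-up example.
 */
#include <stdio.h>

/* Bits 2..4 of CfgLsw select the VLB IRQ; encodings 0, 4 and 7 are invalid. */
static unsigned int vlb_irq_from_cfg_lsw(unsigned short cfg_lsw)
{
	unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9;

	if (chip_irq < 10 || chip_irq == 13 || chip_irq > 15)
		return 0;	/* invalid encoding */
	return chip_irq;
}

/* A real-mode segment becomes a linear address by shifting left 4. */
static unsigned long bios_base_from_codeseg(unsigned short codeseg)
{
	return (unsigned long)codeseg << 4;
}

int main(void)
{
	unsigned short cfg_lsw = 0x0014;	/* example: bits 2..4 = 101 -> IRQ 14 */

	printf("VLB IRQ: %u\n", vlb_irq_from_cfg_lsw(cfg_lsw));
	printf("BIOS base for segment 0xC800: 0x%lx\n",
	       bios_base_from_codeseg(0xC800));
	return 0;
}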
// SPDX-License-Identifier: GPL-2.0 /* -*-linux-c-*- * vendor-specific code for SCSI CD-ROM's goes here. * * This is needed becauce most of the new features (multisession and * the like) are too new to be included into the SCSI-II standard (to * be exact: there is'nt anything in my draft copy). * * Aug 1997: Ha! Got a SCSI-3 cdrom spec across my fingers. SCSI-3 does * multisession using the READ TOC command (like SONY). * * Rearranged stuff here: SCSI-3 is included allways, support * for NEC/TOSHIBA/HP commands is optional. * * Gerd Knorr <[email protected]> * * -------------------------------------------------------------------------- * * support for XA/multisession-CD's * * - NEC: Detection and support of multisession CD's. * * - TOSHIBA: Detection and support of multisession CD's. * Some XA-Sector tweaking, required for older drives. * * - SONY: Detection and support of multisession CD's. * added by Thomas Quinot <[email protected]> * * - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to * work with SONY (SCSI3 now) code. * * - HP: Much like SONY, but a little different... (Thomas) * HP-Writers only ??? Maybe other CD-Writers work with this too ? * HP 6020 writers now supported. */ #include <linux/cdrom.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/bcd.h> #include <linux/blkdev.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include "sr.h" #if 0 #define DEBUG #endif /* here are some constants to sort the vendors into groups */ #define VENDOR_SCSI3 1 /* default: scsi-3 mmc */ #define VENDOR_NEC 2 #define VENDOR_TOSHIBA 3 #define VENDOR_WRITER 4 /* pre-scsi3 writers */ #define VENDOR_CYGNAL_85ED 5 /* CD-on-a-chip */ #define VENDOR_TIMEOUT 30*HZ void sr_vendor_init(Scsi_CD *cd) { const char *vendor = cd->device->vendor; const char *model = cd->device->model; /* default */ cd->vendor = VENDOR_SCSI3; if (cd->readcd_known) /* this is true for scsi3/mmc drives - no more checks */ return; if (cd->device->type == TYPE_WORM) { cd->vendor = VENDOR_WRITER; } else if (!strncmp(vendor, "NEC", 3)) { cd->vendor = VENDOR_NEC; if (!strncmp(model, "CD-ROM DRIVE:25", 15) || !strncmp(model, "CD-ROM DRIVE:36", 15) || !strncmp(model, "CD-ROM DRIVE:83", 15) || !strncmp(model, "CD-ROM DRIVE:84 ", 16) #if 0 /* my NEC 3x returns the read-raw data if a read-raw is followed by a read for the same sector - aeb */ || !strncmp(model, "CD-ROM DRIVE:500", 16) #endif ) /* these can't handle multisession, may hang */ cd->cdi.mask |= CDC_MULTI_SESSION; } else if (!strncmp(vendor, "TOSHIBA", 7)) { cd->vendor = VENDOR_TOSHIBA; } else if (!strncmp(vendor, "Beurer", 6) && !strncmp(model, "Gluco Memory", 12)) { /* The Beurer GL50 evo uses a Cygnal-manufactured CD-on-a-chip that only accepts a subset of SCSI commands. Most of the not-implemented commands are fine to fail, but a few, particularly around the MMC or Audio commands, will put the device into an unrecoverable state, so they need to be avoided at all costs. 
*/ cd->vendor = VENDOR_CYGNAL_85ED; cd->cdi.mask |= ( CDC_MULTI_SESSION | CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_GENERIC_PACKET | CDC_PLAY_AUDIO ); } } /* small handy function for switching block length using MODE SELECT, * used by sr_read_sector() */ int sr_set_blocklength(Scsi_CD *cd, int blocklength) { unsigned char *buffer; /* the buffer for the ioctl */ struct packet_command cgc; struct ccs_modesel_head *modesel; int rc, density = 0; if (cd->vendor == VENDOR_TOSHIBA) density = (blocklength > 2048) ? 0x81 : 0x83; buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; #ifdef DEBUG sr_printk(KERN_INFO, cd, "MODE SELECT 0x%x/%d\n", density, blocklength); #endif memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = MODE_SELECT; cgc.cmd[1] = (1 << 4); cgc.cmd[4] = 12; modesel = (struct ccs_modesel_head *) buffer; memset(modesel, 0, sizeof(*modesel)); modesel->block_desc_length = 0x08; modesel->density = density; modesel->block_length_med = (blocklength >> 8) & 0xff; modesel->block_length_lo = blocklength & 0xff; cgc.buffer = buffer; cgc.buflen = sizeof(*modesel); cgc.data_direction = DMA_TO_DEVICE; cgc.timeout = VENDOR_TIMEOUT; if (0 == (rc = sr_do_ioctl(cd, &cgc))) { cd->device->sector_size = blocklength; } #ifdef DEBUG else sr_printk(KERN_INFO, cd, "switching blocklength to %d bytes failed\n", blocklength); #endif kfree(buffer); return rc; } /* This function gets called after a media change. Checks if the CD is multisession, asks for offset etc. */ int sr_cd_check(struct cdrom_device_info *cdi) { Scsi_CD *cd = cdi->handle; unsigned long sector; unsigned char *buffer; /* the buffer for the ioctl */ struct packet_command cgc; int rc, no_multi; if (cd->cdi.mask & CDC_MULTI_SESSION) return 0; buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; sector = 0; /* the multisession sector offset goes here */ no_multi = 0; /* flag: the drive can't handle multisession */ rc = 0; memset(&cgc, 0, sizeof(struct packet_command)); switch (cd->vendor) { case VENDOR_SCSI3: cgc.cmd[0] = READ_TOC; cgc.cmd[8] = 12; cgc.cmd[9] = 0x40; cgc.buffer = buffer; cgc.buflen = 12; cgc.quiet = 1; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = VENDOR_TIMEOUT; rc = sr_do_ioctl(cd, &cgc); if (rc != 0) break; if ((buffer[0] << 8) + buffer[1] < 0x0a) { sr_printk(KERN_INFO, cd, "Hmm, seems the drive " "doesn't support multisession CD's\n"); no_multi = 1; break; } sector = buffer[11] + (buffer[10] << 8) + (buffer[9] << 16) + (buffer[8] << 24); if (buffer[6] <= 1) { /* ignore sector offsets from first track */ sector = 0; } break; case VENDOR_NEC:{ unsigned long min, sec, frame; cgc.cmd[0] = 0xde; cgc.cmd[1] = 0x03; cgc.cmd[2] = 0xb0; cgc.buffer = buffer; cgc.buflen = 0x16; cgc.quiet = 1; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = VENDOR_TIMEOUT; rc = sr_do_ioctl(cd, &cgc); if (rc != 0) break; if (buffer[14] != 0 && buffer[14] != 0xb0) { sr_printk(KERN_INFO, cd, "Hmm, seems the cdrom " "doesn't support multisession CD's\n"); no_multi = 1; break; } min = bcd2bin(buffer[15]); sec = bcd2bin(buffer[16]); frame = bcd2bin(buffer[17]); sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; break; } case VENDOR_TOSHIBA:{ unsigned long min, sec, frame; /* we request some disc information (is it a XA-CD ?, * where starts the last session ?) 
*/ cgc.cmd[0] = 0xc7; cgc.cmd[1] = 0x03; cgc.buffer = buffer; cgc.buflen = 4; cgc.quiet = 1; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = VENDOR_TIMEOUT; rc = sr_do_ioctl(cd, &cgc); if (rc == -EINVAL) { sr_printk(KERN_INFO, cd, "Hmm, seems the drive " "doesn't support multisession CD's\n"); no_multi = 1; break; } if (rc != 0) break; min = bcd2bin(buffer[1]); sec = bcd2bin(buffer[2]); frame = bcd2bin(buffer[3]); sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; if (sector) sector -= CD_MSF_OFFSET; sr_set_blocklength(cd, 2048); break; } case VENDOR_WRITER: cgc.cmd[0] = READ_TOC; cgc.cmd[8] = 0x04; cgc.cmd[9] = 0x40; cgc.buffer = buffer; cgc.buflen = 0x04; cgc.quiet = 1; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = VENDOR_TIMEOUT; rc = sr_do_ioctl(cd, &cgc); if (rc != 0) { break; } if ((rc = buffer[2]) == 0) { sr_printk(KERN_WARNING, cd, "No finished session\n"); break; } cgc.cmd[0] = READ_TOC; /* Read TOC */ cgc.cmd[6] = rc & 0x7f; /* number of last session */ cgc.cmd[8] = 0x0c; cgc.cmd[9] = 0x40; cgc.buffer = buffer; cgc.buflen = 12; cgc.quiet = 1; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = VENDOR_TIMEOUT; rc = sr_do_ioctl(cd, &cgc); if (rc != 0) { break; } sector = buffer[11] + (buffer[10] << 8) + (buffer[9] << 16) + (buffer[8] << 24); break; default: /* should not happen */ sr_printk(KERN_WARNING, cd, "unknown vendor code (%i), not initialized ?\n", cd->vendor); sector = 0; no_multi = 1; break; } cd->ms_offset = sector; cd->xa_flag = 0; if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(cd)) cd->xa_flag = 1; if (2048 != cd->device->sector_size) { sr_set_blocklength(cd, 2048); } if (no_multi) cdi->mask |= CDC_MULTI_SESSION; #ifdef DEBUG if (sector) sr_printk(KERN_DEBUG, cd, "multisession offset=%lu\n", sector); #endif kfree(buffer); return rc; }
linux-master
drivers/scsi/sr_vendor.c
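A minimal user-space sketch (separate from the file above) of the BCD minute/second/frame to sector arithmetic that sr_vendor.c uses for the NEC and Toshiba multisession offsets. The constants mirror those from <linux/cdrom.h>; the sample MSF digits are fabricated for illustration.

/*
 * Sketch of the BCD MSF -> sector conversion used for the multisession
 * offset. Only the Toshiba path subtracts the 150-frame MSF offset.
 */
#include <stdio.h>

#define CD_SECS		60	/* seconds per minute */
#define CD_FRAMES	75	/* frames per second */
#define CD_MSF_OFFSET	150	/* MSF offset of the first frame */

/* Drives report MSF digits in BCD, e.g. 0x25 means 25. */
static unsigned int bcd2bin_u8(unsigned char bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);
}

static unsigned long msf_to_sector(unsigned char m, unsigned char s,
				   unsigned char f, int apply_msf_offset)
{
	unsigned long sector;

	sector = bcd2bin_u8(m) * CD_SECS * CD_FRAMES +
		 bcd2bin_u8(s) * CD_FRAMES +
		 bcd2bin_u8(f);
	if (apply_msf_offset && sector)
		sector -= CD_MSF_OFFSET;
	return sector;
}

int main(void)
{
	/* Example: last session starts at 12:34:56 (BCD). */
	printf("NEC-style offset:     %lu\n", msf_to_sector(0x12, 0x34, 0x56, 0));
	printf("Toshiba-style offset: %lu\n", msf_to_sector(0x12, 0x34, 0x56, 1));
	return 0;
}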
// SPDX-License-Identifier: GPL-2.0-only #include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zorro.h> #include <linux/module.h> #include <asm/page.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include "wd33c93.h" #include "gvp11.h" #define CHECK_WD33C93 struct gvp11_hostdata { struct WD33C93_hostdata wh; struct gvp11_scsiregs *regs; struct device *dev; }; #define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) #define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff)) static irqreturn_t gvp11_intr(int irq, void *data) { struct Scsi_Host *instance = data; struct gvp11_hostdata *hdata = shost_priv(instance); unsigned int status = hdata->regs->CNTR; unsigned long flags; if (!(status & GVP11_DMAC_INT_PENDING)) return IRQ_NONE; spin_lock_irqsave(instance->host_lock, flags); wd33c93_intr(instance); spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } static int gvp11_xfer_mask = 0; static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); unsigned long len = scsi_pointer->this_residual; struct Scsi_Host *instance = cmd->device->host; struct gvp11_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct gvp11_scsiregs *regs = hdata->regs; unsigned short cntr = GVP11_DMAC_INT_ENABLE; dma_addr_t addr; int bank_mask; static int scsi_alloc_out_of_range = 0; addr = dma_map_single(hdata->dev, scsi_pointer->ptr, len, DMA_DIR(dir_in)); if (dma_mapping_error(hdata->dev, addr)) { dev_warn(hdata->dev, "cannot map SCSI data block %p\n", scsi_pointer->ptr); return 1; } scsi_pointer->dma_handle = addr; /* use bounce buffer if the physical address is bad */ if (addr & wh->dma_xfer_mask) { /* drop useless mapping */ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, scsi_pointer->this_residual, DMA_DIR(dir_in)); scsi_pointer->dma_handle = (dma_addr_t) NULL; wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff; if (!scsi_alloc_out_of_range) { wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL); wh->dma_buffer_pool = BUF_SCSI_ALLOCED; } if (scsi_alloc_out_of_range || !wh->dma_bounce_buffer) { wh->dma_bounce_buffer = amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } if (!dir_in) { /* copy to bounce buffer for a write */ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, scsi_pointer->this_residual); } if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { /* will flush/invalidate cache for us */ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer, wh->dma_bounce_len, DMA_DIR(dir_in)); /* can't map buffer; use PIO */ if (dma_mapping_error(hdata->dev, addr)) { dev_warn(hdata->dev, "cannot map bounce buffer %p\n", wh->dma_bounce_buffer); return 1; } } if (addr & wh->dma_xfer_mask) { /* drop useless mapping */ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, scsi_pointer->this_residual, DMA_DIR(dir_in)); /* fall back to Chip RAM if address out of range */ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { kfree(wh->dma_bounce_buffer); scsi_alloc_out_of_range = 1; } else { amiga_chip_free(wh->dma_bounce_buffer); } wh->dma_bounce_buffer = amiga_chip_alloc(wh->dma_bounce_len, "GVP II 
SCSI Bounce Buffer"); if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, scsi_pointer->this_residual); } /* chip RAM can be mapped to phys. address directly */ addr = virt_to_phys(wh->dma_bounce_buffer); /* no need to flush/invalidate cache */ wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } /* finally, have OK mapping (punted for PIO else) */ scsi_pointer->dma_handle = addr; } /* setup dma direction */ if (!dir_in) cntr |= GVP11_DMAC_DIR_WRITE; wh->dma_dir = dir_in; regs->CNTR = cntr; /* setup DMA *physical* address */ regs->ACR = addr; /* no more cache flush here - dma_map_single() takes care */ bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0; if (bank_mask) regs->BANK = bank_mask & (addr >> 18); /* start DMA */ regs->ST_DMA = 1; /* return success */ return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); struct gvp11_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct gvp11_scsiregs *regs = hdata->regs; /* stop DMA */ regs->SP_DMA = 1; /* remove write bit from CONTROL bits */ regs->CNTR = GVP11_DMAC_INT_ENABLE; if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, scsi_pointer->this_residual, DMA_DIR(wh->dma_dir)); /* copy from a bounce buffer, if necessary */ if (status && wh->dma_bounce_buffer) { if (wh->dma_dir && SCpnt) memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer, scsi_pointer->this_residual); if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) kfree(wh->dma_bounce_buffer); else amiga_chip_free(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; } } static const struct scsi_host_template gvp11_scsi_template = { .module = THIS_MODULE, .name = "GVP Series II SCSI", .show_info = wd33c93_show_info, .write_info = wd33c93_write_info, .proc_name = "GVP11", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct scsi_pointer), }; static int check_wd33c93(struct gvp11_scsiregs *regs) { #ifdef CHECK_WD33C93 volatile unsigned char *sasr_3393, *scmd_3393; unsigned char save_sasr; unsigned char q, qq; /* * These darn GVP boards are a problem - it can be tough to tell * whether or not they include a SCSI controller. This is the * ultimate Yet-Another-GVP-Detection-Hack in that it actually * probes for a WD33c93 chip: If we find one, it's extremely * likely that this card supports SCSI, regardless of Product_ * Code, Board_Size, etc. 
*/ /* Get pointers to the presumed register locations and save contents */ sasr_3393 = &regs->SASR; scmd_3393 = &regs->SCMD; save_sasr = *sasr_3393; /* First test the AuxStatus Reg */ q = *sasr_3393; /* read it */ if (q & 0x08) /* bit 3 should always be clear */ return -ENODEV; *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ *sasr_3393 = save_sasr; /* Oops - restore this byte */ return -ENODEV; } if (*sasr_3393 != q) { /* should still read the same */ *sasr_3393 = save_sasr; /* Oops - restore this byte */ return -ENODEV; } if (*scmd_3393 != q) /* and so should the image at 0x1f */ return -ENODEV; /* * Ok, we probably have a wd33c93, but let's check a few other places * for good measure. Make sure that this works for both 'A and 'B * chip versions. */ *sasr_3393 = WD_SCSI_STATUS; q = *scmd_3393; *sasr_3393 = WD_SCSI_STATUS; *scmd_3393 = ~q; *sasr_3393 = WD_SCSI_STATUS; qq = *scmd_3393; *sasr_3393 = WD_SCSI_STATUS; *scmd_3393 = q; if (qq != q) /* should be read only */ return -ENODEV; *sasr_3393 = 0x1e; /* this register is unimplemented */ q = *scmd_3393; *sasr_3393 = 0x1e; *scmd_3393 = ~q; *sasr_3393 = 0x1e; qq = *scmd_3393; *sasr_3393 = 0x1e; *scmd_3393 = q; if (qq != q || qq != 0xff) /* should be read only, all 1's */ return -ENODEV; *sasr_3393 = WD_TIMEOUT_PERIOD; q = *scmd_3393; *sasr_3393 = WD_TIMEOUT_PERIOD; *scmd_3393 = ~q; *sasr_3393 = WD_TIMEOUT_PERIOD; qq = *scmd_3393; *sasr_3393 = WD_TIMEOUT_PERIOD; *scmd_3393 = q; if (qq != (~q & 0xff)) /* should be read/write */ return -ENODEV; #endif /* CHECK_WD33C93 */ return 0; } static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { struct Scsi_Host *instance; unsigned long address; int error; unsigned int epc; unsigned int default_dma_xfer_mask; struct gvp11_hostdata *hdata; struct gvp11_scsiregs *regs; wd33c93_regs wdregs; default_dma_xfer_mask = ent->driver_data; if (dma_set_mask_and_coherent(&z->dev, TO_DMA_MASK(default_dma_xfer_mask))) { dev_warn(&z->dev, "cannot use DMA mask %llx\n", TO_DMA_MASK(default_dma_xfer_mask)); return -ENODEV; } /* * Rumors state that some GVP ram boards use the same product * code as the SCSI controllers. Therefore if the board-size * is not 64KB we assume it is a ram board and bail out. 
*/ if (zorro_resource_len(z) != 0x10000) return -ENODEV; address = z->resource.start; if (!request_mem_region(address, 256, "wd33c93")) return -EBUSY; regs = ZTWO_VADDR(address); error = check_wd33c93(regs); if (error) goto fail_check_or_alloc; instance = scsi_host_alloc(&gvp11_scsi_template, sizeof(struct gvp11_hostdata)); if (!instance) { error = -ENOMEM; goto fail_check_or_alloc; } instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; regs->secret2 = 1; regs->secret1 = 0; regs->secret3 = 15; while (regs->CNTR & GVP11_DMAC_BUSY) ; regs->CNTR = 0; regs->BANK = 0; wdregs.SASR = &regs->SASR; wdregs.SCMD = &regs->SCMD; hdata = shost_priv(instance); if (gvp11_xfer_mask) { hdata->wh.dma_xfer_mask = gvp11_xfer_mask; if (dma_set_mask_and_coherent(&z->dev, TO_DMA_MASK(gvp11_xfer_mask))) { dev_warn(&z->dev, "cannot use DMA mask %llx\n", TO_DMA_MASK(gvp11_xfer_mask)); error = -ENODEV; goto fail_check_or_alloc; } } else hdata->wh.dma_xfer_mask = default_dma_xfer_mask; hdata->wh.no_sync = 0xff; hdata->wh.fast = 0; hdata->wh.dma_mode = CTRL_DMA; hdata->regs = regs; /* * Check for 14MHz SCSI clock */ epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); wd33c93_init(instance, wdregs, dma_setup, dma_stop, (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 : WD33C93_FS_12_15); error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI", instance); if (error) goto fail_irq; regs->CNTR = GVP11_DMAC_INT_ENABLE; error = scsi_add_host(instance, NULL); if (error) goto fail_host; zorro_set_drvdata(z, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(IRQ_AMIGA_PORTS, instance); fail_irq: scsi_host_put(instance); fail_check_or_alloc: release_mem_region(address, 256); return error; } static void gvp11_remove(struct zorro_dev *z) { struct Scsi_Host *instance = zorro_get_drvdata(z); struct gvp11_hostdata *hdata = shost_priv(instance); hdata->regs->CNTR = 0; scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); scsi_host_put(instance); release_mem_region(z->resource.start, 256); } /* * This should (hopefully) be the correct way to identify * all the different GVP SCSI controllers (except for the * SERIES I though). */ static struct zorro_device_id gvp11_zorro_tbl[] = { { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff }, { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff }, { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff }, { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff }, { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff }, { ZORRO_PROD_GVP_A1291, ~0x07ffffff }, { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff }, { 0 } }; MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl); static struct zorro_driver gvp11_driver = { .name = "gvp11", .id_table = gvp11_zorro_tbl, .probe = gvp11_probe, .remove = gvp11_remove, }; static int __init gvp11_init(void) { return zorro_register_driver(&gvp11_driver); } module_init(gvp11_init); static void __exit gvp11_exit(void) { zorro_unregister_driver(&gvp11_driver); } module_exit(gvp11_exit); MODULE_DESCRIPTION("GVP Series II SCSI"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/gvp11.c
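A minimal user-space sketch (not driver code) of the bounce-buffer decision made in gvp11.c's dma_setup(): a mapping is only usable for DMA when it has no bits in common with dma_xfer_mask, and bounce buffers are padded up to a 512-byte multiple. The mask and addresses below are example values only.

/*
 * Sketch of the gvp11 DMA reachability test and bounce-length rounding.
 */
#include <stdio.h>

/* Non-zero overlap with the mask means the controller cannot reach it. */
static int needs_bounce(unsigned long addr, unsigned long dma_xfer_mask)
{
	return (addr & dma_xfer_mask) != 0;
}

/* Same rounding as "(this_residual + 511) & ~0x1ff" in the driver. */
static unsigned long bounce_len(unsigned long residual)
{
	return (residual + 511) & ~0x1ffUL;
}

int main(void)
{
	/* Example: controller limited to the low 16 MB (mask ~0x00ffffff). */
	unsigned long mask = ~0x00ffffffUL & 0xffffffffUL;

	printf("addr 0x00200000 needs bounce: %d\n",
	       needs_bounce(0x00200000UL, mask));
	printf("addr 0x01200000 needs bounce: %d\n",
	       needs_bounce(0x01200000UL, mask));
	printf("bounce length for 700 bytes: %lu\n", bounce_len(700));
	return 0;
}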
// SPDX-License-Identifier: GPL-2.0-only /* * scsi.c Copyright (C) 1992 Drew Eckhardt * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * Copyright (C) 2002, 2003 Christoph Hellwig * * generic mid-level SCSI driver * Initial versions: Drew Eckhardt * Subsequent revisions: Eric Youngdale * * <[email protected]> * * Bug correction thanks go to : * Rik Faith <[email protected]> * Tommy Thorn <tthorn> * Thomas Wuensche <[email protected]> * * Modified by Eric Youngdale [email protected] or [email protected] to * add scatter-gather, multiple outstanding request, and other * enhancements. * * Native multichannel, wide scsi, /proc/scsi and hot plugging * support added by Michael Neuffer <[email protected]> * * Added request_module("scsi_hostadapter") for kerneld: * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) * Bjorn Ekwall <[email protected]> * (changed to kmod) * * Major improvements to the timeout, abort, and reset processing, * as well as performance modifications for large queue depths by * Leonard N. Zubkoff <[email protected]> * * Converted cli() code to spinlocks, Ingo Molnar * * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli * * out_of_space hacks, D. Gilbert (dpg) 990608 */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/unistd.h> #include <linux/spinlock.h> #include <linux/kmod.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "scsi_priv.h" #include "scsi_logging.h" #define CREATE_TRACE_POINTS #include <trace/events/scsi.h> /* * Definitions and constants. */ /* * Note - the initial logging level can be set here to log events at boot time. * After the system is up, you may enable logging via the /proc interface. */ unsigned int scsi_logging_level; #if defined(CONFIG_SCSI_LOGGING) EXPORT_SYMBOL(scsi_logging_level); #endif #ifdef CONFIG_SCSI_LOGGING void scsi_log_send(struct scsi_cmnd *cmd) { unsigned int level; /* * If ML QUEUE log level is greater than or equal to: * * 1: nothing (match completion) * * 2: log opcode + command of all commands + cmd address * * 3: same as 2 * * 4: same as 3 */ if (unlikely(scsi_logging_level)) { level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS); if (level > 1) { scmd_printk(KERN_INFO, cmd, "Send: scmd 0x%p\n", cmd); scsi_print_command(cmd); } } } void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) { unsigned int level; /* * If ML COMPLETE log level is greater than or equal to: * * 1: log disposition, result, opcode + command, and conditionally * sense data for failures or non SUCCESS dispositions. * * 2: same as 1 but for all command completions. 
* * 3: same as 2 * * 4: same as 3 plus dump extra junk */ if (unlikely(scsi_logging_level)) { level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, SCSI_LOG_MLCOMPLETE_BITS); if (((level > 0) && (cmd->result || disposition != SUCCESS)) || (level > 1)) { scsi_print_result(cmd, "Done", disposition); scsi_print_command(cmd); if (scsi_status_is_check_condition(cmd->result)) scsi_print_sense(cmd); if (level > 3) scmd_printk(KERN_INFO, cmd, "scsi host busy %d failed %d\n", scsi_host_busy(cmd->device->host), cmd->device->host->host_failed); } } } #endif /** * scsi_finish_command - cleanup and pass command back to upper layer * @cmd: the command * * Description: Pass command off to upper layer for finishing of I/O * request, waking processes that are waiting on results, * etc. */ void scsi_finish_command(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct scsi_target *starget = scsi_target(sdev); struct Scsi_Host *shost = sdev->host; struct scsi_driver *drv; unsigned int good_bytes; scsi_device_unbusy(sdev, cmd); /* * Clear the flags that say that the device/target/host is no longer * capable of accepting new commands. */ if (atomic_read(&shost->host_blocked)) atomic_set(&shost->host_blocked, 0); if (atomic_read(&starget->target_blocked)) atomic_set(&starget->target_blocked, 0); if (atomic_read(&sdev->device_blocked)) atomic_set(&sdev->device_blocked, 0); SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, "Notifying upper driver of completion " "(result %x)\n", cmd->result)); good_bytes = scsi_bufflen(cmd); if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { int old_good_bytes = good_bytes; drv = scsi_cmd_to_driver(cmd); if (drv->done) good_bytes = drv->done(cmd); /* * USB may not give sense identifying bad sector and * simply return a residue instead, so subtract off the * residue if drv->done() error processing indicates no * change to the completion length. */ if (good_bytes == old_good_bytes) good_bytes -= scsi_get_resid(cmd); } scsi_io_completion(cmd, good_bytes); } /* * 4096 is big enough for saturating fast SCSI LUNs. */ int scsi_device_max_queue_depth(struct scsi_device *sdev) { return min_t(int, sdev->host->can_queue, 4096); } /** * scsi_change_queue_depth - change a device's queue depth * @sdev: SCSI Device in question * @depth: number of commands allowed to be queued to the driver * * Sets the device queue depth and returns the new value. */ int scsi_change_queue_depth(struct scsi_device *sdev, int depth) { depth = min_t(int, depth, scsi_device_max_queue_depth(sdev)); if (depth > 0) { sdev->queue_depth = depth; wmb(); } if (sdev->request_queue) blk_set_queue_depth(sdev->request_queue, depth); sbitmap_resize(&sdev->budget_map, sdev->queue_depth); return sdev->queue_depth; } EXPORT_SYMBOL(scsi_change_queue_depth); /** * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth * @sdev: SCSI Device in question * @depth: Current number of outstanding SCSI commands on this device, * not counting the one returned as QUEUE_FULL. * * Description: This function will track successive QUEUE_FULL events on a * specific SCSI device to determine if and when there is a * need to adjust the queue depth on the device. * * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth, * -1 - Drop back to untagged operation using host->cmd_per_lun * as the untagged command depth * * Lock Status: None held on entry * * Notes: Low level drivers may call this at any time and we will do * "The Right Thing." We are interrupt context safe. 
*/ int scsi_track_queue_full(struct scsi_device *sdev, int depth) { /* * Don't let QUEUE_FULLs on the same * jiffies count, they could all be from * same event. */ if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) return 0; sdev->last_queue_full_time = jiffies; if (sdev->last_queue_full_depth != depth) { sdev->last_queue_full_count = 1; sdev->last_queue_full_depth = depth; } else { sdev->last_queue_full_count++; } if (sdev->last_queue_full_count <= 10) return 0; return scsi_change_queue_depth(sdev, depth); } EXPORT_SYMBOL(scsi_track_queue_full); /** * scsi_vpd_inquiry - Request a device provide us with a VPD page * @sdev: The device to ask * @buffer: Where to put the result * @page: Which Vital Product Data to return * @len: The length of the buffer * * This is an internal helper function. You probably want to use * scsi_get_vpd_page instead. * * Returns size of the vpd page on success or a negative error number. */ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, u8 page, unsigned len) { int result; unsigned char cmd[16]; if (len < 4) return -EINVAL; cmd[0] = INQUIRY; cmd[1] = 1; /* EVPD */ cmd[2] = page; cmd[3] = len >> 8; cmd[4] = len & 0xff; cmd[5] = 0; /* Control byte */ /* * I'm not convinced we need to try quite this hard to get VPD, but * all the existing users tried this hard. */ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, 30 * HZ, 3, NULL); if (result) return -EIO; /* * Sanity check that we got the page back that we asked for and that * the page size is not 0. */ if (buffer[1] != page) return -EIO; result = get_unaligned_be16(&buffer[2]); if (!result) return -EIO; return result + 4; } static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) { unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4); int result; if (sdev->no_vpd_size) return SCSI_DEFAULT_VPD_LEN; /* * Fetch the VPD page header to find out how big the page * is. This is done to prevent problems on legacy devices * which can not handle allocation lengths as large as * potentially requested by the caller. */ result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header)); if (result < 0) return 0; if (result < SCSI_VPD_HEADER_SIZE) { dev_warn_once(&sdev->sdev_gendev, "%s: short VPD page 0x%02x length: %d bytes\n", __func__, page, result); return 0; } return result; } /** * scsi_get_vpd_page - Get Vital Product Data from a SCSI device * @sdev: The device to ask * @page: Which Vital Product Data to return * @buf: where to store the VPD * @buf_len: number of bytes in the VPD buffer area * * SCSI devices may optionally supply Vital Product Data. Each 'page' * of VPD is defined in the appropriate SCSI document (eg SPC, SBC). * If the device supports this VPD page, this routine fills @buf * with the data from that page and return 0. If the VPD page is not * supported or its content cannot be retrieved, -EINVAL is returned. */ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, int buf_len) { int result, vpd_len; if (!scsi_device_supports_vpd(sdev)) return -EINVAL; vpd_len = scsi_get_vpd_size(sdev, page); if (vpd_len <= 0) return -EINVAL; vpd_len = min(vpd_len, buf_len); /* * Fetch the actual page. Since the appropriate size was reported * by the device it is now safe to ask for something bigger. 
*/ memset(buf, 0, buf_len); result = scsi_vpd_inquiry(sdev, buf, page, vpd_len); if (result < 0) return -EINVAL; else if (result > vpd_len) dev_warn_once(&sdev->sdev_gendev, "%s: VPD page 0x%02x result %d > %d bytes\n", __func__, page, result, vpd_len); return 0; } EXPORT_SYMBOL_GPL(scsi_get_vpd_page); /** * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device * @sdev: The device to ask * @page: Which Vital Product Data to return * * Returns %NULL upon failure. */ static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page) { struct scsi_vpd *vpd_buf; int vpd_len, result; vpd_len = scsi_get_vpd_size(sdev, page); if (vpd_len <= 0) return NULL; retry_pg: /* * Fetch the actual page. Since the appropriate size was reported * by the device it is now safe to ask for something bigger. */ vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL); if (!vpd_buf) return NULL; result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len); if (result < 0) { kfree(vpd_buf); return NULL; } if (result > vpd_len) { dev_warn_once(&sdev->sdev_gendev, "%s: VPD page 0x%02x result %d > %d bytes\n", __func__, page, result, vpd_len); vpd_len = result; kfree(vpd_buf); goto retry_pg; } vpd_buf->len = result; return vpd_buf; } static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page, struct scsi_vpd __rcu **sdev_vpd_buf) { struct scsi_vpd *vpd_buf; vpd_buf = scsi_get_vpd_buf(sdev, page); if (!vpd_buf) return; mutex_lock(&sdev->inquiry_mutex); vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf, lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); if (vpd_buf) kfree_rcu(vpd_buf, rcu); } /** * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure * @sdev: The device to ask * * Attach the 'Device Identification' VPD page (0x83) and the * 'Unit Serial Number' VPD page (0x80) to a SCSI device * structure. This information can be used to identify the device * uniquely. */ void scsi_attach_vpd(struct scsi_device *sdev) { int i; struct scsi_vpd *vpd_buf; if (!scsi_device_supports_vpd(sdev)) return; /* Ask for all the pages supported by this device */ vpd_buf = scsi_get_vpd_buf(sdev, 0); if (!vpd_buf) return; for (i = 4; i < vpd_buf->len; i++) { if (vpd_buf->data[i] == 0x0) scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0); if (vpd_buf->data[i] == 0x80) scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); if (vpd_buf->data[i] == 0x83) scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); if (vpd_buf->data[i] == 0x89) scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89); if (vpd_buf->data[i] == 0xb0) scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0); if (vpd_buf->data[i] == 0xb1) scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1); if (vpd_buf->data[i] == 0xb2) scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2); } kfree(vpd_buf); } /** * scsi_report_opcode - Find out if a given command is supported * @sdev: scsi device to query * @buffer: scratch buffer (must be at least 20 bytes long) * @len: length of buffer * @opcode: opcode for the command to look up * @sa: service action for the command to look up * * Uses the REPORT SUPPORTED OPERATION CODES to check support for the * command identified with @opcode and @sa. If the command does not * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails, * 0 if the command is not supported and 1 if the device claims to * support the command. 
*/ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, unsigned int len, unsigned char opcode, unsigned short sa) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; int result, request_len; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) return -EINVAL; /* RSOC header + size of command we are asking about */ request_len = 4 + COMMAND_SIZE(opcode); if (request_len > len) { dev_warn_once(&sdev->sdev_gendev, "%s: len %u bytes, opcode 0x%02x needs %u\n", __func__, len, opcode, request_len); return -EINVAL; } memset(cmd, 0, 16); cmd[0] = MAINTENANCE_IN; cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; if (!sa) { cmd[2] = 1; /* One command format */ cmd[3] = opcode; } else { cmd[2] = 3; /* One command format with service action */ cmd[3] = opcode; put_unaligned_be16(sa, &cmd[4]); } put_unaligned_be32(request_len, &cmd[6]); memset(buffer, 0, len); result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, request_len, 30 * HZ, 3, &exec_args); if (result < 0) return result; if (result && scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST && (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) return -EINVAL; if ((buffer[1] & 3) == 3) /* Command supported */ return 1; return 0; } EXPORT_SYMBOL(scsi_report_opcode); #define SCSI_CDL_CHECK_BUF_LEN 64 static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa, unsigned char *buf) { int ret; u8 cdlp; /* Check operation code */ ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa); if (ret <= 0) return false; if ((buf[1] & 0x03) != 0x03) return false; /* * See SPC-6, One_command parameter data format for * REPORT SUPPORTED OPERATION CODES. We have the following cases * depending on rwcdlp (buf[0] & 0x01) value: * - rwcdlp == 0: then cdlp indicates support for the A mode page when * it is equal to 1 and for the B mode page when it is * equal to 2. * - rwcdlp == 1: then cdlp indicates support for the T2A mode page * when it is equal to 1 and for the T2B mode page when * it is equal to 2. * Overall, to detect support for command duration limits, we only need * to check that cdlp is 1 or 2. */ cdlp = (buf[1] & 0x18) >> 3; return cdlp == 0x01 || cdlp == 0x02; } /** * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits * @sdev: The device to check */ void scsi_cdl_check(struct scsi_device *sdev) { bool cdl_supported; unsigned char *buf; buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL); if (!buf) { sdev->cdl_supported = 0; return; } /* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */ cdl_supported = scsi_cdl_check_cmd(sdev, READ_16, 0, buf) || scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) || scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) || scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf); if (cdl_supported) { /* * We have CDL support: force the use of READ16/WRITE16. * READ32 and WRITE32 will be used for devices that support * the T10_PI_TYPE2_PROTECTION protection type. 
*/ sdev->use_16_for_rw = 1; sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; } else { sdev->cdl_supported = 0; } kfree(buf); } /** * scsi_cdl_enable - Enable or disable a SCSI device supports for Command * Duration Limits * @sdev: The target device * @enable: the target state */ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) { struct scsi_mode_data data; struct scsi_sense_hdr sshdr; struct scsi_vpd *vpd; bool is_ata = false; char buf[64]; int ret; if (!sdev->cdl_supported) return -EOPNOTSUPP; rcu_read_lock(); vpd = rcu_dereference(sdev->vpd_pg89); if (vpd) is_ata = true; rcu_read_unlock(); /* * For ATA devices, CDL needs to be enabled with a SET FEATURES command. */ if (is_ata) { char *buf_data; int len; ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf), 5 * HZ, 3, &data, NULL); if (ret) return -EINVAL; /* Enable CDL using the ATA feature page */ len = min_t(size_t, sizeof(buf), data.length - data.header_length - data.block_descriptor_length); buf_data = buf + data.header_length + data.block_descriptor_length; if (enable) buf_data[4] = 0x02; else buf_data[4] = 0; ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, &data, &sshdr); if (ret) { if (scsi_sense_valid(&sshdr)) scsi_print_sense_hdr(sdev, dev_name(&sdev->sdev_gendev), &sshdr); return ret; } } sdev->cdl_enable = enable; return 0; } /** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to * * Description: Gets a reference to the scsi_device and increments the use count * of the underlying LLDD module. You must hold host_lock of the * parent Scsi_Host or already have a reference when calling this. * * This will fail if a device is deleted or cancelled, or when the LLD module * is in the process of being unloaded. */ int scsi_device_get(struct scsi_device *sdev) { if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) goto fail; if (!try_module_get(sdev->host->hostt->module)) goto fail; if (!get_device(&sdev->sdev_gendev)) goto fail_put_module; return 0; fail_put_module: module_put(sdev->host->hostt->module); fail: return -ENXIO; } EXPORT_SYMBOL(scsi_device_get); /** * scsi_device_put - release a reference to a scsi_device * @sdev: device to release a reference on. * * Description: Release a reference to the scsi_device and decrements the use * count of the underlying LLDD module. The device is freed once the last * user vanishes. */ void scsi_device_put(struct scsi_device *sdev) { struct module *mod = sdev->host->hostt->module; put_device(&sdev->sdev_gendev); module_put(mod); } EXPORT_SYMBOL(scsi_device_put); /* helper for shost_for_each_device, see that for documentation */ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, struct scsi_device *prev) { struct list_head *list = (prev ? &prev->siblings : &shost->__devices); struct scsi_device *next = NULL; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); while (list->next != &shost->__devices) { next = list_entry(list->next, struct scsi_device, siblings); /* skip devices that we can't get a reference to */ if (!scsi_device_get(next)) break; next = NULL; list = list->next; } spin_unlock_irqrestore(shost->host_lock, flags); if (prev) scsi_device_put(prev); return next; } EXPORT_SYMBOL(__scsi_iterate_devices); /** * starget_for_each_device - helper to walk all devices of a target * @starget: target whose devices we want to iterate over. * @data: Opaque passed to each function call. 
* @fn: Function to call on each device * * This traverses over each device of @starget. The devices have * a reference that must be released by scsi_host_put when breaking * out of the loop. */ void starget_for_each_device(struct scsi_target *starget, void *data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct scsi_device *sdev; shost_for_each_device(sdev, shost) { if ((sdev->channel == starget->channel) && (sdev->id == starget->id)) fn(sdev, data); } } EXPORT_SYMBOL(starget_for_each_device); /** * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) * @starget: target whose devices we want to iterate over. * @data: parameter for callback @fn() * @fn: callback function that is invoked for each device * * This traverses over each device of @starget. It does _not_ * take a reference on the scsi_device, so the whole loop must be * protected by shost->host_lock. * * Note: The only reason why drivers would want to use this is because * they need to access the device list in irq context. Otherwise you * really want to use starget_for_each_device instead. **/ void __starget_for_each_device(struct scsi_target *starget, void *data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct scsi_device *sdev; __shost_for_each_device(sdev, shost) { if ((sdev->channel == starget->channel) && (sdev->id == starget->id)) fn(sdev, data); } } EXPORT_SYMBOL(__starget_for_each_device); /** * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @lun for a given * @starget. The returned scsi_device does not have an additional * reference. You must hold the host's host_lock over this call and * any access to the returned scsi_device. A scsi_device in state * SDEV_DEL is skipped. * * Note: The only reason why drivers should use this is because * they need to access the device list in irq context. Otherwise you * really want to use scsi_device_lookup_by_target instead. **/ struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, u64 lun) { struct scsi_device *sdev; list_for_each_entry(sdev, &starget->devices, same_target_siblings) { if (sdev->sdev_state == SDEV_DEL) continue; if (sdev->lun ==lun) return sdev; } return NULL; } EXPORT_SYMBOL(__scsi_device_lookup_by_target); /** * scsi_device_lookup_by_target - find a device given the target * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @lun for a given * @starget. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. 
**/ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, u64 lun) { struct scsi_device *sdev; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); sdev = __scsi_device_lookup_by_target(starget, lun); if (sdev && scsi_device_get(sdev)) sdev = NULL; spin_unlock_irqrestore(shost->host_lock, flags); return sdev; } EXPORT_SYMBOL(scsi_device_lookup_by_target); /** * __scsi_device_lookup - find a device given the host (UNLOCKED) * @shost: SCSI host pointer * @channel: SCSI channel (zero if only one channel) * @id: SCSI target number (physical unit number) * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @channel, @id, @lun * for a given host. The returned scsi_device does not have an additional * reference. You must hold the host's host_lock over this call and any access * to the returned scsi_device. * * Note: The only reason why drivers would want to use this is because * they need to access the device list in irq context. Otherwise you * really want to use scsi_device_lookup instead. **/ struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct scsi_device *sdev; list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->sdev_state == SDEV_DEL) continue; if (sdev->channel == channel && sdev->id == id && sdev->lun ==lun) return sdev; } return NULL; } EXPORT_SYMBOL(__scsi_device_lookup); /** * scsi_device_lookup - find a device given the host * @shost: SCSI host pointer * @channel: SCSI channel (zero if only one channel) * @id: SCSI target number (physical unit number) * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @channel, @id, @lun * for a given host. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. **/ struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct scsi_device *sdev; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); sdev = __scsi_device_lookup(shost, channel, id, lun); if (sdev && scsi_device_get(sdev)) sdev = NULL; spin_unlock_irqrestore(shost->host_lock, flags); return sdev; } EXPORT_SYMBOL(scsi_device_lookup); MODULE_DESCRIPTION("SCSI core"); MODULE_LICENSE("GPL"); module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); static int __init init_scsi(void) { int error; error = scsi_init_procfs(); if (error) goto cleanup_queue; error = scsi_init_devinfo(); if (error) goto cleanup_procfs; error = scsi_init_hosts(); if (error) goto cleanup_devlist; error = scsi_init_sysctl(); if (error) goto cleanup_hosts; error = scsi_sysfs_register(); if (error) goto cleanup_sysctl; scsi_netlink_init(); printk(KERN_NOTICE "SCSI subsystem initialized\n"); return 0; cleanup_sysctl: scsi_exit_sysctl(); cleanup_hosts: scsi_exit_hosts(); cleanup_devlist: scsi_exit_devinfo(); cleanup_procfs: scsi_exit_procfs(); cleanup_queue: scsi_exit_queue(); printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n", -error); return error; } static void __exit exit_scsi(void) { scsi_netlink_exit(); scsi_sysfs_unregister(); scsi_exit_sysctl(); scsi_exit_hosts(); scsi_exit_devinfo(); scsi_exit_procfs(); scsi_exit_queue(); } subsys_initcall(init_scsi); module_exit(exit_scsi);
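/*
 * Illustrative sketch only (not part of the scsi.c source above): it shows the
 * reference-counting contract documented for scsi_device_lookup() and
 * scsi_device_put().  The function name and the channel/id/lun values are
 * hypothetical placeholders; a real caller would pass its own Scsi_Host.
 */
#include <linux/printk.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void example_poke_lun0(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	/* On success this takes a device reference and pins the LLDD module. */
	sdev = scsi_device_lookup(shost, 0, 0, 0);
	if (!sdev)
		return;	/* no such device, or it is in SDEV_DEL/SDEV_CANCEL */

	sdev_printk(KERN_INFO, sdev, "found channel 0, id 0, lun 0\n");

	/* The reference must be dropped with scsi_device_put() when done. */
	scsi_device_put(sdev);
}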
linux-master
drivers/scsi/scsi.c
// SPDX-License-Identifier: GPL-2.0-only /* SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying file Documentation/scsi/st.rst for more information. History: Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara. Contribution and ideas from several people including (in alphabetical order) Klaus Ehrenfried, Eugene Exarevsky, Eric Lee Green, Wolfgang Denk, Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, Michael Schaefer, J"org Weule, and Eric Youngdale. Copyright 1992 - 2016 Kai Makisara email [email protected] Some small formal changes - aeb, 950809 Last modified: 18-JAN-1998 Richard Gooch <[email protected]> Devfs support */ static const char *verstr = "20160209"; #include <linux/module.h> #include <linux/compat.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/mtio.h> #include <linux/major.h> #include <linux/cdrom.h> #include <linux/ioctl.h> #include <linux/fcntl.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/moduleparam.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <asm/dma.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/sg.h> /* The driver prints some debugging information on the console if DEBUG is defined and non-zero. */ #define DEBUG 1 #define NO_DEBUG 0 #define ST_DEB_MSG KERN_NOTICE #if DEBUG /* The message level for the debug messages is currently set to KERN_NOTICE so that people can easily see the messages. Later when the debugging messages in the drivers are more widely classified, this may be changed to KERN_DEBUG. */ #define DEB(a) a #define DEBC(a) if (debugging) { a ; } #else #define DEB(a) #define DEBC(a) #endif #define ST_KILOBYTE 1024 #include "st_options.h" #include "st.h" static int buffer_kbs; static int max_sg_segs; static int try_direct_io = TRY_DIRECT_IO; static int try_rdio = 1; static int try_wdio = 1; static int debug_flag; static struct class st_sysfs_class; static const struct attribute_group *st_dev_groups[]; static const struct attribute_group *st_drv_groups[]; MODULE_AUTHOR("Kai Makisara"); MODULE_DESCRIPTION("SCSI tape (st) driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR); MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE); /* Set 'perm' (4th argument) to 0 to disable module_param's definition * of sysfs parameters (which module_param doesn't yet support). * Sysfs parameters defined explicitly later. 
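 * They can still be set when loading the module (e.g. "modprobe st buffer_kbs=64 max_sg_segs=512") or, for a built-in driver, via "st.buffer_kbs=64" on the kernel command line.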
*/ module_param_named(buffer_kbs, buffer_kbs, int, 0); MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size for fixed block mode (KB; 32)"); module_param_named(max_sg_segs, max_sg_segs, int, 0); MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)"); module_param_named(try_direct_io, try_direct_io, int, 0); MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)"); module_param_named(debug_flag, debug_flag, int, 0); MODULE_PARM_DESC(debug_flag, "Enable DEBUG, same as setting debugging=1"); /* Extra parameters for testing */ module_param_named(try_rdio, try_rdio, int, 0); MODULE_PARM_DESC(try_rdio, "Try direct read i/o when possible"); module_param_named(try_wdio, try_wdio, int, 0); MODULE_PARM_DESC(try_wdio, "Try direct write i/o when possible"); #ifndef MODULE static int write_threshold_kbs; /* retained for compatibility */ static struct st_dev_parm { char *name; int *val; } parms[] __initdata = { { "buffer_kbs", &buffer_kbs }, { /* Retained for compatibility with 2.4 */ "write_threshold_kbs", &write_threshold_kbs }, { "max_sg_segs", NULL }, { "try_direct_io", &try_direct_io }, { "debug_flag", &debug_flag } }; #endif /* Restrict the number of modes so that names for all are assigned */ #if ST_NBR_MODES > 16 #error "Maximum number of modes is 16" #endif /* Bit reversed order to get same names for same minors with all mode counts */ static const char *st_formats[] = { "", "r", "k", "s", "l", "t", "o", "u", "m", "v", "p", "x", "a", "y", "q", "z"}; /* The default definitions have been moved to st_options.h */ #define ST_FIXED_BUFFER_SIZE (ST_FIXED_BUFFER_BLOCKS * ST_KILOBYTE) /* The buffer size should fit into the 24 bits for length in the 6-byte SCSI read and write commands. */ #if ST_FIXED_BUFFER_SIZE >= (2 << 24 - 1) #error "Buffer size should not exceed (2 << 24 - 1) bytes!" 
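/*
 * Note on the check above: '-' binds tighter than '<<', so (2 << 24 - 1)
 * evaluates to (2 << 23) == 1 << 24.  The #error therefore triggers for any
 * buffer size that no longer fits the 24-bit transfer length field of the
 * 6-byte READ/WRITE CDBs.
 */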
#endif static int debugging = DEBUG; #define MAX_RETRIES 0 #define MAX_WRITE_RETRIES 0 #define MAX_READY_RETRIES 0 #define NO_TAPE NOT_READY #define ST_TIMEOUT (900 * HZ) #define ST_LONG_TIMEOUT (14000 * HZ) /* Remove mode bits and auto-rewind bit (7) */ #define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \ (iminor(x) & ((1 << ST_MODE_SHIFT)-1))) #define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT) /* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */ #define TAPE_MINOR(d, m, n) (((d & ~(255 >> (ST_NBR_MODE_BITS + 1))) << (ST_NBR_MODE_BITS + 1)) | \ (d & (255 >> (ST_NBR_MODE_BITS + 1))) | (m << ST_MODE_SHIFT) | ((n != 0) << 7) ) /* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower 24 bits) */ #define SET_DENS_AND_BLK 0x10001 static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE; static int st_max_sg_segs = ST_MAX_SG; static int modes_defined; static int enlarge_buffer(struct st_buffer *, int); static void clear_buffer(struct st_buffer *); static void normalize_buffer(struct st_buffer *); static int append_to_buffer(const char __user *, struct st_buffer *, int); static int from_buffer(struct st_buffer *, char __user *, int); static void move_buffer_data(struct st_buffer *, int); static int sgl_map_user_pages(struct st_buffer *, const unsigned int, unsigned long, size_t, int); static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int); static int st_probe(struct device *); static int st_remove(struct device *); static struct scsi_driver st_template = { .gendrv = { .name = "st", .owner = THIS_MODULE, .probe = st_probe, .remove = st_remove, .groups = st_drv_groups, }, }; static int st_compression(struct scsi_tape *, int); static int find_partition(struct scsi_tape *); static int switch_partition(struct scsi_tape *); static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long); static void scsi_tape_release(struct kref *); #define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref) static DEFINE_MUTEX(st_ref_mutex); static DEFINE_SPINLOCK(st_index_lock); static DEFINE_SPINLOCK(st_use_lock); static DEFINE_IDR(st_index_idr); #ifndef SIGS_FROM_OSST #define SIGS_FROM_OSST \ {"OnStream", "SC-", "", "osst"}, \ {"OnStream", "DI-", "", "osst"}, \ {"OnStream", "DP-", "", "osst"}, \ {"OnStream", "USB", "", "osst"}, \ {"OnStream", "FW-", "", "osst"} #endif static struct scsi_tape *scsi_tape_get(int dev) { struct scsi_tape *STp = NULL; mutex_lock(&st_ref_mutex); spin_lock(&st_index_lock); STp = idr_find(&st_index_idr, dev); if (!STp) goto out; kref_get(&STp->kref); if (!STp->device) goto out_put; if (scsi_device_get(STp->device)) goto out_put; goto out; out_put: kref_put(&STp->kref, scsi_tape_release); STp = NULL; out: spin_unlock(&st_index_lock); mutex_unlock(&st_ref_mutex); return STp; } static void scsi_tape_put(struct scsi_tape *STp) { struct scsi_device *sdev = STp->device; mutex_lock(&st_ref_mutex); kref_put(&STp->kref, scsi_tape_release); scsi_device_put(sdev); mutex_unlock(&st_ref_mutex); } struct st_reject_data { char *vendor; char *model; char *rev; char *driver_hint; /* Name of the correct driver, NULL if unknown */ }; static struct st_reject_data reject_list[] = { /* {"XXX", "Yy-", "", NULL}, example */ SIGS_FROM_OSST, {NULL, }}; /* If the device signature is on the list of incompatible drives, the function returns a pointer to the name of the correct driver (if known) */ static char * st_incompatible(struct scsi_device* SDp) { struct st_reject_data *rp; for 
(rp=&(reject_list[0]); rp->vendor != NULL; rp++) if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) && !strncmp(rp->model, SDp->model, strlen(rp->model)) && !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) { if (rp->driver_hint) return rp->driver_hint; else return "unknown"; } return NULL; } #define st_printk(prefix, t, fmt, a...) \ sdev_prefix_printk(prefix, (t)->device, (t)->name, fmt, ##a) #ifdef DEBUG #define DEBC_printk(t, fmt, a...) \ if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); } #else #define DEBC_printk(t, fmt, a...) #endif static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) { const u8 *ucp; const u8 *sense = SRpnt->sense; s->have_sense = scsi_normalize_sense(SRpnt->sense, SCSI_SENSE_BUFFERSIZE, &s->sense_hdr); s->flags = 0; if (s->have_sense) { s->deferred = 0; s->remainder_valid = scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64); switch (sense[0] & 0x7f) { case 0x71: s->deferred = 1; fallthrough; case 0x70: s->fixed_format = 1; s->flags = sense[2] & 0xe0; break; case 0x73: s->deferred = 1; fallthrough; case 0x72: s->fixed_format = 0; ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4); s->flags = ucp ? (ucp[3] & 0xe0) : 0; break; } } } /* Convert the result to success code */ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) { int result = SRpnt->result; u8 scode; DEB(const char *stp;) char *name = STp->name; struct st_cmdstatus *cmdstatp; if (!result) return 0; cmdstatp = &STp->buffer->cmdstat; st_analyze_sense(SRpnt, cmdstatp); if (cmdstatp->have_sense) scode = STp->buffer->cmdstat.sense_hdr.sense_key; else scode = 0; DEB( if (debugging) { st_printk(ST_DEB_MSG, STp, "Error: %x, cmd: %x %x %x %x %x %x\n", result, SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); if (cmdstatp->have_sense) __scsi_print_sense(STp->device, name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } ) /* end DEB */ if (!debugging) { /* Abnormal conditions for tape */ if (!cmdstatp->have_sense) st_printk(KERN_WARNING, STp, "Error %x (driver bt 0, host bt 0x%x).\n", result, host_byte(result)); else if (cmdstatp->have_sense && scode != NO_SENSE && scode != RECOVERED_ERROR && /* scode != UNIT_ATTENTION && */ scode != BLANK_CHECK && scode != VOLUME_OVERFLOW && SRpnt->cmd[0] != MODE_SENSE && SRpnt->cmd[0] != TEST_UNIT_READY) { __scsi_print_sense(STp->device, name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } } if (cmdstatp->fixed_format && STp->cln_mode >= EXTENDED_SENSE_START) { /* Only fixed format sense */ if (STp->cln_sense_value) STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] & STp->cln_sense_mask) == STp->cln_sense_value); else STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] & STp->cln_sense_mask) != 0); } if (cmdstatp->have_sense && cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17) STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */ if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29) STp->pos_unknown = 1; /* ASC => power on / reset */ STp->pos_unknown |= STp->device->was_reset; if (cmdstatp->have_sense && scode == RECOVERED_ERROR #if ST_RECOVERED_WRITE_FATAL && SRpnt->cmd[0] != WRITE_6 && SRpnt->cmd[0] != WRITE_FILEMARKS #endif ) { STp->recover_count++; STp->recover_reg++; DEB( if (debugging) { if (SRpnt->cmd[0] == READ_6) stp = "read"; else if (SRpnt->cmd[0] == WRITE_6) stp = "write"; else stp = "ioctl"; st_printk(ST_DEB_MSG, STp, "Recovered %s error (%d).\n", stp, STp->recover_count); } ) /* end DEB */ if 
(cmdstatp->flags == 0) return 0; } return (-EIO); } static struct st_request *st_allocate_request(struct scsi_tape *stp) { struct st_request *streq; streq = kzalloc(sizeof(*streq), GFP_KERNEL); if (streq) streq->stp = stp; else { st_printk(KERN_ERR, stp, "Can't get SCSI request.\n"); if (signal_pending(current)) stp->buffer->syscall_result = -EINTR; else stp->buffer->syscall_result = -EBUSY; } return streq; } static void st_release_request(struct st_request *streq) { kfree(streq); } static void st_do_stats(struct scsi_tape *STp, struct request *req) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); ktime_t now; now = ktime_get(); if (scmd->cmnd[0] == WRITE_6) { now = ktime_sub(now, STp->stats->write_time); atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time); atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); atomic64_inc(&STp->stats->write_cnt); if (scmd->result) { atomic64_add(atomic_read(&STp->stats->last_write_size) - STp->buffer->cmdstat.residual, &STp->stats->write_byte_cnt); if (STp->buffer->cmdstat.residual > 0) atomic64_inc(&STp->stats->resid_cnt); } else atomic64_add(atomic_read(&STp->stats->last_write_size), &STp->stats->write_byte_cnt); } else if (scmd->cmnd[0] == READ_6) { now = ktime_sub(now, STp->stats->read_time); atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time); atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); atomic64_inc(&STp->stats->read_cnt); if (scmd->result) { atomic64_add(atomic_read(&STp->stats->last_read_size) - STp->buffer->cmdstat.residual, &STp->stats->read_byte_cnt); if (STp->buffer->cmdstat.residual > 0) atomic64_inc(&STp->stats->resid_cnt); } else atomic64_add(atomic_read(&STp->stats->last_read_size), &STp->stats->read_byte_cnt); } else { now = ktime_sub(now, STp->stats->other_time); atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); atomic64_inc(&STp->stats->other_cnt); } atomic64_dec(&STp->stats->in_flight); } static enum rq_end_io_ret st_scsi_execute_end(struct request *req, blk_status_t status) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); struct st_request *SRpnt = req->end_io_data; struct scsi_tape *STp = SRpnt->stp; struct bio *tmp; STp->buffer->cmdstat.midlevel_result = SRpnt->result = scmd->result; STp->buffer->cmdstat.residual = scmd->resid_len; st_do_stats(STp, req); tmp = SRpnt->bio; if (scmd->sense_len) memcpy(SRpnt->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); if (SRpnt->waiting) complete(SRpnt->waiting); blk_rq_unmap_user(tmp); blk_mq_free_request(req); return RQ_END_IO_NONE; } static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, int timeout, int retries) { struct request *req; struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; int err = 0; struct scsi_tape *STp = SRpnt->stp; struct scsi_cmnd *scmd; req = scsi_alloc_request(SRpnt->stp->device->request_queue, data_direction == DMA_TO_DEVICE ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); if (IS_ERR(req)) return PTR_ERR(req); scmd = blk_mq_rq_to_pdu(req); req->rq_flags |= RQF_QUIET; mdata->null_mapped = 1; if (bufflen) { err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL); if (err) { blk_mq_free_request(req); return err; } } atomic64_inc(&STp->stats->in_flight); if (cmd[0] == WRITE_6) { atomic_set(&STp->stats->last_write_size, bufflen); STp->stats->write_time = ktime_get(); } else if (cmd[0] == READ_6) { atomic_set(&STp->stats->last_read_size, bufflen); STp->stats->read_time = ktime_get(); } else { STp->stats->other_time = ktime_get(); } SRpnt->bio = req->bio; scmd->cmd_len = COMMAND_SIZE(cmd[0]); memcpy(scmd->cmnd, cmd, scmd->cmd_len); req->timeout = timeout; scmd->allowed = retries; req->end_io = st_scsi_execute_end; req->end_io_data = SRpnt; blk_execute_rq_nowait(req, true); return 0; } /* Do the scsi command. Waits until command performed if do_wait is true. Otherwise write_behind_check() is used to check that the command has finished. */ static struct st_request * st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd, int bytes, int direction, int timeout, int retries, int do_wait) { struct completion *waiting; struct rq_map_data *mdata = &STp->buffer->map_data; int ret; /* if async, make sure there's no command outstanding */ if (!do_wait && ((STp->buffer)->last_SRpnt)) { st_printk(KERN_ERR, STp, "Async command already active.\n"); if (signal_pending(current)) (STp->buffer)->syscall_result = (-EINTR); else (STp->buffer)->syscall_result = (-EBUSY); return NULL; } if (!SRpnt) { SRpnt = st_allocate_request(STp); if (!SRpnt) return NULL; } /* If async IO, set last_SRpnt. This ptr tells write_behind_check which IO is outstanding. It's nulled out when the IO completes. */ if (!do_wait) (STp->buffer)->last_SRpnt = SRpnt; waiting = &STp->wait; init_completion(waiting); SRpnt->waiting = waiting; if (STp->buffer->do_dio) { mdata->page_order = 0; mdata->nr_entries = STp->buffer->sg_segs; mdata->pages = STp->buffer->mapped_pages; } else { mdata->page_order = STp->buffer->reserved_page_order; mdata->nr_entries = DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); mdata->pages = STp->buffer->reserved_pages; mdata->offset = 0; } memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); STp->buffer->cmdstat.have_sense = 0; STp->buffer->syscall_result = 0; ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout, retries); if (ret) { /* could not allocate the buffer or request was too large */ (STp->buffer)->syscall_result = (-EBUSY); (STp->buffer)->last_SRpnt = NULL; } else if (do_wait) { wait_for_completion(waiting); SRpnt->waiting = NULL; (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); } return SRpnt; } /* Handle the write-behind checking (waits for completion). Returns -ENOSPC if write has been correct but EOM early warning reached, -EIO if write ended in error or zero if write successful. Asynchronous writes are used only in variable block mode. 
*/ static int write_behind_check(struct scsi_tape * STp) { int retval = 0; struct st_buffer *STbuffer; struct st_partstat *STps; struct st_cmdstatus *cmdstatp; struct st_request *SRpnt; STbuffer = STp->buffer; if (!STbuffer->writing) return 0; DEB( if (STp->write_pending) STp->nbr_waits++; else STp->nbr_finished++; ) /* end DEB */ wait_for_completion(&(STp->wait)); SRpnt = STbuffer->last_SRpnt; STbuffer->last_SRpnt = NULL; SRpnt->waiting = NULL; (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); st_release_request(SRpnt); STbuffer->buffer_bytes -= STbuffer->writing; STps = &(STp->ps[STp->partition]); if (STps->drv_block >= 0) { if (STp->block_size == 0) STps->drv_block++; else STps->drv_block += STbuffer->writing / STp->block_size; } cmdstatp = &STbuffer->cmdstat; if (STbuffer->syscall_result) { retval = -EIO; if (cmdstatp->have_sense && !cmdstatp->deferred && (cmdstatp->flags & SENSE_EOM) && (cmdstatp->sense_hdr.sense_key == NO_SENSE || cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR)) { /* EOM at write-behind, has all data been written? */ if (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0) retval = -ENOSPC; } if (retval == -EIO) STps->drv_block = -1; } STbuffer->writing = 0; DEB(if (debugging && retval) st_printk(ST_DEB_MSG, STp, "Async write error %x, return value %d.\n", STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */ return retval; } /* Step over EOF if it has been inadvertently crossed (ioctl not used because it messes up the block number). */ static int cross_eof(struct scsi_tape * STp, int forward) { struct st_request *SRpnt; unsigned char cmd[MAX_COMMAND_SIZE]; cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ if (forward) { cmd[2] = cmd[3] = 0; cmd[4] = 1; } else cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */ cmd[5] = 0; DEBC_printk(STp, "Stepping over filemark %s.\n", forward ? "forward" : "backward"); SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, STp->device->request_queue->rq_timeout, MAX_RETRIES, 1); if (!SRpnt) return (STp->buffer)->syscall_result; st_release_request(SRpnt); SRpnt = NULL; if ((STp->buffer)->cmdstat.midlevel_result != 0) st_printk(KERN_ERR, STp, "Stepping over filemark %s failed.\n", forward ? "forward" : "backward"); return (STp->buffer)->syscall_result; } /* Flush the write buffer (never need to write if variable blocksize). 
*/ static int st_flush_write_buffer(struct scsi_tape * STp) { int transfer, blks; int result; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; struct st_partstat *STps; result = write_behind_check(STp); if (result) return result; result = 0; if (STp->dirty == 1) { transfer = STp->buffer->buffer_bytes; DEBC_printk(STp, "Flushing %d bytes.\n", transfer); memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = WRITE_6; cmd[1] = 1; blks = transfer / STp->block_size; cmd[2] = blks >> 16; cmd[3] = blks >> 8; cmd[4] = blks; SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE, STp->device->request_queue->rq_timeout, MAX_WRITE_RETRIES, 1); if (!SRpnt) return (STp->buffer)->syscall_result; STps = &(STp->ps[STp->partition]); if ((STp->buffer)->syscall_result != 0) { struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; if (cmdstatp->have_sense && !cmdstatp->deferred && (cmdstatp->flags & SENSE_EOM) && (cmdstatp->sense_hdr.sense_key == NO_SENSE || cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0)) { /* All written at EOM early warning */ STp->dirty = 0; (STp->buffer)->buffer_bytes = 0; if (STps->drv_block >= 0) STps->drv_block += blks; result = (-ENOSPC); } else { st_printk(KERN_ERR, STp, "Error on flush.\n"); STps->drv_block = (-1); result = (-EIO); } } else { if (STps->drv_block >= 0) STps->drv_block += blks; STp->dirty = 0; (STp->buffer)->buffer_bytes = 0; } st_release_request(SRpnt); SRpnt = NULL; } return result; } /* Flush the tape buffer. The tape will be positioned correctly unless seek_next is true. */ static int flush_buffer(struct scsi_tape *STp, int seek_next) { int backspace, result; struct st_partstat *STps; /* * If there was a bus reset, block further access * to this device. */ if (STp->pos_unknown) return (-EIO); if (STp->ready != ST_READY) return 0; STps = &(STp->ps[STp->partition]); if (STps->rw == ST_WRITING) /* Writing */ return st_flush_write_buffer(STp); if (STp->block_size == 0) return 0; backspace = ((STp->buffer)->buffer_bytes + (STp->buffer)->read_pointer) / STp->block_size - ((STp->buffer)->read_pointer + STp->block_size - 1) / STp->block_size; (STp->buffer)->buffer_bytes = 0; (STp->buffer)->read_pointer = 0; result = 0; if (!seek_next) { if (STps->eof == ST_FM_HIT) { result = cross_eof(STp, 0); /* Back over the EOF hit */ if (!result) STps->eof = ST_NOEOF; else { if (STps->drv_file >= 0) STps->drv_file++; STps->drv_block = 0; } } if (!result && backspace > 0) result = st_int_ioctl(STp, MTBSR, backspace); } else if (STps->eof == ST_FM_HIT) { if (STps->drv_file >= 0) STps->drv_file++; STps->drv_block = 0; STps->eof = ST_NOEOF; } return result; } /* Set the mode parameters */ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) { int set_it = 0; unsigned long arg; if (!STp->density_changed && STm->default_density >= 0 && STm->default_density != STp->density) { arg = STm->default_density; set_it = 1; } else arg = STp->density; arg <<= MT_ST_DENSITY_SHIFT; if (!STp->blksize_changed && STm->default_blksize >= 0 && STm->default_blksize != STp->block_size) { arg |= STm->default_blksize; set_it = 1; } else arg |= STp->block_size; if (set_it && st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) { st_printk(KERN_WARNING, STp, "Can't set default block size to %d bytes " "and density %x.\n", STm->default_blksize, STm->default_density); if (modes_defined) return (-EINVAL); } return 0; } /* Lock or unlock the drive door. Don't use when st_request allocated. 
*/ static int do_door_lock(struct scsi_tape * STp, int do_lock) { int retval; DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl"); retval = scsi_set_medium_removal(STp->device, do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW); if (!retval) STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; else STp->door_locked = ST_LOCK_FAILS; return retval; } /* Set the internal state after reset */ static void reset_state(struct scsi_tape *STp) { int i; struct st_partstat *STps; STp->pos_unknown = 0; for (i = 0; i < ST_NBR_PARTITIONS; i++) { STps = &(STp->ps[i]); STps->rw = ST_IDLE; STps->eof = ST_NOEOF; STps->at_sm = 0; STps->last_block_valid = 0; STps->drv_block = -1; STps->drv_file = -1; } if (STp->can_partitions) { STp->partition = find_partition(STp); if (STp->partition < 0) STp->partition = 0; STp->new_partition = STp->partition; } } /* Test if the drive is ready. Returns either one of the codes below or a negative system error code. */ #define CHKRES_READY 0 #define CHKRES_NEW_SESSION 1 #define CHKRES_NOT_READY 2 #define CHKRES_NO_TAPE 3 #define MAX_ATTENTIONS 10 static int test_ready(struct scsi_tape *STp, int do_wait) { int attentions, waits, max_wait, scode; int retval = CHKRES_READY, new_session = 0; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt = NULL; struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; max_wait = do_wait ? ST_BLOCK_SECONDS : 0; for (attentions=waits=0; ; ) { memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); cmd[0] = TEST_UNIT_READY; SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->long_timeout, MAX_READY_RETRIES, 1); if (!SRpnt) { retval = (STp->buffer)->syscall_result; break; } if (cmdstatp->have_sense) { scode = cmdstatp->sense_hdr.sense_key; if (scode == UNIT_ATTENTION) { /* New media? */ new_session = 1; if (attentions < MAX_ATTENTIONS) { attentions++; continue; } else { retval = (-EIO); break; } } if (scode == NOT_READY) { if (waits < max_wait) { if (msleep_interruptible(1000)) { retval = (-EINTR); break; } waits++; continue; } else { if ((STp->device)->scsi_level >= SCSI_2 && cmdstatp->sense_hdr.asc == 0x3a) /* Check ASC */ retval = CHKRES_NO_TAPE; else retval = CHKRES_NOT_READY; break; } } } retval = (STp->buffer)->syscall_result; if (!retval) retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY; break; } if (SRpnt != NULL) st_release_request(SRpnt); return retval; } /* See if the drive is ready and gather information about the tape. 
Return values: < 0 negative error code from errno.h 0 drive ready 1 drive not ready (possibly no tape) */ static int check_tape(struct scsi_tape *STp, struct file *filp) { int i, retval, new_session = 0, do_wait; unsigned char cmd[MAX_COMMAND_SIZE], saved_cleaning; unsigned short st_flags = filp->f_flags; struct st_request *SRpnt = NULL; struct st_modedef *STm; struct st_partstat *STps; struct inode *inode = file_inode(filp); int mode = TAPE_MODE(inode); STp->ready = ST_READY; if (mode != STp->current_mode) { DEBC_printk(STp, "Mode change from %d to %d.\n", STp->current_mode, mode); new_session = 1; STp->current_mode = mode; } STm = &(STp->modes[STp->current_mode]); saved_cleaning = STp->cleaning_req; STp->cleaning_req = 0; do_wait = ((filp->f_flags & O_NONBLOCK) == 0); retval = test_ready(STp, do_wait); if (retval < 0) goto err_out; if (retval == CHKRES_NEW_SESSION) { STp->pos_unknown = 0; STp->partition = STp->new_partition = 0; if (STp->can_partitions) STp->nbr_partitions = 1; /* This guess will be updated later if necessary */ for (i = 0; i < ST_NBR_PARTITIONS; i++) { STps = &(STp->ps[i]); STps->rw = ST_IDLE; STps->eof = ST_NOEOF; STps->at_sm = 0; STps->last_block_valid = 0; STps->drv_block = 0; STps->drv_file = 0; } new_session = 1; } else { STp->cleaning_req |= saved_cleaning; if (retval == CHKRES_NOT_READY || retval == CHKRES_NO_TAPE) { if (retval == CHKRES_NO_TAPE) STp->ready = ST_NO_TAPE; else STp->ready = ST_NOT_READY; STp->density = 0; /* Clear the erroneous "residue" */ STp->write_prot = 0; STp->block_size = 0; STp->ps[0].drv_file = STp->ps[0].drv_block = (-1); STp->partition = STp->new_partition = 0; STp->door_locked = ST_UNLOCKED; return CHKRES_NOT_READY; } } if (STp->omit_blklims) STp->min_block = STp->max_block = (-1); else { memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); cmd[0] = READ_BLOCK_LIMITS; SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE, STp->device->request_queue->rq_timeout, MAX_READY_RETRIES, 1); if (!SRpnt) { retval = (STp->buffer)->syscall_result; goto err_out; } if (!SRpnt->result && !STp->buffer->cmdstat.have_sense) { STp->max_block = ((STp->buffer)->b_data[1] << 16) | ((STp->buffer)->b_data[2] << 8) | (STp->buffer)->b_data[3]; STp->min_block = ((STp->buffer)->b_data[4] << 8) | (STp->buffer)->b_data[5]; if ( DEB( debugging || ) !STp->inited) st_printk(KERN_INFO, STp, "Block limits %d - %d bytes.\n", STp->min_block, STp->max_block); } else { STp->min_block = STp->max_block = (-1); DEBC_printk(STp, "Can't read block limits.\n"); } } memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); cmd[0] = MODE_SENSE; cmd[4] = 12; SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE, STp->device->request_queue->rq_timeout, MAX_READY_RETRIES, 1); if (!SRpnt) { retval = (STp->buffer)->syscall_result; goto err_out; } if ((STp->buffer)->syscall_result != 0) { DEBC_printk(STp, "No Mode Sense.\n"); STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */ (STp->buffer)->syscall_result = 0; /* Prevent error propagation */ STp->drv_write_prot = 0; } else { DEBC_printk(STp,"Mode sense. 
Length %d, " "medium %x, WBS %x, BLL %d\n", (STp->buffer)->b_data[0], (STp->buffer)->b_data[1], (STp->buffer)->b_data[2], (STp->buffer)->b_data[3]); if ((STp->buffer)->b_data[3] >= 8) { STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7; STp->density = (STp->buffer)->b_data[4]; STp->block_size = (STp->buffer)->b_data[9] * 65536 + (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11]; DEBC_printk(STp, "Density %x, tape length: %x, " "drv buffer: %d\n", STp->density, (STp->buffer)->b_data[5] * 65536 + (STp->buffer)->b_data[6] * 256 + (STp->buffer)->b_data[7], STp->drv_buffer); } STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; if (!STp->drv_buffer && STp->immediate_filemark) { st_printk(KERN_WARNING, STp, "non-buffered tape: disabling " "writing immediate filemarks\n"); STp->immediate_filemark = 0; } } st_release_request(SRpnt); SRpnt = NULL; STp->inited = 1; if (STp->block_size > 0) (STp->buffer)->buffer_blocks = (STp->buffer)->buffer_size / STp->block_size; else (STp->buffer)->buffer_blocks = 1; (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; DEBC_printk(STp, "Block size: %d, buffer size: %d (%d blocks).\n", STp->block_size, (STp->buffer)->buffer_size, (STp->buffer)->buffer_blocks); if (STp->drv_write_prot) { STp->write_prot = 1; DEBC_printk(STp, "Write protected\n"); if (do_wait && ((st_flags & O_ACCMODE) == O_WRONLY || (st_flags & O_ACCMODE) == O_RDWR)) { retval = (-EROFS); goto err_out; } } if (STp->can_partitions && STp->nbr_partitions < 1) { /* This code is reached when the device is opened for the first time after the driver has been initialized with tape in the drive and the partition support has been enabled. */ DEBC_printk(STp, "Updating partition number in status.\n"); if ((STp->partition = find_partition(STp)) < 0) { retval = STp->partition; goto err_out; } STp->new_partition = STp->partition; STp->nbr_partitions = 1; /* This guess will be updated when necessary */ } if (new_session) { /* Change the drive parameters for the new mode */ STp->density_changed = STp->blksize_changed = 0; STp->compression_changed = 0; if (!(STm->defaults_for_writes) && (retval = set_mode_densblk(STp, STm)) < 0) goto err_out; if (STp->default_drvbuffer != 0xff) { if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer)) st_printk(KERN_WARNING, STp, "Can't set default drive " "buffering to %d.\n", STp->default_drvbuffer); } } return CHKRES_READY; err_out: return retval; } /* Open the device. Needs to take the BKL only because of incrementing the SCSI host module count. */ static int st_open(struct inode *inode, struct file *filp) { int i, retval = (-EIO); int resumed = 0; struct scsi_tape *STp; struct st_partstat *STps; int dev = TAPE_NR(inode); /* * We really want to do nonseekable_open(inode, filp); here, but some * versions of tar incorrectly call lseek on tapes and bail out if that * fails. So we disallow pread() and pwrite(), but permit lseeks. 
*/ filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); if (!(STp = scsi_tape_get(dev))) { return -ENXIO; } filp->private_data = STp; spin_lock(&st_use_lock); if (STp->in_use) { spin_unlock(&st_use_lock); DEBC_printk(STp, "Device already in use.\n"); scsi_tape_put(STp); return (-EBUSY); } STp->in_use = 1; spin_unlock(&st_use_lock); STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0; if (scsi_autopm_get_device(STp->device) < 0) { retval = -EIO; goto err_out; } resumed = 1; if (!scsi_block_when_processing_errors(STp->device)) { retval = (-ENXIO); goto err_out; } /* See that we have at least a one page buffer available */ if (!enlarge_buffer(STp->buffer, PAGE_SIZE)) { st_printk(KERN_WARNING, STp, "Can't allocate one page tape buffer.\n"); retval = (-EOVERFLOW); goto err_out; } (STp->buffer)->cleared = 0; (STp->buffer)->writing = 0; (STp->buffer)->syscall_result = 0; STp->write_prot = ((filp->f_flags & O_ACCMODE) == O_RDONLY); STp->dirty = 0; for (i = 0; i < ST_NBR_PARTITIONS; i++) { STps = &(STp->ps[i]); STps->rw = ST_IDLE; } STp->try_dio_now = STp->try_dio; STp->recover_count = 0; DEB( STp->nbr_waits = STp->nbr_finished = 0; STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; ) retval = check_tape(STp, filp); if (retval < 0) goto err_out; if ((filp->f_flags & O_NONBLOCK) == 0 && retval != CHKRES_READY) { if (STp->ready == NO_TAPE) retval = (-ENOMEDIUM); else retval = (-EIO); goto err_out; } return 0; err_out: normalize_buffer(STp->buffer); spin_lock(&st_use_lock); STp->in_use = 0; spin_unlock(&st_use_lock); if (resumed) scsi_autopm_put_device(STp->device); scsi_tape_put(STp); return retval; } /* Flush the tape buffer before close */ static int st_flush(struct file *filp, fl_owner_t id) { int result = 0, result2; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; struct scsi_tape *STp = filp->private_data; struct st_modedef *STm = &(STp->modes[STp->current_mode]); struct st_partstat *STps = &(STp->ps[STp->partition]); if (file_count(filp) > 1) return 0; if (STps->rw == ST_WRITING && !STp->pos_unknown) { result = st_flush_write_buffer(STp); if (result != 0 && result != (-ENOSPC)) goto out; } if (STp->can_partitions && (result2 = switch_partition(STp)) < 0) { DEBC_printk(STp, "switch_partition at close failed.\n"); if (result == 0) result = result2; goto out; } DEBC( if (STp->nbr_requests) st_printk(KERN_DEBUG, STp, "Number of r/w requests %d, dio used in %d, " "pages %d.\n", STp->nbr_requests, STp->nbr_dio, STp->nbr_pages)); if (STps->rw == ST_WRITING && !STp->pos_unknown) { struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; #if DEBUG DEBC_printk(STp, "Async write waits %d, finished %d.\n", STp->nbr_waits, STp->nbr_finished); #endif memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = WRITE_FILEMARKS; if (STp->immediate_filemark) cmd[1] = 1; cmd[4] = 1 + STp->two_fm; SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, STp->device->request_queue->rq_timeout, MAX_WRITE_RETRIES, 1); if (!SRpnt) { result = (STp->buffer)->syscall_result; goto out; } if (STp->buffer->syscall_result == 0 || (cmdstatp->have_sense && !cmdstatp->deferred && (cmdstatp->flags & SENSE_EOM) && (cmdstatp->sense_hdr.sense_key == NO_SENSE || cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0))) { /* Write successful at EOM */ st_release_request(SRpnt); SRpnt = NULL; if (STps->drv_file >= 0) STps->drv_file++; STps->drv_block = 0; if (STp->two_fm) cross_eof(STp, 0); STps->eof = ST_FM; } else { /* Write error */ st_release_request(SRpnt); SRpnt = NULL; 
st_printk(KERN_ERR, STp, "Error on write filemark.\n"); if (result == 0) result = (-EIO); } DEBC_printk(STp, "Buffer flushed, %d EOF(s) written\n", cmd[4]); } else if (!STp->rew_at_close) { STps = &(STp->ps[STp->partition]); if (!STm->sysv || STps->rw != ST_READING) { if (STp->can_bsr) result = flush_buffer(STp, 0); else if (STps->eof == ST_FM_HIT) { result = cross_eof(STp, 0); if (result) { if (STps->drv_file >= 0) STps->drv_file++; STps->drv_block = 0; STps->eof = ST_FM; } else STps->eof = ST_NOEOF; } } else if ((STps->eof == ST_NOEOF && !(result = cross_eof(STp, 1))) || STps->eof == ST_FM_HIT) { if (STps->drv_file >= 0) STps->drv_file++; STps->drv_block = 0; STps->eof = ST_FM; } } out: if (STp->rew_at_close) { result2 = st_int_ioctl(STp, MTREW, 1); if (result == 0) result = result2; } return result; } /* Close the device and release it. BKL is not needed: this is the only thread accessing this tape. */ static int st_release(struct inode *inode, struct file *filp) { struct scsi_tape *STp = filp->private_data; if (STp->door_locked == ST_LOCKED_AUTO) do_door_lock(STp, 0); normalize_buffer(STp->buffer); spin_lock(&st_use_lock); STp->in_use = 0; spin_unlock(&st_use_lock); scsi_autopm_put_device(STp->device); scsi_tape_put(STp); return 0; } /* The checks common to both reading and writing */ static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count) { ssize_t retval = 0; /* * If we are in the middle of error recovery, don't let anyone * else try and use this device. Also, if error recovery fails, it * may try and take the device offline, in which case all further * access to the device is prohibited. */ if (!scsi_block_when_processing_errors(STp->device)) { retval = (-ENXIO); goto out; } if (STp->ready != ST_READY) { if (STp->ready == ST_NO_TAPE) retval = (-ENOMEDIUM); else retval = (-EIO); goto out; } if (! STp->modes[STp->current_mode].defined) { retval = (-ENXIO); goto out; } /* * If there was a bus reset, block further access * to this device. */ if (STp->pos_unknown) { retval = (-EIO); goto out; } if (count == 0) goto out; DEB( if (!STp->in_use) { st_printk(ST_DEB_MSG, STp, "Incorrect device.\n"); retval = (-EIO); goto out; } ) /* end DEB */ if (STp->can_partitions && (retval = switch_partition(STp)) < 0) goto out; if (STp->block_size == 0 && STp->max_block > 0 && (count < STp->min_block || count > STp->max_block)) { retval = (-EINVAL); goto out; } if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1)) STp->door_locked = ST_LOCKED_AUTO; out: return retval; } static int setup_buffering(struct scsi_tape *STp, const char __user *buf, size_t count, int is_read) { int i, bufsize, retval = 0; struct st_buffer *STbp = STp->buffer; if (is_read) i = STp->try_dio_now && try_rdio; else i = STp->try_dio_now && try_wdio; if (i && ((unsigned long)buf & queue_dma_alignment( STp->device->request_queue)) == 0) { i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf, count, (is_read ? READ : WRITE)); if (i > 0) { STbp->do_dio = i; STbp->buffer_bytes = 0; /* can be used as transfer counter */ } else STbp->do_dio = 0; /* fall back to buffering with any error */ STbp->sg_segs = STbp->do_dio; DEB( if (STbp->do_dio) { STp->nbr_dio++; STp->nbr_pages += STbp->do_dio; } ) } else STbp->do_dio = 0; DEB( STp->nbr_requests++; ) if (!STbp->do_dio) { if (STp->block_size) bufsize = STp->block_size > st_fixed_buffer_size ? 
STp->block_size : st_fixed_buffer_size; else { bufsize = count; /* Make sure that data from previous user is not leaked even if HBA does not return correct residual */ if (is_read && STp->sili && !STbp->cleared) clear_buffer(STbp); } if (bufsize > STbp->buffer_size && !enlarge_buffer(STbp, bufsize)) { st_printk(KERN_WARNING, STp, "Can't allocate %d byte tape buffer.\n", bufsize); retval = (-EOVERFLOW); goto out; } if (STp->block_size) STbp->buffer_blocks = bufsize / STp->block_size; } out: return retval; } /* Can be called more than once after each setup_buffer() */ static void release_buffering(struct scsi_tape *STp, int is_read) { struct st_buffer *STbp; STbp = STp->buffer; if (STbp->do_dio) { sgl_unmap_user_pages(STbp, STbp->do_dio, is_read); STbp->do_dio = 0; STbp->sg_segs = 0; } } /* Write command */ static ssize_t st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { ssize_t total; ssize_t i, do_count, blks, transfer; ssize_t retval; int undone, retry_eot = 0, scode; int async_write; unsigned char cmd[MAX_COMMAND_SIZE]; const char __user *b_point; struct st_request *SRpnt = NULL; struct scsi_tape *STp = filp->private_data; struct st_modedef *STm; struct st_partstat *STps; struct st_buffer *STbp; if (mutex_lock_interruptible(&STp->lock)) return -ERESTARTSYS; retval = rw_checks(STp, filp, count); if (retval || count == 0) goto out; /* Write must be integral number of blocks */ if (STp->block_size != 0 && (count % STp->block_size) != 0) { st_printk(KERN_WARNING, STp, "Write not multiple of tape block size.\n"); retval = (-EINVAL); goto out; } STm = &(STp->modes[STp->current_mode]); STps = &(STp->ps[STp->partition]); if (STp->write_prot) { retval = (-EACCES); goto out; } if (STps->rw == ST_READING) { retval = flush_buffer(STp, 0); if (retval) goto out; STps->rw = ST_WRITING; } else if (STps->rw != ST_WRITING && STps->drv_file == 0 && STps->drv_block == 0) { if ((retval = set_mode_densblk(STp, STm)) < 0) goto out; if (STm->default_compression != ST_DONT_TOUCH && !(STp->compression_changed)) { if (st_compression(STp, (STm->default_compression == ST_YES))) { st_printk(KERN_WARNING, STp, "Can't set default compression.\n"); if (modes_defined) { retval = (-EINVAL); goto out; } } } } STbp = STp->buffer; i = write_behind_check(STp); if (i) { if (i == -ENOSPC) STps->eof = ST_EOM_OK; else STps->eof = ST_EOM_ERROR; } if (STps->eof == ST_EOM_OK) { STps->eof = ST_EOD_1; /* allow next write */ retval = (-ENOSPC); goto out; } else if (STps->eof == ST_EOM_ERROR) { retval = (-EIO); goto out; } /* Check the buffer readability in cases where copy_user might catch the problems after some tape movement. 
*/ if (STp->block_size != 0 && !STbp->do_dio && (copy_from_user(&i, buf, 1) != 0 || copy_from_user(&i, buf + count - 1, 1) != 0)) { retval = (-EFAULT); goto out; } retval = setup_buffering(STp, buf, count, 0); if (retval) goto out; total = count; memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = WRITE_6; cmd[1] = (STp->block_size != 0); STps->rw = ST_WRITING; b_point = buf; while (count > 0 && !retry_eot) { if (STbp->do_dio) { do_count = count; } else { if (STp->block_size == 0) do_count = count; else { do_count = STbp->buffer_blocks * STp->block_size - STbp->buffer_bytes; if (do_count > count) do_count = count; } i = append_to_buffer(b_point, STbp, do_count); if (i) { retval = i; goto out; } } count -= do_count; b_point += do_count; async_write = STp->block_size == 0 && !STbp->do_dio && STm->do_async_writes && STps->eof < ST_EOM_OK; if (STp->block_size != 0 && STm->do_buffer_writes && !(STp->try_dio_now && try_wdio) && STps->eof < ST_EOM_OK && STbp->buffer_bytes < STbp->buffer_size) { STp->dirty = 1; /* Don't write a buffer that is not full enough. */ if (!async_write && count == 0) break; } retry_write: if (STp->block_size == 0) blks = transfer = do_count; else { if (!STbp->do_dio) blks = STbp->buffer_bytes; else blks = do_count; blks /= STp->block_size; transfer = blks * STp->block_size; } cmd[2] = blks >> 16; cmd[3] = blks >> 8; cmd[4] = blks; SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE, STp->device->request_queue->rq_timeout, MAX_WRITE_RETRIES, !async_write); if (!SRpnt) { retval = STbp->syscall_result; goto out; } if (async_write && !STbp->syscall_result) { STbp->writing = transfer; STp->dirty = !(STbp->writing == STbp->buffer_bytes); SRpnt = NULL; /* Prevent releasing this request! */ DEB( STp->write_pending = 1; ) break; } if (STbp->syscall_result != 0) { struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; DEBC_printk(STp, "Error on write:\n"); if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { scode = cmdstatp->sense_hdr.sense_key; if (cmdstatp->remainder_valid) undone = (int)cmdstatp->uremainder64; else if (STp->block_size == 0 && scode == VOLUME_OVERFLOW) undone = transfer; else undone = 0; if (STp->block_size != 0) undone *= STp->block_size; if (undone <= do_count) { /* Only data from this write is not written */ count += undone; b_point -= undone; do_count -= undone; if (STp->block_size) blks = (transfer - undone) / STp->block_size; STps->eof = ST_EOM_OK; /* Continue in fixed block mode if all written in this request but still something left to write (retval left to zero) */ if (STp->block_size == 0 || undone > 0 || count == 0) retval = (-ENOSPC); /* EOM within current request */ DEBC_printk(STp, "EOM with %d " "bytes unwritten.\n", (int)count); } else { /* EOT within data buffered earlier (possible only in fixed block mode without direct i/o) */ if (!retry_eot && !cmdstatp->deferred && (scode == NO_SENSE || scode == RECOVERED_ERROR)) { move_buffer_data(STp->buffer, transfer - undone); retry_eot = 1; if (STps->drv_block >= 0) { STps->drv_block += (transfer - undone) / STp->block_size; } STps->eof = ST_EOM_OK; DEBC_printk(STp, "Retry " "write of %d " "bytes at EOM.\n", STp->buffer->buffer_bytes); goto retry_write; } else { /* Either error within data buffered by driver or failed retry */ count -= do_count; blks = do_count = 0; STps->eof = ST_EOM_ERROR; STps->drv_block = (-1); /* Too cautious? */ retval = (-EIO); /* EOM for old data */ DEBC_printk(STp, "EOM with " "lost data.\n"); } } } else { count += do_count; STps->drv_block = (-1); /* Too cautious? 
*/ retval = STbp->syscall_result; } } if (STps->drv_block >= 0) { if (STp->block_size == 0) STps->drv_block += (do_count > 0); else STps->drv_block += blks; } STbp->buffer_bytes = 0; STp->dirty = 0; if (retval || retry_eot) { if (count < total) retval = total - count; goto out; } } if (STps->eof == ST_EOD_1) STps->eof = ST_EOM_OK; else if (STps->eof != ST_EOM_OK) STps->eof = ST_NOEOF; retval = total - count; out: if (SRpnt != NULL) st_release_request(SRpnt); release_buffering(STp, 0); mutex_unlock(&STp->lock); return retval; } /* Read data from the tape. Returns zero in the normal case, one if the eof status has changed, and the negative error code in case of a fatal error. Otherwise updates the buffer and the eof state. Does release user buffer mapping if it is set. */ static long read_tape(struct scsi_tape *STp, long count, struct st_request ** aSRpnt) { int transfer, blks, bytes; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; struct st_modedef *STm; struct st_partstat *STps; struct st_buffer *STbp; int retval = 0; if (count == 0) return 0; STm = &(STp->modes[STp->current_mode]); STps = &(STp->ps[STp->partition]); if (STps->eof == ST_FM_HIT) return 1; STbp = STp->buffer; if (STp->block_size == 0) blks = bytes = count; else { if (!(STp->try_dio_now && try_rdio) && STm->do_read_ahead) { blks = (STp->buffer)->buffer_blocks; bytes = blks * STp->block_size; } else { bytes = count; if (!STbp->do_dio && bytes > (STp->buffer)->buffer_size) bytes = (STp->buffer)->buffer_size; blks = bytes / STp->block_size; bytes = blks * STp->block_size; } } memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = READ_6; cmd[1] = (STp->block_size != 0); if (!cmd[1] && STp->sili) cmd[1] |= 2; cmd[2] = blks >> 16; cmd[3] = blks >> 8; cmd[4] = blks; SRpnt = *aSRpnt; SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE, STp->device->request_queue->rq_timeout, MAX_RETRIES, 1); release_buffering(STp, 1); *aSRpnt = SRpnt; if (!SRpnt) return STbp->syscall_result; STbp->read_pointer = 0; STps->at_sm = 0; /* Something to check */ if (STbp->syscall_result) { struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; retval = 1; DEBC_printk(STp, "Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n", SRpnt->sense[0], SRpnt->sense[1], SRpnt->sense[2], SRpnt->sense[3], SRpnt->sense[4], SRpnt->sense[5], SRpnt->sense[6], SRpnt->sense[7]); if (cmdstatp->have_sense) { if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) cmdstatp->flags &= 0xcf; /* No need for EOM in this case */ if (cmdstatp->flags != 0) { /* EOF, EOM, or ILI */ /* Compute the residual count */ if (cmdstatp->remainder_valid) transfer = (int)cmdstatp->uremainder64; else transfer = 0; if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) { if (STp->block_size == 0) transfer = bytes; /* Some drives set ILI with MEDIUM ERROR */ cmdstatp->flags &= ~SENSE_ILI; } if (cmdstatp->flags & SENSE_ILI) { /* ILI */ if (STp->block_size == 0 && transfer < 0) { st_printk(KERN_NOTICE, STp, "Failed to read %d " "byte block with %d " "byte transfer.\n", bytes - transfer, bytes); if (STps->drv_block >= 0) STps->drv_block += 1; STbp->buffer_bytes = 0; return (-ENOMEM); } else if (STp->block_size == 0) { STbp->buffer_bytes = bytes - transfer; } else { st_release_request(SRpnt); SRpnt = *aSRpnt = NULL; if (transfer == blks) { /* We did not get anything, error */ st_printk(KERN_NOTICE, STp, "Incorrect " "block size.\n"); if (STps->drv_block >= 0) STps->drv_block += blks - transfer + 1; st_int_ioctl(STp, MTBSR, 1); return (-EIO); } /* We have some data, deliver it */ STbp->buffer_bytes = (blks - 
transfer) * STp->block_size; DEBC_printk(STp, "ILI but " "enough data " "received %ld " "%d.\n", count, STbp->buffer_bytes); if (STps->drv_block >= 0) STps->drv_block += 1; if (st_int_ioctl(STp, MTBSR, 1)) return (-EIO); } } else if (cmdstatp->flags & SENSE_FMK) { /* FM overrides EOM */ if (STps->eof != ST_FM_HIT) STps->eof = ST_FM_HIT; else STps->eof = ST_EOD_2; if (STp->block_size == 0) STbp->buffer_bytes = 0; else STbp->buffer_bytes = bytes - transfer * STp->block_size; DEBC_printk(STp, "EOF detected (%d " "bytes read).\n", STbp->buffer_bytes); } else if (cmdstatp->flags & SENSE_EOM) { if (STps->eof == ST_FM) STps->eof = ST_EOD_1; else STps->eof = ST_EOM_OK; if (STp->block_size == 0) STbp->buffer_bytes = bytes - transfer; else STbp->buffer_bytes = bytes - transfer * STp->block_size; DEBC_printk(STp, "EOM detected (%d " "bytes read).\n", STbp->buffer_bytes); } } /* end of EOF, EOM, ILI test */ else { /* nonzero sense key */ DEBC_printk(STp, "Tape error while reading.\n"); STps->drv_block = (-1); if (STps->eof == ST_FM && cmdstatp->sense_hdr.sense_key == BLANK_CHECK) { DEBC_printk(STp, "Zero returned for " "first BLANK CHECK " "after EOF.\n"); STps->eof = ST_EOD_2; /* First BLANK_CHECK after FM */ } else /* Some other extended sense code */ retval = (-EIO); } if (STbp->buffer_bytes < 0) /* Caused by bogus sense data */ STbp->buffer_bytes = 0; } /* End of extended sense test */ else { /* Non-extended sense */ retval = STbp->syscall_result; } } /* End of error handling */ else { /* Read successful */ STbp->buffer_bytes = bytes; if (STp->sili) /* In fixed block mode residual is always zero here */ STbp->buffer_bytes -= STp->buffer->cmdstat.residual; } if (STps->drv_block >= 0) { if (STp->block_size == 0) STps->drv_block++; else STps->drv_block += STbp->buffer_bytes / STp->block_size; } return retval; } /* Read command */ static ssize_t st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { ssize_t total; ssize_t retval = 0; ssize_t i, transfer; int special, do_dio = 0; struct st_request *SRpnt = NULL; struct scsi_tape *STp = filp->private_data; struct st_modedef *STm; struct st_partstat *STps; struct st_buffer *STbp = STp->buffer; if (mutex_lock_interruptible(&STp->lock)) return -ERESTARTSYS; retval = rw_checks(STp, filp, count); if (retval || count == 0) goto out; STm = &(STp->modes[STp->current_mode]); if (STp->block_size != 0 && (count % STp->block_size) != 0) { if (!STm->do_read_ahead) { retval = (-EINVAL); /* Read must be integral number of blocks */ goto out; } STp->try_dio_now = 0; /* Direct i/o can't handle split blocks */ } STps = &(STp->ps[STp->partition]); if (STps->rw == ST_WRITING) { retval = flush_buffer(STp, 0); if (retval) goto out; STps->rw = ST_READING; } DEB( if (debugging && STps->eof != ST_NOEOF) st_printk(ST_DEB_MSG, STp, "EOF/EOM flag up (%d). Bytes %d\n", STps->eof, STbp->buffer_bytes); ) /* end DEB */ retval = setup_buffering(STp, buf, count, 1); if (retval) goto out; do_dio = STbp->do_dio; if (STbp->buffer_bytes == 0 && STps->eof >= ST_EOD_1) { if (STps->eof < ST_EOD) { STps->eof += 1; retval = 0; goto out; } retval = (-EIO); /* EOM or Blank Check */ goto out; } if (do_dio) { /* Check the buffer writability before any tape movement. Don't alter buffer data. 
*/ if (copy_from_user(&i, buf, 1) != 0 || copy_to_user(buf, &i, 1) != 0 || copy_from_user(&i, buf + count - 1, 1) != 0 || copy_to_user(buf + count - 1, &i, 1) != 0) { retval = (-EFAULT); goto out; } } STps->rw = ST_READING; /* Loop until enough data in buffer or a special condition found */ for (total = 0, special = 0; total < count && !special;) { /* Get new data if the buffer is empty */ if (STbp->buffer_bytes == 0) { special = read_tape(STp, count - total, &SRpnt); if (special < 0) { /* No need to continue read */ retval = special; goto out; } } /* Move the data from driver buffer to user buffer */ if (STbp->buffer_bytes > 0) { DEB( if (debugging && STps->eof != ST_NOEOF) st_printk(ST_DEB_MSG, STp, "EOF up (%d). Left %d, needed %d.\n", STps->eof, STbp->buffer_bytes, (int)(count - total)); ) /* end DEB */ transfer = STbp->buffer_bytes < count - total ? STbp->buffer_bytes : count - total; if (!do_dio) { i = from_buffer(STbp, buf, transfer); if (i) { retval = i; goto out; } } buf += transfer; total += transfer; } if (STp->block_size == 0) break; /* Read only one variable length block */ } /* for (total = 0, special = 0; total < count && !special; ) */ /* Change the eof state if no data from tape or buffer */ if (total == 0) { if (STps->eof == ST_FM_HIT) { STps->eof = ST_FM; STps->drv_block = 0; if (STps->drv_file >= 0) STps->drv_file++; } else if (STps->eof == ST_EOD_1) { STps->eof = ST_EOD_2; STps->drv_block = 0; if (STps->drv_file >= 0) STps->drv_file++; } else if (STps->eof == ST_EOD_2) STps->eof = ST_EOD; } else if (STps->eof == ST_FM) STps->eof = ST_NOEOF; retval = total; out: if (SRpnt != NULL) { st_release_request(SRpnt); SRpnt = NULL; } if (do_dio) { release_buffering(STp, 1); STbp->buffer_bytes = 0; } mutex_unlock(&STp->lock); return retval; } DEB( /* Set the driver options */ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm) { if (debugging) { st_printk(KERN_INFO, STp, "Mode %d options: buffer writes: %d, " "async writes: %d, read ahead: %d\n", STp->current_mode, STm->do_buffer_writes, STm->do_async_writes, STm->do_read_ahead); st_printk(KERN_INFO, STp, " can bsr: %d, two FMs: %d, " "fast mteom: %d, auto lock: %d,\n", STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock); st_printk(KERN_INFO, STp, " defs for wr: %d, no block limits: %d, " "partitions: %d, s2 log: %d\n", STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, STp->scsi2_logical); st_printk(KERN_INFO, STp, " sysv: %d nowait: %d sili: %d " "nowait_filemark: %d\n", STm->sysv, STp->immediate, STp->sili, STp->immediate_filemark); st_printk(KERN_INFO, STp, " debugging: %d\n", debugging); } } ) static int st_set_options(struct scsi_tape *STp, long options) { int value; long code; struct st_modedef *STm; struct cdev *cd0, *cd1; struct device *d0, *d1; STm = &(STp->modes[STp->current_mode]); if (!STm->defined) { cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1]; d0 = STm->devs[0]; d1 = STm->devs[1]; memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef)); STm->cdevs[0] = cd0; STm->cdevs[1] = cd1; STm->devs[0] = d0; STm->devs[1] = d1; modes_defined = 1; DEBC_printk(STp, "Initialized mode %d definition from mode 0\n", STp->current_mode); } code = options & MT_ST_OPTIONS; if (code == MT_ST_BOOLEANS) { STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0; STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0; STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0; STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0; STp->two_fm = (options & MT_ST_TWO_FM) != 0; 
STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0; STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0; STp->can_bsr = (options & MT_ST_CAN_BSR) != 0; STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0; if ((STp->device)->scsi_level >= SCSI_2) STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; STp->immediate = (options & MT_ST_NOWAIT) != 0; STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0; STm->sysv = (options & MT_ST_SYSV) != 0; STp->sili = (options & MT_ST_SILI) != 0; DEB( debugging = (options & MT_ST_DEBUGGING) != 0; st_log_options(STp, STm); ) } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { value = (code == MT_ST_SETBOOLEANS); if ((options & MT_ST_BUFFER_WRITES) != 0) STm->do_buffer_writes = value; if ((options & MT_ST_ASYNC_WRITES) != 0) STm->do_async_writes = value; if ((options & MT_ST_DEF_WRITES) != 0) STm->defaults_for_writes = value; if ((options & MT_ST_READ_AHEAD) != 0) STm->do_read_ahead = value; if ((options & MT_ST_TWO_FM) != 0) STp->two_fm = value; if ((options & MT_ST_FAST_MTEOM) != 0) STp->fast_mteom = value; if ((options & MT_ST_AUTO_LOCK) != 0) STp->do_auto_lock = value; if ((options & MT_ST_CAN_BSR) != 0) STp->can_bsr = value; if ((options & MT_ST_NO_BLKLIMS) != 0) STp->omit_blklims = value; if ((STp->device)->scsi_level >= SCSI_2 && (options & MT_ST_CAN_PARTITIONS) != 0) STp->can_partitions = value; if ((options & MT_ST_SCSI2LOGICAL) != 0) STp->scsi2_logical = value; if ((options & MT_ST_NOWAIT) != 0) STp->immediate = value; if ((options & MT_ST_NOWAIT_EOF) != 0) STp->immediate_filemark = value; if ((options & MT_ST_SYSV) != 0) STm->sysv = value; if ((options & MT_ST_SILI) != 0) STp->sili = value; DEB( if ((options & MT_ST_DEBUGGING) != 0) debugging = value; st_log_options(STp, STm); ) } else if (code == MT_ST_WRITE_THRESHOLD) { /* Retained for compatibility */ } else if (code == MT_ST_DEF_BLKSIZE) { value = (options & ~MT_ST_OPTIONS); if (value == ~MT_ST_OPTIONS) { STm->default_blksize = (-1); DEBC_printk(STp, "Default block size disabled.\n"); } else { STm->default_blksize = value; DEBC_printk(STp,"Default block size set to " "%d bytes.\n", STm->default_blksize); if (STp->ready == ST_READY) { STp->blksize_changed = 0; set_mode_densblk(STp, STm); } } } else if (code == MT_ST_TIMEOUTS) { value = (options & ~MT_ST_OPTIONS); if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) { STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ; DEBC_printk(STp, "Long timeout set to %d seconds.\n", (value & ~MT_ST_SET_LONG_TIMEOUT)); } else { blk_queue_rq_timeout(STp->device->request_queue, value * HZ); DEBC_printk(STp, "Normal timeout set to %d seconds.\n", value); } } else if (code == MT_ST_SET_CLN) { value = (options & ~MT_ST_OPTIONS) & 0xff; if (value != 0 && (value < EXTENDED_SENSE_START || value >= SCSI_SENSE_BUFFERSIZE)) return (-EINVAL); STp->cln_mode = value; STp->cln_sense_mask = (options >> 8) & 0xff; STp->cln_sense_value = (options >> 16) & 0xff; st_printk(KERN_INFO, STp, "Cleaning request mode %d, mask %02x, value %02x\n", value, STp->cln_sense_mask, STp->cln_sense_value); } else if (code == MT_ST_DEF_OPTIONS) { code = (options & ~MT_ST_CLEAR_DEFAULT); value = (options & MT_ST_CLEAR_DEFAULT); if (code == MT_ST_DEF_DENSITY) { if (value == MT_ST_CLEAR_DEFAULT) { STm->default_density = (-1); DEBC_printk(STp, "Density default disabled.\n"); } else { STm->default_density = value & 0xff; DEBC_printk(STp, "Density default set to %x\n", STm->default_density); if (STp->ready == 
ST_READY) { STp->density_changed = 0; set_mode_densblk(STp, STm); } } } else if (code == MT_ST_DEF_DRVBUFFER) { if (value == MT_ST_CLEAR_DEFAULT) { STp->default_drvbuffer = 0xff; DEBC_printk(STp, "Drive buffer default disabled.\n"); } else { STp->default_drvbuffer = value & 7; DEBC_printk(STp, "Drive buffer default set to %x\n", STp->default_drvbuffer); if (STp->ready == ST_READY) st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer); } } else if (code == MT_ST_DEF_COMPRESSION) { if (value == MT_ST_CLEAR_DEFAULT) { STm->default_compression = ST_DONT_TOUCH; DEBC_printk(STp, "Compression default disabled.\n"); } else { if ((value & 0xff00) != 0) { STp->c_algo = (value & 0xff00) >> 8; DEBC_printk(STp, "Compression " "algorithm set to 0x%x.\n", STp->c_algo); } if ((value & 0xff) != 0xff) { STm->default_compression = (value & 1 ? ST_YES : ST_NO); DEBC_printk(STp, "Compression default " "set to %x\n", (value & 1)); if (STp->ready == ST_READY) { STp->compression_changed = 0; st_compression(STp, (STm->default_compression == ST_YES)); } } } } } else return (-EIO); return 0; } #define MODE_HEADER_LENGTH 4 /* Mode header and page byte offsets */ #define MH_OFF_DATA_LENGTH 0 #define MH_OFF_MEDIUM_TYPE 1 #define MH_OFF_DEV_SPECIFIC 2 #define MH_OFF_BDESCS_LENGTH 3 #define MP_OFF_PAGE_NBR 0 #define MP_OFF_PAGE_LENGTH 1 /* Mode header and page bit masks */ #define MH_BIT_WP 0x80 #define MP_MSK_PAGE_NBR 0x3f /* Don't return block descriptors */ #define MODE_SENSE_OMIT_BDESCS 0x08 #define MODE_SELECT_PAGE_FORMAT 0x10 /* Read a mode page into the tape buffer. The block descriptors are included if incl_block_descs is true. The page control is ored to the page number parameter, if necessary. */ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs) { unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = MODE_SENSE; if (omit_block_descs) cmd[1] = MODE_SENSE_OMIT_BDESCS; cmd[2] = page; cmd[4] = 255; SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->device->request_queue->rq_timeout, 0, 1); if (SRpnt == NULL) return (STp->buffer)->syscall_result; st_release_request(SRpnt); return STp->buffer->syscall_result; } /* Send the mode page in the tape buffer to the drive. Assumes that the mode data in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */ static int write_mode_page(struct scsi_tape *STp, int page, int slow) { int pgo; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; int timeout; memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = MODE_SELECT; cmd[1] = MODE_SELECT_PAGE_FORMAT; pgo = MODE_HEADER_LENGTH + (STp->buffer)->b_data[MH_OFF_BDESCS_LENGTH]; cmd[4] = pgo + (STp->buffer)->b_data[pgo + MP_OFF_PAGE_LENGTH] + 2; /* Clear reserved fields */ (STp->buffer)->b_data[MH_OFF_DATA_LENGTH] = 0; (STp->buffer)->b_data[MH_OFF_MEDIUM_TYPE] = 0; (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP; (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; timeout = slow ? STp->long_timeout : STp->device->request_queue->rq_timeout; SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE, timeout, 0, 1); if (SRpnt == NULL) return (STp->buffer)->syscall_result; st_release_request(SRpnt); return STp->buffer->syscall_result; } #define COMPRESSION_PAGE 0x0f #define COMPRESSION_PAGE_LENGTH 16 #define CP_OFF_DCE_DCC 2 #define CP_OFF_C_ALGO 7 #define DCE_MASK 0x80 #define DCC_MASK 0x40 #define RED_MASK 0x60 /* Control the compression with mode page 15. Algorithm not changed if zero. 
The block descriptors are read and written because Sony SDT-7000 does not work without this (suggestion from Michael Schaefer <[email protected]>). Including block descriptors should not cause any harm to other drives. */ static int st_compression(struct scsi_tape * STp, int state) { int retval; int mpoffs; /* Offset to mode page start */ unsigned char *b_data = (STp->buffer)->b_data; if (STp->ready != ST_READY) return (-EIO); /* Read the current page contents */ retval = read_mode_page(STp, COMPRESSION_PAGE, 0); if (retval) { DEBC_printk(STp, "Compression mode page not supported.\n"); return (-EIO); } mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH]; DEBC_printk(STp, "Compression state is %d.\n", (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0)); /* Check if compression can be changed */ if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) { DEBC_printk(STp, "Compression not supported.\n"); return (-EIO); } /* Do the change */ if (state) { b_data[mpoffs + CP_OFF_DCE_DCC] |= DCE_MASK; if (STp->c_algo != 0) b_data[mpoffs + CP_OFF_C_ALGO] = STp->c_algo; } else { b_data[mpoffs + CP_OFF_DCE_DCC] &= ~DCE_MASK; if (STp->c_algo != 0) b_data[mpoffs + CP_OFF_C_ALGO] = 0; /* no compression */ } retval = write_mode_page(STp, COMPRESSION_PAGE, 0); if (retval) { DEBC_printk(STp, "Compression change failed.\n"); return (-EIO); } DEBC_printk(STp, "Compression state changed to %d.\n", state); STp->compression_changed = 1; return 0; } /* Process the load and unload commands (does unload if the load code is zero) */ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code) { int retval = (-EIO), timeout; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_partstat *STps; struct st_request *SRpnt; if (STp->ready != ST_READY && !load_code) { if (STp->ready == ST_NO_TAPE) return (-ENOMEDIUM); else return (-EIO); } memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = START_STOP; if (load_code) cmd[4] |= 1; /* * If arg >= 1 && arg <= 6 Enhanced load/unload in HP C1553A */ if (load_code >= 1 + MT_ST_HPLOADER_OFFSET && load_code <= 6 + MT_ST_HPLOADER_OFFSET) { DEBC_printk(STp, " Enhanced %sload slot %2d.\n", (cmd[4]) ? "" : "un", load_code - MT_ST_HPLOADER_OFFSET); cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */ } if (STp->immediate) { cmd[1] = 1; /* Don't wait for completion */ timeout = STp->device->request_queue->rq_timeout; } else timeout = STp->long_timeout; DEBC( if (!load_code) st_printk(ST_DEB_MSG, STp, "Unloading tape.\n"); else st_printk(ST_DEB_MSG, STp, "Loading tape.\n"); ); SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, timeout, MAX_RETRIES, 1); if (!SRpnt) return (STp->buffer)->syscall_result; retval = (STp->buffer)->syscall_result; st_release_request(SRpnt); if (!retval) { /* SCSI command successful */ if (!load_code) { STp->rew_at_close = 0; STp->ready = ST_NO_TAPE; } else { STp->rew_at_close = STp->autorew_dev; retval = check_tape(STp, filp); if (retval > 0) retval = 0; } } else { STps = &(STp->ps[STp->partition]); STps->drv_file = STps->drv_block = (-1); } return retval; } #if DEBUG #define ST_DEB_FORWARD 0 #define ST_DEB_BACKWARD 1 static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) { s32 sc; if (!debugging) return; sc = sign_extend32(get_unaligned_be24(&cmd[2]), 23); if (direction) sc = -sc; st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n", direction ? 
"backward" : "forward", sc, units); } #else #define ST_DEB_FORWARD 0 #define ST_DEB_BACKWARD 1 static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) {} #endif /* Internal ioctl function */ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long arg) { int timeout; long ltmp; int ioctl_result; int chg_eof = 1; unsigned char cmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; struct st_partstat *STps; int fileno, blkno, at_sm, undone; int datalen = 0, direction = DMA_NONE; WARN_ON(STp->buffer->do_dio != 0); if (STp->ready != ST_READY) { if (STp->ready == ST_NO_TAPE) return (-ENOMEDIUM); else return (-EIO); } timeout = STp->long_timeout; STps = &(STp->ps[STp->partition]); fileno = STps->drv_file; blkno = STps->drv_block; at_sm = STps->at_sm; memset(cmd, 0, MAX_COMMAND_SIZE); switch (cmd_in) { case MTFSFM: chg_eof = 0; /* Changed from the FSF after this */ fallthrough; case MTFSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ cmd[2] = (arg >> 16); cmd[3] = (arg >> 8); cmd[4] = arg; deb_space_print(STp, ST_DEB_FORWARD, "filemarks", cmd); if (fileno >= 0) fileno += arg; blkno = 0; at_sm &= (arg == 0); break; case MTBSFM: chg_eof = 0; /* Changed from the FSF after this */ fallthrough; case MTBSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ ltmp = (-arg); cmd[2] = (ltmp >> 16); cmd[3] = (ltmp >> 8); cmd[4] = ltmp; deb_space_print(STp, ST_DEB_BACKWARD, "filemarks", cmd); if (fileno >= 0) fileno -= arg; blkno = (-1); /* We can't know the block number */ at_sm &= (arg == 0); break; case MTFSR: cmd[0] = SPACE; cmd[1] = 0x00; /* Space Blocks */ cmd[2] = (arg >> 16); cmd[3] = (arg >> 8); cmd[4] = arg; deb_space_print(STp, ST_DEB_FORWARD, "blocks", cmd); if (blkno >= 0) blkno += arg; at_sm &= (arg == 0); break; case MTBSR: cmd[0] = SPACE; cmd[1] = 0x00; /* Space Blocks */ ltmp = (-arg); cmd[2] = (ltmp >> 16); cmd[3] = (ltmp >> 8); cmd[4] = ltmp; deb_space_print(STp, ST_DEB_BACKWARD, "blocks", cmd); if (blkno >= 0) blkno -= arg; at_sm &= (arg == 0); break; case MTFSS: cmd[0] = SPACE; cmd[1] = 0x04; /* Space Setmarks */ cmd[2] = (arg >> 16); cmd[3] = (arg >> 8); cmd[4] = arg; deb_space_print(STp, ST_DEB_FORWARD, "setmarks", cmd); if (arg != 0) { blkno = fileno = (-1); at_sm = 1; } break; case MTBSS: cmd[0] = SPACE; cmd[1] = 0x04; /* Space Setmarks */ ltmp = (-arg); cmd[2] = (ltmp >> 16); cmd[3] = (ltmp >> 8); cmd[4] = ltmp; deb_space_print(STp, ST_DEB_BACKWARD, "setmarks", cmd); if (arg != 0) { blkno = fileno = (-1); at_sm = 1; } break; case MTWEOF: case MTWEOFI: case MTWSM: if (STp->write_prot) return (-EACCES); cmd[0] = WRITE_FILEMARKS; if (cmd_in == MTWSM) cmd[1] = 2; if (cmd_in == MTWEOFI || (cmd_in == MTWEOF && STp->immediate_filemark)) cmd[1] |= 1; cmd[2] = (arg >> 16); cmd[3] = (arg >> 8); cmd[4] = arg; timeout = STp->device->request_queue->rq_timeout; DEBC( if (cmd_in != MTWSM) st_printk(ST_DEB_MSG, STp, "Writing %d filemarks.\n", cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); else st_printk(ST_DEB_MSG, STp, "Writing %d setmarks.\n", cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); ) if (fileno >= 0) fileno += arg; blkno = 0; at_sm = (cmd_in == MTWSM); break; case MTREW: cmd[0] = REZERO_UNIT; if (STp->immediate) { cmd[1] = 1; /* Don't wait for completion */ timeout = STp->device->request_queue->rq_timeout; } DEBC_printk(STp, "Rewinding tape.\n"); fileno = blkno = at_sm = 0; break; case MTNOP: DEBC_printk(STp, "No op on tape.\n"); return 0; /* Should do something ? 
*/ case MTRETEN: cmd[0] = START_STOP; if (STp->immediate) { cmd[1] = 1; /* Don't wait for completion */ timeout = STp->device->request_queue->rq_timeout; } cmd[4] = 3; DEBC_printk(STp, "Retensioning tape.\n"); fileno = blkno = at_sm = 0; break; case MTEOM: if (!STp->fast_mteom) { /* space to the end of tape */ ioctl_result = st_int_ioctl(STp, MTFSF, 0x7fffff); fileno = STps->drv_file; if (STps->eof >= ST_EOD_1) return 0; /* The next lines would hide the number of spaced FileMarks That's why I inserted the previous lines. I had no luck with detecting EOM with FSF, so we go now to EOM. Joerg Weule */ } else fileno = (-1); cmd[0] = SPACE; cmd[1] = 3; DEBC_printk(STp, "Spacing to end of recorded medium.\n"); blkno = -1; at_sm = 0; break; case MTERASE: if (STp->write_prot) return (-EACCES); cmd[0] = ERASE; cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */ if (STp->immediate) { cmd[1] |= 2; /* Don't wait for completion */ timeout = STp->device->request_queue->rq_timeout; } else timeout = STp->long_timeout * 8; DEBC_printk(STp, "Erasing tape.\n"); fileno = blkno = at_sm = 0; break; case MTSETBLK: /* Set block length */ case MTSETDENSITY: /* Set tape density */ case MTSETDRVBUFFER: /* Set drive buffering */ case SET_DENS_AND_BLK: /* Set density and block size */ chg_eof = 0; if (STp->dirty || (STp->buffer)->buffer_bytes != 0) return (-EIO); /* Not allowed if data in buffer */ if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) && (arg & MT_ST_BLKSIZE_MASK) != 0 && STp->max_block > 0 && ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block || (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) { st_printk(KERN_WARNING, STp, "Illegal block size.\n"); return (-EINVAL); } cmd[0] = MODE_SELECT; if ((STp->use_pf & USE_PF)) cmd[1] = MODE_SELECT_PAGE_FORMAT; cmd[4] = datalen = 12; direction = DMA_TO_DEVICE; memset((STp->buffer)->b_data, 0, 12); if (cmd_in == MTSETDRVBUFFER) (STp->buffer)->b_data[2] = (arg & 7) << 4; else (STp->buffer)->b_data[2] = STp->drv_buffer << 4; (STp->buffer)->b_data[3] = 8; /* block descriptor length */ if (cmd_in == MTSETDENSITY) { (STp->buffer)->b_data[4] = arg; STp->density_changed = 1; /* At least we tried ;-) */ } else if (cmd_in == SET_DENS_AND_BLK) (STp->buffer)->b_data[4] = arg >> 24; else (STp->buffer)->b_data[4] = STp->density; if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { ltmp = arg & MT_ST_BLKSIZE_MASK; if (cmd_in == MTSETBLK) STp->blksize_changed = 1; /* At least we tried ;-) */ } else ltmp = STp->block_size; (STp->buffer)->b_data[9] = (ltmp >> 16); (STp->buffer)->b_data[10] = (ltmp >> 8); (STp->buffer)->b_data[11] = ltmp; timeout = STp->device->request_queue->rq_timeout; DEBC( if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) st_printk(ST_DEB_MSG, STp, "Setting block size to %d bytes.\n", (STp->buffer)->b_data[9] * 65536 + (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11]); if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK) st_printk(ST_DEB_MSG, STp, "Setting density code to %x.\n", (STp->buffer)->b_data[4]); if (cmd_in == MTSETDRVBUFFER) st_printk(ST_DEB_MSG, STp, "Setting drive buffer code to %d.\n", ((STp->buffer)->b_data[2] >> 4) & 7); ) break; default: return (-ENOSYS); } SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction, timeout, MAX_RETRIES, 1); if (!SRpnt) return (STp->buffer)->syscall_result; ioctl_result = (STp->buffer)->syscall_result; if (!ioctl_result) { /* SCSI command successful */ st_release_request(SRpnt); SRpnt = NULL; STps->drv_block = blkno; STps->drv_file = fileno; STps->at_sm = at_sm; if (cmd_in == MTBSFM) 
ioctl_result = st_int_ioctl(STp, MTFSF, 1); else if (cmd_in == MTFSFM) ioctl_result = st_int_ioctl(STp, MTBSF, 1); if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { STp->block_size = arg & MT_ST_BLKSIZE_MASK; if (STp->block_size != 0) { (STp->buffer)->buffer_blocks = (STp->buffer)->buffer_size / STp->block_size; } (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; if (cmd_in == SET_DENS_AND_BLK) STp->density = arg >> MT_ST_DENSITY_SHIFT; } else if (cmd_in == MTSETDRVBUFFER) STp->drv_buffer = (arg & 7); else if (cmd_in == MTSETDENSITY) STp->density = arg; if (cmd_in == MTEOM) STps->eof = ST_EOD; else if (cmd_in == MTFSF) STps->eof = ST_FM; else if (chg_eof) STps->eof = ST_NOEOF; if (cmd_in == MTWEOF || cmd_in == MTWEOFI) STps->rw = ST_IDLE; /* prevent automatic WEOF at close */ } else { /* SCSI command was not completely successful. Don't return from this block without releasing the SCSI command block! */ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; if (cmdstatp->flags & SENSE_EOM) { if (cmd_in != MTBSF && cmd_in != MTBSFM && cmd_in != MTBSR && cmd_in != MTBSS) STps->eof = ST_EOM_OK; STps->drv_block = 0; } if (cmdstatp->remainder_valid) undone = (int)cmdstatp->uremainder64; else undone = 0; if ((cmd_in == MTWEOF || cmd_in == MTWEOFI) && cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { if (cmdstatp->sense_hdr.sense_key == NO_SENSE || cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) { ioctl_result = 0; /* EOF(s) written successfully at EOM */ STps->eof = ST_NOEOF; } else { /* Writing EOF(s) failed */ if (fileno >= 0) fileno -= undone; if (undone < arg) STps->eof = ST_NOEOF; } STps->drv_file = fileno; } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) { if (fileno >= 0) STps->drv_file = fileno - undone; else STps->drv_file = fileno; STps->drv_block = -1; STps->eof = ST_NOEOF; } else if ((cmd_in == MTBSF) || (cmd_in == MTBSFM)) { if (arg > 0 && undone < 0) /* Some drives get this wrong */ undone = (-undone); if (STps->drv_file >= 0) STps->drv_file = fileno + undone; STps->drv_block = 0; STps->eof = ST_NOEOF; } else if (cmd_in == MTFSR) { if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */ if (STps->drv_file >= 0) STps->drv_file++; STps->drv_block = 0; STps->eof = ST_FM; } else { if (blkno >= undone) STps->drv_block = blkno - undone; else STps->drv_block = (-1); STps->eof = ST_NOEOF; } } else if (cmd_in == MTBSR) { if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */ STps->drv_file--; STps->drv_block = (-1); } else { if (arg > 0 && undone < 0) /* Some drives get this wrong */ undone = (-undone); if (STps->drv_block >= 0) STps->drv_block = blkno + undone; } STps->eof = ST_NOEOF; } else if (cmd_in == MTEOM) { STps->drv_file = (-1); STps->drv_block = (-1); STps->eof = ST_EOD; } else if (cmd_in == MTSETBLK || cmd_in == MTSETDENSITY || cmd_in == MTSETDRVBUFFER || cmd_in == SET_DENS_AND_BLK) { if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; st_release_request(SRpnt); SRpnt = NULL; return st_int_ioctl(STp, cmd_in, arg); } } else if (chg_eof) STps->eof = ST_NOEOF; if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) STps->eof = ST_EOD; st_release_request(SRpnt); SRpnt = NULL; } return ioctl_result; } /* Get the tape position. If bt == 2, arg points into a kernel space mt_loc structure. 
*/ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partition, int logical) { int result; unsigned char scmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; if (STp->ready != ST_READY) return (-EIO); memset(scmd, 0, MAX_COMMAND_SIZE); if ((STp->device)->scsi_level < SCSI_2) { scmd[0] = QFA_REQUEST_BLOCK; scmd[4] = 3; } else { scmd[0] = READ_POSITION; if (!logical && !STp->scsi2_logical) scmd[1] = 1; } SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE, STp->device->request_queue->rq_timeout, MAX_READY_RETRIES, 1); if (!SRpnt) return (STp->buffer)->syscall_result; if ((STp->buffer)->syscall_result != 0 || (STp->device->scsi_level >= SCSI_2 && ((STp->buffer)->b_data[0] & 4) != 0)) { *block = *partition = 0; DEBC_printk(STp, " Can't read tape position.\n"); result = (-EIO); } else { result = 0; if ((STp->device)->scsi_level < SCSI_2) { *block = ((STp->buffer)->b_data[0] << 16) + ((STp->buffer)->b_data[1] << 8) + (STp->buffer)->b_data[2]; *partition = 0; } else { *block = ((STp->buffer)->b_data[4] << 24) + ((STp->buffer)->b_data[5] << 16) + ((STp->buffer)->b_data[6] << 8) + (STp->buffer)->b_data[7]; *partition = (STp->buffer)->b_data[1]; if (((STp->buffer)->b_data[0] & 0x80) && (STp->buffer)->b_data[1] == 0) /* BOP of partition 0 */ STp->ps[0].drv_block = STp->ps[0].drv_file = 0; } DEBC_printk(STp, "Got tape pos. blk %d part %d.\n", *block, *partition); } st_release_request(SRpnt); SRpnt = NULL; return result; } /* Set the tape block and partition. Negative partition means that only the block should be set in vendor specific way. */ static int set_location(struct scsi_tape *STp, unsigned int block, int partition, int logical) { struct st_partstat *STps; int result, p; unsigned int blk; int timeout; unsigned char scmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; if (STp->ready != ST_READY) return (-EIO); timeout = STp->long_timeout; STps = &(STp->ps[STp->partition]); DEBC_printk(STp, "Setting block to %d and partition to %d.\n", block, partition); DEB(if (partition < 0) return (-EIO); ) /* Update the location at the partition we are leaving */ if ((!STp->can_partitions && partition != 0) || partition >= ST_NBR_PARTITIONS) return (-EINVAL); if (partition != STp->partition) { if (get_location(STp, &blk, &p, 1)) STps->last_block_valid = 0; else { STps->last_block_valid = 1; STps->last_block_visited = blk; DEBC_printk(STp, "Visited block %d for " "partition %d saved.\n", blk, STp->partition); } } memset(scmd, 0, MAX_COMMAND_SIZE); if ((STp->device)->scsi_level < SCSI_2) { scmd[0] = QFA_SEEK_BLOCK; scmd[2] = (block >> 16); scmd[3] = (block >> 8); scmd[4] = block; scmd[5] = 0; } else { scmd[0] = SEEK_10; scmd[3] = (block >> 24); scmd[4] = (block >> 16); scmd[5] = (block >> 8); scmd[6] = block; if (!logical && !STp->scsi2_logical) scmd[1] = 4; if (STp->partition != partition) { scmd[1] |= 2; scmd[8] = partition; DEBC_printk(STp, "Trying to change partition " "from %d to %d\n", STp->partition, partition); } } if (STp->immediate) { scmd[1] |= 1; /* Don't wait for completion */ timeout = STp->device->request_queue->rq_timeout; } SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE, timeout, MAX_READY_RETRIES, 1); if (!SRpnt) return (STp->buffer)->syscall_result; STps->drv_block = STps->drv_file = (-1); STps->eof = ST_NOEOF; if ((STp->buffer)->syscall_result != 0) { result = (-EIO); if (STp->can_partitions && (STp->device)->scsi_level >= SCSI_2 && (p = find_partition(STp)) >= 0) STp->partition = p; } else { if (STp->can_partitions) { STp->partition = partition; STps = 
&(STp->ps[partition]); if (!STps->last_block_valid || STps->last_block_visited != block) { STps->at_sm = 0; STps->rw = ST_IDLE; } } else STps->at_sm = 0; if (block == 0) STps->drv_block = STps->drv_file = 0; result = 0; } st_release_request(SRpnt); SRpnt = NULL; return result; } /* Find the current partition number for the drive status. Called from open and returns either the partition number or a negative error code. */ static int find_partition(struct scsi_tape *STp) { int i, partition; unsigned int block; if ((i = get_location(STp, &block, &partition, 1)) < 0) return i; if (partition >= ST_NBR_PARTITIONS) return (-EIO); return partition; } /* Change the partition if necessary */ static int switch_partition(struct scsi_tape *STp) { struct st_partstat *STps; if (STp->partition == STp->new_partition) return 0; STps = &(STp->ps[STp->new_partition]); if (!STps->last_block_valid) STps->last_block_visited = 0; return set_location(STp, STps->last_block_visited, STp->new_partition, 1); } /* Functions for reading and writing the medium partition mode page. */ #define PART_PAGE 0x11 #define PART_PAGE_FIXED_LENGTH 8 #define PP_OFF_MAX_ADD_PARTS 2 #define PP_OFF_NBR_ADD_PARTS 3 #define PP_OFF_FLAGS 4 #define PP_OFF_PART_UNITS 6 #define PP_OFF_RESERVED 7 #define PP_BIT_IDP 0x20 #define PP_BIT_FDP 0x80 #define PP_MSK_PSUM_MB 0x10 #define PP_MSK_PSUM_UNITS 0x18 #define PP_MSK_POFM 0x04 /* Get the number of partitions on the tape. As a side effect reads the mode page into the tape buffer. */ static int nbr_partitions(struct scsi_tape *STp) { int result; if (STp->ready != ST_READY) return (-EIO); result = read_mode_page(STp, PART_PAGE, 1); if (result) { DEBC_printk(STp, "Can't read medium partition page.\n"); result = (-EIO); } else { result = (STp->buffer)->b_data[MODE_HEADER_LENGTH + PP_OFF_NBR_ADD_PARTS] + 1; DEBC_printk(STp, "Number of partitions %d.\n", result); } return result; } static int format_medium(struct scsi_tape *STp, int format) { int result = 0; int timeout = STp->long_timeout; unsigned char scmd[MAX_COMMAND_SIZE]; struct st_request *SRpnt; memset(scmd, 0, MAX_COMMAND_SIZE); scmd[0] = FORMAT_UNIT; scmd[2] = format; if (STp->immediate) { scmd[1] |= 1; /* Don't wait for completion */ timeout = STp->device->request_queue->rq_timeout; } DEBC_printk(STp, "Sending FORMAT MEDIUM\n"); SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE, timeout, MAX_RETRIES, 1); if (!SRpnt) result = STp->buffer->syscall_result; return result; } /* Partition the tape into two partitions if size > 0 or one partition if size == 0. The block descriptors are read and written because Sony SDT-7000 does not work without this (suggestion from Michael Schaefer <[email protected]>). My HP C1533A drive returns only one partition size field. This is used to set the size of partition 1. There is no size field for the default partition. Michael Schaefer's Sony SDT-7000 returns two descriptors and the second is used to set the size of partition 1 (this is what the SCSI-3 standard specifies). The following algorithm is used to accommodate both drives: if the number of partition size fields is greater than the maximum number of additional partitions in the mode page, the second field is used. Otherwise the first field is used. For Seagate DDS drives the page length must be 8 when no partitions are defined and 10 when 1 partition is defined (information from Eric Lee Green). This is acceptable also to some other old drives and enforced if the first partition size field is used for the first additional partition size.
For drives that advertize SCSI-3 or newer, use the SSC-3 methods. */ static int partition_tape(struct scsi_tape *STp, int size) { int result; int target_partition; bool scsi3 = STp->device->scsi_level >= SCSI_3, needs_format = false; int pgo, psd_cnt, psdo; int psum = PP_MSK_PSUM_MB, units = 0; unsigned char *bp; result = read_mode_page(STp, PART_PAGE, 0); if (result) { DEBC_printk(STp, "Can't read partition mode page.\n"); return result; } target_partition = 1; if (size < 0) { target_partition = 0; size = -size; } /* The mode page is in the buffer. Let's modify it and write it. */ bp = (STp->buffer)->b_data; pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH]; DEBC_printk(STp, "Partition page length is %d bytes.\n", bp[pgo + MP_OFF_PAGE_LENGTH] + 2); psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2; if (scsi3) { needs_format = (bp[pgo + PP_OFF_FLAGS] & PP_MSK_POFM) != 0; if (needs_format && size == 0) { /* No need to write the mode page when clearing * partitioning */ DEBC_printk(STp, "Formatting tape with one partition.\n"); result = format_medium(STp, 0); goto out; } if (needs_format) /* Leave the old value for HP DATs claiming SCSI_3 */ psd_cnt = 2; if ((bp[pgo + PP_OFF_FLAGS] & PP_MSK_PSUM_UNITS) == PP_MSK_PSUM_UNITS) { /* Use units scaling for large partitions if the device * suggests it and no precision lost. Required for IBM * TS1140/50 drives that don't support MB units. */ if (size >= 1000 && (size % 1000) == 0) { size /= 1000; psum = PP_MSK_PSUM_UNITS; units = 9; /* GB */ } } /* Try it anyway if too large to specify in MB */ if (psum == PP_MSK_PSUM_MB && size >= 65534) { size /= 1000; psum = PP_MSK_PSUM_UNITS; units = 9; /* GB */ } } if (size >= 65535 || /* Does not fit into two bytes */ (target_partition == 0 && psd_cnt < 2)) { result = -EINVAL; goto out; } psdo = pgo + PART_PAGE_FIXED_LENGTH; /* The second condition is for HP DDS which use only one partition size * descriptor */ if (target_partition > 0 && (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS] || bp[pgo + PP_OFF_MAX_ADD_PARTS] != 1)) { bp[psdo] = bp[psdo + 1] = 0xff; /* Rest to partition 0 */ psdo += 2; } memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2); DEBC_printk(STp, "psd_cnt %d, max.parts %d, nbr_parts %d\n", psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS], bp[pgo + PP_OFF_NBR_ADD_PARTS]); if (size == 0) { bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0; if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS]) bp[pgo + MP_OFF_PAGE_LENGTH] = 6; DEBC_printk(STp, "Formatting tape with one partition.\n"); } else { bp[psdo] = (size >> 8) & 0xff; bp[psdo + 1] = size & 0xff; if (target_partition == 0) bp[psdo + 2] = bp[psdo + 3] = 0xff; bp[pgo + 3] = 1; if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8) bp[pgo + MP_OFF_PAGE_LENGTH] = 8; DEBC_printk(STp, "Formatting tape with two partitions (%i = %d MB).\n", target_partition, units > 0 ? 
size * 1000 : size); } bp[pgo + PP_OFF_PART_UNITS] = 0; bp[pgo + PP_OFF_RESERVED] = 0; if (size != 1 || units != 0) { bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | psum | (bp[pgo + PP_OFF_FLAGS] & 0x07); bp[pgo + PP_OFF_PART_UNITS] = units; } else bp[pgo + PP_OFF_FLAGS] = PP_BIT_FDP | (bp[pgo + PP_OFF_FLAGS] & 0x1f); bp[pgo + MP_OFF_PAGE_LENGTH] = 6 + psd_cnt * 2; result = write_mode_page(STp, PART_PAGE, 1); if (!result && needs_format) result = format_medium(STp, 1); if (result) { st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n"); result = (-EIO); } out: return result; } /* The ioctl command */ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; if (mutex_lock_interruptible(&STp->lock)) return -ERESTARTSYS; DEB( if (debugging && !STp->in_use) { st_printk(ST_DEB_MSG, STp, "Incorrect device.\n"); retval = (-EIO); goto out; } ) /* end DEB */ STm = &(STp->modes[STp->current_mode]); STps = &(STp->ps[STp->partition]); /* * If we are in the middle of error recovery, don't let anyone * else try and use this device. Also, if error recovery fails, it * may try and take the device offline, in which case all further * access to the device is prohibited. */ retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in, file->f_flags & O_NDELAY); if (retval) goto out; cmd_type = _IOC_TYPE(cmd_in); cmd_nr = _IOC_NR(cmd_in); if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) { struct mtop mtc; if (_IOC_SIZE(cmd_in) != sizeof(mtc)) { retval = (-EINVAL); goto out; } i = copy_from_user(&mtc, p, sizeof(struct mtop)); if (i) { retval = (-EFAULT); goto out; } if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) { st_printk(KERN_WARNING, STp, "MTSETDRVBUFFER only allowed for root.\n"); retval = (-EPERM); goto out; } if (!STm->defined && (mtc.mt_op != MTSETDRVBUFFER && (mtc.mt_count & MT_ST_OPTIONS) == 0)) { retval = (-ENXIO); goto out; } if (!STp->pos_unknown) { if (STps->eof == ST_FM_HIT) { if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM || mtc.mt_op == MTEOM) { mtc.mt_count -= 1; if (STps->drv_file >= 0) STps->drv_file += 1; } else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) { mtc.mt_count += 1; if (STps->drv_file >= 0) STps->drv_file += 1; } } if (mtc.mt_op == MTSEEK) { /* Old position must be restored if partition will be changed */ i = !STp->can_partitions || (STp->new_partition != STp->partition); } else { i = mtc.mt_op == MTREW || mtc.mt_op == MTOFFL || mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM || mtc.mt_op == MTLOCK || mtc.mt_op == MTLOAD || mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM || mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM || mtc.mt_op == MTCOMPRESSION; } i = flush_buffer(STp, i); if (i < 0) { retval = i; goto out; } if (STps->rw == ST_WRITING && (mtc.mt_op == MTREW || mtc.mt_op == MTOFFL || mtc.mt_op == MTSEEK || mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM)) { i = st_int_ioctl(STp, MTWEOF, 1); if (i < 0) { retval = i; goto out; } if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) mtc.mt_count++; STps->rw = ST_IDLE; } } else { /* * If there was a bus reset, block further access * to this device. If the user wants to rewind the tape, * then reset the flag and allow access again. 
*/ if (mtc.mt_op != MTREW && mtc.mt_op != MTOFFL && mtc.mt_op != MTRETEN && mtc.mt_op != MTERASE && mtc.mt_op != MTSEEK && mtc.mt_op != MTEOM) { retval = (-EIO); goto out; } reset_state(STp); /* remove this when the midlevel properly clears was_reset */ STp->device->was_reset = 0; } if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK && mtc.mt_op != MTSETDENSITY && mtc.mt_op != MTWSM && mtc.mt_op != MTSETDRVBUFFER && mtc.mt_op != MTSETPART) STps->rw = ST_IDLE; /* Prevent automatic WEOF and fsf */ if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED) do_door_lock(STp, 0); /* Ignore result! */ if (mtc.mt_op == MTSETDRVBUFFER && (mtc.mt_count & MT_ST_OPTIONS) != 0) { retval = st_set_options(STp, mtc.mt_count); goto out; } if (mtc.mt_op == MTSETPART) { if (!STp->can_partitions || mtc.mt_count < 0 || mtc.mt_count >= ST_NBR_PARTITIONS) { retval = (-EINVAL); goto out; } if (mtc.mt_count >= STp->nbr_partitions && (STp->nbr_partitions = nbr_partitions(STp)) < 0) { retval = (-EIO); goto out; } if (mtc.mt_count >= STp->nbr_partitions) { retval = (-EINVAL); goto out; } STp->new_partition = mtc.mt_count; retval = 0; goto out; } if (mtc.mt_op == MTMKPART) { if (!STp->can_partitions) { retval = (-EINVAL); goto out; } i = do_load_unload(STp, file, 1); if (i < 0) { retval = i; goto out; } i = partition_tape(STp, mtc.mt_count); if (i < 0) { retval = i; goto out; } for (i = 0; i < ST_NBR_PARTITIONS; i++) { STp->ps[i].rw = ST_IDLE; STp->ps[i].at_sm = 0; STp->ps[i].last_block_valid = 0; } STp->partition = STp->new_partition = 0; STp->nbr_partitions = mtc.mt_count != 0 ? 2 : 1; STps->drv_block = STps->drv_file = 0; retval = 0; goto out; } if (mtc.mt_op == MTSEEK) { i = set_location(STp, mtc.mt_count, STp->new_partition, 0); if (!STp->can_partitions) STp->ps[0].rw = ST_IDLE; retval = i; goto out; } if (mtc.mt_op == MTUNLOAD || mtc.mt_op == MTOFFL) { retval = do_load_unload(STp, file, 0); goto out; } if (mtc.mt_op == MTLOAD) { retval = do_load_unload(STp, file, max(1, mtc.mt_count)); goto out; } if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) { retval = do_door_lock(STp, (mtc.mt_op == MTLOCK)); goto out; } if (STp->can_partitions && STp->ready == ST_READY && (i = switch_partition(STp)) < 0) { retval = i; goto out; } if (mtc.mt_op == MTCOMPRESSION) retval = st_compression(STp, (mtc.mt_count & 1)); else retval = st_int_ioctl(STp, mtc.mt_op, mtc.mt_count); goto out; } if (!STm->defined) { retval = (-ENXIO); goto out; } if ((i = flush_buffer(STp, 0)) < 0) { retval = i; goto out; } if (STp->can_partitions && (i = switch_partition(STp)) < 0) { retval = i; goto out; } if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { struct mtget mt_status; if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { retval = (-EINVAL); goto out; } mt_status.mt_type = STp->tape_type; mt_status.mt_dsreg = ((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) | ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); mt_status.mt_blkno = STps->drv_block; mt_status.mt_fileno = STps->drv_file; if (STp->block_size != 0) { if (STps->rw == ST_WRITING) mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size; else if (STps->rw == ST_READING) mt_status.mt_blkno -= ((STp->buffer)->buffer_bytes + STp->block_size - 1) / STp->block_size; } mt_status.mt_gstat = 0; if (STp->drv_write_prot) mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff); if (mt_status.mt_blkno == 0) { if (mt_status.mt_fileno == 0) mt_status.mt_gstat |= GMT_BOT(0xffffffff); else mt_status.mt_gstat |= GMT_EOF(0xffffffff); } mt_status.mt_erreg = 
(STp->recover_reg << MT_ST_SOFTERR_SHIFT); mt_status.mt_resid = STp->partition; if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR) mt_status.mt_gstat |= GMT_EOT(0xffffffff); else if (STps->eof >= ST_EOM_OK) mt_status.mt_gstat |= GMT_EOD(0xffffffff); if (STp->density == 1) mt_status.mt_gstat |= GMT_D_800(0xffffffff); else if (STp->density == 2) mt_status.mt_gstat |= GMT_D_1600(0xffffffff); else if (STp->density == 3) mt_status.mt_gstat |= GMT_D_6250(0xffffffff); if (STp->ready == ST_READY) mt_status.mt_gstat |= GMT_ONLINE(0xffffffff); if (STp->ready == ST_NO_TAPE) mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff); if (STps->at_sm) mt_status.mt_gstat |= GMT_SM(0xffffffff); if (STm->do_async_writes || (STm->do_buffer_writes && STp->block_size != 0) || STp->drv_buffer != 0) mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff); if (STp->cleaning_req) mt_status.mt_gstat |= GMT_CLN(0xffffffff); retval = put_user_mtget(p, &mt_status); if (retval) goto out; STp->recover_reg = 0; /* Clear after read */ goto out; } /* End of MTIOCGET */ if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) { struct mtpos mt_pos; if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) { retval = (-EINVAL); goto out; } if ((i = get_location(STp, &blk, &bt, 0)) < 0) { retval = i; goto out; } mt_pos.mt_blkno = blk; retval = put_user_mtpos(p, &mt_pos); goto out; } mutex_unlock(&STp->lock); switch (cmd_in) { case SG_IO: case SCSI_IOCTL_SEND_COMMAND: case CDROM_SEND_PACKET: if (!capable(CAP_SYS_RAWIO)) return -EPERM; break; default: break; } retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p); if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */ STp->rew_at_close = 0; STp->ready = ST_NO_TAPE; } return retval; out: mutex_unlock(&STp->lock); return retval; } #ifdef CONFIG_COMPAT static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) { /* argument conversion is handled using put_user_mtpos/put_user_mtget */ switch (cmd_in) { case MTIOCPOS32: cmd_in = MTIOCPOS; break; case MTIOCGET32: cmd_in = MTIOCGET; break; } return st_ioctl(file, cmd_in, arg); } #endif /* Try to allocate a new tape buffer. Calling function must not hold dev_arr_lock. 
*/ static struct st_buffer *new_tape_buffer(int max_sg) { struct st_buffer *tb; tb = kzalloc(sizeof(struct st_buffer), GFP_KERNEL); if (!tb) { printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); return NULL; } tb->frp_segs = 0; tb->use_sg = max_sg; tb->buffer_size = 0; tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *), GFP_KERNEL); if (!tb->reserved_pages) { kfree(tb); return NULL; } return tb; } /* Try to allocate enough space in the tape buffer */ #define ST_MAX_ORDER 6 static int enlarge_buffer(struct st_buffer * STbuffer, int new_size) { int segs, max_segs, b_size, order, got; gfp_t priority; if (new_size <= STbuffer->buffer_size) return 1; if (STbuffer->buffer_size <= PAGE_SIZE) normalize_buffer(STbuffer); /* Avoid extra segment */ max_segs = STbuffer->use_sg; priority = GFP_KERNEL | __GFP_NOWARN; if (STbuffer->cleared) priority |= __GFP_ZERO; if (STbuffer->frp_segs) { order = STbuffer->reserved_page_order; b_size = PAGE_SIZE << order; } else { for (b_size = PAGE_SIZE, order = 0; order < ST_MAX_ORDER && max_segs * (PAGE_SIZE << order) < new_size; order++, b_size *= 2) ; /* empty */ STbuffer->reserved_page_order = order; } if (max_segs * (PAGE_SIZE << order) < new_size) { if (order == ST_MAX_ORDER) return 0; normalize_buffer(STbuffer); return enlarge_buffer(STbuffer, new_size); } for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; segs < max_segs && got < new_size;) { struct page *page; page = alloc_pages(priority, order); if (!page) { DEB(STbuffer->buffer_size = got); normalize_buffer(STbuffer); return 0; } STbuffer->frp_segs += 1; got += b_size; STbuffer->buffer_size = got; STbuffer->reserved_pages[segs] = page; segs++; } STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); return 1; } /* Make sure that no data from previous user is in the internal buffer */ static void clear_buffer(struct st_buffer * st_bp) { int i; for (i=0; i < st_bp->frp_segs; i++) memset(page_address(st_bp->reserved_pages[i]), 0, PAGE_SIZE << st_bp->reserved_page_order); st_bp->cleared = 1; } /* Release the extra buffer */ static void normalize_buffer(struct st_buffer * STbuffer) { int i, order = STbuffer->reserved_page_order; for (i = 0; i < STbuffer->frp_segs; i++) { __free_pages(STbuffer->reserved_pages[i], order); STbuffer->buffer_size -= (PAGE_SIZE << order); } STbuffer->frp_segs = 0; STbuffer->sg_segs = 0; STbuffer->reserved_page_order = 0; STbuffer->map_data.offset = 0; } /* Move data from the user buffer to the tape buffer. Returns zero (success) or negative error code. */ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) { int i, cnt, res, offset; int length = PAGE_SIZE << st_bp->reserved_page_order; for (i = 0, offset = st_bp->buffer_bytes; i < st_bp->frp_segs && offset >= length; i++) offset -= length; if (i == st_bp->frp_segs) { /* Should never happen */ printk(KERN_WARNING "st: append_to_buffer offset overflow.\n"); return (-EIO); } for (; i < st_bp->frp_segs && do_count > 0; i++) { struct page *page = st_bp->reserved_pages[i]; cnt = length - offset < do_count ? length - offset : do_count; res = copy_from_user(page_address(page) + offset, ubp, cnt); if (res) return (-EFAULT); do_count -= cnt; st_bp->buffer_bytes += cnt; ubp += cnt; offset = 0; } if (do_count) /* Should never happen */ return (-EIO); return 0; } /* Move data from the tape buffer to the user buffer. Returns zero (success) or negative error code. 
*/ static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) { int i, cnt, res, offset; int length = PAGE_SIZE << st_bp->reserved_page_order; for (i = 0, offset = st_bp->read_pointer; i < st_bp->frp_segs && offset >= length; i++) offset -= length; if (i == st_bp->frp_segs) { /* Should never happen */ printk(KERN_WARNING "st: from_buffer offset overflow.\n"); return (-EIO); } for (; i < st_bp->frp_segs && do_count > 0; i++) { struct page *page = st_bp->reserved_pages[i]; cnt = length - offset < do_count ? length - offset : do_count; res = copy_to_user(ubp, page_address(page) + offset, cnt); if (res) return (-EFAULT); do_count -= cnt; st_bp->buffer_bytes -= cnt; st_bp->read_pointer += cnt; ubp += cnt; offset = 0; } if (do_count) /* Should never happen */ return (-EIO); return 0; } /* Move data towards start of buffer */ static void move_buffer_data(struct st_buffer * st_bp, int offset) { int src_seg, dst_seg, src_offset = 0, dst_offset; int count, total; int length = PAGE_SIZE << st_bp->reserved_page_order; if (offset == 0) return; total=st_bp->buffer_bytes - offset; for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) { src_offset = offset; if (src_offset < length) break; offset -= length; } st_bp->buffer_bytes = st_bp->read_pointer = total; for (dst_seg=dst_offset=0; total > 0; ) { struct page *dpage = st_bp->reserved_pages[dst_seg]; struct page *spage = st_bp->reserved_pages[src_seg]; count = min(length - dst_offset, length - src_offset); memmove(page_address(dpage) + dst_offset, page_address(spage) + src_offset, count); src_offset += count; if (src_offset >= length) { src_seg++; src_offset = 0; } dst_offset += count; if (dst_offset >= length) { dst_seg++; dst_offset = 0; } total -= count; } } /* Validate the options from command line or module parameters */ static void validate_options(void) { if (buffer_kbs > 0) st_fixed_buffer_size = buffer_kbs * ST_KILOBYTE; if (max_sg_segs >= ST_FIRST_SG) st_max_sg_segs = max_sg_segs; } #ifndef MODULE /* Set the boot options. Syntax is defined in Documenation/scsi/st.txt. */ static int __init st_setup(char *str) { int i, len, ints[5]; char *stp; stp = get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] > 0) { for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++) if (parms[i].val) *parms[i].val = ints[i + 1]; } else { while (stp != NULL) { for (i = 0; i < ARRAY_SIZE(parms); i++) { len = strlen(parms[i].name); if (!strncmp(stp, parms[i].name, len) && (*(stp + len) == ':' || *(stp + len) == '=')) { if (parms[i].val) *parms[i].val = simple_strtoul(stp + len + 1, NULL, 0); else printk(KERN_WARNING "st: Obsolete parameter %s\n", parms[i].name); break; } } if (i >= ARRAY_SIZE(parms)) printk(KERN_WARNING "st: invalid parameter in '%s'\n", stp); stp = strchr(stp, ','); if (stp) stp++; } } validate_options(); return 1; } __setup("st=", st_setup); #endif static const struct file_operations st_fops = { .owner = THIS_MODULE, .read = st_read, .write = st_write, .unlocked_ioctl = st_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = st_compat_ioctl, #endif .open = st_open, .flush = st_flush, .release = st_release, .llseek = noop_llseek, }; static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) { int i, error; dev_t cdev_devno; struct cdev *cdev; struct device *dev; struct st_modedef *STm = &(tape->modes[mode]); char name[10]; int dev_num = tape->index; cdev_devno = MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, rew)); cdev = cdev_alloc(); if (!cdev) { pr_err("st%d: out of memory. 
Device not attached.\n", dev_num); error = -ENOMEM; goto out; } cdev->owner = THIS_MODULE; cdev->ops = &st_fops; STm->cdevs[rew] = cdev; error = cdev_add(cdev, cdev_devno, 1); if (error) { pr_err("st%d: Can't add %s-rewind mode %d\n", dev_num, rew ? "non" : "auto", mode); pr_err("st%d: Device not attached.\n", dev_num); goto out_free; } i = mode << (4 - ST_NBR_MODE_BITS); snprintf(name, 10, "%s%s%s", rew ? "n" : "", tape->name, st_formats[i]); dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev, cdev_devno, &tape->modes[mode], "%s", name); if (IS_ERR(dev)) { pr_err("st%d: device_create failed\n", dev_num); error = PTR_ERR(dev); goto out_free; } STm->devs[rew] = dev; return 0; out_free: cdev_del(STm->cdevs[rew]); out: STm->cdevs[rew] = NULL; STm->devs[rew] = NULL; return error; } static int create_cdevs(struct scsi_tape *tape) { int mode, error; for (mode = 0; mode < ST_NBR_MODES; ++mode) { error = create_one_cdev(tape, mode, 0); if (error) return error; error = create_one_cdev(tape, mode, 1); if (error) return error; } return sysfs_create_link(&tape->device->sdev_gendev.kobj, &tape->modes[0].devs[0]->kobj, "tape"); } static void remove_cdevs(struct scsi_tape *tape) { int mode, rew; sysfs_remove_link(&tape->device->sdev_gendev.kobj, "tape"); for (mode = 0; mode < ST_NBR_MODES; mode++) { struct st_modedef *STm = &(tape->modes[mode]); for (rew = 0; rew < 2; rew++) { if (STm->cdevs[rew]) cdev_del(STm->cdevs[rew]); if (STm->devs[rew]) device_unregister(STm->devs[rew]); } } } static int st_probe(struct device *dev) { struct scsi_device *SDp = to_scsi_device(dev); struct scsi_tape *tpnt = NULL; struct st_modedef *STm; struct st_partstat *STps; struct st_buffer *buffer; int i, error; if (SDp->type != TYPE_TAPE) return -ENODEV; if (st_incompatible(SDp)) { sdev_printk(KERN_INFO, SDp, "OnStream tapes are no longer supported;\n"); sdev_printk(KERN_INFO, SDp, "please mail to [email protected].\n"); return -ENODEV; } scsi_autopm_get_device(SDp); i = queue_max_segments(SDp->request_queue); if (st_max_sg_segs < i) i = st_max_sg_segs; buffer = new_tape_buffer(i); if (buffer == NULL) { sdev_printk(KERN_ERR, SDp, "st: Can't allocate new tape buffer. " "Device not attached.\n"); goto out; } tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL); if (tpnt == NULL) { sdev_printk(KERN_ERR, SDp, "st: Can't allocate device descriptor.\n"); goto out_buffer_free; } kref_init(&tpnt->kref); tpnt->device = SDp; if (SDp->scsi_level <= 2) tpnt->tape_type = MT_ISSCSI1; else tpnt->tape_type = MT_ISSCSI2; tpnt->buffer = buffer; tpnt->buffer->last_SRpnt = NULL; tpnt->inited = 0; tpnt->dirty = 0; tpnt->in_use = 0; tpnt->drv_buffer = 1; /* Try buffering if no mode sense */ tpnt->use_pf = (SDp->scsi_level >= SCSI_2); tpnt->density = 0; tpnt->do_auto_lock = ST_AUTO_LOCK; tpnt->can_bsr = (SDp->scsi_level > 2 ? 
1 : ST_IN_FILE_POS); /* BSR mandatory in SCSI3 */ tpnt->can_partitions = 0; tpnt->two_fm = ST_TWO_FM; tpnt->fast_mteom = ST_FAST_MTEOM; tpnt->scsi2_logical = ST_SCSI2LOGICAL; tpnt->sili = ST_SILI; tpnt->immediate = ST_NOWAIT; tpnt->immediate_filemark = 0; tpnt->default_drvbuffer = 0xff; /* No forced buffering */ tpnt->partition = 0; tpnt->new_partition = 0; tpnt->nbr_partitions = 0; blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT); tpnt->long_timeout = ST_LONG_TIMEOUT; tpnt->try_dio = try_direct_io; for (i = 0; i < ST_NBR_MODES; i++) { STm = &(tpnt->modes[i]); STm->defined = 0; STm->sysv = ST_SYSV; STm->defaults_for_writes = 0; STm->do_async_writes = ST_ASYNC_WRITES; STm->do_buffer_writes = ST_BUFFER_WRITES; STm->do_read_ahead = ST_READ_AHEAD; STm->default_compression = ST_DONT_TOUCH; STm->default_blksize = (-1); /* No forced size */ STm->default_density = (-1); /* No forced density */ STm->tape = tpnt; } for (i = 0; i < ST_NBR_PARTITIONS; i++) { STps = &(tpnt->ps[i]); STps->rw = ST_IDLE; STps->eof = ST_NOEOF; STps->at_sm = 0; STps->last_block_valid = 0; STps->drv_block = (-1); STps->drv_file = (-1); } tpnt->current_mode = 0; tpnt->modes[0].defined = 1; tpnt->density_changed = tpnt->compression_changed = tpnt->blksize_changed = 0; mutex_init(&tpnt->lock); idr_preload(GFP_KERNEL); spin_lock(&st_index_lock); error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT); spin_unlock(&st_index_lock); idr_preload_end(); if (error < 0) { pr_warn("st: idr allocation failed: %d\n", error); goto out_free_tape; } tpnt->index = error; sprintf(tpnt->name, "st%d", tpnt->index); tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL); if (tpnt->stats == NULL) { sdev_printk(KERN_ERR, SDp, "st: Can't allocate statistics.\n"); goto out_idr_remove; } dev_set_drvdata(dev, tpnt); error = create_cdevs(tpnt); if (error) goto out_remove_devs; scsi_autopm_put_device(SDp); sdev_printk(KERN_NOTICE, SDp, "Attached scsi tape %s\n", tpnt->name); sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n", tpnt->name, tpnt->try_dio ? "yes" : "no", queue_dma_alignment(SDp->request_queue) + 1); return 0; out_remove_devs: remove_cdevs(tpnt); kfree(tpnt->stats); out_idr_remove: spin_lock(&st_index_lock); idr_remove(&st_index_idr, tpnt->index); spin_unlock(&st_index_lock); out_free_tape: kfree(tpnt); out_buffer_free: kfree(buffer); out: scsi_autopm_put_device(SDp); return -ENODEV; }; static int st_remove(struct device *dev) { struct scsi_tape *tpnt = dev_get_drvdata(dev); int index = tpnt->index; scsi_autopm_get_device(to_scsi_device(dev)); remove_cdevs(tpnt); mutex_lock(&st_ref_mutex); kref_put(&tpnt->kref, scsi_tape_release); mutex_unlock(&st_ref_mutex); spin_lock(&st_index_lock); idr_remove(&st_index_idr, index); spin_unlock(&st_index_lock); return 0; } /** * scsi_tape_release - Called to free the Scsi_Tape structure * @kref: pointer to embedded kref * * st_ref_mutex must be held entering this routine. Because it is * called on last put, you should always use the scsi_tape_get() * scsi_tape_put() helpers which manipulate the semaphore directly * and never do a direct kref_put(). 
**/ static void scsi_tape_release(struct kref *kref) { struct scsi_tape *tpnt = to_scsi_tape(kref); tpnt->device = NULL; if (tpnt->buffer) { normalize_buffer(tpnt->buffer); kfree(tpnt->buffer->reserved_pages); kfree(tpnt->buffer); } kfree(tpnt->stats); kfree(tpnt); return; } static struct class st_sysfs_class = { .name = "scsi_tape", .dev_groups = st_dev_groups, }; static int __init init_st(void) { int err; validate_options(); printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n", verstr, st_fixed_buffer_size, st_max_sg_segs); debugging = (debug_flag > 0) ? debug_flag : NO_DEBUG; if (debugging) { printk(KERN_INFO "st: Debugging enabled debug_flag = %d\n", debugging); } err = class_register(&st_sysfs_class); if (err) { pr_err("Unable to register sysfs class for SCSI tapes\n"); return err; } err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), ST_MAX_TAPE_ENTRIES, "st"); if (err) { printk(KERN_ERR "Unable to get major %d for SCSI tapes\n", SCSI_TAPE_MAJOR); goto err_class; } err = scsi_register_driver(&st_template.gendrv); if (err) goto err_chrdev; return 0; err_chrdev: unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), ST_MAX_TAPE_ENTRIES); err_class: class_unregister(&st_sysfs_class); return err; } static void __exit exit_st(void) { scsi_unregister_driver(&st_template.gendrv); unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), ST_MAX_TAPE_ENTRIES); class_unregister(&st_sysfs_class); idr_destroy(&st_index_idr); printk(KERN_INFO "st: Unloaded.\n"); } module_init(init_st); module_exit(exit_st); /* The sysfs driver interface. Read-only at the moment */ static ssize_t try_direct_io_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io); } static DRIVER_ATTR_RO(try_direct_io); static ssize_t fixed_buffer_size_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size); } static DRIVER_ATTR_RO(fixed_buffer_size); static ssize_t max_sg_segs_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs); } static DRIVER_ATTR_RO(max_sg_segs); static ssize_t version_show(struct device_driver *ddd, char *buf) { return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr); } static DRIVER_ATTR_RO(version); #if DEBUG static ssize_t debug_flag_store(struct device_driver *ddp, const char *buf, size_t count) { /* We only care what the first byte of the data is; the rest is unused. * If it's a '1' we turn on debug and if it's a '0' we disable it. All * other values have -EINVAL returned if they are passed in.
*/ if (count > 0) { if (buf[0] == '0') { debugging = NO_DEBUG; return count; } else if (buf[0] == '1') { debugging = 1; return count; } } return -EINVAL; } static ssize_t debug_flag_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", debugging); } static DRIVER_ATTR_RW(debug_flag); #endif static struct attribute *st_drv_attrs[] = { &driver_attr_try_direct_io.attr, &driver_attr_fixed_buffer_size.attr, &driver_attr_max_sg_segs.attr, &driver_attr_version.attr, #if DEBUG &driver_attr_debug_flag.attr, #endif NULL, }; ATTRIBUTE_GROUPS(st_drv); /* The sysfs simple class interface */ static ssize_t defined_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); ssize_t l = 0; l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); return l; } static DEVICE_ATTR_RO(defined); static ssize_t default_blksize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); ssize_t l = 0; l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); return l; } static DEVICE_ATTR_RO(default_blksize); static ssize_t default_density_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); ssize_t l = 0; char *fmt; fmt = STm->default_density >= 0 ? "0x%02x\n" : "%d\n"; l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); return l; } static DEVICE_ATTR_RO(default_density); static ssize_t default_compression_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); ssize_t l = 0; l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); return l; } static DEVICE_ATTR_RO(default_compression); static ssize_t options_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); struct scsi_tape *STp = STm->tape; int options; ssize_t l = 0; options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0; options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0; options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0; DEB( options |= debugging ? MT_ST_DEBUGGING : 0 ); options |= STp->two_fm ? MT_ST_TWO_FM : 0; options |= STp->fast_mteom ? MT_ST_FAST_MTEOM : 0; options |= STm->defaults_for_writes ? MT_ST_DEF_WRITES : 0; options |= STp->can_bsr ? MT_ST_CAN_BSR : 0; options |= STp->omit_blklims ? MT_ST_NO_BLKLIMS : 0; options |= STp->can_partitions ? MT_ST_CAN_PARTITIONS : 0; options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; options |= STm->sysv ? MT_ST_SYSV : 0; options |= STp->immediate ? MT_ST_NOWAIT : 0; options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0; options |= STp->sili ? MT_ST_SILI : 0; l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); return l; } static DEVICE_ATTR_RO(options); /* Support for tape stats */ /** * read_cnt_show - return read count - count of reads made from tape drive * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t read_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->read_cnt)); } static DEVICE_ATTR_RO(read_cnt); /** * read_byte_cnt_show - return read byte count - tape drives * may use blocks less than 512 bytes this gives the raw byte count of * of data read from the tape drive. 
* @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t read_byte_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->read_byte_cnt)); } static DEVICE_ATTR_RO(read_byte_cnt); /** * read_ns_show - return read ns - overall time spent waiting on reads in ns. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t read_ns_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->tot_read_time)); } static DEVICE_ATTR_RO(read_ns); /** * write_cnt_show - write count - number of user calls * to write(2) that have written data to tape. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t write_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->write_cnt)); } static DEVICE_ATTR_RO(write_cnt); /** * write_byte_cnt_show - write byte count - raw count of * bytes written to tape. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t write_byte_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->write_byte_cnt)); } static DEVICE_ATTR_RO(write_byte_cnt); /** * write_ns_show - write ns - number of nanoseconds waiting on write * requests to complete. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t write_ns_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->tot_write_time)); } static DEVICE_ATTR_RO(write_ns); /** * in_flight_show - number of I/Os currently in flight - * in most cases this will be either 0 or 1. It may be higher if someone * has also issued other SCSI commands such as via an ioctl. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t in_flight_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->in_flight)); } static DEVICE_ATTR_RO(in_flight); /** * io_ns_show - io wait ns - this is the number of ns spent * waiting on all I/O to complete. This includes tape movement commands * such as rewinding, seeking to end of file or tape, it also includes * read and write. To determine the time spent on tape movement * subtract the read and write ns from this value. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t io_ns_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->tot_io_time)); } static DEVICE_ATTR_RO(io_ns); /** * other_cnt_show - other io count - this is the number of * I/O requests other than read and write requests. 
* Typically these are tape movement requests but will include driver * tape movement. This includes only requests issued by the st driver. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t other_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->other_cnt)); } static DEVICE_ATTR_RO(other_cnt); /** * resid_cnt_show - A count of the number of times we get a residual * count - this should indicate someone issuing reads larger than the * block size on tape. * @dev: struct device * @attr: attribute structure * @buf: buffer to return formatted data in */ static ssize_t resid_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct st_modedef *STm = dev_get_drvdata(dev); return sprintf(buf, "%lld", (long long)atomic64_read(&STm->tape->stats->resid_cnt)); } static DEVICE_ATTR_RO(resid_cnt); static struct attribute *st_dev_attrs[] = { &dev_attr_defined.attr, &dev_attr_default_blksize.attr, &dev_attr_default_density.attr, &dev_attr_default_compression.attr, &dev_attr_options.attr, NULL, }; static struct attribute *st_stats_attrs[] = { &dev_attr_read_cnt.attr, &dev_attr_read_byte_cnt.attr, &dev_attr_read_ns.attr, &dev_attr_write_cnt.attr, &dev_attr_write_byte_cnt.attr, &dev_attr_write_ns.attr, &dev_attr_in_flight.attr, &dev_attr_io_ns.attr, &dev_attr_other_cnt.attr, &dev_attr_resid_cnt.attr, NULL, }; static struct attribute_group stats_group = { .name = "stats", .attrs = st_stats_attrs, }; static struct attribute_group st_group = { .attrs = st_dev_attrs, }; static const struct attribute_group *st_dev_groups[] = { &st_group, &stats_group, NULL, }; /* The following functions may be useful for a larger audience. */ static int sgl_map_user_pages(struct st_buffer *STbp, const unsigned int max_pages, unsigned long uaddr, size_t count, int rw) { unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = uaddr >> PAGE_SHIFT; const int nr_pages = end - start; int res, i; struct page **pages; struct rq_map_data *mdata = &STbp->map_data; /* User attempted Overflow! */ if ((uaddr + count) < uaddr) return -EINVAL; /* Too big */ if (nr_pages > max_pages) return -ENOMEM; /* Hmm? */ if (count == 0) return 0; pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL); if (pages == NULL) return -ENOMEM; /* Try to fault in all of the necessary pages */ /* rw==READ means read from drive, write into memory area */ res = pin_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0, pages); /* Errors and no page mapped should return here */ if (res < nr_pages) goto out_unmap; for (i=0; i < nr_pages; i++) { /* FIXME: flush superflous for rw==READ, * probably wrong function for rw==WRITE */ flush_dcache_page(pages[i]); } mdata->offset = uaddr & ~PAGE_MASK; STbp->mapped_pages = pages; return nr_pages; out_unmap: if (res > 0) { unpin_user_pages(pages, res); res = 0; } kfree(pages); return res; } /* And unmap them... */ static int sgl_unmap_user_pages(struct st_buffer *STbp, const unsigned int nr_pages, int dirtied) { /* FIXME: cache flush missing for rw==READ */ unpin_user_pages_dirty_lock(STbp->mapped_pages, nr_pages, dirtied); kfree(STbp->mapped_pages); STbp->mapped_pages = NULL; return 0; }
linux-master
drivers/scsi/st.c
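The st driver above exposes per-device I/O counters as a read-only "stats" sysfs group (read_cnt, read_byte_cnt, read_ns, write_cnt, write_byte_cnt, write_ns, in_flight, io_ns, other_cnt, resid_cnt) under the scsi_tape class it registers. A minimal userspace sketch that dumps those counters is shown below; the device name "nst0" and the exact /sys/class/scsi_tape path layout are illustrative assumptions, not something the driver code above defines.

/* Hypothetical userspace helper: print the st "stats" attribute group. */
#include <stdio.h>

static long long read_stat(const char *dev, const char *name)
{
	char path[256];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/scsi_tape/%s/stats/%s", dev, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* "nst0" is only an example device name; adjust for the local system. */
	const char *dev = "nst0";
	const char *names[] = {
		"read_cnt", "read_byte_cnt", "read_ns",
		"write_cnt", "write_byte_cnt", "write_ns",
		"in_flight", "io_ns", "other_cnt", "resid_cnt",
	};
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-14s %lld\n", names[i], read_stat(dev, names[i]));
	return 0;
}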
// SPDX-License-Identifier: GPL-2.0 /* * Driver for Future Domain TMC-16x0 and TMC-3260 SCSI host adapters * Copyright 2019 Ondrej Zary * * Original driver by * Rickard E. Faith, [email protected] * * Future Domain BIOS versions supported for autodetect: * 2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61 * Chips supported: * TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70 * Boards supported: * Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX * Future Domain TMC-3260 (PCI) * Quantum ISA-200S, ISA-250MG * Adaptec AHA-2920A (PCI) [BUT *NOT* AHA-2920C -- use aic7xxx instead] * IBM ? * * NOTE: * * The Adaptec AHA-2920C has an Adaptec AIC-7850 chip on it. * Use the aic7xxx driver for this board. * * The Adaptec AHA-2920A has a Future Domain chip on it, so this is the right * driver for that card. Unfortunately, the boxes will probably just say * "2920", so you'll have to look on the card for a Future Domain logo, or a * letter after the 2920. * * If you have a TMC-8xx or TMC-9xx board, then this is not the driver for * your board. * * DESCRIPTION: * * This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680 * TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a * 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin * high-density external connector. The 1670 and 1680 have floppy disk * controllers built in. The TMC-3260 is a PCI bus card. * * Future Domain's older boards are based on the TMC-1800 chip, and this * driver was originally written for a TMC-1680 board with the TMC-1800 chip. * More recently, boards are being produced with the TMC-18C50 and TMC-18C30 * chips. * * Please note that the drive ordering that Future Domain implemented in BIOS * versions 3.4 and 3.5 is the opposite of the order (currently) used by the * rest of the SCSI industry. * * * REFERENCES USED: * * "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation, * 1990. * * "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain * Corporation, January 1992. * * "LXT SCSI Products: Specifications and OEM Technical Manual (Revision * B/September 1991)", Maxtor Corporation, 1991. * * "7213S product Manual (Revision P3)", Maxtor Corporation, 1992. * * "Draft Proposed American National Standard: Small Computer System * Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109, * revision 10h, October 17, 1991) * * Private communications, Drew Eckhardt ([email protected]) and Eric * Youngdale ([email protected]), 1992. * * Private communication, Tuong Le (Future Domain Engineering department), * 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and * TMC-18C30 detection.) * * Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page * 60 (2.39: Disk Partition Table Layout). * * "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page * 6-1. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/workqueue.h> #include <scsi/scsicam.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "fdomain.h" /* * FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the * 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by * the SCSI device, an interrupt will be raised. Therefore, this could be as * low as 0, or as high as 16. 
Note, however, that values which are too high * or too low seem to prevent any interrupts from occurring, and thereby lock * up the machine. */ #define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */ #define PARITY_MASK ACTL_PAREN /* Parity enabled, 0 = disabled */ enum chip_type { unknown = 0x00, tmc1800 = 0x01, tmc18c50 = 0x02, tmc18c30 = 0x03, }; struct fdomain { int base; struct scsi_cmnd *cur_cmd; enum chip_type chip; struct work_struct work; }; static struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd) { return scsi_cmd_priv(cmd); } static inline void fdomain_make_bus_idle(struct fdomain *fd) { outb(0, fd->base + REG_BCTL); outb(0, fd->base + REG_MCTL); if (fd->chip == tmc18c50 || fd->chip == tmc18c30) /* Clear forced intr. */ outb(ACTL_RESET | ACTL_CLRFIRQ | PARITY_MASK, fd->base + REG_ACTL); else outb(ACTL_RESET | PARITY_MASK, fd->base + REG_ACTL); } static enum chip_type fdomain_identify(int port) { u16 id = inb(port + REG_ID_LSB) | inb(port + REG_ID_MSB) << 8; switch (id) { case 0x6127: return tmc1800; case 0x60e9: /* 18c50 or 18c30 */ break; default: return unknown; } /* Try to toggle 32-bit mode. This only works on an 18c30 chip. */ outb(CFG2_32BIT, port + REG_CFG2); if ((inb(port + REG_CFG2) & CFG2_32BIT)) { outb(0, port + REG_CFG2); if ((inb(port + REG_CFG2) & CFG2_32BIT) == 0) return tmc18c30; } /* If that failed, we are an 18c50. */ return tmc18c50; } static int fdomain_test_loopback(int base) { int i; for (i = 0; i < 255; i++) { outb(i, base + REG_LOOPBACK); if (inb(base + REG_LOOPBACK) != i) return 1; } return 0; } static void fdomain_reset(int base) { outb(BCTL_RST, base + REG_BCTL); mdelay(20); outb(0, base + REG_BCTL); mdelay(1150); outb(0, base + REG_MCTL); outb(PARITY_MASK, base + REG_ACTL); } static int fdomain_select(struct Scsi_Host *sh, int target) { int status; unsigned long timeout; struct fdomain *fd = shost_priv(sh); outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL); outb(BIT(sh->this_id) | BIT(target), fd->base + REG_SCSI_DATA_NOACK); /* Stop arbitration and enable parity */ outb(PARITY_MASK, fd->base + REG_ACTL); timeout = 350; /* 350 msec */ do { status = inb(fd->base + REG_BSTAT); if (status & BSTAT_BSY) { /* Enable SCSI Bus */ /* (on error, should make bus idle with 0) */ outb(BCTL_BUSEN, fd->base + REG_BCTL); return 0; } mdelay(1); } while (--timeout); fdomain_make_bus_idle(fd); return 1; } static void fdomain_finish_cmd(struct fdomain *fd) { outb(0, fd->base + REG_ICTL); fdomain_make_bus_idle(fd); scsi_done(fd->cur_cmd); fd->cur_cmd = NULL; } static void fdomain_read_data(struct scsi_cmnd *cmd) { struct fdomain *fd = shost_priv(cmd->device->host); unsigned char *virt, *ptr; size_t offset, len; while ((len = inw(fd->base + REG_FIFO_COUNT)) > 0) { offset = scsi_bufflen(cmd) - scsi_get_resid(cmd); virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd), &offset, &len); ptr = virt + offset; if (len & 1) *ptr++ = inb(fd->base + REG_FIFO); if (len > 1) insw(fd->base + REG_FIFO, ptr, len >> 1); scsi_set_resid(cmd, scsi_get_resid(cmd) - len); scsi_kunmap_atomic_sg(virt); } } static void fdomain_write_data(struct scsi_cmnd *cmd) { struct fdomain *fd = shost_priv(cmd->device->host); /* 8k FIFO for pre-tmc18c30 chips, 2k FIFO for tmc18c30 */ int FIFO_Size = fd->chip == tmc18c30 ? 
0x800 : 0x2000; unsigned char *virt, *ptr; size_t offset, len; while ((len = FIFO_Size - inw(fd->base + REG_FIFO_COUNT)) > 512) { offset = scsi_bufflen(cmd) - scsi_get_resid(cmd); if (len + offset > scsi_bufflen(cmd)) { len = scsi_bufflen(cmd) - offset; if (len == 0) break; } virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd), &offset, &len); ptr = virt + offset; if (len & 1) outb(*ptr++, fd->base + REG_FIFO); if (len > 1) outsw(fd->base + REG_FIFO, ptr, len >> 1); scsi_set_resid(cmd, scsi_get_resid(cmd) - len); scsi_kunmap_atomic_sg(virt); } } static void fdomain_work(struct work_struct *work) { struct fdomain *fd = container_of(work, struct fdomain, work); struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host, hostdata); struct scsi_cmnd *cmd = fd->cur_cmd; struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd); unsigned long flags; int status; int done = 0; spin_lock_irqsave(sh->host_lock, flags); if (scsi_pointer->phase & in_arbitration) { status = inb(fd->base + REG_ASTAT); if (!(status & ASTAT_ARB)) { set_host_byte(cmd, DID_BUS_BUSY); fdomain_finish_cmd(fd); goto out; } scsi_pointer->phase = in_selection; outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL); outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL); outb(BIT(cmd->device->host->this_id) | BIT(scmd_id(cmd)), fd->base + REG_SCSI_DATA_NOACK); /* Stop arbitration and enable parity */ outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL); goto out; } else if (scsi_pointer->phase & in_selection) { status = inb(fd->base + REG_BSTAT); if (!(status & BSTAT_BSY)) { /* Try again, for slow devices */ if (fdomain_select(cmd->device->host, scmd_id(cmd))) { set_host_byte(cmd, DID_NO_CONNECT); fdomain_finish_cmd(fd); goto out; } /* Stop arbitration and enable parity */ outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL); } scsi_pointer->phase = in_other; outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL); outb(BCTL_BUSEN, fd->base + REG_BCTL); goto out; } /* fdomain_scsi_pointer(cur_cmd)->phase == in_other: this is the body of the routine */ status = inb(fd->base + REG_BSTAT); if (status & BSTAT_REQ) { switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) { case BSTAT_CMD: /* COMMAND OUT */ outb(cmd->cmnd[scsi_pointer->sent_command++], fd->base + REG_SCSI_DATA); break; case 0: /* DATA OUT -- tmc18c50/tmc18c30 only */ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) { scsi_pointer->have_data_in = -1; outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN | PARITY_MASK, fd->base + REG_ACTL); } break; case BSTAT_IO: /* DATA IN -- tmc18c50/tmc18c30 only */ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) { scsi_pointer->have_data_in = 1; outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK, fd->base + REG_ACTL); } break; case BSTAT_CMD | BSTAT_IO: /* STATUS IN */ scsi_pointer->Status = inb(fd->base + REG_SCSI_DATA); break; case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */ outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA); break; case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */ scsi_pointer->Message = inb(fd->base + REG_SCSI_DATA); if (scsi_pointer->Message == COMMAND_COMPLETE) ++done; break; } } if (fd->chip == tmc1800 && !scsi_pointer->have_data_in && scsi_pointer->sent_command >= cmd->cmd_len) { if (cmd->sc_data_direction == DMA_TO_DEVICE) { scsi_pointer->have_data_in = -1; outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN | PARITY_MASK, fd->base + REG_ACTL); } else { scsi_pointer->have_data_in = 1; outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK, fd->base + REG_ACTL); } } if (scsi_pointer->have_data_in == -1) /* DATA OUT */ 
fdomain_write_data(cmd); if (scsi_pointer->have_data_in == 1) /* DATA IN */ fdomain_read_data(cmd); if (done) { set_status_byte(cmd, scsi_pointer->Status); set_host_byte(cmd, DID_OK); scsi_msg_to_host_byte(cmd, scsi_pointer->Message); fdomain_finish_cmd(fd); } else { if (scsi_pointer->phase & disconnect) { outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL); outb(0, fd->base + REG_BCTL); } else outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL); } out: spin_unlock_irqrestore(sh->host_lock, flags); } static irqreturn_t fdomain_irq(int irq, void *dev_id) { struct fdomain *fd = dev_id; /* Is it our IRQ? */ if ((inb(fd->base + REG_ASTAT) & ASTAT_IRQ) == 0) return IRQ_NONE; outb(0, fd->base + REG_ICTL); /* We usually have one spurious interrupt after each command. */ if (!fd->cur_cmd) /* Spurious interrupt */ return IRQ_NONE; schedule_work(&fd->work); return IRQ_HANDLED; } static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd); struct fdomain *fd = shost_priv(cmd->device->host); unsigned long flags; scsi_pointer->Status = 0; scsi_pointer->Message = 0; scsi_pointer->have_data_in = 0; scsi_pointer->sent_command = 0; scsi_pointer->phase = in_arbitration; scsi_set_resid(cmd, scsi_bufflen(cmd)); spin_lock_irqsave(sh->host_lock, flags); fd->cur_cmd = cmd; fdomain_make_bus_idle(fd); /* Start arbitration */ outb(0, fd->base + REG_ICTL); outb(0, fd->base + REG_BCTL); /* Disable data drivers */ /* Set our id bit */ outb(BIT(cmd->device->host->this_id), fd->base + REG_SCSI_DATA_NOACK); outb(ICTL_ARB, fd->base + REG_ICTL); /* Start arbitration */ outb(ACTL_ARB | ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL); spin_unlock_irqrestore(sh->host_lock, flags); return 0; } static int fdomain_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *sh = cmd->device->host; struct fdomain *fd = shost_priv(sh); unsigned long flags; if (!fd->cur_cmd) return FAILED; spin_lock_irqsave(sh->host_lock, flags); fdomain_make_bus_idle(fd); fdomain_scsi_pointer(fd->cur_cmd)->phase |= aborted; /* Aborts are not done well. . . 
*/ set_host_byte(fd->cur_cmd, DID_ABORT); fdomain_finish_cmd(fd); spin_unlock_irqrestore(sh->host_lock, flags); return SUCCESS; } static int fdomain_host_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *sh = cmd->device->host; struct fdomain *fd = shost_priv(sh); unsigned long flags; spin_lock_irqsave(sh->host_lock, flags); fdomain_reset(fd->base); spin_unlock_irqrestore(sh->host_lock, flags); return SUCCESS; } static int fdomain_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { unsigned char *p = scsi_bios_ptable(bdev); if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */ && p[4]) { /* Partition type */ geom[0] = p[5] + 1; /* heads */ geom[1] = p[6] & 0x3f; /* sectors */ } else { if (capacity >= 0x7e0000) { geom[0] = 255; /* heads */ geom[1] = 63; /* sectors */ } else if (capacity >= 0x200000) { geom[0] = 128; /* heads */ geom[1] = 63; /* sectors */ } else { geom[0] = 64; /* heads */ geom[1] = 32; /* sectors */ } } geom[2] = sector_div(capacity, geom[0] * geom[1]); kfree(p); return 0; } static const struct scsi_host_template fdomain_template = { .module = THIS_MODULE, .name = "Future Domain TMC-16x0", .proc_name = "fdomain", .queuecommand = fdomain_queue, .eh_abort_handler = fdomain_abort, .eh_host_reset_handler = fdomain_host_reset, .bios_param = fdomain_biosparam, .can_queue = 1, .this_id = 7, .sg_tablesize = 64, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct scsi_pointer), }; struct Scsi_Host *fdomain_create(int base, int irq, int this_id, struct device *dev) { struct Scsi_Host *sh; struct fdomain *fd; enum chip_type chip; static const char * const chip_names[] = { "Unknown", "TMC-1800", "TMC-18C50", "TMC-18C30" }; unsigned long irq_flags = 0; chip = fdomain_identify(base); if (!chip) return NULL; fdomain_reset(base); if (fdomain_test_loopback(base)) return NULL; if (!irq) { dev_err(dev, "card has no IRQ assigned"); return NULL; } sh = scsi_host_alloc(&fdomain_template, sizeof(struct fdomain)); if (!sh) return NULL; if (this_id) sh->this_id = this_id & 0x07; sh->irq = irq; sh->io_port = base; sh->n_io_port = FDOMAIN_REGION_SIZE; fd = shost_priv(sh); fd->base = base; fd->chip = chip; INIT_WORK(&fd->work, fdomain_work); if (dev_is_pci(dev) || !strcmp(dev->bus->name, "pcmcia")) irq_flags = IRQF_SHARED; if (request_irq(irq, fdomain_irq, irq_flags, "fdomain", fd)) goto fail_put; shost_printk(KERN_INFO, sh, "%s chip at 0x%x irq %d SCSI ID %d\n", dev_is_pci(dev) ? "TMC-36C70 (PCI bus)" : chip_names[chip], base, irq, sh->this_id); if (scsi_add_host(sh, dev)) goto fail_free_irq; scsi_scan_host(sh); return sh; fail_free_irq: free_irq(irq, fd); fail_put: scsi_host_put(sh); return NULL; } EXPORT_SYMBOL_GPL(fdomain_create); int fdomain_destroy(struct Scsi_Host *sh) { struct fdomain *fd = shost_priv(sh); cancel_work_sync(&fd->work); scsi_remove_host(sh); if (sh->irq) free_irq(sh->irq, fd); scsi_host_put(sh); return 0; } EXPORT_SYMBOL_GPL(fdomain_destroy); #ifdef CONFIG_PM_SLEEP static int fdomain_resume(struct device *dev) { struct fdomain *fd = shost_priv(dev_get_drvdata(dev)); fdomain_reset(fd->base); return 0; } static SIMPLE_DEV_PM_OPS(fdomain_pm_ops, NULL, fdomain_resume); #endif /* CONFIG_PM_SLEEP */ MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith"); MODULE_DESCRIPTION("Future Domain TMC-16x0/TMC-3260 SCSI driver"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/fdomain.c
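fdomain_biosparam() above falls back to a capacity-based CHS heuristic when no valid partition table is found: 255 heads/63 sectors for large disks (>= 0x7e0000 sectors), 128/63 for medium ones (>= 0x200000 sectors), 64/32 otherwise, with cylinders derived from the capacity divided by heads * sectors. The standalone sketch below reproduces only that arithmetic so the thresholds can be checked outside the kernel; function names are made up and the plain division for cylinders is an illustration of the intent, not the driver itself.

/* Sketch of the capacity-based geometry fallback used above. */
#include <stdio.h>

static void fake_geometry(unsigned long long capacity, int geom[3])
{
	if (capacity >= 0x7e0000ULL) {		/* ~4 GB of 512-byte sectors */
		geom[0] = 255;			/* heads */
		geom[1] = 63;			/* sectors per track */
	} else if (capacity >= 0x200000ULL) {	/* 1 GiB */
		geom[0] = 128;
		geom[1] = 63;
	} else {
		geom[0] = 64;
		geom[1] = 32;
	}
	geom[2] = (int)(capacity / (geom[0] * geom[1]));	/* cylinders */
}

int main(void)
{
	int geom[3];

	fake_geometry(0x200000ULL, geom);	/* worked example: 1 GiB disk */
	printf("heads=%d sectors=%d cylinders=%d\n",
	       geom[0], geom[1], geom[2]);
	return 0;
}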
// SPDX-License-Identifier: GPL-2.0-only #include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zorro.h> #include <linux/module.h> #include <asm/page.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include "wd33c93.h" #include "a2091.h" struct a2091_hostdata { struct WD33C93_hostdata wh; struct a2091_scsiregs *regs; struct device *dev; }; #define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) static irqreturn_t a2091_intr(int irq, void *data) { struct Scsi_Host *instance = data; struct a2091_hostdata *hdata = shost_priv(instance); unsigned int status = hdata->regs->ISTR; unsigned long flags; if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) return IRQ_NONE; spin_lock_irqsave(instance->host_lock, flags); wd33c93_intr(instance); spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd); unsigned long len = scsi_pointer->this_residual; struct Scsi_Host *instance = cmd->device->host; struct a2091_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct a2091_scsiregs *regs = hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; dma_addr_t addr; addr = dma_map_single(hdata->dev, scsi_pointer->ptr, len, DMA_DIR(dir_in)); if (dma_mapping_error(hdata->dev, addr)) { dev_warn(hdata->dev, "cannot map SCSI data block %p\n", scsi_pointer->ptr); return 1; } scsi_pointer->dma_handle = addr; /* don't allow DMA if the physical address is bad */ if (addr & A2091_XFER_MASK) { /* drop useless mapping */ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, scsi_pointer->this_residual, DMA_DIR(dir_in)); scsi_pointer->dma_handle = (dma_addr_t) NULL; wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff; wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL); /* can't allocate memory; use PIO */ if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr, scsi_pointer->this_residual); } /* will flush/invalidate cache for us */ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer, wh->dma_bounce_len, DMA_DIR(dir_in)); /* can't map buffer; use PIO */ if (dma_mapping_error(hdata->dev, addr)) { dev_warn(hdata->dev, "cannot map bounce buffer %p\n", wh->dma_bounce_buffer); return 1; } /* the bounce buffer may not be in the first 16M of physmem */ if (addr & A2091_XFER_MASK) { /* we could use chipmem... 
maybe later */ kfree(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; return 1; } scsi_pointer->dma_handle = addr; } /* setup dma direction */ if (!dir_in) cntr |= CNTR_DDIR; /* remember direction */ wh->dma_dir = dir_in; regs->CNTR = cntr; /* setup DMA *physical* address */ regs->ACR = addr; /* no more cache flush here - dma_map_single() takes care */ /* start DMA */ regs->ST_DMA = 1; /* return success */ return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt); struct a2091_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct a2091_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; if (!wh->dma_dir) cntr |= CNTR_DDIR; /* disable SCSI interrupts */ regs->CNTR = cntr; /* flush if we were reading */ if (wh->dma_dir) { regs->FLUSH = 1; while (!(regs->ISTR & ISTR_FE_FLG)) ; } /* clear a possible interrupt */ regs->CINT = 1; /* stop DMA */ regs->SP_DMA = 1; /* restore the CONTROL bits (minus the direction flag) */ regs->CNTR = CNTR_PDMD | CNTR_INTEN; dma_unmap_single(hdata->dev, scsi_pointer->dma_handle, scsi_pointer->this_residual, DMA_DIR(wh->dma_dir)); /* copy from a bounce buffer, if necessary */ if (status && wh->dma_bounce_buffer) { if (wh->dma_dir) memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer, scsi_pointer->this_residual); kfree(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; } } static const struct scsi_host_template a2091_scsi_template = { .module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", .show_info = wd33c93_show_info, .write_info = wd33c93_write_info, .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct scsi_pointer), }; static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { struct Scsi_Host *instance; int error; struct a2091_scsiregs *regs; wd33c93_regs wdregs; struct a2091_hostdata *hdata; if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) { dev_warn(&z->dev, "cannot use 24 bit DMA\n"); return -ENODEV; } if (!request_mem_region(z->resource.start, 256, "wd33c93")) return -EBUSY; instance = scsi_host_alloc(&a2091_scsi_template, sizeof(struct a2091_hostdata)); if (!instance) { error = -ENOMEM; goto fail_alloc; } instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; regs = ZTWO_VADDR(z->resource.start); regs->DAWR = DAWR_A2091; wdregs.SASR = &regs->SASR; wdregs.SCMD = &regs->SCMD; hdata = shost_priv(instance); hdata->dev = &z->dev; hdata->wh.no_sync = 0xff; hdata->wh.fast = 0; hdata->wh.dma_mode = CTRL_DMA; hdata->regs = regs; wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", instance); if (error) goto fail_irq; regs->CNTR = CNTR_PDMD | CNTR_INTEN; error = scsi_add_host(instance, NULL); if (error) goto fail_host; zorro_set_drvdata(z, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(IRQ_AMIGA_PORTS, instance); fail_irq: scsi_host_put(instance); fail_alloc: release_mem_region(z->resource.start, 256); return error; } static void a2091_remove(struct zorro_dev *z) { struct Scsi_Host *instance = zorro_get_drvdata(z); struct a2091_hostdata *hdata = 
shost_priv(instance); hdata->regs->CNTR = 0; scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); scsi_host_put(instance); release_mem_region(z->resource.start, 256); } static struct zorro_device_id a2091_zorro_tbl[] = { { ZORRO_PROD_CBM_A590_A2091_1 }, { ZORRO_PROD_CBM_A590_A2091_2 }, { 0 } }; MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); static struct zorro_driver a2091_driver = { .name = "a2091", .id_table = a2091_zorro_tbl, .probe = a2091_probe, .remove = a2091_remove, }; static int __init a2091_init(void) { return zorro_register_driver(&a2091_driver); } module_init(a2091_init); static void __exit a2091_exit(void) { zorro_unregister_driver(&a2091_driver); } module_exit(a2091_exit); MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/a2091.c
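dma_setup() above only programs the controller with addresses that fit its 24-bit DMA window (the board is limited to the first 16 MiB, hence dma_set_mask_and_coherent(..., DMA_BIT_MASK(24))); anything outside that window goes through a bounce buffer whose length is rounded up to a 512-byte multiple. The sketch below shows just those two pieces of arithmetic; FIRST_16M_MASK is an illustrative stand-in for the driver's A2091_XFER_MASK, which lives in a2091.h and is not reproduced here.

/* Sketch of the bounce-buffer decision and length round-up used above. */
#include <stdio.h>

#define FIRST_16M_MASK (~0xffffffUL)	/* bits above the 24-bit window (assumption) */

static int needs_bounce(unsigned long addr)
{
	return (addr & FIRST_16M_MASK) != 0;
}

static unsigned long bounce_len(unsigned long residual)
{
	return (residual + 511UL) & ~0x1ffUL;	/* round up to 512 bytes */
}

int main(void)
{
	printf("0x00ff0000 needs bounce? %d\n", needs_bounce(0x00ff0000UL));
	printf("0x01000000 needs bounce? %d\n", needs_bounce(0x01000000UL));
	printf("residual 700 -> bounce buffer of %lu bytes\n", bounce_len(700UL));
	return 0;
}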
// SPDX-License-Identifier: GPL-2.0-only /* * SCSI Zoned Block commands * * Copyright (C) 2014-2015 SUSE Linux GmbH * Written by: Hannes Reinecke <[email protected]> * Modified by: Damien Le Moal <[email protected]> * Modified by: Shaun Tancheff <[email protected]> */ #include <linux/blkdev.h> #include <linux/vmalloc.h> #include <linux/sched/mm.h> #include <linux/mutex.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "sd.h" #define CREATE_TRACE_POINTS #include "sd_trace.h" /** * sd_zbc_get_zone_wp_offset - Get zone write pointer offset. * @zone: Zone for which to return the write pointer offset. * * Return: offset of the write pointer from the start of the zone. */ static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone) { if (zone->type == ZBC_ZONE_TYPE_CONV) return 0; switch (zone->cond) { case BLK_ZONE_COND_IMP_OPEN: case BLK_ZONE_COND_EXP_OPEN: case BLK_ZONE_COND_CLOSED: return zone->wp - zone->start; case BLK_ZONE_COND_FULL: return zone->len; case BLK_ZONE_COND_EMPTY: case BLK_ZONE_COND_OFFLINE: case BLK_ZONE_COND_READONLY: default: /* * Offline and read-only zones do not have a valid * write pointer. Use 0 as for an empty zone. */ return 0; } } /* Whether or not a SCSI zone descriptor describes a gap zone. */ static bool sd_zbc_is_gap_zone(const u8 buf[64]) { return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP; } /** * sd_zbc_parse_report - Parse a SCSI zone descriptor * @sdkp: SCSI disk pointer. * @buf: SCSI zone descriptor. * @idx: Index of the zone relative to the first zone reported by the current * sd_zbc_report_zones() call. * @cb: Callback function pointer. * @data: Second argument passed to @cb. * * Return: Value returned by @cb. * * Convert a SCSI zone descriptor into struct blk_zone format. Additionally, * call @cb(blk_zone, @data). */ static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64], unsigned int idx, report_zones_cb cb, void *data) { struct scsi_device *sdp = sdkp->device; struct blk_zone zone = { 0 }; sector_t start_lba, gran; int ret; if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf))) return -EINVAL; zone.type = buf[0] & 0x0f; zone.cond = (buf[1] >> 4) & 0xf; if (buf[1] & 0x01) zone.reset = 1; if (buf[1] & 0x02) zone.non_seq = 1; start_lba = get_unaligned_be64(&buf[16]); zone.start = logical_to_sectors(sdp, start_lba); zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); zone.len = zone.capacity; if (sdkp->zone_starting_lba_gran) { gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran); if (zone.len > gran) { sd_printk(KERN_ERR, sdkp, "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n", start_lba, sectors_to_logical(sdp, zone.capacity), sectors_to_logical(sdp, zone.len), sectors_to_logical(sdp, gran)); return -EINVAL; } /* * Use the starting LBA granularity instead of the zone length * obtained from the REPORT ZONES command. */ zone.len = gran; } if (zone.cond == ZBC_ZONE_COND_FULL) zone.wp = zone.start + zone.len; else zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); ret = cb(&zone, idx, data); if (ret) return ret; if (sdkp->rev_wp_offset) sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone); return 0; } /** * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command. * @sdkp: The target disk * @buf: vmalloc-ed buffer to use for the reply * @buflen: the buffer size * @lba: Start LBA of the report * @partial: Do partial report * * For internal use during device validation. 
* Using partial=true can significantly speed up execution of a report zones * command because the disk does not have to count all possible report matching * zones and will only report the count of zones fitting in the command reply * buffer. */ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf, unsigned int buflen, sector_t lba, bool partial) { struct scsi_device *sdp = sdkp->device; const int timeout = sdp->request_queue->rq_timeout; struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; unsigned char cmd[16]; unsigned int rep_len; int result; memset(cmd, 0, 16); cmd[0] = ZBC_IN; cmd[1] = ZI_REPORT_ZONES; put_unaligned_be64(lba, &cmd[2]); put_unaligned_be32(buflen, &cmd[10]); if (partial) cmd[14] = ZBC_REPORT_ZONE_PARTIAL; result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buf, buflen, timeout, SD_MAX_RETRIES, &exec_args); if (result) { sd_printk(KERN_ERR, sdkp, "REPORT ZONES start lba %llu failed\n", lba); sd_print_result(sdkp, "REPORT ZONES", result); if (result > 0 && scsi_sense_valid(&sshdr)) sd_print_sense_hdr(sdkp, &sshdr); return -EIO; } rep_len = get_unaligned_be32(&buf[0]); if (rep_len < 64) { sd_printk(KERN_ERR, sdkp, "REPORT ZONES report invalid length %u\n", rep_len); return -EIO; } return 0; } /** * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply. * @sdkp: The target disk * @nr_zones: Maximum number of zones to report * @buflen: Size of the buffer allocated * * Try to allocate a reply buffer for the number of requested zones. * The size of the buffer allocated may be smaller than requested to * satify the device constraint (max_hw_sectors, max_segments, etc). * * Return the address of the allocated buffer and update @buflen with * the size of the allocated buffer. */ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, unsigned int nr_zones, size_t *buflen) { struct request_queue *q = sdkp->disk->queue; size_t bufsize; void *buf; /* * Report zone buffer size should be at most 64B times the number of * zones requested plus the 64B reply header, but should be aligned * to SECTOR_SIZE for ATA devices. * Make sure that this size does not exceed the hardware capabilities. * Furthermore, since the report zone command cannot be split, make * sure that the allocated buffer can always be mapped by limiting the * number of pages allocated to the HBA max segments limit. */ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); if (buf) { *buflen = bufsize; return buf; } bufsize = rounddown(bufsize >> 1, SECTOR_SIZE); } return NULL; } /** * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors. * @sdkp: The target disk */ static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) { return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks); } /** * sd_zbc_report_zones - SCSI .report_zones() callback. * @disk: Disk to report zones for. * @sector: Start sector. * @nr_zones: Maximum number of zones to report. * @cb: Callback function called to report zone information. * @data: Second argument passed to @cb. * * Called by the block layer to iterate over zone information. See also the * disk->fops->report_zones() calls in block/blk-zoned.c. 
*/ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { struct scsi_disk *sdkp = scsi_disk(disk); sector_t lba = sectors_to_logical(sdkp->device, sector); unsigned int nr, i; unsigned char *buf; u64 zone_length, start_lba; size_t offset, buflen = 0; int zone_idx = 0; int ret; if (!sd_is_zoned(sdkp)) /* Not a zoned device */ return -EOPNOTSUPP; if (!sdkp->capacity) /* Device gone or invalid */ return -ENODEV; buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen); if (!buf) return -ENOMEM; while (zone_idx < nr_zones && lba < sdkp->capacity) { ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true); if (ret) goto out; offset = 0; nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64); if (!nr) break; for (i = 0; i < nr && zone_idx < nr_zones; i++) { offset += 64; start_lba = get_unaligned_be64(&buf[offset + 16]); zone_length = get_unaligned_be64(&buf[offset + 8]); if ((zone_idx == 0 && (lba < start_lba || lba >= start_lba + zone_length)) || (zone_idx > 0 && start_lba != lba) || start_lba + zone_length < start_lba) { sd_printk(KERN_ERR, sdkp, "Zone %d at LBA %llu is invalid: %llu + %llu\n", zone_idx, lba, start_lba, zone_length); ret = -EINVAL; goto out; } lba = start_lba + zone_length; if (sd_zbc_is_gap_zone(&buf[offset])) { if (sdkp->zone_starting_lba_gran) continue; sd_printk(KERN_ERR, sdkp, "Gap zone without constant LBA offsets\n"); ret = -EINVAL; goto out; } ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx, cb, data); if (ret) goto out; zone_idx++; } } ret = zone_idx; out: kvfree(buf); return ret; } static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); sector_t sector = blk_rq_pos(rq); if (!sd_is_zoned(sdkp)) /* Not a zoned device */ return BLK_STS_IOERR; if (sdkp->device->changed) return BLK_STS_IOERR; if (sector & (sd_zbc_zone_sectors(sdkp) - 1)) /* Unaligned request */ return BLK_STS_IOERR; return BLK_STS_OK; } #define SD_ZBC_INVALID_WP_OFST (~0u) #define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1) static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx, void *data) { struct scsi_disk *sdkp = data; lockdep_assert_held(&sdkp->zones_wp_offset_lock); sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone); return 0; } /* * An attempt to append a zone triggered an invalid write pointer error. * Reread the write pointer of the zone(s) in which the append failed. */ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) { struct scsi_disk *sdkp; unsigned long flags; sector_t zno; int ret; sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) { if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) continue; spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf, SD_BUF_SIZE, zno * sdkp->zone_info.zone_blocks, true); spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); if (!ret) sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64, zno, sd_zbc_update_wp_offset_cb, sdkp); } spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); scsi_device_put(sdkp->device); } /** * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command. 
* @cmd: the command to setup * @lba: the LBA to patch * @nr_blocks: the number of LBAs to be written * * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND. * @sd_zbc_prepare_zone_append() handles the necessary zone wrote locking and * patching of the lba for an emulated ZONE_APPEND command. * * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will * schedule a REPORT ZONES command and return BLK_STS_IOERR. */ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, unsigned int nr_blocks) { struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); unsigned int wp_offset, zno = blk_rq_zone_no(rq); unsigned long flags; blk_status_t ret; ret = sd_zbc_cmnd_checks(cmd); if (ret != BLK_STS_OK) return ret; if (!blk_rq_zone_is_seq(rq)) return BLK_STS_IOERR; /* Unlock of the write lock will happen in sd_zbc_complete() */ if (!blk_req_zone_write_trylock(rq)) return BLK_STS_ZONE_RESOURCE; spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); wp_offset = sdkp->zones_wp_offset[zno]; switch (wp_offset) { case SD_ZBC_INVALID_WP_OFST: /* * We are about to schedule work to update a zone write pointer * offset, which will cause the zone append command to be * requeued. So make sure that the scsi device does not go away * while the work is being processed. */ if (scsi_device_get(sdkp->device)) { ret = BLK_STS_IOERR; break; } sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST; schedule_work(&sdkp->zone_wp_offset_work); fallthrough; case SD_ZBC_UPDATING_WP_OFST: ret = BLK_STS_DEV_RESOURCE; break; default: wp_offset = sectors_to_logical(sdkp->device, wp_offset); if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) { ret = BLK_STS_IOERR; break; } trace_scsi_prepare_zone_append(cmd, *lba, wp_offset); *lba += wp_offset; } spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); if (ret) blk_req_zone_write_unlock(rq); return ret; } /** * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH. * @cmd: the command to setup * @op: Operation to be performed * @all: All zones control * * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL, * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests. */ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all) { struct request *rq = scsi_cmd_to_rq(cmd); sector_t sector = blk_rq_pos(rq); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); sector_t block = sectors_to_logical(sdkp->device, sector); blk_status_t ret; ret = sd_zbc_cmnd_checks(cmd); if (ret != BLK_STS_OK) return ret; cmd->cmd_len = 16; memset(cmd->cmnd, 0, cmd->cmd_len); cmd->cmnd[0] = ZBC_OUT; cmd->cmnd[1] = op; if (all) cmd->cmnd[14] = 0x1; else put_unaligned_be64(block, &cmd->cmnd[2]); rq->timeout = SD_TIMEOUT; cmd->sc_data_direction = DMA_NONE; cmd->transfersize = 0; cmd->allowed = 0; return BLK_STS_OK; } static bool sd_zbc_need_zone_wp_update(struct request *rq) { switch (req_op(rq)) { case REQ_OP_ZONE_APPEND: case REQ_OP_ZONE_FINISH: case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_RESET_ALL: return true; case REQ_OP_WRITE: case REQ_OP_WRITE_ZEROES: return blk_rq_zone_is_seq(rq); default: return false; } } /** * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion * @cmd: Completed command * @good_bytes: Command reply bytes * * Called from sd_zbc_complete() to handle the update of the cached zone write * pointer value in case an update is needed. 
*/ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->q->disk); unsigned int zno = blk_rq_zone_no(rq); enum req_op op = req_op(rq); unsigned long flags; /* * If we got an error for a command that needs updating the write * pointer offset cache, we must mark the zone wp offset entry as * invalid to force an update from disk the next time a zone append * command is issued. */ spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags); if (result && op != REQ_OP_ZONE_RESET_ALL) { if (op == REQ_OP_ZONE_APPEND) { /* Force complete completion (no retry) */ good_bytes = 0; scsi_set_resid(cmd, blk_rq_bytes(rq)); } /* * Force an update of the zone write pointer offset on * the next zone append access. */ if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST) sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST; goto unlock_wp_offset; } switch (op) { case REQ_OP_ZONE_APPEND: trace_scsi_zone_wp_update(cmd, rq->__sector, sdkp->zones_wp_offset[zno], good_bytes); rq->__sector += sdkp->zones_wp_offset[zno]; fallthrough; case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE: if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp)) sdkp->zones_wp_offset[zno] += good_bytes >> SECTOR_SHIFT; break; case REQ_OP_ZONE_RESET: sdkp->zones_wp_offset[zno] = 0; break; case REQ_OP_ZONE_FINISH: sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp); break; case REQ_OP_ZONE_RESET_ALL: memset(sdkp->zones_wp_offset, 0, sdkp->zone_info.nr_zones * sizeof(unsigned int)); break; default: break; } unlock_wp_offset: spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags); return good_bytes; } /** * sd_zbc_complete - ZBC command post processing. * @cmd: Completed command * @good_bytes: Command reply bytes * @sshdr: command sense header * * Called from sd_done() to handle zone commands errors and updates to the * device queue zone write pointer offset cahce. */ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr) { int result = cmd->result; struct request *rq = scsi_cmd_to_rq(cmd); if (op_is_zone_mgmt(req_op(rq)) && result && sshdr->sense_key == ILLEGAL_REQUEST && sshdr->asc == 0x24) { /* * INVALID FIELD IN CDB error: a zone management command was * attempted on a conventional zone. Nothing to worry about, * so be quiet about the error. */ rq->rq_flags |= RQF_QUIET; } else if (sd_zbc_need_zone_wp_update(rq)) good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes); if (req_op(rq) == REQ_OP_ZONE_APPEND) blk_req_zone_write_unlock(rq); return good_bytes; } /** * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics * @sdkp: Target disk * @buf: Buffer where to store the VPD page data * * Read VPD page B6, get information and check that reads are unconstrained. 
*/ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp, unsigned char *buf) { u64 zone_starting_lba_gran; if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) { sd_printk(KERN_NOTICE, sdkp, "Read zoned characteristics VPD page failed\n"); return -ENODEV; } if (sdkp->device->type != TYPE_ZBC) { /* Host-aware */ sdkp->urswrz = 1; sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]); sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]); sdkp->zones_max_open = 0; return 0; } /* Host-managed */ sdkp->urswrz = buf[4] & 1; sdkp->zones_optimal_open = 0; sdkp->zones_optimal_nonseq = 0; sdkp->zones_max_open = get_unaligned_be32(&buf[16]); /* Check zone alignment method */ switch (buf[23] & 0xf) { case 0: case ZBC_CONSTANT_ZONE_LENGTH: /* Use zone length */ break; case ZBC_CONSTANT_ZONE_START_OFFSET: zone_starting_lba_gran = get_unaligned_be64(&buf[24]); if (zone_starting_lba_gran == 0 || !is_power_of_2(zone_starting_lba_gran) || logical_to_sectors(sdkp->device, zone_starting_lba_gran) > UINT_MAX) { sd_printk(KERN_ERR, sdkp, "Invalid zone starting LBA granularity %llu\n", zone_starting_lba_gran); return -ENODEV; } sdkp->zone_starting_lba_gran = zone_starting_lba_gran; break; default: sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n"); return -ENODEV; } /* * Check for unconstrained reads: host-managed devices with * constrained reads (drives failing read after write pointer) * are not supported. */ if (!sdkp->urswrz) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "constrained reads devices are not supported\n"); return -ENODEV; } return 0; } /** * sd_zbc_check_capacity - Check the device capacity * @sdkp: Target disk * @buf: command buffer * @zblocks: zone size in logical blocks * * Get the device zone size and check that the device capacity as reported * by READ CAPACITY matches the max_lba value (plus one) of the report zones * command reply for devices with RC_BASIS == 0. * * Returns 0 upon success or an error code upon failure. 
*/ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, u32 *zblocks) { u64 zone_blocks; sector_t max_lba; unsigned char *rec; int ret; /* Do a report zone to get max_lba and the size of the first zone */ ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false); if (ret) return ret; if (sdkp->rc_basis == 0) { /* The max_lba field is the capacity of this device */ max_lba = get_unaligned_be64(&buf[8]); if (sdkp->capacity != max_lba + 1) { if (sdkp->first_scan) sd_printk(KERN_WARNING, sdkp, "Changing capacity from %llu to max LBA+1 %llu\n", (unsigned long long)sdkp->capacity, (unsigned long long)max_lba + 1); sdkp->capacity = max_lba + 1; } } if (sdkp->zone_starting_lba_gran == 0) { /* Get the size of the first reported zone */ rec = buf + 64; zone_blocks = get_unaligned_be64(&rec[8]); if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Zone size too large\n"); return -EFBIG; } } else { zone_blocks = sdkp->zone_starting_lba_gran; } if (!is_power_of_2(zone_blocks)) { sd_printk(KERN_ERR, sdkp, "Zone size %llu is not a power of two.\n", zone_blocks); return -EINVAL; } *zblocks = zone_blocks; return 0; } static void sd_zbc_print_zones(struct scsi_disk *sdkp) { if (!sd_is_zoned(sdkp) || !sdkp->capacity) return; if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1)) sd_printk(KERN_NOTICE, sdkp, "%u zones of %u logical blocks + 1 runt zone\n", sdkp->zone_info.nr_zones - 1, sdkp->zone_info.zone_blocks); else sd_printk(KERN_NOTICE, sdkp, "%u zones of %u logical blocks\n", sdkp->zone_info.nr_zones, sdkp->zone_info.zone_blocks); } static int sd_zbc_init_disk(struct scsi_disk *sdkp) { sdkp->zones_wp_offset = NULL; spin_lock_init(&sdkp->zones_wp_offset_lock); sdkp->rev_wp_offset = NULL; mutex_init(&sdkp->rev_mutex); INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn); sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL); if (!sdkp->zone_wp_update_buf) return -ENOMEM; return 0; } void sd_zbc_free_zone_info(struct scsi_disk *sdkp) { if (!sdkp->zone_wp_update_buf) return; /* Serialize against revalidate zones */ mutex_lock(&sdkp->rev_mutex); kvfree(sdkp->zones_wp_offset); sdkp->zones_wp_offset = NULL; kfree(sdkp->zone_wp_update_buf); sdkp->zone_wp_update_buf = NULL; sdkp->early_zone_info = (struct zoned_disk_info){ }; sdkp->zone_info = (struct zoned_disk_info){ }; mutex_unlock(&sdkp->rev_mutex); } static void sd_zbc_revalidate_zones_cb(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset); } /* * Call blk_revalidate_disk_zones() if any of the zoned disk properties have * changed that make it necessary to call that function. Called by * sd_revalidate_disk() after the gendisk capacity has been set. */ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) { struct gendisk *disk = sdkp->disk; struct request_queue *q = disk->queue; u32 zone_blocks = sdkp->early_zone_info.zone_blocks; unsigned int nr_zones = sdkp->early_zone_info.nr_zones; int ret = 0; unsigned int flags; /* * For all zoned disks, initialize zone append emulation data if not * already done. This is necessary also for host-aware disks used as * regular disks due to the presence of partitions as these partitions * may be deleted and the disk zoned model changed back from * BLK_ZONED_NONE to BLK_ZONED_HA. 
*/ if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) { ret = sd_zbc_init_disk(sdkp); if (ret) return ret; } /* * There is nothing to do for regular disks, including host-aware disks * that have partitions. */ if (!blk_queue_is_zoned(q)) return 0; /* * Make sure revalidate zones are serialized to ensure exclusive * updates of the scsi disk data. */ mutex_lock(&sdkp->rev_mutex); if (sdkp->zone_info.zone_blocks == zone_blocks && sdkp->zone_info.nr_zones == nr_zones && disk->nr_zones == nr_zones) goto unlock; flags = memalloc_noio_save(); sdkp->zone_info.zone_blocks = zone_blocks; sdkp->zone_info.nr_zones = nr_zones; sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL); if (!sdkp->rev_wp_offset) { ret = -ENOMEM; memalloc_noio_restore(flags); goto unlock; } blk_queue_chunk_sectors(q, logical_to_sectors(sdkp->device, zone_blocks)); blk_queue_max_zone_append_sectors(q, q->limits.max_segments << PAGE_SECTORS_SHIFT); ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb); memalloc_noio_restore(flags); kvfree(sdkp->rev_wp_offset); sdkp->rev_wp_offset = NULL; if (ret) { sdkp->zone_info = (struct zoned_disk_info){ }; sdkp->capacity = 0; goto unlock; } sd_zbc_print_zones(sdkp); unlock: mutex_unlock(&sdkp->rev_mutex); return ret; } /** * sd_zbc_read_zones - Read zone information and update the request queue * @sdkp: SCSI disk pointer. * @buf: 512 byte buffer used for storing SCSI command output. * * Read zone information and update the request queue zone characteristics and * also the zoned device information in *sdkp. Called by sd_revalidate_disk() * before the gendisk capacity has been set. */ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) { struct gendisk *disk = sdkp->disk; struct request_queue *q = disk->queue; unsigned int nr_zones; u32 zone_blocks = 0; int ret; if (!sd_is_zoned(sdkp)) { /* * Device managed or normal SCSI disk, no special handling * required. Nevertheless, free the disk zone information in * case the device type changed. */ sd_zbc_free_zone_info(sdkp); return 0; } /* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */ sdkp->device->use_16_for_rw = 1; sdkp->device->use_10_for_rw = 0; sdkp->device->use_16_for_sync = 1; if (!blk_queue_is_zoned(q)) { /* * This can happen for a host aware disk with partitions. * The block device zone model was already cleared by * disk_set_zoned(). Only free the scsi disk zone * information and exit early. */ sd_zbc_free_zone_info(sdkp); return 0; } /* Check zoned block device characteristics (unconstrained reads) */ ret = sd_zbc_check_zoned_characteristics(sdkp, buf); if (ret) goto err; /* Check the device capacity reported by report zones */ ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks); if (ret != 0) goto err; /* The drive satisfies the kernel restrictions: set it up */ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); if (sdkp->zones_max_open == U32_MAX) disk_set_max_open_zones(disk, 0); else disk_set_max_open_zones(disk, sdkp->zones_max_open); disk_set_max_active_zones(disk, 0); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); sdkp->early_zone_info.nr_zones = nr_zones; sdkp->early_zone_info.zone_blocks = zone_blocks; return 0; err: sdkp->capacity = 0; return ret; }
linux-master
drivers/scsi/sd_zbc.c
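sd_zbc_report_zones() and sd_zbc_parse_report() above walk a REPORT ZONES reply as a 64-byte header followed by 64-byte zone descriptors: the zone type comes from the low nibble of byte 0, the condition from the high nibble of byte 1, and the big-endian 64-bit zone length/capacity, start LBA and write pointer from offsets 8, 16 and 24. The self-contained sketch below decodes one such descriptor from a raw buffer; the struct, helper names and the sample values are made up for illustration and simply mirror the byte offsets used in the code above.

/* Illustrative decoder for one 64-byte REPORT ZONES zone descriptor. */
#include <stdint.h>
#include <stdio.h>

struct zone_desc {
	uint8_t  type;		/* byte 0, low nibble */
	uint8_t  cond;		/* byte 1, high nibble */
	uint64_t length;	/* logical blocks, bytes 8..15 (big endian) */
	uint64_t start_lba;	/* bytes 16..23 (big endian) */
	uint64_t wp_lba;	/* bytes 24..31 (big endian) */
};

static uint64_t be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static void parse_zone_desc(const uint8_t buf[64], struct zone_desc *z)
{
	z->type      = buf[0] & 0x0f;
	z->cond      = (buf[1] >> 4) & 0x0f;
	z->length    = be64(&buf[8]);
	z->start_lba = be64(&buf[16]);
	z->wp_lba    = be64(&buf[24]);
}

int main(void)
{
	uint8_t raw[64] = { 0x02, 0x10 };	/* type 2, condition 1 (example) */
	struct zone_desc z;

	raw[15] = 0x80;				/* length = 0x80 blocks */
	raw[23] = 0x80;				/* start LBA = 0x80 */
	raw[31] = 0x80;				/* write pointer = start of zone */
	parse_zone_desc(raw, &z);
	printf("type=%u cond=%u start=%llu len=%llu wp=%llu\n",
	       (unsigned)z.type, (unsigned)z.cond,
	       (unsigned long long)z.start_lba,
	       (unsigned long long)z.length,
	       (unsigned long long)z.wp_lba);
	return 0;
}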
// SPDX-License-Identifier: GPL-2.0 #include <linux/blkdev.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <scsi/scsi_device.h> #include <scsi/scsi_devinfo.h> #include "scsi_priv.h" /* * scsi_dev_info_list: structure to hold black/white listed devices. */ struct scsi_dev_info_list { struct list_head dev_info_list; char vendor[8]; char model[16]; blist_flags_t flags; unsigned compatible; /* for use with scsi_static_device_list entries */ }; struct scsi_dev_info_list_table { struct list_head node; /* our node for being on the master list */ struct list_head scsi_dev_info_list; /* head of dev info list */ const char *name; /* name of list for /proc (NULL for global) */ int key; /* unique numeric identifier */ }; static blist_flags_t scsi_default_dev_flags; static LIST_HEAD(scsi_dev_info_list); static char scsi_dev_flags[256]; /* * scsi_static_device_list: deprecated list of devices that require * settings that differ from the default, includes black-listed (broken) * devices. The entries here are added to the tail of scsi_dev_info_list * via scsi_dev_info_list_init. * * Do not add to this list, use the command line or proc interface to add * to the scsi_dev_info_list. This table will eventually go away. */ static struct { char *vendor; char *model; char *revision; /* revision known to be bad, unused */ blist_flags_t flags; } scsi_static_device_list[] __initdata = { /* * The following devices are known not to tolerate a lun != 0 scan * for one reason or another. Some will respond to all luns, * others will lock up. */ {"Aashima", "IMAGERY 2400SP", "1.03", BLIST_NOLUN}, /* locks up */ {"CHINON", "CD-ROM CDS-431", "H42", BLIST_NOLUN}, /* locks up */ {"CHINON", "CD-ROM CDS-535", "Q14", BLIST_NOLUN}, /* locks up */ {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */ {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */ {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "MXT-1240S", "I1.2", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN}, /* locks up */ {"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN}, /* responds to all lun */ {"MICROTEK", "ScanMakerIII", "2.30", BLIST_NOLUN}, /* responds to all lun */ {"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},/* locks up */ {"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN}, /* responds to all lun */ {"RODIME", "RO3000S", "2.33", BLIST_NOLUN}, /* locks up */ {"SUN", "SENA", NULL, BLIST_NOLUN}, /* responds to all luns */ /* * The following causes a failed REQUEST SENSE on lun 1 for * aha152x controller, which causes SCSI code to reset bus. */ {"SANYO", "CRD-250S", "1.20", BLIST_NOLUN}, /* * The following causes a failed REQUEST SENSE on lun 1 for * aha152x controller, which causes SCSI code to reset bus. 
*/ {"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, {"SEAGATE", "ST296", "921", BLIST_NOLUN}, /* responds to all lun */ {"SEAGATE", "ST1581", "6538", BLIST_NOLUN}, /* responds to all lun */ {"SONY", "CD-ROM CDU-541", "4.3d", BLIST_NOLUN}, {"SONY", "CD-ROM CDU-55S", "1.0i", BLIST_NOLUN}, {"SONY", "CD-ROM CDU-561", "1.7x", BLIST_NOLUN}, {"SONY", "CD-ROM CDU-8012", NULL, BLIST_NOLUN}, {"SONY", "SDT-5000", "3.17", BLIST_SELECT_NO_ATN}, {"TANDBERG", "TDC 3600", "U07", BLIST_NOLUN}, /* locks up */ {"TEAC", "CD-R55S", "1.0H", BLIST_NOLUN}, /* locks up */ /* * The following causes a failed REQUEST SENSE on lun 1 for * seagate controller, which causes SCSI code to reset bus. */ {"TEAC", "CD-ROM", "1.06", BLIST_NOLUN}, {"TEAC", "MT-2ST/45S2-27", "RV M", BLIST_NOLUN}, /* responds to all lun */ /* * The following causes a failed REQUEST SENSE on lun 1 for * seagate controller, which causes SCSI code to reset bus. */ {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */ {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */ {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */ {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */ {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */ {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */ {"NEC", "D3856", "0009", BLIST_NOLUN}, {"QUANTUM", "LPS525S", "3110", BLIST_NOLUN}, /* locks up */ {"QUANTUM", "PD1225S", "3110", BLIST_NOLUN}, /* locks up */ {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */ {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN}, {"transtec", "T5008", "0001", BLIST_NOREPORTLUN }, {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */ {"", "Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */ /* * Other types of devices that have special flags. * Note that all USB devices should have the BLIST_INQUIRY_36 flag. 
*/ {"3PARdata", "VV", NULL, BLIST_REPORTLUN2}, {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN}, {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE}, {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN}, {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN}, {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ {"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */ {"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */ {"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_MAX_512 | BLIST_REPORTLUN2}, /* Compaq RA4x00 */ {"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */ {"COMPAQ", "CR3500", NULL, BLIST_FORCELUN}, {"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN}, {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"DELL", "PV660F", NULL, BLIST_SPARSELUN}, {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN}, {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ {"DELL", "PV530F", NULL, BLIST_SPARSELUN}, {"DELL", "PERCRAID", NULL, BLIST_FORCELUN}, {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2 | BLIST_RETRY_ITF}, {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "F8", NULL, BLIST_NOREPORTLUN}, {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1}, {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */ {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36}, {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, {"HITACHI", "HUS1530", "*", BLIST_NO_DIF}, {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"HP", "C1557A", NULL, BLIST_FORCELUN}, {"HP", "C3323-300", "4269", BLIST_NOTQ}, {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE}, {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN}, {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, 
{"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, {"INSITE", "I325VM", NULL, BLIST_KEY}, {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, {"MICROP", "4110", NULL, BLIST_NOTQ}, {"MSFT", "Virtual HD", NULL, BLIST_MAX_1024 | BLIST_NO_RSOC}, {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "iStorage", NULL, BLIST_REPORTLUN2}, {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, {"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */ {"SEAGATE", "ST3390N", "9546", BLIST_NOTQ}, {"SEAGATE", "ST900MM0006", NULL, BLIST_SKIP_VPD_PAGES}, {"SGI", "RAID3", "*", BLIST_SPARSELUN}, {"SGI", "RAID5", "*", BLIST_SPARSELUN}, {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES}, {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | BLIST_INQUIRY_36}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR}, {"SUN", "T300", "*", BLIST_SPARSELUN}, {"SUN", "T4", "*", BLIST_SPARSELUN}, {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */ {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN}, 
{"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN}, {"WangDAT", "Model 1300", "02.4", BLIST_SELECT_NO_ATN}, {"WDC WD25", "00JB-00FUA0", NULL, BLIST_NOREPORTLUN}, {"XYRATEX", "RS", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"Zzyzx", "RocketStor 500S", NULL, BLIST_SPARSELUN}, {"Zzyzx", "RocketStor 2000", NULL, BLIST_SPARSELUN}, { NULL, NULL, NULL, 0 }, }; static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) { struct scsi_dev_info_list_table *devinfo_table; int found = 0; list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) if (devinfo_table->key == key) { found = 1; break; } if (!found) return ERR_PTR(-EINVAL); return devinfo_table; } /* * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into * devinfo vendor and model strings. */ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, char *from, int compatible) { size_t from_length; from_length = strlen(from); /* This zero-pads the destination */ strncpy(to, from, to_length); if (from_length < to_length && !compatible) { /* * space pad the string if it is short. */ memset(&to[from_length], ' ', to_length - from_length); } if (from_length > to_length) printk(KERN_WARNING "%s: %s string '%s' is too long\n", __func__, name, from); } /** * scsi_dev_info_list_add - add one dev_info list entry. * @compatible: if true, null terminate short strings. Otherwise space pad. * @vendor: vendor string * @model: model (product) string * @strflags: integer string * @flags: if strflags NULL, use this flag value * * Description: * Create and add one dev_info entry for @vendor, @model, @strflags or * @flag. If @compatible, add to the tail of the list, do not space * pad, and set devinfo->compatible. The scsi_static_device_list entries * are added with @compatible 1 and @clfags NULL. * * Returns: 0 OK, -error on failure. **/ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, char *strflags, blist_flags_t flags) { return scsi_dev_info_list_add_keyed(compatible, vendor, model, strflags, flags, SCSI_DEVINFO_GLOBAL); } /** * scsi_dev_info_list_add_keyed - add one dev_info list entry. * @compatible: if true, null terminate short strings. Otherwise space pad. * @vendor: vendor string * @model: model (product) string * @strflags: integer string * @flags: if strflags NULL, use this flag value * @key: specify list to use * * Description: * Create and add one dev_info entry for @vendor, @model, * @strflags or @flag in list specified by @key. If @compatible, * add to the tail of the list, do not space pad, and set * devinfo->compatible. The scsi_static_device_list entries are * added with @compatible 1 and @clfags NULL. * * Returns: 0 OK, -error on failure. 
**/ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, blist_flags_t flags, enum scsi_devinfo_key key) { struct scsi_dev_info_list *devinfo; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (IS_ERR(devinfo_table)) return PTR_ERR(devinfo_table); devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); if (!devinfo) { printk(KERN_ERR "%s: no memory\n", __func__); return -ENOMEM; } scsi_strcpy_devinfo("vendor", devinfo->vendor, sizeof(devinfo->vendor), vendor, compatible); scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model), model, compatible); if (strflags) { unsigned long long val; int ret = kstrtoull(strflags, 0, &val); if (ret != 0) { kfree(devinfo); return ret; } flags = (__force blist_flags_t)val; } if (flags & __BLIST_UNUSED_MASK) { pr_err("scsi_devinfo (%s:%s): unsupported flags 0x%llx", vendor, model, flags & __BLIST_UNUSED_MASK); kfree(devinfo); return -EINVAL; } devinfo->flags = flags; devinfo->compatible = compatible; if (compatible) list_add_tail(&devinfo->dev_info_list, &devinfo_table->scsi_dev_info_list); else list_add(&devinfo->dev_info_list, &devinfo_table->scsi_dev_info_list); return 0; } EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); /** * scsi_dev_info_list_find - find a matching dev_info list entry. * @vendor: full vendor string * @model: full model (product) string * @key: specify list to use * * Description: * Finds the first dev_info entry matching @vendor, @model * in list specified by @key. * * Returns: pointer to matching entry, or ERR_PTR on failure. **/ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, const char *model, enum scsi_devinfo_key key) { struct scsi_dev_info_list *devinfo; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); size_t vmax, mmax, mlen; const char *vskip, *mskip; if (IS_ERR(devinfo_table)) return (struct scsi_dev_info_list *) devinfo_table; /* Prepare for "compatible" matches */ /* * XXX why skip leading spaces? If an odd INQUIRY * value, that should have been part of the * scsi_static_device_list[] entry, such as " FOO" * rather than "FOO". Since this code is already * here, and we don't know what device it is * trying to work with, leave it as-is. */ vmax = sizeof(devinfo->vendor); vskip = vendor; while (vmax > 0 && *vskip == ' ') { vmax--; vskip++; } /* Also skip trailing spaces */ while (vmax > 0 && vskip[vmax - 1] == ' ') --vmax; mmax = sizeof(devinfo->model); mskip = model; while (mmax > 0 && *mskip == ' ') { mmax--; mskip++; } while (mmax > 0 && mskip[mmax - 1] == ' ') --mmax; list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, dev_info_list) { if (devinfo->compatible) { /* * vendor strings must be an exact match */ if (vmax != strnlen(devinfo->vendor, sizeof(devinfo->vendor)) || memcmp(devinfo->vendor, vskip, vmax)) continue; /* * @model specifies the full string, and * must be larger or equal to devinfo->model */ mlen = strnlen(devinfo->model, sizeof(devinfo->model)); if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) continue; return devinfo; } else { if (!memcmp(devinfo->vendor, vendor, sizeof(devinfo->vendor)) && !memcmp(devinfo->model, model, sizeof(devinfo->model))) return devinfo; } } return ERR_PTR(-ENOENT); } /** * scsi_dev_info_list_del_keyed - remove one dev_info list entry. * @vendor: vendor string * @model: model (product) string * @key: specify list to use * * Description: * Remove and destroy one dev_info entry for @vendor, @model * in list specified by @key. 
* * Returns: 0 OK, -error on failure. **/ int scsi_dev_info_list_del_keyed(char *vendor, char *model, enum scsi_devinfo_key key) { struct scsi_dev_info_list *found; found = scsi_dev_info_list_find(vendor, model, key); if (IS_ERR(found)) return PTR_ERR(found); list_del(&found->dev_info_list); kfree(found); return 0; } EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); /** * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. * @dev_list: string of device flags to add * * Description: * Parse dev_list, and add entries to the scsi_dev_info_list. * dev_list is of the form "vendor:product:flag,vendor:product:flag". * dev_list is modified via strsep. Can be called for command line * addition, for proc or mabye a sysfs interface. * * Returns: 0 if OK, -error on failure. **/ static int scsi_dev_info_list_add_str(char *dev_list) { char *vendor, *model, *strflags, *next; char *next_check; int res = 0; next = dev_list; if (next && next[0] == '"') { /* * Ignore both the leading and trailing quote. */ next++; next_check = ",\""; } else { next_check = ","; } /* * For the leading and trailing '"' case, the for loop comes * through the last time with vendor[0] == '\0'. */ for (vendor = strsep(&next, ":"); vendor && (vendor[0] != '\0') && (res == 0); vendor = strsep(&next, ":")) { strflags = NULL; model = strsep(&next, ":"); if (model) strflags = strsep(&next, next_check); if (!model || !strflags) { printk(KERN_ERR "%s: bad dev info string '%s' '%s'" " '%s'\n", __func__, vendor, model, strflags); res = -EINVAL; } else res = scsi_dev_info_list_add(0 /* compatible */, vendor, model, strflags, 0); } return res; } /** * scsi_get_device_flags - get device specific flags from the dynamic * device list. * @sdev: &scsi_device to get flags for * @vendor: vendor name * @model: model name * * Description: * Search the global scsi_dev_info_list (specified by list zero) * for an entry matching @vendor and @model, if found, return the * matching flags value, else return the host or global default * settings. Called during scan time. **/ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev, const unsigned char *vendor, const unsigned char *model) { return scsi_get_device_flags_keyed(sdev, vendor, model, SCSI_DEVINFO_GLOBAL); } /** * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list * @sdev: &scsi_device to get flags for * @vendor: vendor name * @model: model name * @key: list to look up * * Description: * Search the scsi_dev_info_list specified by @key for an entry * matching @vendor and @model, if found, return the matching * flags value, else return the host or global default settings. * Called during scan time. 
**/ blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, const unsigned char *vendor, const unsigned char *model, enum scsi_devinfo_key key) { struct scsi_dev_info_list *devinfo; devinfo = scsi_dev_info_list_find(vendor, model, key); if (!IS_ERR(devinfo)) return devinfo->flags; /* key or device not found: return nothing */ if (key != SCSI_DEVINFO_GLOBAL) return 0; /* except for the global list, where we have an exception */ if (sdev->sdev_bflags) return sdev->sdev_bflags; return scsi_default_dev_flags; } EXPORT_SYMBOL(scsi_get_device_flags_keyed); #ifdef CONFIG_SCSI_PROC_FS struct double_list { struct list_head *top; struct list_head *bottom; }; static int devinfo_seq_show(struct seq_file *m, void *v) { struct double_list *dl = v; struct scsi_dev_info_list_table *devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); struct scsi_dev_info_list *devinfo = list_entry(dl->bottom, struct scsi_dev_info_list, dev_info_list); if (devinfo_table->scsi_dev_info_list.next == dl->bottom && devinfo_table->name) seq_printf(m, "[%s]:\n", devinfo_table->name); seq_printf(m, "'%.8s' '%.16s' 0x%llx\n", devinfo->vendor, devinfo->model, devinfo->flags); return 0; } static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos) { struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL); loff_t pos = *ppos; if (!dl) return NULL; list_for_each(dl->top, &scsi_dev_info_list) { struct scsi_dev_info_list_table *devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list) if (pos-- == 0) return dl; } kfree(dl); return NULL; } static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos) { struct double_list *dl = v; struct scsi_dev_info_list_table *devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); ++*ppos; dl->bottom = dl->bottom->next; while (&devinfo_table->scsi_dev_info_list == dl->bottom) { dl->top = dl->top->next; if (dl->top == &scsi_dev_info_list) { kfree(dl); return NULL; } devinfo_table = list_entry(dl->top, struct scsi_dev_info_list_table, node); dl->bottom = devinfo_table->scsi_dev_info_list.next; } return dl; } static void devinfo_seq_stop(struct seq_file *m, void *v) { kfree(v); } static const struct seq_operations scsi_devinfo_seq_ops = { .start = devinfo_seq_start, .next = devinfo_seq_next, .stop = devinfo_seq_stop, .show = devinfo_seq_show, }; static int proc_scsi_devinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &scsi_devinfo_seq_ops); } /* * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc. * * Description: Adds a black/white list entry for vendor and model with an * integer value of flag to the scsi device info list. 
* To use, echo "vendor:model:flag" > /proc/scsi/device_info */ static ssize_t proc_scsi_devinfo_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { char *buffer; ssize_t err = length; if (!buf || length>PAGE_SIZE) return -EINVAL; if (!(buffer = (char *) __get_free_page(GFP_KERNEL))) return -ENOMEM; if (copy_from_user(buffer, buf, length)) { err =-EFAULT; goto out; } if (length < PAGE_SIZE) buffer[length] = '\0'; else if (buffer[PAGE_SIZE-1]) { err = -EINVAL; goto out; } scsi_dev_info_list_add_str(buffer); out: free_page((unsigned long)buffer); return err; } static const struct proc_ops scsi_devinfo_proc_ops = { .proc_open = proc_scsi_devinfo_open, .proc_read = seq_read, .proc_write = proc_scsi_devinfo_write, .proc_lseek = seq_lseek, .proc_release = seq_release, }; #endif /* CONFIG_SCSI_PROC_FS */ module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); MODULE_PARM_DESC(dev_flags, "Given scsi_dev_flags=vendor:model:flags[,v:m:f] add black/white" " list entries for vendor and model with an integer value of flags" " to the scsi device info list"); module_param_named(default_dev_flags, scsi_default_dev_flags, ullong, 0644); MODULE_PARM_DESC(default_dev_flags, "scsi default device flag uint64_t value"); /** * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list **/ void scsi_exit_devinfo(void) { #ifdef CONFIG_SCSI_PROC_FS remove_proc_entry("scsi/device_info", NULL); #endif scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL); } /** * scsi_dev_info_add_list - add a new devinfo list * @key: key of the list to add * @name: Name of the list to add (for /proc/scsi/device_info) * * Adds the requested list, returns zero on success, -EEXIST if the * key is already registered to a list, or other error on failure. */ int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name) { struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (!IS_ERR(devinfo_table)) /* list already exists */ return -EEXIST; devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL); if (!devinfo_table) return -ENOMEM; INIT_LIST_HEAD(&devinfo_table->node); INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list); devinfo_table->name = name; devinfo_table->key = key; list_add_tail(&devinfo_table->node, &scsi_dev_info_list); return 0; } EXPORT_SYMBOL(scsi_dev_info_add_list); /** * scsi_dev_info_remove_list - destroy an added devinfo list * @key: key of the list to destroy * * Iterates over the entire list first, freeing all the values, then * frees the list itself. Returns 0 on success or -EINVAL if the key * can't be found. */ int scsi_dev_info_remove_list(enum scsi_devinfo_key key) { struct list_head *lh, *lh_next; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); if (IS_ERR(devinfo_table)) /* no such list */ return -EINVAL; /* remove from the master list */ list_del(&devinfo_table->node); list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) { struct scsi_dev_info_list *devinfo; devinfo = list_entry(lh, struct scsi_dev_info_list, dev_info_list); kfree(devinfo); } kfree(devinfo_table); return 0; } EXPORT_SYMBOL(scsi_dev_info_remove_list); /** * scsi_init_devinfo - set up the dynamic device list. * * Description: * Add command line entries from scsi_dev_flags, then add * scsi_static_device_list entries to the scsi device info list. 
 */
int __init scsi_init_devinfo(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	struct proc_dir_entry *p;
#endif
	int error, i;

	error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL);
	if (error)
		return error;

	error = scsi_dev_info_list_add_str(scsi_dev_flags);
	if (error)
		goto out;

	for (i = 0; scsi_static_device_list[i].vendor; i++) {
		error = scsi_dev_info_list_add(1 /* compatible */,
				scsi_static_device_list[i].vendor,
				scsi_static_device_list[i].model,
				NULL,
				scsi_static_device_list[i].flags);
		if (error)
			goto out;
	}

#ifdef CONFIG_SCSI_PROC_FS
	p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_ops);
	if (!p) {
		error = -ENOMEM;
		goto out;
	}
#endif /* CONFIG_SCSI_PROC_FS */

 out:
	if (error)
		scsi_exit_devinfo();
	return error;
}
linux-master
drivers/scsi/scsi_devinfo.c
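scsi_dev_info_list_add_str() above parses the dev_flags module parameter as "vendor:model:flag[,vendor:model:flag]" using strsep(). The following is a small userspace sketch of that parsing loop, not kernel code: it omits the quoted-string handling, the vendor/model/flag values are invented for illustration, and strsep() is the glibc/BSD extension declared in <string.h>.

/*
 * Illustrative userspace sketch (not kernel code) of the
 * "vendor:model:flag[,vendor:model:flag]" parsing done by
 * scsi_dev_info_list_add_str(). The quoted-string variant is omitted
 * and the sample entries are invented. strsep() is a glibc/BSD
 * extension.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "HITACHI:OPEN-V:0x240,QEMU:QEMU CD-ROM:0x400000";
	char *next = buf;
	char *vendor, *model, *strflags;

	for (vendor = strsep(&next, ":");
	     vendor && vendor[0] != '\0';
	     vendor = strsep(&next, ":")) {
		model = strsep(&next, ":");
		strflags = model ? strsep(&next, ",") : NULL;
		if (!model || !strflags) {
			fprintf(stderr, "bad dev info string\n");
			return 1;
		}
		printf("vendor='%s' model='%s' flags=%#lx\n",
		       vendor, model, strtoul(strflags, NULL, 0));
	}
	return 0;
}

Each entry prints its parsed vendor, model, and numeric flags, mirroring what the kernel loop would hand to scsi_dev_info_list_add().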
// SPDX-License-Identifier: GPL-2.0-only /* * hosts.c Copyright (C) 1992 Drew Eckhardt * Copyright (C) 1993, 1994, 1995 Eric Youngdale * Copyright (C) 2002-2003 Christoph Hellwig * * mid to lowlevel SCSI driver interface * Initial versions: Drew Eckhardt * Subsequent revisions: Eric Youngdale * * <[email protected]> * * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli * Added QLOGIC QLA1280 SCSI controller kernel host support. * August 4, 1999 Fred Lewis, Intel DuPont * * Updated to reflect the new initialization scheme for the higher * level of scsi drivers (sd/sr/st) * September 17, 2000 Torben Mathiasen <[email protected]> * * Restructured scsi_host lists and associated functions. * September 04, 2002 Mike Anderson ([email protected]) */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/transport_class.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/idr.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_cmnd.h> #include "scsi_priv.h" #include "scsi_logging.h" static int shost_eh_deadline = -1; module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(eh_deadline, "SCSI EH timeout in seconds (should be between 0 and 2^31-1)"); static DEFINE_IDA(host_index_ida); static void scsi_host_cls_release(struct device *dev) { put_device(&class_to_shost(dev)->shost_gendev); } static struct class shost_class = { .name = "scsi_host", .dev_release = scsi_host_cls_release, .dev_groups = scsi_shost_groups, }; /** * scsi_host_set_state - Take the given host through the host state model. * @shost: scsi host to change the state of. * @state: state to change to. * * Returns zero if unsuccessful or an error if the requested * transition is illegal. **/ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state) { enum scsi_host_state oldstate = shost->shost_state; if (state == oldstate) return 0; switch (state) { case SHOST_CREATED: /* There are no legal states that come back to * created. 
This is the manually initialised start * state */ goto illegal; case SHOST_RUNNING: switch (oldstate) { case SHOST_CREATED: case SHOST_RECOVERY: break; default: goto illegal; } break; case SHOST_RECOVERY: switch (oldstate) { case SHOST_RUNNING: break; default: goto illegal; } break; case SHOST_CANCEL: switch (oldstate) { case SHOST_CREATED: case SHOST_RUNNING: case SHOST_CANCEL_RECOVERY: break; default: goto illegal; } break; case SHOST_DEL: switch (oldstate) { case SHOST_CANCEL: case SHOST_DEL_RECOVERY: break; default: goto illegal; } break; case SHOST_CANCEL_RECOVERY: switch (oldstate) { case SHOST_CANCEL: case SHOST_RECOVERY: break; default: goto illegal; } break; case SHOST_DEL_RECOVERY: switch (oldstate) { case SHOST_CANCEL_RECOVERY: break; default: goto illegal; } break; } shost->shost_state = state; return 0; illegal: SCSI_LOG_ERROR_RECOVERY(1, shost_printk(KERN_ERR, shost, "Illegal host state transition" "%s->%s\n", scsi_host_state_name(oldstate), scsi_host_state_name(state))); return -EINVAL; } /** * scsi_remove_host - remove a scsi host * @shost: a pointer to a scsi host to remove **/ void scsi_remove_host(struct Scsi_Host *shost) { unsigned long flags; mutex_lock(&shost->scan_mutex); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_CANCEL)) if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) { spin_unlock_irqrestore(shost->host_lock, flags); mutex_unlock(&shost->scan_mutex); return; } spin_unlock_irqrestore(shost->host_lock, flags); scsi_autopm_get_host(shost); flush_workqueue(shost->tmf_work_q); scsi_forget_host(shost); mutex_unlock(&shost->scan_mutex); scsi_proc_host_rm(shost); scsi_proc_hostdir_rm(shost->hostt); /* * New SCSI devices cannot be attached anymore because of the SCSI host * state so drop the tag set refcnt. Wait until the tag set refcnt drops * to zero because .exit_cmd_priv implementations may need the host * pointer. */ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags); wait_for_completion(&shost->tagset_freed); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_DEL)) BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY)); spin_unlock_irqrestore(shost->host_lock, flags); transport_unregister_device(&shost->shost_gendev); device_unregister(&shost->shost_dev); device_del(&shost->shost_gendev); } EXPORT_SYMBOL(scsi_remove_host); /** * scsi_add_host_with_dma - add a scsi host with dma device * @shost: scsi host pointer to add * @dev: a struct device of type scsi class * @dma_dev: dma device for the host * * Note: You rarely need to worry about this unless you're in a * virtualised host environments, so use the simpler scsi_add_host() * function instead. * * Return value: * 0 on success / != 0 for error **/ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, struct device *dma_dev) { const struct scsi_host_template *sht = shost->hostt; int error = -EINVAL; shost_printk(KERN_INFO, shost, "%s\n", sht->info ? sht->info(shost) : sht->name); if (!shost->can_queue) { shost_printk(KERN_ERR, shost, "can_queue = 0 no longer supported\n"); goto fail; } /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */ shost->cmd_per_lun = min_t(int, shost->cmd_per_lun, shost->can_queue); error = scsi_init_sense_cache(shost); if (error) goto fail; if (!shost->shost_gendev.parent) shost->shost_gendev.parent = dev ? 
dev : &platform_bus; if (!dma_dev) dma_dev = shost->shost_gendev.parent; shost->dma_dev = dma_dev; if (dma_dev->dma_mask) { shost->max_sectors = min_t(unsigned int, shost->max_sectors, dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT); } error = scsi_mq_setup_tags(shost); if (error) goto fail; kref_init(&shost->tagset_refcnt); init_completion(&shost->tagset_freed); /* * Increase usage count temporarily here so that calling * scsi_autopm_put_host() will trigger runtime idle if there is * nothing else preventing suspending the device. */ pm_runtime_get_noresume(&shost->shost_gendev); pm_runtime_set_active(&shost->shost_gendev); pm_runtime_enable(&shost->shost_gendev); device_enable_async_suspend(&shost->shost_gendev); error = device_add(&shost->shost_gendev); if (error) goto out_disable_runtime_pm; scsi_host_set_state(shost, SHOST_RUNNING); get_device(shost->shost_gendev.parent); device_enable_async_suspend(&shost->shost_dev); get_device(&shost->shost_gendev); error = device_add(&shost->shost_dev); if (error) goto out_del_gendev; if (shost->transportt->host_size) { shost->shost_data = kzalloc(shost->transportt->host_size, GFP_KERNEL); if (shost->shost_data == NULL) { error = -ENOMEM; goto out_del_dev; } } if (shost->transportt->create_work_queue) { snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", shost->host_no); shost->work_q = alloc_workqueue("%s", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, shost->work_q_name); if (!shost->work_q) { error = -EINVAL; goto out_del_dev; } } error = scsi_sysfs_add_host(shost); if (error) goto out_del_dev; scsi_proc_host_add(shost); scsi_autopm_put_host(shost); return error; /* * Any host allocation in this function will be freed in * scsi_host_dev_release(). */ out_del_dev: device_del(&shost->shost_dev); out_del_gendev: /* * Host state is SHOST_RUNNING so we have to explicitly release * ->shost_dev. */ put_device(&shost->shost_dev); device_del(&shost->shost_gendev); out_disable_runtime_pm: device_disable_async_suspend(&shost->shost_gendev); pm_runtime_disable(&shost->shost_gendev); pm_runtime_set_suspended(&shost->shost_gendev); pm_runtime_put_noidle(&shost->shost_gendev); kref_put(&shost->tagset_refcnt, scsi_mq_free_tags); fail: return error; } EXPORT_SYMBOL(scsi_add_host_with_dma); static void scsi_host_dev_release(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */ rcu_barrier(); if (shost->tmf_work_q) destroy_workqueue(shost->tmf_work_q); if (shost->ehandler) kthread_stop(shost->ehandler); if (shost->work_q) destroy_workqueue(shost->work_q); if (shost->shost_state == SHOST_CREATED) { /* * Free the shost_dev device name here if scsi_host_alloc() * and scsi_host_put() have been called but neither * scsi_host_add() nor scsi_remove_host() has been called. * This avoids that the memory allocated for the shost_dev * name is leaked. */ kfree(dev_name(&shost->shost_dev)); } kfree(shost->shost_data); ida_free(&host_index_ida, shost->host_no); if (shost->shost_state != SHOST_CREATED) put_device(parent); kfree(shost); } static struct device_type scsi_host_type = { .name = "scsi_host", .release = scsi_host_dev_release, }; /** * scsi_host_alloc - register a scsi host adapter instance. * @sht: pointer to scsi host template * @privsize: extra bytes to allocate for driver * * Note: * Allocate a new Scsi_Host and perform basic initialization. * The host is not published to the scsi midlayer until scsi_add_host * is called. 
* * Return value: * Pointer to a new Scsi_Host **/ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int privsize) { struct Scsi_Host *shost; int index; shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL); if (!shost) return NULL; shost->host_lock = &shost->default_lock; spin_lock_init(shost->host_lock); shost->shost_state = SHOST_CREATED; INIT_LIST_HEAD(&shost->__devices); INIT_LIST_HEAD(&shost->__targets); INIT_LIST_HEAD(&shost->eh_abort_list); INIT_LIST_HEAD(&shost->eh_cmd_q); INIT_LIST_HEAD(&shost->starved_list); init_waitqueue_head(&shost->host_wait); mutex_init(&shost->scan_mutex); index = ida_alloc(&host_index_ida, GFP_KERNEL); if (index < 0) { kfree(shost); return NULL; } shost->host_no = index; shost->dma_channel = 0xff; /* These three are default values which can be overridden */ shost->max_channel = 0; shost->max_id = 8; shost->max_lun = 8; /* Give each shost a default transportt */ shost->transportt = &blank_transport_template; /* * All drivers right now should be able to handle 12 byte * commands. Every so often there are requests for 16 byte * commands, but individual low-level drivers need to certify that * they actually do something sensible with such commands. */ shost->max_cmd_len = 12; shost->hostt = sht; shost->this_id = sht->this_id; shost->can_queue = sht->can_queue; shost->sg_tablesize = sht->sg_tablesize; shost->sg_prot_tablesize = sht->sg_prot_tablesize; shost->cmd_per_lun = sht->cmd_per_lun; shost->no_write_same = sht->no_write_same; shost->host_tagset = sht->host_tagset; shost->queuecommand_may_block = sht->queuecommand_may_block; if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) shost->eh_deadline = -1; else if ((ulong) shost_eh_deadline * HZ > INT_MAX) { shost_printk(KERN_WARNING, shost, "eh_deadline %u too large, setting to %u\n", shost_eh_deadline, INT_MAX / HZ); shost->eh_deadline = INT_MAX; } else shost->eh_deadline = shost_eh_deadline * HZ; if (sht->supported_mode == MODE_UNKNOWN) /* means we didn't set it ... default to INITIATOR */ shost->active_mode = MODE_INITIATOR; else shost->active_mode = sht->supported_mode; if (sht->max_host_blocked) shost->max_host_blocked = sht->max_host_blocked; else shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED; /* * If the driver imposes no hard sector transfer limit, start at * machine infinity initially. 
*/ if (sht->max_sectors) shost->max_sectors = sht->max_sectors; else shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS; if (sht->max_segment_size) shost->max_segment_size = sht->max_segment_size; else shost->max_segment_size = BLK_MAX_SEGMENT_SIZE; /* * assume a 4GB boundary, if not set */ if (sht->dma_boundary) shost->dma_boundary = sht->dma_boundary; else shost->dma_boundary = 0xffffffff; if (sht->virt_boundary_mask) shost->virt_boundary_mask = sht->virt_boundary_mask; device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); shost->shost_gendev.bus = &scsi_bus_type; shost->shost_gendev.type = &scsi_host_type; scsi_enable_async_suspend(&shost->shost_gendev); device_initialize(&shost->shost_dev); shost->shost_dev.parent = &shost->shost_gendev; shost->shost_dev.class = &shost_class; dev_set_name(&shost->shost_dev, "host%d", shost->host_no); shost->shost_dev.groups = sht->shost_groups; shost->ehandler = kthread_run(scsi_error_handler, shost, "scsi_eh_%d", shost->host_no); if (IS_ERR(shost->ehandler)) { shost_printk(KERN_WARNING, shost, "error handler thread failed to spawn, error = %ld\n", PTR_ERR(shost->ehandler)); shost->ehandler = NULL; goto fail; } shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 1, shost->host_no); if (!shost->tmf_work_q) { shost_printk(KERN_WARNING, shost, "failed to create tmf workq\n"); goto fail; } if (scsi_proc_hostdir_add(shost->hostt) < 0) goto fail; return shost; fail: /* * Host state is still SHOST_CREATED and that is enough to release * ->shost_gendev. scsi_host_dev_release() will free * dev_name(&shost->shost_dev). */ put_device(&shost->shost_gendev); return NULL; } EXPORT_SYMBOL(scsi_host_alloc); static int __scsi_host_match(struct device *dev, const void *data) { struct Scsi_Host *p; const unsigned int *hostnum = data; p = class_to_shost(dev); return p->host_no == *hostnum; } /** * scsi_host_lookup - get a reference to a Scsi_Host by host no * @hostnum: host number to locate * * Return value: * A pointer to located Scsi_Host or NULL. * * The caller must do a scsi_host_put() to drop the reference * that scsi_host_get() took. The put_device() below dropped * the reference from class_find_device(). **/ struct Scsi_Host *scsi_host_lookup(unsigned int hostnum) { struct device *cdev; struct Scsi_Host *shost = NULL; cdev = class_find_device(&shost_class, NULL, &hostnum, __scsi_host_match); if (cdev) { shost = scsi_host_get(class_to_shost(cdev)); put_device(cdev); } return shost; } EXPORT_SYMBOL(scsi_host_lookup); /** * scsi_host_get - inc a Scsi_Host ref count * @shost: Pointer to Scsi_Host to inc. **/ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) { if ((shost->shost_state == SHOST_DEL) || !get_device(&shost->shost_gendev)) return NULL; return shost; } EXPORT_SYMBOL(scsi_host_get); static bool scsi_host_check_in_flight(struct request *rq, void *data) { int *count = data; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) (*count)++; return true; } /** * scsi_host_busy - Return the host busy counter * @shost: Pointer to Scsi_Host to inc. **/ int scsi_host_busy(struct Scsi_Host *shost) { int cnt = 0; blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, &cnt); return cnt; } EXPORT_SYMBOL(scsi_host_busy); /** * scsi_host_put - dec a Scsi_Host ref count * @shost: Pointer to Scsi_Host to dec. 
**/ void scsi_host_put(struct Scsi_Host *shost) { put_device(&shost->shost_gendev); } EXPORT_SYMBOL(scsi_host_put); int scsi_init_hosts(void) { return class_register(&shost_class); } void scsi_exit_hosts(void) { class_unregister(&shost_class); ida_destroy(&host_index_ida); } int scsi_is_host_device(const struct device *dev) { return dev->type == &scsi_host_type; } EXPORT_SYMBOL(scsi_is_host_device); /** * scsi_queue_work - Queue work to the Scsi_Host workqueue. * @shost: Pointer to Scsi_Host. * @work: Work to queue for execution. * * Return value: * 1 - work queued for execution * 0 - work is already queued * -EINVAL - work queue doesn't exist **/ int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) { if (unlikely(!shost->work_q)) { shost_printk(KERN_ERR, shost, "ERROR: Scsi host '%s' attempted to queue scsi-work, " "when no workqueue created.\n", shost->hostt->name); dump_stack(); return -EINVAL; } return queue_work(shost->work_q, work); } EXPORT_SYMBOL_GPL(scsi_queue_work); /** * scsi_flush_work - Flush a Scsi_Host's workqueue. * @shost: Pointer to Scsi_Host. **/ void scsi_flush_work(struct Scsi_Host *shost) { if (!shost->work_q) { shost_printk(KERN_ERR, shost, "ERROR: Scsi host '%s' attempted to flush scsi-work, " "when no workqueue created.\n", shost->hostt->name); dump_stack(); return; } flush_workqueue(shost->work_q); } EXPORT_SYMBOL_GPL(scsi_flush_work); static bool complete_all_cmds_iter(struct request *rq, void *data) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); enum scsi_host_status status = *(enum scsi_host_status *)data; scsi_dma_unmap(scmd); scmd->result = 0; set_host_byte(scmd, status); scsi_done(scmd); return true; } /** * scsi_host_complete_all_commands - Terminate all running commands * @shost: Scsi Host on which commands should be terminated * @status: Status to be set for the terminated commands * * There is no protection against modification of the number * of outstanding commands. It is the responsibility of the * caller to ensure that concurrent I/O submission and/or * completion is stopped when calling this function. */ void scsi_host_complete_all_commands(struct Scsi_Host *shost, enum scsi_host_status status) { blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter, &status); } EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands); struct scsi_host_busy_iter_data { bool (*fn)(struct scsi_cmnd *, void *); void *priv; }; static bool __scsi_host_busy_iter_fn(struct request *req, void *priv) { struct scsi_host_busy_iter_data *iter_data = priv; struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req); return iter_data->fn(sc, iter_data->priv); } /** * scsi_host_busy_iter - Iterate over all busy commands * @shost: Pointer to Scsi_Host. * @fn: Function to call on each busy command * @priv: Data pointer passed to @fn * * If locking against concurrent command completions is required * ithas to be provided by the caller **/ void scsi_host_busy_iter(struct Scsi_Host *shost, bool (*fn)(struct scsi_cmnd *, void *), void *priv) { struct scsi_host_busy_iter_data iter_data = { .fn = fn, .priv = priv, }; blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn, &iter_data); } EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
linux-master
drivers/scsi/hosts.c
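scsi_host_set_state() above encodes the Scsi_Host state model as a switch over the requested state, accepting only a fixed set of previous states for each target. A condensed, standalone C sketch of that legality check follows; it is illustrative only, the enum values are placeholders, and it drops the locking and error logging the real function performs.

/*
 * Illustrative userspace sketch (not kernel code) of the transition
 * check in scsi_host_set_state(): each target state only accepts a
 * fixed set of previous states, everything else is rejected. The enum
 * values here are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

enum shost_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

static bool transition_ok(enum shost_state old, enum shost_state new)
{
	switch (new) {
	case SHOST_CREATED:
		return false;	/* nothing may go back to CREATED */
	case SHOST_RUNNING:
		return old == SHOST_CREATED || old == SHOST_RECOVERY;
	case SHOST_RECOVERY:
		return old == SHOST_RUNNING;
	case SHOST_CANCEL:
		return old == SHOST_CREATED || old == SHOST_RUNNING ||
		       old == SHOST_CANCEL_RECOVERY;
	case SHOST_DEL:
		return old == SHOST_CANCEL || old == SHOST_DEL_RECOVERY;
	case SHOST_CANCEL_RECOVERY:
		return old == SHOST_CANCEL || old == SHOST_RECOVERY;
	case SHOST_DEL_RECOVERY:
		return old == SHOST_CANCEL_RECOVERY;
	}
	return false;
}

int main(void)
{
	printf("CREATED -> RUNNING: %s\n",
	       transition_ok(SHOST_CREATED, SHOST_RUNNING) ? "ok" : "illegal");
	printf("RUNNING -> DEL:     %s\n",
	       transition_ok(SHOST_RUNNING, SHOST_DEL) ? "ok" : "illegal");
	return 0;
}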
// SPDX-License-Identifier: GPL-2.0-or-later /* * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters * * Written By: Anil Ravindranath<[email protected]> * PMC-Sierra Inc * * Copyright (C) 2008, 2009 PMC Sierra Inc */ #include <linux/fs.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/hdreg.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/irq.h> #include <asm/processor.h> #include <linux/libata.h> #include <linux/mutex.h> #include <linux/ktime.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsicam.h> #include "pmcraid.h" /* * Module configuration parameters */ static unsigned int pmcraid_debug_log; static unsigned int pmcraid_disable_aen; static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST; static unsigned int pmcraid_enable_msix; /* * Data structures to support multiple adapters by the LLD. * pmcraid_adapter_count - count of configured adapters */ static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0); /* * Supporting user-level control interface through IOCTL commands. * pmcraid_major - major number to use * pmcraid_minor - minor number(s) to use */ static unsigned int pmcraid_major; static struct class *pmcraid_class; static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS); /* * Module parameters */ MODULE_AUTHOR("Anil Ravindranath<[email protected]>"); MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(PMCRAID_DRIVER_VERSION); module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(log_level, "Enables firmware error code logging, default :1 high-severity" " errors, 2: all errors including high-severity errors," " 0: disables logging"); module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(debug, "Enable driver verbose message logging. Set 1 to enable." "(default: 0)"); module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(disable_aen, "Disable driver aen notifications to apps. Set 1 to disable." "(default: 0)"); /* chip specific constants for PMC MaxRAID controllers (same for * 0x5220 and 0x8010 */ static struct pmcraid_chip_details pmcraid_chip_cfg[] = { { .ioastatus = 0x0, .ioarrin = 0x00040, .mailbox = 0x7FC30, .global_intr_mask = 0x00034, .ioa_host_intr = 0x0009C, .ioa_host_intr_clr = 0x000A0, .ioa_host_msix_intr = 0x7FC40, .ioa_host_mask = 0x7FC28, .ioa_host_mask_clr = 0x7FC28, .host_ioa_intr = 0x00020, .host_ioa_intr_clr = 0x00020, .transop_timeout = 300 } }; /* * PCI device ids supported by pmcraid driver */ static struct pci_device_id pmcraid_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID), 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0] }, {} }; MODULE_DEVICE_TABLE(pci, pmcraid_pci_table); /** * pmcraid_slave_alloc - Prepare for commands to a device * @scsi_dev: scsi device struct * * This function is called by mid-layer prior to sending any command to the new * device. Stores resource entry details of the device in scsi_device struct. 
* Queuecommand uses the resource handle and other details to fill up IOARCB * while sending commands to the device. * * Return value: * 0 on success / -ENXIO if device does not exist */ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) { struct pmcraid_resource_entry *temp, *res = NULL; struct pmcraid_instance *pinstance; u8 target, bus, lun; unsigned long lock_flags; int rc = -ENXIO; u16 fw_version; pinstance = shost_priv(scsi_dev->host); fw_version = be16_to_cpu(pinstance->inq_data->fw_version); /* Driver exposes VSET and GSCSI resources only; all other device types * are not exposed. Resource list is synchronized using resource lock * so any traversal or modifications to the list should be done inside * this lock */ spin_lock_irqsave(&pinstance->resource_lock, lock_flags); list_for_each_entry(temp, &pinstance->used_res_q, queue) { /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */ if (RES_IS_VSET(temp->cfg_entry)) { if (fw_version <= PMCRAID_FW_VERSION_1) target = temp->cfg_entry.unique_flags1; else target = le16_to_cpu(temp->cfg_entry.array_id) & 0xFF; if (target > PMCRAID_MAX_VSET_TARGETS) continue; bus = PMCRAID_VSET_BUS_ID; lun = 0; } else if (RES_IS_GSCSI(temp->cfg_entry)) { target = RES_TARGET(temp->cfg_entry.resource_address); bus = PMCRAID_PHYS_BUS_ID; lun = RES_LUN(temp->cfg_entry.resource_address); } else { continue; } if (bus == scsi_dev->channel && target == scsi_dev->id && lun == scsi_dev->lun) { res = temp; break; } } if (res) { res->scsi_dev = scsi_dev; scsi_dev->hostdata = res; res->change_detected = 0; atomic_set(&res->read_failures, 0); atomic_set(&res->write_failures, 0); rc = 0; } spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); return rc; } /** * pmcraid_slave_configure - Configures a SCSI device * @scsi_dev: scsi device struct * * This function is executed by SCSI mid layer just after a device is first * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the * timeout value (default 30s) will be over-written to a higher value (60s) * and max_sectors value will be over-written to 512. It also sets queue depth * to host->cmd_per_lun value * * Return value: * 0 on success */ static int pmcraid_slave_configure(struct scsi_device *scsi_dev) { struct pmcraid_resource_entry *res = scsi_dev->hostdata; if (!res) return 0; /* LLD exposes VSETs and Enclosure devices only */ if (RES_IS_GSCSI(res->cfg_entry) && scsi_dev->type != TYPE_ENCLOSURE) return -ENXIO; pmcraid_info("configuring %x:%x:%x:%x\n", scsi_dev->host->unique_id, scsi_dev->channel, scsi_dev->id, (u8)scsi_dev->lun); if (RES_IS_GSCSI(res->cfg_entry)) { scsi_dev->allow_restart = 1; } else if (RES_IS_VSET(res->cfg_entry)) { scsi_dev->allow_restart = 1; blk_queue_rq_timeout(scsi_dev->request_queue, PMCRAID_VSET_IO_TIMEOUT); blk_queue_max_hw_sectors(scsi_dev->request_queue, PMCRAID_VSET_MAX_SECTORS); } /* * We never want to report TCQ support for these types of devices. */ if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry)) scsi_dev->tagged_supported = 0; return 0; } /** * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it * * @scsi_dev: scsi device struct * * This is called by mid-layer before removing a device. Pointer assignments * done in pmcraid_slave_alloc will be reset to NULL here. 
* * Return value * none */ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) { struct pmcraid_resource_entry *res; res = (struct pmcraid_resource_entry *)scsi_dev->hostdata; if (res) res->scsi_dev = NULL; scsi_dev->hostdata = NULL; } /** * pmcraid_change_queue_depth - Change the device's queue depth * @scsi_dev: scsi device struct * @depth: depth to set * * Return value * actual depth set */ static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth) { if (depth > PMCRAID_MAX_CMD_PER_LUN) depth = PMCRAID_MAX_CMD_PER_LUN; return scsi_change_queue_depth(scsi_dev, depth); } /** * pmcraid_init_cmdblk - initializes a command block * * @cmd: pointer to struct pmcraid_cmd to be initialized * @index: if >=0 first time initialization; otherwise reinitialization * * Return Value * None */ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) { struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); dma_addr_t dma_addr = cmd->ioa_cb_bus_addr; if (index >= 0) { /* first time initialization (called from probe) */ u32 ioasa_offset = offsetof(struct pmcraid_control_block, ioasa); cmd->index = index; ioarcb->response_handle = cpu_to_le32(index << 2); ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr); ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset); ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa)); } else { /* re-initialization of various lengths, called once command is * processed by IOA */ memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN); ioarcb->hrrq_id = 0; ioarcb->request_flags0 = 0; ioarcb->request_flags1 = 0; ioarcb->cmd_timeout = 0; ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL); ioarcb->ioadl_bus_addr = 0; ioarcb->ioadl_length = 0; ioarcb->data_transfer_length = 0; ioarcb->add_cmd_param_length = 0; ioarcb->add_cmd_param_offset = 0; cmd->ioa_cb->ioasa.ioasc = 0; cmd->ioa_cb->ioasa.residual_data_length = 0; cmd->time_left = 0; } cmd->cmd_done = NULL; cmd->scsi_cmd = NULL; cmd->release = 0; cmd->completion_req = 0; cmd->sense_buffer = NULL; cmd->sense_buffer_dma = 0; cmd->dma_handle = 0; timer_setup(&cmd->timer, NULL, 0); } /** * pmcraid_reinit_cmdblk - reinitialize a command block * * @cmd: pointer to struct pmcraid_cmd to be reinitialized * * Return Value * None */ static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd) { pmcraid_init_cmdblk(cmd, -1); } /** * pmcraid_get_free_cmd - get a free cmd block from command block pool * @pinstance: adapter instance structure * * Return Value: * returns pointer to cmd block or NULL if no blocks are available */ static struct pmcraid_cmd *pmcraid_get_free_cmd( struct pmcraid_instance *pinstance ) { struct pmcraid_cmd *cmd = NULL; unsigned long lock_flags; /* free cmd block list is protected by free_pool_lock */ spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags); if (!list_empty(&pinstance->free_cmd_pool)) { cmd = list_entry(pinstance->free_cmd_pool.next, struct pmcraid_cmd, free_list); list_del(&cmd->free_list); } spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags); /* Initialize the command block before giving it the caller */ if (cmd != NULL) pmcraid_reinit_cmdblk(cmd); return cmd; } /** * pmcraid_return_cmd - return a completed command block back into free pool * @cmd: pointer to the command block * * Return Value: * nothing */ static void pmcraid_return_cmd(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags); list_add_tail(&cmd->free_list, 
&pinstance->free_cmd_pool); spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags); } /** * pmcraid_read_interrupts - reads IOA interrupts * * @pinstance: pointer to adapter instance structure * * Return value * interrupts read from IOA */ static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance) { return (pinstance->interrupt_mode) ? ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) : ioread32(pinstance->int_regs.ioa_host_interrupt_reg); } /** * pmcraid_disable_interrupts - Masks and clears all specified interrupts * * @pinstance: pointer to per adapter instance structure * @intrs: interrupts to disable * * Return Value * None */ static void pmcraid_disable_interrupts( struct pmcraid_instance *pinstance, u32 intrs ) { u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); u32 nmask = gmask | GLOBAL_INTERRUPT_MASK; iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg); iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); ioread32(pinstance->int_regs.global_interrupt_mask_reg); if (!pinstance->interrupt_mode) { iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg); ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); } } /** * pmcraid_enable_interrupts - Enables specified interrupts * * @pinstance: pointer to per adapter instance structure * @intrs: interrupts to enable * * Return Value * None */ static void pmcraid_enable_interrupts( struct pmcraid_instance *pinstance, u32 intrs) { u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg); u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK); iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg); if (!pinstance->interrupt_mode) { iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg); ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); } pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n", ioread32(pinstance->int_regs.global_interrupt_mask_reg), ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg)); } /** * pmcraid_clr_trans_op - clear trans to op interrupt * * @pinstance: pointer to per adapter instance structure * * Return Value * None */ static void pmcraid_clr_trans_op( struct pmcraid_instance *pinstance ) { unsigned long lock_flags; if (!pinstance->interrupt_mode) { iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, pinstance->int_regs.ioa_host_interrupt_mask_reg); ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, pinstance->int_regs.ioa_host_interrupt_clr_reg); ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg); } if (pinstance->reset_cmd != NULL) { del_timer(&pinstance->reset_cmd->timer); spin_lock_irqsave( pinstance->host->host_lock, lock_flags); pinstance->reset_cmd->cmd_done(pinstance->reset_cmd); spin_unlock_irqrestore( pinstance->host->host_lock, lock_flags); } } /** * pmcraid_reset_type - Determine the required reset type * @pinstance: pointer to adapter instance structure * * IOA requires hard reset if any of the following conditions is true. * 1. If HRRQ valid interrupt is not masked * 2. IOA reset alert doorbell is set * 3. 
If there are any error interrupts */ static void pmcraid_reset_type(struct pmcraid_instance *pinstance) { u32 mask; u32 intrs; u32 alerts; mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg); intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); if ((mask & INTRS_HRRQ_VALID) == 0 || (alerts & DOORBELL_IOA_RESET_ALERT) || (intrs & PMCRAID_ERROR_INTERRUPTS)) { pmcraid_info("IOA requires hard reset\n"); pinstance->ioa_hard_reset = 1; } /* If unit check is active, trigger the dump */ if (intrs & INTRS_IOA_UNIT_CHECK) pinstance->ioa_unit_check = 1; } static void pmcraid_ioa_reset(struct pmcraid_cmd *); /** * pmcraid_bist_done - completion function for PCI BIST * @t: pointer to reset command * Return Value * none */ static void pmcraid_bist_done(struct timer_list *t) { struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; int rc; u16 pci_reg; rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg); /* If PCI config space can't be accessed wait for another two secs */ if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) && cmd->time_left > 0) { pmcraid_info("BIST not complete, waiting another 2 secs\n"); cmd->timer.expires = jiffies + cmd->time_left; cmd->time_left = 0; add_timer(&cmd->timer); } else { cmd->time_left = 0; pmcraid_info("BIST is complete, proceeding with reset\n"); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pmcraid_ioa_reset(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); } } /** * pmcraid_start_bist - starts BIST * @cmd: pointer to reset cmd * Return Value * none */ static void pmcraid_start_bist(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; u32 doorbells, intrs; /* proceed with bist and wait for 2 seconds */ iowrite32(DOORBELL_IOA_START_BIST, pinstance->int_regs.host_ioa_interrupt_reg); doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg); intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); pmcraid_info("doorbells after start bist: %x intrs: %x\n", doorbells, intrs); cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); cmd->timer.function = pmcraid_bist_done; add_timer(&cmd->timer); } /** * pmcraid_reset_alert_done - completion routine for reset_alert * @t: pointer to command block used in reset sequence * Return value * None */ static void pmcraid_reset_alert_done(struct timer_list *t) { struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); struct pmcraid_instance *pinstance = cmd->drv_inst; u32 status = ioread32(pinstance->ioa_status); unsigned long lock_flags; /* if the critical operation in progress bit is set or the wait times * out, invoke reset engine to proceed with hard reset. 
If there is
	 * some more time to wait, restart the timer
	 */
	if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
	    cmd->time_left <= 0) {
		pmcraid_info("critical op is reset proceeding with reset\n");
		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
		pmcraid_ioa_reset(cmd);
		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
	} else {
		pmcraid_info("critical op is not yet reset waiting again\n");
		/* restart timer if some more time is available to wait */
		cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
		cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
		cmd->timer.function = pmcraid_reset_alert_done;
		add_timer(&cmd->timer);
	}
}

static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);

/**
 * pmcraid_reset_alert - alerts IOA for a possible reset
 * @cmd: command block to be used for reset sequence.
 *
 * Return Value
 *	None. If PCI config space is accessible, the reset alert doorbell is
 *	written to the IOA; otherwise BIST is started instead.
 */
static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
{
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	u32 doorbells;
	int rc;
	u16 pci_reg;

	/* If we are able to access IOA PCI config space, alert IOA that we are
	 * going to reset it soon. This enables IOA to preserve persistent error
	 * data if any. In case memory space is not accessible, proceed with
	 * BIST or slot_reset
	 */
	rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
	if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {

		/* wait for IOA permission i.e. until CRITICAL_OPERATION bit is
		 * reset. IOA doesn't generate any interrupts when the CRITICAL
		 * OPERATION bit is reset, so a timer is started to wait for
		 * this bit to be reset.
		 */
		cmd->time_left = PMCRAID_RESET_TIMEOUT;
		cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
		cmd->timer.function = pmcraid_reset_alert_done;
		add_timer(&cmd->timer);

		iowrite32(DOORBELL_IOA_RESET_ALERT,
			pinstance->int_regs.host_ioa_interrupt_reg);
		doorbells =
			ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
		pmcraid_info("doorbells after reset alert: %x\n", doorbells);
	} else {
		pmcraid_info("PCI config is not accessible starting BIST\n");
		pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
		pmcraid_start_bist(cmd);
	}
}

/**
 * pmcraid_timeout_handler -  Timeout handler for internally generated ops
 *
 * @t: pointer to the command structure that timed out
 *
 * This function blocks host requests and initiates an adapter reset.
 *
 * Return value:
 *  None
 */
static void pmcraid_timeout_handler(struct timer_list *t)
{
	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	unsigned long lock_flags;

	dev_info(&pinstance->pdev->dev,
		"Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
		cmd->ioa_cb->ioarcb.cdb[0]);

	/* Command timeouts result in hard reset sequence. The command that got
	 * timed out may be the one used as part of reset sequence. In this
	 * case restart reset sequence using the same command block even if
	 * reset is in progress. Otherwise fail this command and get a free
	 * command block to restart the reset sequence.
	 */
	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
	if (!pinstance->ioa_reset_in_progress) {
		pinstance->ioa_reset_attempts = 0;
		cmd = pmcraid_get_free_cmd(pinstance);

		/* If we are out of command blocks, just return here itself.
* Some other command's timeout handler can do the reset job */ if (cmd == NULL) { spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); pmcraid_err("no free cmnd block for timeout handler\n"); return; } pinstance->reset_cmd = cmd; pinstance->ioa_reset_in_progress = 1; } else { pmcraid_info("reset is already in progress\n"); if (pinstance->reset_cmd != cmd) { /* This command should have been given to IOA, this * command will be completed by fail_outstanding_cmds * anyway */ pmcraid_err("cmd is pending but reset in progress\n"); } /* If this command was being used as part of the reset * sequence, set cmd_done pointer to pmcraid_ioa_reset. This * causes fail_outstanding_commands not to return the command * block back to free pool */ if (cmd == pinstance->reset_cmd) cmd->cmd_done = pmcraid_ioa_reset; } /* Notify apps of important IOA bringup/bringdown sequences */ if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START && pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START) pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START); pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; scsi_block_requests(pinstance->host); pmcraid_reset_alert(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); } /** * pmcraid_internal_done - completion routine for internally generated cmds * * @cmd: command that got response from IOA * * Return Value: * none */ static void pmcraid_internal_done(struct pmcraid_cmd *cmd) { pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); /* Some of the internal commands are sent with callers blocking for the * response. Same will be indicated as part of cmd->completion_req * field. Response path needs to wake up any waiters waiting for cmd * completion if this flag is set. */ if (cmd->completion_req) { cmd->completion_req = 0; complete(&cmd->wait_for_completion); } /* most of the internal commands are completed by caller itself, so * no need to return the command block back to free pool until we are * required to do so (e.g once done with initialization). */ if (cmd->release) { cmd->release = 0; pmcraid_return_cmd(cmd); } } /** * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization * * @cmd: command that got response from IOA * * This routine is called after driver re-reads configuration table due to a * lost CCN. It returns the command block back to free pool and schedules * worker thread to add/delete devices into the system. * * Return Value: * none */ static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd) { pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); if (cmd->release) { cmd->release = 0; pmcraid_return_cmd(cmd); } pmcraid_info("scheduling worker for config table reinitialization\n"); schedule_work(&cmd->drv_inst->worker_q); } /** * pmcraid_erp_done - Process completion of SCSI error response from device * @cmd: pmcraid_command * * This function copies the sense buffer into the scsi_cmd struct and completes * scsi_cmd by calling scsi_done function. 
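 * (The sense bytes themselves are DMA'd straight into scsi_cmd->sense_buffer,
 * which pmcraid_request_sense() maps further below, so this routine only
 * needs to unmap that buffer before completing the command.)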
* * Return value: * none */ static void pmcraid_erp_done(struct pmcraid_cmd *cmd) { struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; struct pmcraid_instance *pinstance = cmd->drv_inst; u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) { scsi_cmd->result |= (DID_ERROR << 16); scmd_printk(KERN_INFO, scsi_cmd, "command CDB[0] = %x failed with IOASC: 0x%08X\n", cmd->ioa_cb->ioarcb.cdb[0], ioasc); } if (cmd->sense_buffer) { dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); cmd->sense_buffer = NULL; cmd->sense_buffer_dma = 0; } scsi_dma_unmap(scsi_cmd); pmcraid_return_cmd(cmd); scsi_done(scsi_cmd); } /** * _pmcraid_fire_command - sends an IOA command to adapter * * This function adds the given block into pending command list * and returns without waiting * * @cmd : command to be sent to the device * * Return Value * None */ static void _pmcraid_fire_command(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; /* Add this command block to pending cmd pool. We do this prior to * writting IOARCB to ioarrin because IOA might complete the command * by the time we are about to add it to the list. Response handler * (isr/tasklet) looks for cmd block in the pending pending list. */ spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool); spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); atomic_inc(&pinstance->outstanding_cmds); /* driver writes lower 32-bit value of IOARCB address only */ mb(); iowrite32(le64_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), pinstance->ioarrin); } /** * pmcraid_send_cmd - fires a command to IOA * * This function also sets up timeout function, and command completion * function * * @cmd: pointer to the command block to be fired to IOA * @cmd_done: command completion function, called once IOA responds * @timeout: timeout to wait for this command completion * @timeout_func: timeout handler * * Return value * none */ static void pmcraid_send_cmd( struct pmcraid_cmd *cmd, void (*cmd_done) (struct pmcraid_cmd *), unsigned long timeout, void (*timeout_func) (struct timer_list *) ) { /* initialize done function */ cmd->cmd_done = cmd_done; if (timeout_func) { /* setup timeout handler */ cmd->timer.expires = jiffies + timeout; cmd->timer.function = timeout_func; add_timer(&cmd->timer); } /* fire the command to IOA */ _pmcraid_fire_command(cmd); } /** * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command * @cmd: pointer to the command block used for sending IOA shutdown command * * Return value * None */ static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pmcraid_ioa_reset(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); } /** * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa * * @cmd: pointer to the command block used as part of reset sequence * * Return Value * None */ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd) { pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); /* Note that commands sent during reset require next command to be sent * to IOA. 
Hence reinit the done function as well as timeout function */ pmcraid_reinit_cmdblk(cmd); cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD; cmd->ioa_cb->ioarcb.resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN; cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL; /* fire shutdown command to hardware. */ pmcraid_info("firing normal shutdown command (%d) to IOA\n", le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle)); pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START); pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done, PMCRAID_SHUTDOWN_TIMEOUT, pmcraid_timeout_handler); } static void pmcraid_querycfg(struct pmcraid_cmd *); /** * pmcraid_get_fwversion_done - completion function for get_fwversion * * @cmd: pointer to command block used to send INQUIRY command * * Return Value * none */ static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); unsigned long lock_flags; /* configuration table entry size depends on firmware version. If fw * version is not known, it is not possible to interpret IOA config * table */ if (ioasc) { pmcraid_err("IOA Inquiry failed with %x\n", ioasc); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; pmcraid_reset_alert(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); } else { pmcraid_querycfg(cmd); } } /** * pmcraid_get_fwversion - reads firmware version information * * @cmd: pointer to command block used to send INQUIRY command * * Return Value * none */ static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd) { struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; struct pmcraid_ioadl_desc *ioadl; struct pmcraid_instance *pinstance = cmd->drv_inst; u16 data_size = sizeof(struct pmcraid_inquiry_data); pmcraid_reinit_cmdblk(cmd); ioarcb->request_type = REQ_TYPE_SCSI; ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); ioarcb->cdb[0] = INQUIRY; ioarcb->cdb[1] = 1; ioarcb->cdb[2] = 0xD0; ioarcb->cdb[3] = (data_size >> 8) & 0xFF; ioarcb->cdb[4] = data_size & 0xFF; /* Since entire inquiry data it can be part of IOARCB itself */ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->data_transfer_length = cpu_to_le32(data_size); ioadl = &(ioarcb->add_data.u.ioadl[0]); ioadl->flags = IOADL_FLAGS_LAST_DESC; ioadl->address = cpu_to_le64(pinstance->inq_data_baddr); ioadl->data_len = cpu_to_le32(data_size); pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); } /** * pmcraid_identify_hrrq - registers host rrq buffers with IOA * @cmd: pointer to command block to be used for identify hrrq * * Return Value * none */ static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; int index = cmd->hrrq_index; __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]); __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD); void (*done_function)(struct pmcraid_cmd *); pmcraid_reinit_cmdblk(cmd); cmd->hrrq_index = index + 1; if (cmd->hrrq_index < pinstance->num_hrrq) { done_function = pmcraid_identify_hrrq; } else { 
cmd->hrrq_index = 0; done_function = pmcraid_get_fwversion; } /* Initialize ioarcb */ ioarcb->request_type = REQ_TYPE_IOACMD; ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); /* initialize the hrrq number where IOA will respond to this command */ ioarcb->hrrq_id = index; ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ; ioarcb->cdb[1] = index; /* IOA expects 64-bit pci address to be written in B.E format * (i.e cdb[2]=MSByte..cdb[9]=LSB. */ pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n", hrrq_addr, ioarcb->ioarcb_bus_addr, index); memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr)); memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size)); /* Subsequent commands require HRRQ identification to be successful. * Note that this gets called even during reset from SCSI mid-layer * or tasklet */ pmcraid_send_cmd(cmd, done_function, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); } static void pmcraid_process_ccn(struct pmcraid_cmd *cmd); static void pmcraid_process_ldn(struct pmcraid_cmd *cmd); /** * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA * * @cmd: initialized command block pointer * * Return Value * none */ static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd) { if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE) atomic_set(&(cmd->drv_inst->ccn.ignore), 0); else atomic_set(&(cmd->drv_inst->ldn.ignore), 0); pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL); } /** * pmcraid_init_hcam - send an initialized command block(HCAM) to IOA * * @pinstance: pointer to adapter instance structure * @type: HCAM type * * Return Value * pointer to initialized pmcraid_cmd structure or NULL */ static struct pmcraid_cmd *pmcraid_init_hcam ( struct pmcraid_instance *pinstance, u8 type ) { struct pmcraid_cmd *cmd; struct pmcraid_ioarcb *ioarcb; struct pmcraid_ioadl_desc *ioadl; struct pmcraid_hostrcb *hcam; void (*cmd_done) (struct pmcraid_cmd *); dma_addr_t dma; int rcb_size; cmd = pmcraid_get_free_cmd(pinstance); if (!cmd) { pmcraid_err("no free command blocks for hcam\n"); return cmd; } if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) { rcb_size = sizeof(struct pmcraid_hcam_ccn_ext); cmd_done = pmcraid_process_ccn; dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE; hcam = &pinstance->ccn; } else { rcb_size = sizeof(struct pmcraid_hcam_ldn); cmd_done = pmcraid_process_ldn; dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE; hcam = &pinstance->ldn; } /* initialize command pointer used for HCAM registration */ hcam->cmd = cmd; ioarcb = &cmd->ioa_cb->ioarcb; ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); ioadl = ioarcb->add_data.u.ioadl; /* Initialize ioarcb */ ioarcb->request_type = REQ_TYPE_HCAM; ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC; ioarcb->cdb[1] = type; ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF; ioarcb->cdb[8] = (rcb_size) & 0xFF; ioarcb->data_transfer_length = cpu_to_le32(rcb_size); ioadl[0].flags |= IOADL_FLAGS_READ_LAST; ioadl[0].data_len = cpu_to_le32(rcb_size); ioadl[0].address = cpu_to_le64(dma); cmd->cmd_done = cmd_done; return cmd; } /** * pmcraid_send_hcam - Send an HCAM to IOA * @pinstance: ioa config struct * @type: HCAM type * * This function will send a Host Controlled Async command to IOA. 
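 * For example, the bring-up path registers both notification types
 * back-to-back (see pmcraid_register_hcams() below):
 *
 *	pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
 *	pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);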
* * Return value: * none */ static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type) { struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type); pmcraid_send_hcam_cmd(cmd); } /** * pmcraid_prepare_cancel_cmd - prepares a command block to abort another * * @cmd: pointer to cmd that is used as cancelling command * @cmd_to_cancel: pointer to the command that needs to be cancelled */ static void pmcraid_prepare_cancel_cmd( struct pmcraid_cmd *cmd, struct pmcraid_cmd *cmd_to_cancel ) { struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; __be64 ioarcb_addr; /* IOARCB address of the command to be cancelled is given in * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in * IOARCB address are not masked. */ ioarcb_addr = cpu_to_be64(le64_to_cpu(cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr)); /* Get the resource handle to where the command to be aborted has been * sent. */ ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle; ioarcb->request_type = REQ_TYPE_IOACMD; memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); ioarcb->cdb[0] = PMCRAID_ABORT_CMD; memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr)); } /** * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM * * @cmd: command to be used as cancelling command * @type: HCAM type * @cmd_done: op done function for the cancelling command */ static void pmcraid_cancel_hcam( struct pmcraid_cmd *cmd, u8 type, void (*cmd_done) (struct pmcraid_cmd *) ) { struct pmcraid_instance *pinstance; struct pmcraid_hostrcb *hcam; pinstance = cmd->drv_inst; hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ? &pinstance->ldn : &pinstance->ccn; /* prepare for cancelling previous hcam command. If the HCAM is * currently not pending with IOA, we would have hcam->cmd as non-null */ if (hcam->cmd == NULL) return; pmcraid_prepare_cancel_cmd(cmd, hcam->cmd); /* writing to IOARRIN must be protected by host_lock, as mid-layer * schedule queuecommand while we are doing this */ pmcraid_send_cmd(cmd, cmd_done, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); } /** * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA * * @cmd: command block to be used for cancelling the HCAM */ static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd) { pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); pmcraid_reinit_cmdblk(cmd); pmcraid_cancel_hcam(cmd, PMCRAID_HCAM_CODE_CONFIG_CHANGE, pmcraid_ioa_shutdown); } /** * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA * * @cmd: command block to be used for cancelling the HCAM */ static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd) { pmcraid_cancel_hcam(cmd, PMCRAID_HCAM_CODE_LOG_DATA, pmcraid_cancel_ccn); } /** * pmcraid_expose_resource - check if the resource can be exposed to OS * * @fw_version: firmware version code * @cfgte: pointer to configuration table entry of the resource * * Return value: * true if resource can be added to midlayer, false(0) otherwise */ static int pmcraid_expose_resource(u16 fw_version, struct pmcraid_config_table_entry *cfgte) { int retval = 0; if (cfgte->resource_type == RES_TYPE_VSET) { if (fw_version <= PMCRAID_FW_VERSION_1) retval = ((cfgte->unique_flags1 & 0x80) == 0); else retval = ((cfgte->unique_flags0 & 0x80) == 0 && (cfgte->unique_flags1 & 0x80) == 0); } else if (cfgte->resource_type == RES_TYPE_GSCSI) retval = (RES_BUS(cfgte->resource_address) != PMCRAID_VIRTUAL_ENCL_BUS_ID); return retval; } /* attributes supported by 
pmcraid_event_family */ enum { PMCRAID_AEN_ATTR_UNSPEC, PMCRAID_AEN_ATTR_EVENT, __PMCRAID_AEN_ATTR_MAX, }; #define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1) /* commands supported by pmcraid_event_family */ enum { PMCRAID_AEN_CMD_UNSPEC, PMCRAID_AEN_CMD_EVENT, __PMCRAID_AEN_CMD_MAX, }; #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) static struct genl_multicast_group pmcraid_mcgrps[] = { { .name = "events", /* not really used - see ID discussion below */ }, }; static struct genl_family pmcraid_event_family __ro_after_init = { .module = THIS_MODULE, .name = "pmcraid", .version = 1, .maxattr = PMCRAID_AEN_ATTR_MAX, .mcgrps = pmcraid_mcgrps, .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), }; /** * pmcraid_netlink_init - registers pmcraid_event_family * * Return value: * 0 if the pmcraid_event_family is successfully registered * with netlink generic, non-zero otherwise */ static int __init pmcraid_netlink_init(void) { int result; result = genl_register_family(&pmcraid_event_family); if (result) return result; pmcraid_info("registered NETLINK GENERIC group: %d\n", pmcraid_event_family.id); return result; } /** * pmcraid_netlink_release - unregisters pmcraid_event_family * * Return value: * none */ static void pmcraid_netlink_release(void) { genl_unregister_family(&pmcraid_event_family); } /* * pmcraid_notify_aen - sends event msg to user space application * @pinstance: pointer to adapter instance structure * * Return value: * 0 if success, error value in case of any failure. */ static int pmcraid_notify_aen( struct pmcraid_instance *pinstance, struct pmcraid_aen_msg *aen_msg, u32 data_size) { struct sk_buff *skb; void *msg_header; u32 total_size, nla_genl_hdr_total_size; int result; aen_msg->hostno = (pinstance->host->unique_id << 16 | MINOR(pinstance->cdev.dev)); aen_msg->length = data_size; data_size += sizeof(*aen_msg); total_size = nla_total_size(data_size); /* Add GENL_HDR to total_size */ nla_genl_hdr_total_size = (total_size + (GENL_HDRLEN + ((struct genl_family *)&pmcraid_event_family)->hdrsize) + NLMSG_HDRLEN); skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC); if (!skb) { pmcraid_err("Failed to allocate aen data SKB of size: %x\n", total_size); return -ENOMEM; } /* add the genetlink message header */ msg_header = genlmsg_put(skb, 0, 0, &pmcraid_event_family, 0, PMCRAID_AEN_CMD_EVENT); if (!msg_header) { pmcraid_err("failed to copy command details\n"); nlmsg_free(skb); return -ENOMEM; } result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg); if (result) { pmcraid_err("failed to copy AEN attribute data\n"); nlmsg_free(skb); return -EINVAL; } /* send genetlink multicast message to notify applications */ genlmsg_end(skb, msg_header); result = genlmsg_multicast(&pmcraid_event_family, skb, 0, 0, GFP_ATOMIC); /* If there are no listeners, genlmsg_multicast may return non-zero * value. 
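 * (With no listeners the return is typically -ESRCH; it is only logged
 * below and not treated as a hard failure.)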
 */
	if (result)
		pmcraid_info("error (%x) sending aen event message\n", result);
	return result;
}

/**
 * pmcraid_notify_ccn - notifies about CCN event msg to user space
 * @pinstance: pointer to adapter instance structure
 *
 * Return value:
 *	0 if success, error value in case of any failure
 */
static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
{
	return pmcraid_notify_aen(pinstance,
				pinstance->ccn.msg,
				le32_to_cpu(pinstance->ccn.hcam->data_len) +
				sizeof(struct pmcraid_hcam_hdr));
}

/**
 * pmcraid_notify_ldn - notifies about LDN event msg to user space
 * @pinstance: pointer to adapter instance structure
 *
 * Return value:
 *	0 if success, error value in case of any failure
 */
static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
{
	return pmcraid_notify_aen(pinstance,
				pinstance->ldn.msg,
				le32_to_cpu(pinstance->ldn.hcam->data_len) +
				sizeof(struct pmcraid_hcam_hdr));
}

/**
 * pmcraid_notify_ioastate - sends IOA state event msg to user space
 * @pinstance: pointer to adapter instance structure
 * @evt: controller state event to be sent
 *
 * Return value:
 *	none
 */
static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
{
	pinstance->scn.ioa_state = evt;
	pmcraid_notify_aen(pinstance,
			&pinstance->scn.msg,
			sizeof(u32));
}

/**
 * pmcraid_handle_config_change - Handle a config change from the adapter
 * @pinstance: pointer to per adapter instance structure
 *
 * Return value:
 *  none
 */
static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
{
	struct pmcraid_config_table_entry *cfg_entry;
	struct pmcraid_hcam_ccn *ccn_hcam;
	struct pmcraid_cmd *cmd;
	struct pmcraid_cmd *cfgcmd;
	struct pmcraid_resource_entry *res = NULL;
	unsigned long lock_flags;
	unsigned long host_lock_flags;
	u32 new_entry = 1;
	u32 hidden_entry = 0;
	u16 fw_version;
	int rc;

	ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
	cfg_entry = &ccn_hcam->cfg_entry;
	fw_version = be16_to_cpu(pinstance->inq_data->fw_version);

	pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
		 res: %x:%x:%x:%x\n",
		 le32_to_cpu(pinstance->ccn.hcam->ilid),
		 pinstance->ccn.hcam->op_code,
		(le32_to_cpu(pinstance->ccn.hcam->timestamp1) |
		((le32_to_cpu(pinstance->ccn.hcam->timestamp2) & 0xffffffffLL) << 32)),
		 pinstance->ccn.hcam->notification_type,
		 pinstance->ccn.hcam->notification_lost,
		 pinstance->ccn.hcam->flags,
		 pinstance->host->unique_id,
		 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
		 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
			RES_BUS(cfg_entry->resource_address)),
		 RES_IS_VSET(*cfg_entry) ?
			(fw_version <= PMCRAID_FW_VERSION_1 ?
				cfg_entry->unique_flags1 :
				le16_to_cpu(cfg_entry->array_id) & 0xFF) :
			RES_TARGET(cfg_entry->resource_address),
		 RES_LUN(cfg_entry->resource_address));

	/* If this HCAM indicates a lost notification, read the config table */
	if (pinstance->ccn.hcam->notification_lost) {
		cfgcmd = pmcraid_get_free_cmd(pinstance);
		if (cfgcmd) {
			pmcraid_info("lost CCN, reading config table\n");
			pinstance->reinit_cfg_table = 1;
			pmcraid_querycfg(cfgcmd);
		} else {
			pmcraid_err("lost CCN, no free cmd for querycfg\n");
		}
		goto out_notify_apps;
	}

	/* If this resource is not going to be added to mid-layer, just notify
	 * applications and return. If this notification is about hiding a VSET
	 * resource, check if it was exposed already.
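	 * (A VSET entry is treated as hidden when bit 0x80 of unique_flags1
	 * is set, the same bit pmcraid_expose_resource() tests above.)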
*/ if (pinstance->ccn.hcam->notification_type == NOTIFICATION_TYPE_ENTRY_CHANGED && cfg_entry->resource_type == RES_TYPE_VSET) { hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0; } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) { goto out_notify_apps; } spin_lock_irqsave(&pinstance->resource_lock, lock_flags); list_for_each_entry(res, &pinstance->used_res_q, queue) { rc = memcmp(&res->cfg_entry.resource_address, &cfg_entry->resource_address, sizeof(cfg_entry->resource_address)); if (!rc) { new_entry = 0; break; } } if (new_entry) { if (hidden_entry) { spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); goto out_notify_apps; } /* If there are more number of resources than what driver can * manage, do not notify the applications about the CCN. Just * ignore this notifications and re-register the same HCAM */ if (list_empty(&pinstance->free_res_q)) { spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); pmcraid_err("too many resources attached\n"); spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags); pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); spin_unlock_irqrestore(pinstance->host->host_lock, host_lock_flags); return; } res = list_entry(pinstance->free_res_q.next, struct pmcraid_resource_entry, queue); list_del(&res->queue); res->scsi_dev = NULL; res->reset_progress = 0; list_add_tail(&res->queue, &pinstance->used_res_q); } memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size); if (pinstance->ccn.hcam->notification_type == NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) { if (res->scsi_dev) { if (fw_version <= PMCRAID_FW_VERSION_1) res->cfg_entry.unique_flags1 &= 0x7F; else res->cfg_entry.array_id &= cpu_to_le16(0xFF); res->change_detected = RES_CHANGE_DEL; res->cfg_entry.resource_handle = PMCRAID_INVALID_RES_HANDLE; schedule_work(&pinstance->worker_q); } else { /* This may be one of the non-exposed resources */ list_move_tail(&res->queue, &pinstance->free_res_q); } } else if (!res->scsi_dev) { res->change_detected = RES_CHANGE_ADD; schedule_work(&pinstance->worker_q); } spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); out_notify_apps: /* Notify configuration changes to registered applications.*/ if (!pmcraid_disable_aen) pmcraid_notify_ccn(pinstance); cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); if (cmd) pmcraid_send_hcam_cmd(cmd); } /** * pmcraid_get_error_info - return error string for an ioasc * @ioasc: ioasc code * Return Value * none */ static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc) { int i; for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) { if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc) return &pmcraid_ioasc_error_table[i]; } return NULL; } /** * pmcraid_ioasc_logger - log IOASC information based user-settings * @ioasc: ioasc code * @cmd: pointer to command that resulted in 'ioasc' */ static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd) { struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc); if (error_info == NULL || cmd->drv_inst->current_log_level < error_info->log_level) return; /* log the error string */ pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle), ioasc, error_info->error_string); } /** * pmcraid_handle_error_log - Handle a config change (error log) from the IOA * * @pinstance: pointer to per adapter instance structure * * Return value: * none */ static void pmcraid_handle_error_log(struct 
pmcraid_instance *pinstance) { struct pmcraid_hcam_ldn *hcam_ldn; u32 ioasc; hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam; pmcraid_info ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n", pinstance->ldn.hcam->ilid, pinstance->ldn.hcam->op_code, pinstance->ldn.hcam->notification_type, pinstance->ldn.hcam->notification_lost, pinstance->ldn.hcam->flags, pinstance->ldn.hcam->overlay_id); /* log only the errors, no need to log informational log entries */ if (pinstance->ldn.hcam->notification_type != NOTIFICATION_TYPE_ERROR_LOG) return; if (pinstance->ldn.hcam->notification_lost == HOSTRCB_NOTIFICATIONS_LOST) dev_info(&pinstance->pdev->dev, "Error notifications lost\n"); ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc); if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) { dev_info(&pinstance->pdev->dev, "UnitAttention due to IOA Bus Reset\n"); scsi_report_bus_reset( pinstance->host, RES_BUS(hcam_ldn->error_log.fd_ra)); } return; } /** * pmcraid_process_ccn - Op done function for a CCN. * @cmd: pointer to command struct * * This function is the op done function for a configuration * change notification * * Return value: * none */ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); unsigned long lock_flags; pinstance->ccn.cmd = NULL; pmcraid_return_cmd(cmd); /* If driver initiated IOA reset happened while this hcam was pending * with IOA, or IOA bringdown sequence is in progress, no need to * re-register the hcam */ if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || atomic_read(&pinstance->ccn.ignore) == 1) { return; } else if (ioasc) { dev_info(&pinstance->pdev->dev, "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); } else { pmcraid_handle_config_change(pinstance); } } static void pmcraid_initiate_reset(struct pmcraid_instance *); static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd); /** * pmcraid_process_ldn - op done function for an LDN * @cmd: pointer to command block * * Return value * none */ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; struct pmcraid_hcam_ldn *ldn_hcam = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam; u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc); unsigned long lock_flags; /* return the command block back to freepool */ pinstance->ldn.cmd = NULL; pmcraid_return_cmd(cmd); /* If driver initiated IOA reset happened while this hcam was pending * with IOA, no need to re-register the hcam as reset engine will do it * once reset sequence is complete */ if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || atomic_read(&pinstance->ccn.ignore) == 1) { return; } else if (!ioasc) { pmcraid_handle_error_log(pinstance); if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) { spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pmcraid_initiate_reset(pinstance); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); return; } if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) { pinstance->timestamp_error = 1; pmcraid_set_timestamp(cmd); } } else { dev_info(&pinstance->pdev->dev, "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc); } /* send netlink message for HCAM notification if enabled */ if 
(!pmcraid_disable_aen) pmcraid_notify_ldn(pinstance); cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); if (cmd) pmcraid_send_hcam_cmd(cmd); } /** * pmcraid_register_hcams - register HCAMs for CCN and LDN * * @pinstance: pointer per adapter instance structure * * Return Value * none */ static void pmcraid_register_hcams(struct pmcraid_instance *pinstance) { pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE); pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA); } /** * pmcraid_unregister_hcams - cancel HCAMs registered already * @cmd: pointer to command used as part of reset sequence */ static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; /* During IOA bringdown, HCAM gets fired and tasklet proceeds with * handling hcam response though it is not necessary. In order to * prevent this, set 'ignore', so that bring-down sequence doesn't * re-send any more hcams */ atomic_set(&pinstance->ccn.ignore, 1); atomic_set(&pinstance->ldn.ignore, 1); /* If adapter reset was forced as part of runtime reset sequence, * start the reset sequence. Reset will be triggered even in case * IOA unit_check. */ if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) || pinstance->ioa_unit_check) { pinstance->force_ioa_reset = 0; pinstance->ioa_unit_check = 0; pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; pmcraid_reset_alert(cmd); return; } /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM * one after the other. So CCN cancellation will be triggered by * pmcraid_cancel_ldn itself. */ pmcraid_cancel_ldn(cmd); } static void pmcraid_reinit_buffers(struct pmcraid_instance *); /** * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset * @pinstance: pointer to adapter instance structure * Return Value * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0 */ static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance) { u32 intrs; pmcraid_reinit_buffers(pinstance); intrs = pmcraid_read_interrupts(pinstance); pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { if (!pinstance->interrupt_mode) { iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, pinstance->int_regs. ioa_host_interrupt_mask_reg); iowrite32(INTRS_TRANSITION_TO_OPERATIONAL, pinstance->int_regs.ioa_host_interrupt_clr_reg); } return 1; } else { return 0; } } /** * pmcraid_soft_reset - performs a soft reset and makes IOA become ready * @cmd : pointer to reset command block * * Return Value * none */ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; u32 int_reg; u32 doorbell; /* There will be an interrupt when Transition to Operational bit is * set so tasklet would execute next reset task. 
The timeout handler * would re-initiate a reset */ cmd->cmd_done = pmcraid_ioa_reset; cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT); cmd->timer.function = pmcraid_timeout_handler; if (!timer_pending(&cmd->timer)) add_timer(&cmd->timer); /* Enable destructive diagnostics on IOA if it is not yet in * operational state */ doorbell = DOORBELL_RUNTIME_RESET | DOORBELL_ENABLE_DESTRUCTIVE_DIAGS; /* Since we do RESET_ALERT and Start BIST we have to again write * MSIX Doorbell to indicate the interrupt mode */ if (pinstance->interrupt_mode) { iowrite32(DOORBELL_INTR_MODE_MSIX, pinstance->int_regs.host_ioa_interrupt_reg); ioread32(pinstance->int_regs.host_ioa_interrupt_reg); } iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); ioread32(pinstance->int_regs.host_ioa_interrupt_reg), int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); pmcraid_info("Waiting for IOA to become operational %x:%x\n", ioread32(pinstance->int_regs.host_ioa_interrupt_reg), int_reg); } /** * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt * * @pinstance: pointer to adapter instance structure * * Return Value * none */ static void pmcraid_get_dump(struct pmcraid_instance *pinstance) { pmcraid_info("%s is not yet implemented\n", __func__); } /** * pmcraid_fail_outstanding_cmds - Fails all outstanding ops. * @pinstance: pointer to adapter instance structure * * This function fails all outstanding ops. If they are submitted to IOA * already, it sends cancel all messages if IOA is still accepting IOARCBs, * otherwise just completes the commands and returns the cmd blocks to free * pool. * * Return value: * none */ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance) { struct pmcraid_cmd *cmd, *temp; unsigned long lock_flags; /* pending command list is protected by pending_pool_lock. Its * traversal must be done as within this lock */ spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool, free_list) { list_del(&cmd->free_list); spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); cmd->ioa_cb->ioasa.ioasc = cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET); cmd->ioa_cb->ioasa.ilid = cpu_to_le32(PMCRAID_DRIVER_ILID); /* In case the command timer is still running */ del_timer(&cmd->timer); /* If this is an IO command, complete it by invoking scsi_done * function. If this is one of the internal commands other * than pmcraid_ioa_reset and HCAM commands invoke cmd_done to * complete it */ if (cmd->scsi_cmd) { struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; __le32 resp = cmd->ioa_cb->ioarcb.response_handle; scsi_cmd->result |= DID_ERROR << 16; scsi_dma_unmap(scsi_cmd); pmcraid_return_cmd(cmd); pmcraid_info("failing(%d) CDB[0] = %x result: %x\n", le32_to_cpu(resp) >> 2, cmd->ioa_cb->ioarcb.cdb[0], scsi_cmd->result); scsi_done(scsi_cmd); } else if (cmd->cmd_done == pmcraid_internal_done || cmd->cmd_done == pmcraid_erp_done) { cmd->cmd_done(cmd); } else if (cmd->cmd_done != pmcraid_ioa_reset && cmd->cmd_done != pmcraid_ioa_shutdown_done) { pmcraid_return_cmd(cmd); } atomic_dec(&pinstance->outstanding_cmds); spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags); } spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags); } /** * pmcraid_ioa_reset - Implementation of IOA reset logic * * @cmd: pointer to the cmd block to be used for entire reset process * * This function executes most of the steps required for IOA reset. 
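 * As a rough sketch (one possible pass through the switch below), a hard
 * reset walks the adapter through
 *
 *	IOA_STATE_UNKNOWN -> IOA_STATE_IN_RESET_ALERT ->
 *	IOA_STATE_IN_HARD_RESET -> IOA_STATE_IN_BRINGUP ->
 *	IOA_STATE_OPERATIONAL
 *
 * while a soft reset goes IOA_STATE_UNKNOWN -> IOA_STATE_IN_SOFT_RESET ->
 * IOA_STATE_IN_BRINGUP -> IOA_STATE_OPERATIONAL.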
This gets * called by user threads (modprobe/insmod/rmmod) timer, tasklet and midlayer's * 'eh_' thread. Access to variables used for controlling the reset sequence is * synchronized using host lock. Various functions called during reset process * would make use of a single command block, pointer to which is also stored in * adapter instance structure. * * Return Value * None */ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; u8 reset_complete = 0; pinstance->ioa_reset_in_progress = 1; if (pinstance->reset_cmd != cmd) { pmcraid_err("reset is called with different command block\n"); pinstance->reset_cmd = cmd; } pmcraid_info("reset_engine: state = %d, command = %p\n", pinstance->ioa_state, cmd); switch (pinstance->ioa_state) { case IOA_STATE_DEAD: /* If IOA is offline, whatever may be the reset reason, just * return. callers might be waiting on the reset wait_q, wake * up them */ pmcraid_err("IOA is offline no reset is possible\n"); reset_complete = 1; break; case IOA_STATE_IN_BRINGDOWN: /* we enter here, once ioa shutdown command is processed by IOA * Alert IOA for a possible reset. If reset alert fails, IOA * goes through hard-reset */ pmcraid_disable_interrupts(pinstance, ~0); pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; pmcraid_reset_alert(cmd); break; case IOA_STATE_UNKNOWN: /* We may be called during probe or resume. Some pre-processing * is required for prior to reset */ scsi_block_requests(pinstance->host); /* If asked to reset while IOA was processing responses or * there are any error responses then IOA may require * hard-reset. */ if (pinstance->ioa_hard_reset == 0) { if (ioread32(pinstance->ioa_status) & INTRS_TRANSITION_TO_OPERATIONAL) { pmcraid_info("sticky bit set, bring-up\n"); pinstance->ioa_state = IOA_STATE_IN_BRINGUP; pmcraid_reinit_cmdblk(cmd); pmcraid_identify_hrrq(cmd); } else { pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET; pmcraid_soft_reset(cmd); } } else { /* Alert IOA of a possible reset and wait for critical * operation in progress bit to reset */ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; pmcraid_reset_alert(cmd); } break; case IOA_STATE_IN_RESET_ALERT: /* If critical operation in progress bit is reset or wait gets * timed out, reset proceeds with starting BIST on the IOA. * pmcraid_ioa_hard_reset keeps a count of reset attempts. If * they are 3 or more, reset engine marks IOA dead and returns */ pinstance->ioa_state = IOA_STATE_IN_HARD_RESET; pmcraid_start_bist(cmd); break; case IOA_STATE_IN_HARD_RESET: pinstance->ioa_reset_attempts++; /* retry reset if we haven't reached maximum allowed limit */ if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) { pinstance->ioa_reset_attempts = 0; pmcraid_err("IOA didn't respond marking it as dead\n"); pinstance->ioa_state = IOA_STATE_DEAD; if (pinstance->ioa_bringdown) pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_SHUTDOWN_FAILED); else pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_FAILED); reset_complete = 1; break; } /* Once either bist or pci reset is done, restore PCI config * space. 
If this fails, proceed with hard reset again */ pci_restore_state(pinstance->pdev); /* fail all pending commands */ pmcraid_fail_outstanding_cmds(pinstance); /* check if unit check is active, if so extract dump */ if (pinstance->ioa_unit_check) { pmcraid_info("unit check is active\n"); pinstance->ioa_unit_check = 0; pmcraid_get_dump(pinstance); pinstance->ioa_reset_attempts--; pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT; pmcraid_reset_alert(cmd); break; } /* if the reset reason is to bring-down the ioa, we might be * done with the reset restore pci_config_space and complete * the reset */ if (pinstance->ioa_bringdown) { pmcraid_info("bringing down the adapter\n"); pinstance->ioa_shutdown_type = SHUTDOWN_NONE; pinstance->ioa_bringdown = 0; pinstance->ioa_state = IOA_STATE_UNKNOWN; pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS); reset_complete = 1; } else { /* bring-up IOA, so proceed with soft reset * Reinitialize hrrq_buffers and their indices also * enable interrupts after a pci_restore_state */ if (pmcraid_reset_enable_ioa(pinstance)) { pinstance->ioa_state = IOA_STATE_IN_BRINGUP; pmcraid_info("bringing up the adapter\n"); pmcraid_reinit_cmdblk(cmd); pmcraid_identify_hrrq(cmd); } else { pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET; pmcraid_soft_reset(cmd); } } break; case IOA_STATE_IN_SOFT_RESET: /* TRANSITION TO OPERATIONAL is on so start initialization * sequence */ pmcraid_info("In softreset proceeding with bring-up\n"); pinstance->ioa_state = IOA_STATE_IN_BRINGUP; /* Initialization commands start with HRRQ identification. From * now on tasklet completes most of the commands as IOA is up * and intrs are enabled */ pmcraid_identify_hrrq(cmd); break; case IOA_STATE_IN_BRINGUP: /* we are done with bringing up of IOA, change the ioa_state to * operational and wake up any waiters */ pinstance->ioa_state = IOA_STATE_OPERATIONAL; reset_complete = 1; break; case IOA_STATE_OPERATIONAL: default: /* When IOA is operational and a reset is requested, check for * the reset reason. If reset is to bring down IOA, unregister * HCAMs and initiate shutdown; if adapter reset is forced then * restart reset sequence again */ if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE && pinstance->force_ioa_reset == 0) { pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_SUCCESS); reset_complete = 1; } else { if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE) pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN; pmcraid_reinit_cmdblk(cmd); pmcraid_unregister_hcams(cmd); } break; } /* reset will be completed if ioa_state is either DEAD or UNKNOWN or * OPERATIONAL. Reset all control variables used during reset, wake up * any waiting threads and let the SCSI mid-layer send commands. Note * that host_lock must be held before invoking scsi_report_bus_reset. */ if (reset_complete) { pinstance->ioa_reset_in_progress = 0; pinstance->ioa_reset_attempts = 0; pinstance->reset_cmd = NULL; pinstance->ioa_shutdown_type = SHUTDOWN_NONE; pinstance->ioa_bringdown = 0; pmcraid_return_cmd(cmd); /* If target state is to bring up the adapter, proceed with * hcam registration and resource exposure to mid-layer. */ if (pinstance->ioa_state == IOA_STATE_OPERATIONAL) pmcraid_register_hcams(pinstance); wake_up_all(&pinstance->reset_wait_q); } return; } /** * pmcraid_initiate_reset - initiates reset sequence. This is called from * ISR/tasklet during error interrupts including IOA unit check. If reset * is already in progress, it just returns, otherwise initiates IOA reset * to bring IOA up to operational state. 
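 * For example, pmcraid_process_ldn() above invokes this when the failing
 * IOASC reported by the IOA is PMCRAID_IOASC_NR_IOA_RESET_REQUIRED.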
* * @pinstance: pointer to adapter instance structure * * Return value * none */ static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance) { struct pmcraid_cmd *cmd; /* If the reset is already in progress, just return, otherwise start * reset sequence and return */ if (!pinstance->ioa_reset_in_progress) { scsi_block_requests(pinstance->host); cmd = pmcraid_get_free_cmd(pinstance); if (cmd == NULL) { pmcraid_err("no cmnd blocks for initiate_reset\n"); return; } pinstance->ioa_shutdown_type = SHUTDOWN_NONE; pinstance->reset_cmd = cmd; pinstance->force_ioa_reset = 1; pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START); pmcraid_ioa_reset(cmd); } } /** * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup * or bringdown IOA * @pinstance: pointer adapter instance structure * @shutdown_type: shutdown type to be used NONE, NORMAL or ABRREV * @target_state: expected target state after reset * * Note: This command initiates reset and waits for its completion. Hence this * should not be called from isr/timer/tasklet functions (timeout handlers, * error response handlers and interrupt handlers). * * Return Value * 1 in case ioa_state is not target_state, 0 otherwise. */ static int pmcraid_reset_reload( struct pmcraid_instance *pinstance, u8 shutdown_type, u8 target_state ) { struct pmcraid_cmd *reset_cmd = NULL; unsigned long lock_flags; int reset = 1; spin_lock_irqsave(pinstance->host->host_lock, lock_flags); if (pinstance->ioa_reset_in_progress) { pmcraid_info("reset_reload: reset is already in progress\n"); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); wait_event(pinstance->reset_wait_q, !pinstance->ioa_reset_in_progress); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); if (pinstance->ioa_state == IOA_STATE_DEAD) { pmcraid_info("reset_reload: IOA is dead\n"); goto out_unlock; } if (pinstance->ioa_state == target_state) { reset = 0; goto out_unlock; } } pmcraid_info("reset_reload: proceeding with reset\n"); scsi_block_requests(pinstance->host); reset_cmd = pmcraid_get_free_cmd(pinstance); if (reset_cmd == NULL) { pmcraid_err("no free cmnd for reset_reload\n"); goto out_unlock; } if (shutdown_type == SHUTDOWN_NORMAL) pinstance->ioa_bringdown = 1; pinstance->ioa_shutdown_type = shutdown_type; pinstance->reset_cmd = reset_cmd; pinstance->force_ioa_reset = reset; pmcraid_info("reset_reload: initiating reset\n"); pmcraid_ioa_reset(reset_cmd); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); pmcraid_info("reset_reload: waiting for reset to complete\n"); wait_event(pinstance->reset_wait_q, !pinstance->ioa_reset_in_progress); pmcraid_info("reset_reload: reset is complete !!\n"); scsi_unblock_requests(pinstance->host); return pinstance->ioa_state != target_state; out_unlock: spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); return reset; } /** * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA * * @pinstance: pointer to adapter instance structure * * Return Value * whatever is returned from pmcraid_reset_reload */ static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance) { return pmcraid_reset_reload(pinstance, SHUTDOWN_NORMAL, IOA_STATE_UNKNOWN); } /** * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA * * @pinstance: pointer to adapter instance structure * * Return Value * whatever is returned from pmcraid_reset_reload */ static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance) { pmcraid_notify_ioastate(pinstance, 
PMC_DEVICE_EVENT_RESET_START); return pmcraid_reset_reload(pinstance, SHUTDOWN_NONE, IOA_STATE_OPERATIONAL); } /** * pmcraid_request_sense - Send request sense to a device * @cmd: pmcraid command struct * * This function sends a request sense to a device as a result of a check * condition. This method re-uses the same command block that failed earlier. */ static void pmcraid_request_sense(struct pmcraid_cmd *cmd) { struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; struct device *dev = &cmd->drv_inst->pdev->dev; cmd->sense_buffer = cmd->scsi_cmd->sense_buffer; cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, cmd->sense_buffer_dma)) { pmcraid_err ("couldn't allocate sense buffer for request sense\n"); pmcraid_erp_done(cmd); return; } /* re-use the command block */ memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa)); memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); ioarcb->request_flags0 = (SYNC_COMPLETE | NO_LINK_DESCS | INHIBIT_UL_CHECK); ioarcb->request_type = REQ_TYPE_SCSI; ioarcb->cdb[0] = REQUEST_SENSE; ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE; ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); ioadl->address = cpu_to_le64(cmd->sense_buffer_dma); ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); ioadl->flags = IOADL_FLAGS_LAST_DESC; /* request sense might be called as part of error response processing * which runs in tasklets context. It is possible that mid-layer might * schedule queuecommand during this time, hence, writting to IOARRIN * must be protect by host_lock */ pmcraid_send_cmd(cmd, pmcraid_erp_done, PMCRAID_REQUEST_SENSE_TIMEOUT, pmcraid_timeout_handler); } /** * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery * @cmd: command that failed * @need_sense: true if request_sense is required after cancel all * * This function sends a cancel all to a device to clear the queue. */ static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense) { struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); ioarcb->request_flags0 = SYNC_OVERRIDE; ioarcb->request_type = REQ_TYPE_IOACMD; ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS; if (RES_IS_GSCSI(res->cfg_entry)) ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL; ioarcb->ioadl_bus_addr = 0; ioarcb->ioadl_length = 0; ioarcb->data_transfer_length = 0; ioarcb->ioarcb_bus_addr &= cpu_to_le64((~0x1FULL)); /* writing to IOARRIN must be protected by host_lock, as mid-layer * schedule queuecommand while we are doing this */ pmcraid_send_cmd(cmd, need_sense ? 
pmcraid_erp_done : pmcraid_request_sense, PMCRAID_REQUEST_SENSE_TIMEOUT, pmcraid_timeout_handler); } /** * pmcraid_frame_auto_sense: frame fixed format sense information * * @cmd: pointer to failing command block * * Return value * none */ static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd) { u8 *sense_buf = cmd->scsi_cmd->sense_buffer; struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata; struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; u32 ioasc = le32_to_cpu(ioasa->ioasc); u32 failing_lba = 0; memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; if (RES_IS_VSET(res->cfg_entry) && ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC && ioasa->u.vset.failing_lba_hi != 0) { sense_buf[0] = 0x72; sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc); sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc); sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc); sense_buf[7] = 12; sense_buf[8] = 0; sense_buf[9] = 0x0A; sense_buf[10] = 0x80; failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi); sense_buf[12] = (failing_lba & 0xff000000) >> 24; sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; sense_buf[15] = failing_lba & 0x000000ff; failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo); sense_buf[16] = (failing_lba & 0xff000000) >> 24; sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; sense_buf[19] = failing_lba & 0x000000ff; } else { sense_buf[0] = 0x70; sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc); sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc); sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc); if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) { if (RES_IS_VSET(res->cfg_entry)) failing_lba = le32_to_cpu(ioasa->u. vset.failing_lba_lo); sense_buf[0] |= 0x80; sense_buf[3] = (failing_lba >> 24) & 0xff; sense_buf[4] = (failing_lba >> 16) & 0xff; sense_buf[5] = (failing_lba >> 8) & 0xff; sense_buf[6] = failing_lba & 0xff; } sense_buf[7] = 6; /* additional length */ } } /** * pmcraid_error_handler - Error response handlers for a SCSI op * @cmd: pointer to pmcraid_cmd that has failed * * This function determines whether or not to initiate ERP on the affected * device. This is called from a tasklet, which doesn't hold any locks. 
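 * For instance, on a check-condition status the code below copies any
 * auto-sense data and then issues CANCEL ALL or REQUEST SENSE (or completes
 * ERP directly), returning 1 so this handler owns completion; most other
 * IOASCs only set scsi_cmd->result and return 0 for the caller to complete.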
* * Return value: * 0 it caller can complete the request, otherwise 1 where in error * handler itself completes the request and returns the command block * back to free-pool */ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) { struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; struct pmcraid_instance *pinstance = cmd->drv_inst; struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; u32 ioasc = le32_to_cpu(ioasa->ioasc); u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK; bool sense_copied = false; if (!res) { pmcraid_info("resource pointer is NULL\n"); return 0; } /* If this was a SCSI read/write command keep count of errors */ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) atomic_inc(&res->read_failures); else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) atomic_inc(&res->write_failures); if (!RES_IS_GSCSI(res->cfg_entry) && masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { pmcraid_frame_auto_sense(cmd); } /* Log IOASC/IOASA information based on user settings */ pmcraid_ioasc_logger(ioasc, cmd); switch (masked_ioasc) { case PMCRAID_IOASC_AC_TERMINATED_BY_HOST: scsi_cmd->result |= (DID_ABORT << 16); break; case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE: case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE: scsi_cmd->result |= (DID_NO_CONNECT << 16); break; case PMCRAID_IOASC_NR_SYNC_REQUIRED: res->sync_reqd = 1; scsi_cmd->result |= (DID_IMM_RETRY << 16); break; case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC: scsi_cmd->result |= (DID_PASSTHROUGH << 16); break; case PMCRAID_IOASC_UA_BUS_WAS_RESET: case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER: if (!res->reset_progress) scsi_report_bus_reset(pinstance->host, scsi_cmd->device->channel); scsi_cmd->result |= (DID_ERROR << 16); break; case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR: scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc); res->sync_reqd = 1; /* if check_condition is not active return with error otherwise * get/frame the sense buffer */ if (PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_CHECK_CONDITION && PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE) return 0; /* If we have auto sense data as part of IOASA pass it to * mid-layer */ if (ioasa->auto_sense_length != 0) { short sense_len = le16_to_cpu(ioasa->auto_sense_length); int data_size = min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE); memcpy(scsi_cmd->sense_buffer, ioasa->sense_data, data_size); sense_copied = true; } if (RES_IS_GSCSI(res->cfg_entry)) pmcraid_cancel_all(cmd, sense_copied); else if (sense_copied) pmcraid_erp_done(cmd); else pmcraid_request_sense(cmd); return 1; case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED: break; default: if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) scsi_cmd->result |= (DID_ERROR << 16); break; } return 0; } /** * pmcraid_reset_device - device reset handler functions * * @scsi_cmd: scsi command struct * @timeout: command timeout * @modifier: reset modifier indicating the reset sequence to be performed * * This function issues a device reset to the affected device. * A LUN reset will be sent to the device first. If that does * not work, a target reset will be sent. 
* * Return value: * SUCCESS / FAILED */ static int pmcraid_reset_device( struct scsi_cmnd *scsi_cmd, unsigned long timeout, u8 modifier) { struct pmcraid_cmd *cmd; struct pmcraid_instance *pinstance; struct pmcraid_resource_entry *res; struct pmcraid_ioarcb *ioarcb; unsigned long lock_flags; u32 ioasc; pinstance = (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; res = scsi_cmd->device->hostdata; if (!res) { sdev_printk(KERN_ERR, scsi_cmd->device, "reset_device: NULL resource pointer\n"); return FAILED; } /* If adapter is currently going through reset/reload, return failed. * This will force the mid-layer to call _eh_bus/host reset, which * will then go to sleep and wait for the reset to complete */ spin_lock_irqsave(pinstance->host->host_lock, lock_flags); if (pinstance->ioa_reset_in_progress || pinstance->ioa_state == IOA_STATE_DEAD) { spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); return FAILED; } res->reset_progress = 1; pmcraid_info("Resetting %s resource with addr %x\n", ((modifier & RESET_DEVICE_LUN) ? "LUN" : ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")), le32_to_cpu(res->cfg_entry.resource_address)); /* get a free cmd block */ cmd = pmcraid_get_free_cmd(pinstance); if (cmd == NULL) { spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); pmcraid_err("%s: no cmd blocks are available\n", __func__); return FAILED; } ioarcb = &cmd->ioa_cb->ioarcb; ioarcb->resource_handle = res->cfg_entry.resource_handle; ioarcb->request_type = REQ_TYPE_IOACMD; ioarcb->cdb[0] = PMCRAID_RESET_DEVICE; /* Initialize reset modifier bits */ if (modifier) modifier = ENABLE_RESET_MODIFIER | modifier; ioarcb->cdb[1] = modifier; init_completion(&cmd->wait_for_completion); cmd->completion_req = 1; pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle), le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2); pmcraid_send_cmd(cmd, pmcraid_internal_done, timeout, pmcraid_timeout_handler); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); /* RESET_DEVICE command completes after all pending IOARCBs are * completed. Once this command is completed, pmcraind_internal_done * will wake up the 'completion' queue. */ wait_for_completion(&cmd->wait_for_completion); /* complete the command here itself and return the command block * to free list */ pmcraid_return_cmd(cmd); res->reset_progress = 0; ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); /* set the return value based on the returned ioasc */ return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; } /** * _pmcraid_io_done - helper for pmcraid_io_done function * * @cmd: pointer to pmcraid command struct * @reslen: residual data length to be set in the ioasa * @ioasc: ioasc either returned by IOA or set by driver itself. * * This function is invoked by pmcraid_io_done to complete mid-layer * scsi ops. * * Return value: * 0 if caller is required to return it to free_pool. Returns 1 if * caller need not worry about freeing command block as error handler * will take care of that. 
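* Note: the residual length reported in the IOASA is propagated with scsi_set_resid(), pmcraid_error_handler() is consulted only when the IOASC carries a non-zero sense key, and the scsi command is unmapped and completed here whenever the error handler does not take ownership of it.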
*/ static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc) { struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; int rc = 0; scsi_set_resid(scsi_cmd, reslen); pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n", le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, cmd->ioa_cb->ioarcb.cdb[0], ioasc, scsi_cmd->result); if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0) rc = pmcraid_error_handler(cmd); if (rc == 0) { scsi_dma_unmap(scsi_cmd); scsi_done(scsi_cmd); } return rc; } /** * pmcraid_io_done - SCSI completion function * * @cmd: pointer to pmcraid command struct * * This function is invoked by tasklet/mid-layer error handler to complete * the SCSI ops sent from mid-layer. * * Return value * none */ static void pmcraid_io_done(struct pmcraid_cmd *cmd) { u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc); u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length); if (_pmcraid_io_done(cmd, reslen, ioasc) == 0) pmcraid_return_cmd(cmd); } /** * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA * * @cmd: command block of the command to be aborted * * Return Value: * returns pointer to command structure used as cancelling cmd */ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd) { struct pmcraid_cmd *cancel_cmd; struct pmcraid_instance *pinstance; pinstance = (struct pmcraid_instance *)cmd->drv_inst; cancel_cmd = pmcraid_get_free_cmd(pinstance); if (cancel_cmd == NULL) { pmcraid_err("%s: no cmd blocks are available\n", __func__); return NULL; } pmcraid_prepare_cancel_cmd(cancel_cmd, cmd); pmcraid_info("aborting command CDB[0]= %x with index = %d\n", cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2); init_completion(&cancel_cmd->wait_for_completion); cancel_cmd->completion_req = 1; pmcraid_info("command (%d) CDB[0] = %x for %x\n", le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2, cancel_cmd->ioa_cb->ioarcb.cdb[0], le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle)); pmcraid_send_cmd(cancel_cmd, pmcraid_internal_done, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); return cancel_cmd; } /** * pmcraid_abort_complete - Waits for ABORT TASK completion * * @cancel_cmd: command block used as cancelling command * * Return Value: * returns SUCCESS if ABORT TASK has good completion * otherwise FAILED */ static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd) { struct pmcraid_resource_entry *res; u32 ioasc; wait_for_completion(&cancel_cmd->wait_for_completion); res = cancel_cmd->res; cancel_cmd->res = NULL; ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); /* If the abort task is not timed out we will get a Good completion * as sense_key, otherwise we may get one of the following responses * due to subsequent bus reset or device reset. In case IOASC is * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource */ if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET || ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) { if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) res->sync_reqd = 1; ioasc = 0; } /* complete the command here itself */ pmcraid_return_cmd(cancel_cmd); return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; } /** * pmcraid_eh_abort_handler - entry point for aborting a single task on errors * * @scsi_cmd: scsi command struct given by mid-layer. When this is called * mid-layer ensures that no other commands are queued. This * never gets called under interrupt, but from a separate eh thread.
* * Return value: * SUCCESS / FAILED */ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd) { struct pmcraid_instance *pinstance; struct pmcraid_cmd *cmd; struct pmcraid_resource_entry *res; unsigned long host_lock_flags; unsigned long pending_lock_flags; struct pmcraid_cmd *cancel_cmd = NULL; int cmd_found = 0; int rc = FAILED; pinstance = (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; scmd_printk(KERN_INFO, scsi_cmd, "I/O command timed out, aborting it.\n"); res = scsi_cmd->device->hostdata; if (res == NULL) return rc; /* If we are currently going through reset/reload, return failed. * This will force the mid-layer to eventually call * pmcraid_eh_host_reset which will then go to sleep and wait for the * reset to complete */ spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags); if (pinstance->ioa_reset_in_progress || pinstance->ioa_state == IOA_STATE_DEAD) { spin_unlock_irqrestore(pinstance->host->host_lock, host_lock_flags); return rc; } /* loop over pending cmd list to find cmd corresponding to this * scsi_cmd. Note that this command might not have been completed * already. locking: all pending commands are protected with * pending_pool_lock. */ spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags); list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) { if (cmd->scsi_cmd == scsi_cmd) { cmd_found = 1; break; } } spin_unlock_irqrestore(&pinstance->pending_pool_lock, pending_lock_flags); /* If the command to be aborted was given to IOA and still pending with * it, send ABORT_TASK to abort this and wait for its completion */ if (cmd_found) cancel_cmd = pmcraid_abort_cmd(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, host_lock_flags); if (cancel_cmd) { cancel_cmd->res = cmd->scsi_cmd->device->hostdata; rc = pmcraid_abort_complete(cancel_cmd); } return cmd_found ? rc : SUCCESS; } /** * pmcraid_eh_device_reset_handler - bus/target/device reset handler callbacks * * @scmd: pointer to scsi_cmd that was sent to the resource to be reset. * * All these routines invokve pmcraid_reset_device with appropriate parameters. * Since these are called from mid-layer EH thread, no other IO will be queued * to the resource being reset. However, control path (IOCTL) may be active so * it is necessary to synchronize IOARRIN writes which pmcraid_reset_device * takes care by locking/unlocking host_lock. 
* * Return value * SUCCESS or FAILED */ static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd) { scmd_printk(KERN_INFO, scmd, "resetting device due to an I/O command timeout.\n"); return pmcraid_reset_device(scmd, PMCRAID_INTERNAL_TIMEOUT, RESET_DEVICE_LUN); } static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd) { scmd_printk(KERN_INFO, scmd, "Doing bus reset due to an I/O command timeout.\n"); return pmcraid_reset_device(scmd, PMCRAID_RESET_BUS_TIMEOUT, RESET_DEVICE_BUS); } static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd) { scmd_printk(KERN_INFO, scmd, "Doing target reset due to an I/O command timeout.\n"); return pmcraid_reset_device(scmd, PMCRAID_INTERNAL_TIMEOUT, RESET_DEVICE_TARGET); } /** * pmcraid_eh_host_reset_handler - adapter reset handler callback * * @scmd: pointer to scsi_cmd that was sent to a resource of adapter * * Initiates adapter reset to bring it up to operational state * * Return value * SUCCESS or FAILED */ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd) { unsigned long interval = 10000; /* 10 seconds interval */ int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval; struct pmcraid_instance *pinstance = (struct pmcraid_instance *)(scmd->device->host->hostdata); /* wait for an additional 150 seconds just in case firmware could come * up and if it could complete all the pending commands excluding the * two HCAM (CCN and LDN). */ while (waits--) { if (atomic_read(&pinstance->outstanding_cmds) <= PMCRAID_MAX_HCAM_CMD) return SUCCESS; msleep(interval); } dev_err(&pinstance->pdev->dev, "Adapter being reset due to an I/O command timeout.\n"); return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED; } /** * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB * @cmd: pmcraid command struct * @sgcount: count of scatter-gather elements * * Return value * returns pointer pmcraid_ioadl_desc, initialized to point to internal * or external IOADLs */ static struct pmcraid_ioadl_desc * pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) { struct pmcraid_ioadl_desc *ioadl; struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; int ioadl_count = 0; if (ioarcb->add_cmd_param_length) ioadl_count = DIV_ROUND_UP(le16_to_cpu(ioarcb->add_cmd_param_length), 16); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount); if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) { /* external ioadls start at offset 0x80 from control_block * structure, re-using 24 out of 27 ioadls part of IOARCB. * It is necessary to indicate to firmware that driver is * using ioadls to be treated as external to IOARCB. */ ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[3])); ioadl = &ioarcb->add_data.u.ioadl[3]; } else { ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[ioadl_count])); ioadl = &ioarcb->add_data.u.ioadl[ioadl_count]; ioarcb->ioarcb_bus_addr |= cpu_to_le64(DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8)); } return ioadl; } /** * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer * @pinstance: pointer to adapter instance structure * @cmd: pmcraid command struct * * This function is invoked by queuecommand entry point while sending a command * to firmware. This builds ioadl descriptors and sets up ioarcb fields. 
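* When the scatter-gather count (plus any additional command parameter descriptors) does not fit in the IOARCB's inline add_data area, pmcraid_init_ioadls() falls back to the external IOADL descriptors starting at add_data.u.ioadl[3] and repoints ioadl_bus_addr there.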
* * Return value: * 0 on success or -1 on failure */ static int pmcraid_build_ioadl( struct pmcraid_instance *pinstance, struct pmcraid_cmd *cmd ) { int i, nseg; struct scatterlist *sglist; struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); struct pmcraid_ioadl_desc *ioadl; u32 length = scsi_bufflen(scsi_cmd); if (!length) return 0; nseg = scsi_dma_map(scsi_cmd); if (nseg < 0) { scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n"); return -1; } else if (nseg > PMCRAID_MAX_IOADLS) { scsi_dma_unmap(scsi_cmd); scmd_printk(KERN_ERR, scsi_cmd, "sg count is (%d) more than allowed!\n", nseg); return -1; } /* Initialize IOARCB data transfer length fields */ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->data_transfer_length = cpu_to_le32(length); ioadl = pmcraid_init_ioadls(cmd, nseg); /* Initialize IOADL descriptor addresses */ scsi_for_each_sg(scsi_cmd, sglist, nseg, i) { ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist)); ioadl[i].address = cpu_to_le64(sg_dma_address(sglist)); ioadl[i].flags = 0; } /* setup last descriptor */ ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC; return 0; } /** * pmcraid_queuecommand_lck - Queue a mid-layer request * @scsi_cmd: scsi command struct * * This function queues a request generated by the mid-layer. Midlayer calls * this routine within host->lock. Some of the functions called by queuecommand * would use cmd block queue locks (free_pool_lock and pending_pool_lock) * * Return value: * 0 on success * SCSI_MLQUEUE_DEVICE_BUSY if device is busy * SCSI_MLQUEUE_HOST_BUSY if host is busy */ static int pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd) { struct pmcraid_instance *pinstance; struct pmcraid_resource_entry *res; struct pmcraid_ioarcb *ioarcb; struct pmcraid_cmd *cmd; u32 fw_version; int rc = 0; pinstance = (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; fw_version = be16_to_cpu(pinstance->inq_data->fw_version); res = scsi_cmd->device->hostdata; scsi_cmd->result = (DID_OK << 16); /* if adapter is marked as dead, set result to DID_NO_CONNECT complete * the command */ if (pinstance->ioa_state == IOA_STATE_DEAD) { pmcraid_info("IOA is dead, but queuecommand is scheduled\n"); scsi_cmd->result = (DID_NO_CONNECT << 16); scsi_done(scsi_cmd); return 0; } /* If IOA reset is in progress, can't queue the commands */ if (pinstance->ioa_reset_in_progress) return SCSI_MLQUEUE_HOST_BUSY; /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete * the command here itself with success return */ if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) { pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n"); scsi_done(scsi_cmd); return 0; } /* initialize the command and IOARCB to be sent to IOA */ cmd = pmcraid_get_free_cmd(pinstance); if (cmd == NULL) { pmcraid_err("free command block is not available\n"); return SCSI_MLQUEUE_HOST_BUSY; } cmd->scsi_cmd = scsi_cmd; ioarcb = &(cmd->ioa_cb->ioarcb); memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); ioarcb->resource_handle = res->cfg_entry.resource_handle; ioarcb->request_type = REQ_TYPE_SCSI; /* set hrrq number where the IOA should respond to. Note that all cmds * generated internally uses hrrq_id 0, exception to this is the cmd * block of scsi_cmd which is re-used (e.g. 
cancel/abort), which uses * hrrq_id assigned here in queuecommand */ ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % pinstance->num_hrrq; cmd->cmd_done = pmcraid_io_done; if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) { if (scsi_cmd->underflow == 0) ioarcb->request_flags0 |= INHIBIT_UL_CHECK; if (res->sync_reqd) { ioarcb->request_flags0 |= SYNC_COMPLETE; res->sync_reqd = 0; } ioarcb->request_flags0 |= NO_LINK_DESCS; if (scsi_cmd->flags & SCMD_TAGGED) ioarcb->request_flags1 |= TASK_TAG_SIMPLE; if (RES_IS_GSCSI(res->cfg_entry)) ioarcb->request_flags1 |= DELAY_AFTER_RESET; } rc = pmcraid_build_ioadl(pinstance, cmd); pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n", le32_to_cpu(ioarcb->response_handle) >> 2, scsi_cmd->cmnd[0], pinstance->host->unique_id, RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID : PMCRAID_PHYS_BUS_ID, RES_IS_VSET(res->cfg_entry) ? (fw_version <= PMCRAID_FW_VERSION_1 ? res->cfg_entry.unique_flags1 : le16_to_cpu(res->cfg_entry.array_id) & 0xFF) : RES_TARGET(res->cfg_entry.resource_address), RES_LUN(res->cfg_entry.resource_address)); if (likely(rc == 0)) { _pmcraid_fire_command(cmd); } else { pmcraid_err("queuecommand could not build ioadl\n"); pmcraid_return_cmd(cmd); rc = SCSI_MLQUEUE_HOST_BUSY; } return rc; } static DEF_SCSI_QCMD(pmcraid_queuecommand) /* * pmcraid_open -char node "open" entry, allowed only users with admin access */ static int pmcraid_chr_open(struct inode *inode, struct file *filep) { struct pmcraid_instance *pinstance; if (!capable(CAP_SYS_ADMIN)) return -EACCES; /* Populate adapter instance * pointer for use by ioctl */ pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev); filep->private_data = pinstance; return 0; } /* * pmcraid_fasync - Async notifier registration from applications * * This function adds the calling process to a driver global queue. When an * event occurs, SIGIO will be sent to all processes in this queue. */ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode) { struct pmcraid_instance *pinstance; int rc; pinstance = filep->private_data; mutex_lock(&pinstance->aen_queue_lock); rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue); mutex_unlock(&pinstance->aen_queue_lock); return rc; } /** * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself * * @pinstance: pointer to adapter instance structure * @cmd: ioctl command passed in * @buflen: length of user_buffer * @user_buffer: user buffer pointer * * Return Value * 0 in case of success, otherwise appropriate error code */ static long pmcraid_ioctl_driver( struct pmcraid_instance *pinstance, unsigned int cmd, unsigned int buflen, void __user *user_buffer ) { int rc = -ENOSYS; switch (cmd) { case PMCRAID_IOCTL_RESET_ADAPTER: pmcraid_reset_bringup(pinstance); rc = 0; break; default: break; } return rc; } /** * pmcraid_check_ioctl_buffer - check for proper access to user buffer * * @cmd: ioctl command * @arg: user buffer * @hdr: pointer to kernel memory for pmcraid_ioctl_header * * Return Value * negetive error code if there are access issues, otherwise zero. * Upon success, returns ioctl header copied out of user buffer. 
*/ static int pmcraid_check_ioctl_buffer( int cmd, void __user *arg, struct pmcraid_ioctl_header *hdr ) { int rc; if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) { pmcraid_err("couldn't copy ioctl header from user buffer\n"); return -EFAULT; } /* check for valid driver signature */ rc = memcmp(hdr->signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr->signature)); if (rc) { pmcraid_err("signature verification failed\n"); return -EINVAL; } return 0; } /* * pmcraid_ioctl - char node ioctl entry point */ static long pmcraid_chr_ioctl( struct file *filep, unsigned int cmd, unsigned long arg ) { struct pmcraid_instance *pinstance = NULL; struct pmcraid_ioctl_header *hdr = NULL; void __user *argp = (void __user *)arg; int retval = -ENOTTY; hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL); if (!hdr) { pmcraid_err("failed to allocate memory for ioctl header\n"); return -ENOMEM; } retval = pmcraid_check_ioctl_buffer(cmd, argp, hdr); if (retval) { pmcraid_info("chr_ioctl: header check failed\n"); kfree(hdr); return retval; } pinstance = filep->private_data; if (!pinstance) { pmcraid_info("adapter instance is not found\n"); kfree(hdr); return -ENOTTY; } switch (_IOC_TYPE(cmd)) { case PMCRAID_DRIVER_IOCTL: arg += sizeof(struct pmcraid_ioctl_header); retval = pmcraid_ioctl_driver(pinstance, cmd, hdr->buffer_length, argp); break; default: retval = -ENOTTY; break; } kfree(hdr); return retval; } /* * File operations structure for management interface */ static const struct file_operations pmcraid_fops = { .owner = THIS_MODULE, .open = pmcraid_chr_open, .fasync = pmcraid_chr_fasync, .unlocked_ioctl = pmcraid_chr_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; /** * pmcraid_show_log_level - Display adapter's error logging level * @dev: class device struct * @attr: unused * @buf: buffer * * Return value: * number of bytes printed to buffer */ static ssize_t pmcraid_show_log_level( struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct pmcraid_instance *pinstance = (struct pmcraid_instance *)shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level); } /** * pmcraid_store_log_level - Change the adapter's error logging level * @dev: class device struct * @attr: unused * @buf: buffer * @count: not used * * Return value: * number of bytes printed to buffer */ static ssize_t pmcraid_store_log_level( struct device *dev, struct device_attribute *attr, const char *buf, size_t count ) { struct Scsi_Host *shost; struct pmcraid_instance *pinstance; u8 val; if (kstrtou8(buf, 10, &val)) return -EINVAL; /* log-level should be from 0 to 2 */ if (val > 2) return -EINVAL; shost = class_to_shost(dev); pinstance = (struct pmcraid_instance *)shost->hostdata; pinstance->current_log_level = val; return strlen(buf); } static struct device_attribute pmcraid_log_level_attr = { .attr = { .name = "log_level", .mode = S_IRUGO | S_IWUSR, }, .show = pmcraid_show_log_level, .store = pmcraid_store_log_level, }; /** * pmcraid_show_drv_version - Display driver version * @dev: class device struct * @attr: unused * @buf: buffer * * Return value: * number of bytes printed to buffer */ static ssize_t pmcraid_show_drv_version( struct device *dev, struct device_attribute *attr, char *buf ) { return snprintf(buf, PAGE_SIZE, "version: %s\n", PMCRAID_DRIVER_VERSION); } static struct device_attribute pmcraid_driver_version_attr = { .attr = { .name = "drv_version", .mode = S_IRUGO, }, .show = pmcraid_show_drv_version, 
}; /** * pmcraid_show_adapter_id - Display driver assigned adapter id * @dev: class device struct * @attr: unused * @buf: buffer * * Return value: * number of bytes printed to buffer */ static ssize_t pmcraid_show_adapter_id( struct device *dev, struct device_attribute *attr, char *buf ) { struct Scsi_Host *shost = class_to_shost(dev); struct pmcraid_instance *pinstance = (struct pmcraid_instance *)shost->hostdata; u32 adapter_id = pci_dev_id(pinstance->pdev); u32 aen_group = pmcraid_event_family.id; return snprintf(buf, PAGE_SIZE, "adapter id: %d\nminor: %d\naen group: %d\n", adapter_id, MINOR(pinstance->cdev.dev), aen_group); } static struct device_attribute pmcraid_adapter_id_attr = { .attr = { .name = "adapter_id", .mode = S_IRUGO, }, .show = pmcraid_show_adapter_id, }; static struct attribute *pmcraid_host_attrs[] = { &pmcraid_log_level_attr.attr, &pmcraid_driver_version_attr.attr, &pmcraid_adapter_id_attr.attr, NULL, }; ATTRIBUTE_GROUPS(pmcraid_host); /* host template structure for pmcraid driver */ static const struct scsi_host_template pmcraid_host_template = { .module = THIS_MODULE, .name = PMCRAID_DRIVER_NAME, .queuecommand = pmcraid_queuecommand, .eh_abort_handler = pmcraid_eh_abort_handler, .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler, .eh_target_reset_handler = pmcraid_eh_target_reset_handler, .eh_device_reset_handler = pmcraid_eh_device_reset_handler, .eh_host_reset_handler = pmcraid_eh_host_reset_handler, .slave_alloc = pmcraid_slave_alloc, .slave_configure = pmcraid_slave_configure, .slave_destroy = pmcraid_slave_destroy, .change_queue_depth = pmcraid_change_queue_depth, .can_queue = PMCRAID_MAX_IO_CMD, .this_id = -1, .sg_tablesize = PMCRAID_MAX_IOADLS, .max_sectors = PMCRAID_IOA_MAX_SECTORS, .no_write_same = 1, .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, .shost_groups = pmcraid_host_groups, .proc_name = PMCRAID_DRIVER_NAME, }; /* * pmcraid_isr_msix - implements MSI-X interrupt handling routine * @irq: interrupt vector number * @dev_id: pointer hrrq_vector * * Return Value * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored */ static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id) { struct pmcraid_isr_param *hrrq_vector; struct pmcraid_instance *pinstance; unsigned long lock_flags; u32 intrs_val; int hrrq_id; hrrq_vector = (struct pmcraid_isr_param *)dev_id; hrrq_id = hrrq_vector->hrrq_id; pinstance = hrrq_vector->drv_inst; if (!hrrq_id) { /* Read the interrupt */ intrs_val = pmcraid_read_interrupts(pinstance); if (intrs_val && ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg) & DOORBELL_INTR_MSIX_CLR) == 0)) { /* Any error interrupts including unit_check, * initiate IOA reset.In case of unit check indicate * to reset_sequence that IOA unit checked and prepare * for a dump during reset sequence */ if (intrs_val & PMCRAID_ERROR_INTERRUPTS) { if (intrs_val & INTRS_IOA_UNIT_CHECK) pinstance->ioa_unit_check = 1; pmcraid_err("ISR: error interrupts: %x \ initiating reset\n", intrs_val); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pmcraid_initiate_reset(pinstance); spin_unlock_irqrestore( pinstance->host->host_lock, lock_flags); } /* If interrupt was as part of the ioa initialization, * clear it. Delete the timer and wakeup the * reset engine to proceed with reset sequence */ if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL) pmcraid_clr_trans_op(pinstance); /* Clear the interrupt register by writing * to host to ioa doorbell. Once done * FW will clear the interrupt. 
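* The same register is read back right after the write so that the doorbell write is posted to the adapter before the tasklet is scheduled.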
*/ iowrite32(DOORBELL_INTR_MSIX_CLR, pinstance->int_regs.host_ioa_interrupt_reg); ioread32(pinstance->int_regs.host_ioa_interrupt_reg); } } tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id])); return IRQ_HANDLED; } /** * pmcraid_isr - implements legacy interrupt handling routine * * @irq: interrupt vector number * @dev_id: pointer hrrq_vector * * Return Value * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored */ static irqreturn_t pmcraid_isr(int irq, void *dev_id) { struct pmcraid_isr_param *hrrq_vector; struct pmcraid_instance *pinstance; u32 intrs; unsigned long lock_flags; int hrrq_id = 0; /* In case of legacy interrupt mode where interrupts are shared across * isrs, it may be possible that the current interrupt is not from IOA */ if (!dev_id) { printk(KERN_INFO "%s(): NULL host pointer\n", __func__); return IRQ_NONE; } hrrq_vector = (struct pmcraid_isr_param *)dev_id; pinstance = hrrq_vector->drv_inst; intrs = pmcraid_read_interrupts(pinstance); if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) return IRQ_NONE; /* Any error interrupts including unit_check, initiate IOA reset. * In case of unit check indicate to reset_sequence that IOA unit * checked and prepare for a dump during reset sequence */ if (intrs & PMCRAID_ERROR_INTERRUPTS) { if (intrs & INTRS_IOA_UNIT_CHECK) pinstance->ioa_unit_check = 1; iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg); pmcraid_err("ISR: error interrupts: %x initiating reset\n", intrs); intrs = ioread32( pinstance->int_regs.ioa_host_interrupt_clr_reg); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); pmcraid_initiate_reset(pinstance); spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); } else { /* If interrupt was as part of the ioa initialization, * clear. Delete the timer and wakeup the * reset engine to proceed with reset sequence */ if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) { pmcraid_clr_trans_op(pinstance); } else { iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg); ioread32( pinstance->int_regs.ioa_host_interrupt_clr_reg); tasklet_schedule( &(pinstance->isr_tasklet[hrrq_id])); } } return IRQ_HANDLED; } /** * pmcraid_worker_function - worker thread function * * @workp: pointer to struct work queue * * Return Value * None */ static void pmcraid_worker_function(struct work_struct *workp) { struct pmcraid_instance *pinstance; struct pmcraid_resource_entry *res; struct pmcraid_resource_entry *temp; struct scsi_device *sdev; unsigned long lock_flags; unsigned long host_lock_flags; u16 fw_version; u8 bus, target, lun; pinstance = container_of(workp, struct pmcraid_instance, worker_q); /* add resources only after host is added into system */ if (!atomic_read(&pinstance->expose_resources)) return; fw_version = be16_to_cpu(pinstance->inq_data->fw_version); spin_lock_irqsave(&pinstance->resource_lock, lock_flags); list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) { if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) { sdev = res->scsi_dev; /* host_lock must be held before calling * scsi_device_get */ spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags); if (!scsi_device_get(sdev)) { spin_unlock_irqrestore( pinstance->host->host_lock, host_lock_flags); pmcraid_info("deleting %x from midlayer\n", res->cfg_entry.resource_address); list_move_tail(&res->queue, &pinstance->free_res_q); spin_unlock_irqrestore( &pinstance->resource_lock, lock_flags); scsi_remove_device(sdev); scsi_device_put(sdev); spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 
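/* resource_lock was dropped around scsi_remove_device() above and has been re-acquired before the resource entry is modified again */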
res->change_detected = 0; } else { spin_unlock_irqrestore( pinstance->host->host_lock, host_lock_flags); } } } list_for_each_entry(res, &pinstance->used_res_q, queue) { if (res->change_detected == RES_CHANGE_ADD) { if (!pmcraid_expose_resource(fw_version, &res->cfg_entry)) continue; if (RES_IS_VSET(res->cfg_entry)) { bus = PMCRAID_VSET_BUS_ID; if (fw_version <= PMCRAID_FW_VERSION_1) target = res->cfg_entry.unique_flags1; else target = le16_to_cpu(res->cfg_entry.array_id) & 0xFF; lun = PMCRAID_VSET_LUN_ID; } else { bus = PMCRAID_PHYS_BUS_ID; target = RES_TARGET( res->cfg_entry.resource_address); lun = RES_LUN(res->cfg_entry.resource_address); } res->change_detected = 0; spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); scsi_add_device(pinstance->host, bus, target, lun); spin_lock_irqsave(&pinstance->resource_lock, lock_flags); } } spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); } /** * pmcraid_tasklet_function - Tasklet function * * @instance: pointer to msix param structure * * Return Value * None */ static void pmcraid_tasklet_function(unsigned long instance) { struct pmcraid_isr_param *hrrq_vector; struct pmcraid_instance *pinstance; unsigned long hrrq_lock_flags; unsigned long pending_lock_flags; unsigned long host_lock_flags; spinlock_t *lockp; /* hrrq buffer lock */ int id; u32 resp; hrrq_vector = (struct pmcraid_isr_param *)instance; pinstance = hrrq_vector->drv_inst; id = hrrq_vector->hrrq_id; lockp = &(pinstance->hrrq_lock[id]); /* loop through each of the commands responded by IOA. Each HRRQ buf is * protected by its own lock. Traversals must be done within this lock * as there may be multiple tasklets running on multiple CPUs. Note * that the lock is held just for picking up the response handle and * manipulating hrrq_curr/toggle_bit values. */ spin_lock_irqsave(lockp, hrrq_lock_flags); resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); while ((resp & HRRQ_TOGGLE_BIT) == pinstance->host_toggle_bit[id]) { int cmd_index = resp >> 2; struct pmcraid_cmd *cmd = NULL; if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) { pinstance->hrrq_curr[id]++; } else { pinstance->hrrq_curr[id] = pinstance->hrrq_start[id]; pinstance->host_toggle_bit[id] ^= 1u; } if (cmd_index >= PMCRAID_MAX_CMD) { /* In case of invalid response handle, log message */ pmcraid_err("Invalid response handle %d\n", cmd_index); resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); continue; } cmd = pinstance->cmd_list[cmd_index]; spin_unlock_irqrestore(lockp, hrrq_lock_flags); spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags); list_del(&cmd->free_list); spin_unlock_irqrestore(&pinstance->pending_pool_lock, pending_lock_flags); del_timer(&cmd->timer); atomic_dec(&pinstance->outstanding_cmds); if (cmd->cmd_done == pmcraid_ioa_reset) { spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags); cmd->cmd_done(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, host_lock_flags); } else if (cmd->cmd_done != NULL) { cmd->cmd_done(cmd); } /* loop over until we are done with all responses */ spin_lock_irqsave(lockp, hrrq_lock_flags); resp = le32_to_cpu(*(pinstance->hrrq_curr[id])); } spin_unlock_irqrestore(lockp, hrrq_lock_flags); } /** * pmcraid_unregister_interrupt_handler - de-register interrupts handlers * @pinstance: pointer to adapter instance structure * * This routine un-registers registered interrupt handler and * also frees irqs/vectors. 
* * Return Value * None */ static void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) { struct pci_dev *pdev = pinstance->pdev; int i; for (i = 0; i < pinstance->num_hrrq; i++) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); pinstance->interrupt_mode = 0; pci_free_irq_vectors(pdev); } /** * pmcraid_register_interrupt_handler - registers interrupt handler * @pinstance: pointer to per-adapter instance structure * * Return Value * 0 on success, non-zero error code otherwise. */ static int pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) { struct pci_dev *pdev = pinstance->pdev; unsigned int irq_flag = PCI_IRQ_LEGACY, flag; int num_hrrq, rc, i; irq_handler_t isr; if (pmcraid_enable_msix) irq_flag |= PCI_IRQ_MSIX; num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS, irq_flag); if (num_hrrq < 0) return num_hrrq; if (pdev->msix_enabled) { flag = 0; isr = pmcraid_isr_msix; } else { flag = IRQF_SHARED; isr = pmcraid_isr; } for (i = 0; i < num_hrrq; i++) { struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i]; vec->hrrq_id = i; vec->drv_inst = pinstance; rc = request_irq(pci_irq_vector(pdev, i), isr, flag, PMCRAID_DRIVER_NAME, vec); if (rc) goto out_unwind; } pinstance->num_hrrq = num_hrrq; if (pdev->msix_enabled) { pinstance->interrupt_mode = 1; iowrite32(DOORBELL_INTR_MODE_MSIX, pinstance->int_regs.host_ioa_interrupt_reg); ioread32(pinstance->int_regs.host_ioa_interrupt_reg); } return 0; out_unwind: while (--i >= 0) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); pci_free_irq_vectors(pdev); return rc; } /** * pmcraid_release_cmd_blocks - release buffers allocated for command blocks * @pinstance: per adapter instance structure pointer * @max_index: number of buffer blocks to release * * Return Value * None */ static void pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index) { int i; for (i = 0; i < max_index; i++) { kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]); pinstance->cmd_list[i] = NULL; } kmem_cache_destroy(pinstance->cmd_cachep); pinstance->cmd_cachep = NULL; } /** * pmcraid_release_control_blocks - releases buffers allocated for control blocks * @pinstance: pointer to per adapter instance structure * @max_index: number of buffers (from 0 onwards) to release * * This function assumes that the command blocks for which control blocks are * linked are not released. * * Return Value * None */ static void pmcraid_release_control_blocks( struct pmcraid_instance *pinstance, int max_index ) { int i; if (pinstance->control_pool == NULL) return; for (i = 0; i < max_index; i++) { dma_pool_free(pinstance->control_pool, pinstance->cmd_list[i]->ioa_cb, pinstance->cmd_list[i]->ioa_cb_bus_addr); pinstance->cmd_list[i]->ioa_cb = NULL; pinstance->cmd_list[i]->ioa_cb_bus_addr = 0; } dma_pool_destroy(pinstance->control_pool); pinstance->control_pool = NULL; } /** * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures * @pinstance: pointer to per adapter instance structure * * Allocates memory for command blocks using kernel slab allocator.
* * Return Value * 0 in case of success; -ENOMEM in case of failure */ static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance) { int i; sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d", pinstance->host->unique_id); pinstance->cmd_cachep = kmem_cache_create( pinstance->cmd_pool_name, sizeof(struct pmcraid_cmd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!pinstance->cmd_cachep) return -ENOMEM; for (i = 0; i < PMCRAID_MAX_CMD; i++) { pinstance->cmd_list[i] = kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL); if (!pinstance->cmd_list[i]) { pmcraid_release_cmd_blocks(pinstance, i); return -ENOMEM; } } return 0; } /** * pmcraid_allocate_control_blocks - allocates memory control blocks * @pinstance : pointer to per adapter instance structure * * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs * and IOASAs. This is called after command blocks are already allocated. * * Return Value * 0 in case it can allocate all control blocks, otherwise -ENOMEM */ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance) { int i; sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d", pinstance->host->unique_id); pinstance->control_pool = dma_pool_create(pinstance->ctl_pool_name, &pinstance->pdev->dev, sizeof(struct pmcraid_control_block), PMCRAID_IOARCB_ALIGNMENT, 0); if (!pinstance->control_pool) return -ENOMEM; for (i = 0; i < PMCRAID_MAX_CMD; i++) { pinstance->cmd_list[i]->ioa_cb = dma_pool_zalloc( pinstance->control_pool, GFP_KERNEL, &(pinstance->cmd_list[i]->ioa_cb_bus_addr)); if (!pinstance->cmd_list[i]->ioa_cb) { pmcraid_release_control_blocks(pinstance, i); return -ENOMEM; } } return 0; } /** * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s) * @pinstance: pointer to per adapter instance structure * @maxindex: size of hrrq buffer pointer array * * Return Value * None */ static void pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex) { int i; for (i = 0; i < maxindex; i++) { dma_free_coherent(&pinstance->pdev->dev, HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD, pinstance->hrrq_start[i], pinstance->hrrq_start_bus_addr[i]); /* reset pointers and toggle bit to zeros */ pinstance->hrrq_start[i] = NULL; pinstance->hrrq_start_bus_addr[i] = 0; pinstance->host_toggle_bit[i] = 0; } } /** * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers * @pinstance: pointer to per adapter instance structure * * Return value * 0 hrrq buffers are allocated, -ENOMEM otherwise. 
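* Each host RRQ is a DMA-coherent buffer holding PMCRAID_MAX_CMD entries of HRRQ_ENTRY_SIZE bytes; the hrrq_curr/hrrq_end pointers and the per-queue toggle bit initialised here are what pmcraid_tasklet_function() uses to walk completed responses.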
*/ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance) { int i, buffer_size; buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD; for (i = 0; i < pinstance->num_hrrq; i++) { pinstance->hrrq_start[i] = dma_alloc_coherent(&pinstance->pdev->dev, buffer_size, &pinstance->hrrq_start_bus_addr[i], GFP_KERNEL); if (!pinstance->hrrq_start[i]) { pmcraid_err("pci_alloc failed for hrrq vector : %d\n", i); pmcraid_release_host_rrqs(pinstance, i); return -ENOMEM; } pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; pinstance->hrrq_end[i] = pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1; pinstance->host_toggle_bit[i] = 1; spin_lock_init(&pinstance->hrrq_lock[i]); } return 0; } /** * pmcraid_release_hcams - release HCAM buffers * * @pinstance: pointer to per adapter instance structure * * Return value * none */ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance) { if (pinstance->ccn.msg != NULL) { dma_free_coherent(&pinstance->pdev->dev, PMCRAID_AEN_HDR_SIZE + sizeof(struct pmcraid_hcam_ccn_ext), pinstance->ccn.msg, pinstance->ccn.baddr); pinstance->ccn.msg = NULL; pinstance->ccn.hcam = NULL; pinstance->ccn.baddr = 0; } if (pinstance->ldn.msg != NULL) { dma_free_coherent(&pinstance->pdev->dev, PMCRAID_AEN_HDR_SIZE + sizeof(struct pmcraid_hcam_ldn), pinstance->ldn.msg, pinstance->ldn.baddr); pinstance->ldn.msg = NULL; pinstance->ldn.hcam = NULL; pinstance->ldn.baddr = 0; } } /** * pmcraid_allocate_hcams - allocates HCAM buffers * @pinstance : pointer to per adapter instance structure * * Return Value: * 0 in case of successful allocation, non-zero otherwise */ static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance) { pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev, PMCRAID_AEN_HDR_SIZE + sizeof(struct pmcraid_hcam_ccn_ext), &pinstance->ccn.baddr, GFP_KERNEL); pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev, PMCRAID_AEN_HDR_SIZE + sizeof(struct pmcraid_hcam_ldn), &pinstance->ldn.baddr, GFP_KERNEL); if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) { pmcraid_release_hcams(pinstance); } else { pinstance->ccn.hcam = (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE; pinstance->ldn.hcam = (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE; atomic_set(&pinstance->ccn.ignore, 0); atomic_set(&pinstance->ldn.ignore, 0); } return (pinstance->ldn.msg == NULL) ? 
-ENOMEM : 0; } /** * pmcraid_release_config_buffers - release config.table buffers * @pinstance: pointer to per adapter instance structure * * Return Value * none */ static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance) { if (pinstance->cfg_table != NULL && pinstance->cfg_table_bus_addr != 0) { dma_free_coherent(&pinstance->pdev->dev, sizeof(struct pmcraid_config_table), pinstance->cfg_table, pinstance->cfg_table_bus_addr); pinstance->cfg_table = NULL; pinstance->cfg_table_bus_addr = 0; } if (pinstance->res_entries != NULL) { int i; for (i = 0; i < PMCRAID_MAX_RESOURCES; i++) list_del(&pinstance->res_entries[i].queue); kfree(pinstance->res_entries); pinstance->res_entries = NULL; } pmcraid_release_hcams(pinstance); } /** * pmcraid_allocate_config_buffers - allocates DMAable memory for config table * @pinstance : pointer to per adapter instance structure * * Return Value * 0 for successful allocation, -ENOMEM for any failure */ static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance) { int i; pinstance->res_entries = kcalloc(PMCRAID_MAX_RESOURCES, sizeof(struct pmcraid_resource_entry), GFP_KERNEL); if (NULL == pinstance->res_entries) { pmcraid_err("failed to allocate memory for resource table\n"); return -ENOMEM; } for (i = 0; i < PMCRAID_MAX_RESOURCES; i++) list_add_tail(&pinstance->res_entries[i].queue, &pinstance->free_res_q); pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev, sizeof(struct pmcraid_config_table), &pinstance->cfg_table_bus_addr, GFP_KERNEL); if (NULL == pinstance->cfg_table) { pmcraid_err("couldn't alloc DMA memory for config table\n"); pmcraid_release_config_buffers(pinstance); return -ENOMEM; } if (pmcraid_allocate_hcams(pinstance)) { pmcraid_err("could not alloc DMA memory for HCAMS\n"); pmcraid_release_config_buffers(pinstance); return -ENOMEM; } return 0; } /** * pmcraid_init_tasklets - registers tasklets for response handling * * @pinstance: pointer adapter instance structure * * Return value * none */ static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance) { int i; for (i = 0; i < pinstance->num_hrrq; i++) tasklet_init(&pinstance->isr_tasklet[i], pmcraid_tasklet_function, (unsigned long)&pinstance->hrrq_vector[i]); } /** * pmcraid_kill_tasklets - destroys tasklets registered for response handling * * @pinstance: pointer to adapter instance structure * * Return value * none */ static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance) { int i; for (i = 0; i < pinstance->num_hrrq; i++) tasklet_kill(&pinstance->isr_tasklet[i]); } /** * pmcraid_release_buffers - release per-adapter buffers allocated * * @pinstance: pointer to adapter soft state * * Return Value * none */ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance) { pmcraid_release_config_buffers(pinstance); pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD); pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); if (pinstance->inq_data != NULL) { dma_free_coherent(&pinstance->pdev->dev, sizeof(struct pmcraid_inquiry_data), pinstance->inq_data, pinstance->inq_data_baddr); pinstance->inq_data = NULL; pinstance->inq_data_baddr = 0; } if (pinstance->timestamp_data != NULL) { dma_free_coherent(&pinstance->pdev->dev, sizeof(struct pmcraid_timestamp_data), pinstance->timestamp_data, pinstance->timestamp_data_baddr); pinstance->timestamp_data = NULL; pinstance->timestamp_data_baddr = 0; } } /** * pmcraid_init_buffers - allocates memory and 
initializes various structures * @pinstance: pointer to per adapter instance structure * * This routine pre-allocates memory based on the type of block as below: * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator, * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator * config-table entries : DMAable memory using dma_alloc_coherent * HostRRQs : DMAable memory, using dma_alloc_coherent * * Return Value * 0 in case all of the blocks are allocated, -ENOMEM otherwise. */ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance) { int i; if (pmcraid_allocate_host_rrqs(pinstance)) { pmcraid_err("couldn't allocate memory for %d host rrqs\n", pinstance->num_hrrq); return -ENOMEM; } if (pmcraid_allocate_config_buffers(pinstance)) { pmcraid_err("couldn't allocate memory for config buffers\n"); pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); return -ENOMEM; } if (pmcraid_allocate_cmd_blocks(pinstance)) { pmcraid_err("couldn't allocate memory for cmd blocks\n"); pmcraid_release_config_buffers(pinstance); pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); return -ENOMEM; } if (pmcraid_allocate_control_blocks(pinstance)) { pmcraid_err("couldn't allocate memory control blocks\n"); pmcraid_release_config_buffers(pinstance); pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD); pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); return -ENOMEM; } /* allocate DMAable memory for page D0 INQUIRY buffer */ pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev, sizeof(struct pmcraid_inquiry_data), &pinstance->inq_data_baddr, GFP_KERNEL); if (pinstance->inq_data == NULL) { pmcraid_err("couldn't allocate DMA memory for INQUIRY\n"); pmcraid_release_buffers(pinstance); return -ENOMEM; } /* allocate DMAable memory for set timestamp data buffer */ pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev, sizeof(struct pmcraid_timestamp_data), &pinstance->timestamp_data_baddr, GFP_KERNEL); if (pinstance->timestamp_data == NULL) { pmcraid_err("couldn't allocate DMA memory for \ set time_stamp \n"); pmcraid_release_buffers(pinstance); return -ENOMEM; } /* Initialize all the command blocks and add them to free pool. 
No * need to lock (free_pool_lock) as this is done in initialization * itself */ for (i = 0; i < PMCRAID_MAX_CMD; i++) { struct pmcraid_cmd *cmdp = pinstance->cmd_list[i]; pmcraid_init_cmdblk(cmdp, i); cmdp->drv_inst = pinstance; list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool); } return 0; } /** * pmcraid_reinit_buffers - resets various buffer pointers * @pinstance: pointer to adapter instance * Return value * none */ static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance) { int i; int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD; for (i = 0; i < pinstance->num_hrrq; i++) { memset(pinstance->hrrq_start[i], 0, buffer_size); pinstance->hrrq_curr[i] = pinstance->hrrq_start[i]; pinstance->hrrq_end[i] = pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1; pinstance->host_toggle_bit[i] = 1; } } /** * pmcraid_init_instance - initialize per instance data structure * @pdev: pointer to pci device structure * @host: pointer to Scsi_Host structure * @mapped_pci_addr: memory mapped IOA configuration registers * * Return Value * 0 on success, non-zero in case of any failure */ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host, void __iomem *mapped_pci_addr) { struct pmcraid_instance *pinstance = (struct pmcraid_instance *)host->hostdata; pinstance->host = host; pinstance->pdev = pdev; /* Initialize register addresses */ pinstance->mapped_dma_addr = mapped_pci_addr; /* Initialize chip-specific details */ { struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg; struct pmcraid_interrupts *pint_regs = &pinstance->int_regs; pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin; pint_regs->ioa_host_interrupt_reg = mapped_pci_addr + chip_cfg->ioa_host_intr; pint_regs->ioa_host_interrupt_clr_reg = mapped_pci_addr + chip_cfg->ioa_host_intr_clr; pint_regs->ioa_host_msix_interrupt_reg = mapped_pci_addr + chip_cfg->ioa_host_msix_intr; pint_regs->host_ioa_interrupt_reg = mapped_pci_addr + chip_cfg->host_ioa_intr; pint_regs->host_ioa_interrupt_clr_reg = mapped_pci_addr + chip_cfg->host_ioa_intr_clr; /* Current version of firmware exposes interrupt mask set * and mask clr registers through memory mapped bar0. */ pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox; pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus; pint_regs->ioa_host_interrupt_mask_reg = mapped_pci_addr + chip_cfg->ioa_host_mask; pint_regs->ioa_host_interrupt_mask_clr_reg = mapped_pci_addr + chip_cfg->ioa_host_mask_clr; pint_regs->global_interrupt_mask_reg = mapped_pci_addr + chip_cfg->global_intr_mask; } pinstance->ioa_reset_attempts = 0; init_waitqueue_head(&pinstance->reset_wait_q); atomic_set(&pinstance->outstanding_cmds, 0); atomic_set(&pinstance->last_message_id, 0); atomic_set(&pinstance->expose_resources, 0); INIT_LIST_HEAD(&pinstance->free_res_q); INIT_LIST_HEAD(&pinstance->used_res_q); INIT_LIST_HEAD(&pinstance->free_cmd_pool); INIT_LIST_HEAD(&pinstance->pending_cmd_pool); spin_lock_init(&pinstance->free_pool_lock); spin_lock_init(&pinstance->pending_pool_lock); spin_lock_init(&pinstance->resource_lock); mutex_init(&pinstance->aen_queue_lock); /* Work-queue (Shared) for deferred processing error handling */ INIT_WORK(&pinstance->worker_q, pmcraid_worker_function); /* Initialize the default log_level */ pinstance->current_log_level = pmcraid_log_level; /* Setup variables required for reset engine */ pinstance->ioa_state = IOA_STATE_UNKNOWN; pinstance->reset_cmd = NULL; return 0; } /** * pmcraid_shutdown - shutdown adapter controller. 
* @pdev: pci device struct * * Issues an adapter shutdown to the card waits for its completion * * Return value * none */ static void pmcraid_shutdown(struct pci_dev *pdev) { struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); pmcraid_reset_bringdown(pinstance); } /* * pmcraid_get_minor - returns unused minor number from minor number bitmap */ static unsigned short pmcraid_get_minor(void) { int minor; minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS); __set_bit(minor, pmcraid_minor); return minor; } /* * pmcraid_release_minor - releases given minor back to minor number bitmap */ static void pmcraid_release_minor(unsigned short minor) { __clear_bit(minor, pmcraid_minor); } /** * pmcraid_setup_chrdev - allocates a minor number and registers a char device * * @pinstance: pointer to adapter instance for which to register device * * Return value * 0 in case of success, otherwise non-zero */ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance) { int minor; int error; minor = pmcraid_get_minor(); cdev_init(&pinstance->cdev, &pmcraid_fops); pinstance->cdev.owner = THIS_MODULE; error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1); if (error) pmcraid_release_minor(minor); else device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor), NULL, "%s%u", PMCRAID_DEVFILE, minor); return error; } /** * pmcraid_release_chrdev - unregisters per-adapter management interface * * @pinstance: pointer to adapter instance structure * * Return value * none */ static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance) { pmcraid_release_minor(MINOR(pinstance->cdev.dev)); device_destroy(pmcraid_class, MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev))); cdev_del(&pinstance->cdev); } /** * pmcraid_remove - IOA hot plug remove entry point * @pdev: pci device struct * * Return value * none */ static void pmcraid_remove(struct pci_dev *pdev) { struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); /* remove the management interface (/dev file) for this device */ pmcraid_release_chrdev(pinstance); /* remove host template from scsi midlayer */ scsi_remove_host(pinstance->host); /* block requests from mid-layer */ scsi_block_requests(pinstance->host); /* initiate shutdown adapter */ pmcraid_shutdown(pdev); pmcraid_disable_interrupts(pinstance, ~0); flush_work(&pinstance->worker_q); pmcraid_kill_tasklets(pinstance); pmcraid_unregister_interrupt_handler(pinstance); pmcraid_release_buffers(pinstance); iounmap(pinstance->mapped_dma_addr); pci_release_regions(pdev); scsi_host_put(pinstance->host); pci_disable_device(pdev); return; } /** * pmcraid_suspend - driver suspend entry point for power management * @dev: Device structure * * Return Value - 0 always */ static int __maybe_unused pmcraid_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); pmcraid_shutdown(pdev); pmcraid_disable_interrupts(pinstance, ~0); pmcraid_kill_tasklets(pinstance); pmcraid_unregister_interrupt_handler(pinstance); return 0; } /** * pmcraid_resume - driver resume entry point PCI power management * @dev: Device structure * * Return Value - 0 in case of success. 
Error code in case of any failure */ static int __maybe_unused pmcraid_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct pmcraid_instance *pinstance = pci_get_drvdata(pdev); struct Scsi_Host *host = pinstance->host; int rc = 0; if (sizeof(dma_addr_t) == 4 || dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc == 0) rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc != 0) { dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n"); goto disable_device; } pmcraid_disable_interrupts(pinstance, ~0); atomic_set(&pinstance->outstanding_cmds, 0); rc = pmcraid_register_interrupt_handler(pinstance); if (rc) { dev_err(&pdev->dev, "resume: couldn't register interrupt handlers\n"); rc = -ENODEV; goto release_host; } pmcraid_init_tasklets(pinstance); pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); /* Start with hard reset sequence which brings up IOA to operational * state as well as completes the reset sequence. */ pinstance->ioa_hard_reset = 1; /* Start IOA firmware initialization and bring card to Operational * state. */ if (pmcraid_reset_bringup(pinstance)) { dev_err(&pdev->dev, "couldn't initialize IOA\n"); rc = -ENODEV; goto release_tasklets; } return 0; release_tasklets: pmcraid_disable_interrupts(pinstance, ~0); pmcraid_kill_tasklets(pinstance); pmcraid_unregister_interrupt_handler(pinstance); release_host: scsi_host_put(host); disable_device: return rc; } /** * pmcraid_complete_ioa_reset - Called by either timer or tasklet during * completion of the ioa reset * @cmd: pointer to reset command block */ static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long flags; spin_lock_irqsave(pinstance->host->host_lock, flags); pmcraid_ioa_reset(cmd); spin_unlock_irqrestore(pinstance->host->host_lock, flags); scsi_unblock_requests(pinstance->host); schedule_work(&pinstance->worker_q); } /** * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP * * @cmd: pointer to pmcraid_cmd structure * * Return Value * 0 for success or non-zero for failure cases */ static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd) { struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset; pmcraid_reinit_cmdblk(cmd); ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); ioarcb->request_type = REQ_TYPE_IOACMD; ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES; ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED; /* If this was called as part of resource table reinitialization due to * lost CCN, it is enough to return the command block back to free pool * as part of set_supported_devs completion function. 
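* In that case cmd_done is switched to pmcraid_reinit_cfgtable_done below; otherwise the default pmcraid_complete_ioa_reset set above finishes the reset sequence.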
*/ if (cmd->drv_inst->reinit_cfg_table) { cmd->drv_inst->reinit_cfg_table = 0; cmd->release = 1; cmd_done = pmcraid_reinit_cfgtable_done; } /* we will be done with the reset sequence after set supported devices, * setup the done function to return the command block back to free * pool */ pmcraid_send_cmd(cmd, cmd_done, PMCRAID_SET_SUP_DEV_TIMEOUT, pmcraid_timeout_handler); return; } /** * pmcraid_set_timestamp - set the timestamp to IOAFP * * @cmd: pointer to pmcraid_cmd structure * * Return Value * 0 for success or non-zero for failure cases */ static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN); struct pmcraid_ioadl_desc *ioadl; u64 timestamp; timestamp = ktime_get_real_seconds() * 1000; pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp); pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8); pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16); pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24); pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32); pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40); pmcraid_reinit_cmdblk(cmd); ioarcb->request_type = REQ_TYPE_SCSI; ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP; ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION; memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len)); ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; ioarcb->data_transfer_length = cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); ioadl = &(ioarcb->add_data.u.ioadl[0]); ioadl->flags = IOADL_FLAGS_LAST_DESC; ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr); ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); if (!pinstance->timestamp_error) { pinstance->timestamp_error = 0; pmcraid_send_cmd(cmd, pmcraid_set_supported_devs, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); } else { pmcraid_send_cmd(cmd, pmcraid_return_cmd, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); return; } } /** * pmcraid_init_res_table - Initialize the resource table * @cmd: pointer to pmcraid command struct * * This function looks through the existing resource table, comparing * it with the config table. This function will take care of old/new * devices and schedule adding/removing them from the mid-layer * as appropriate. * * Return value * None */ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd) { struct pmcraid_instance *pinstance = cmd->drv_inst; struct pmcraid_resource_entry *res, *temp; struct pmcraid_config_table_entry *cfgte; unsigned long lock_flags; int found, rc, i; u16 fw_version; LIST_HEAD(old_res); if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED) pmcraid_err("IOA requires microcode download\n"); fw_version = be16_to_cpu(pinstance->inq_data->fw_version); /* resource list is protected by pinstance->resource_lock. 
* init_res_table can be called from probe (user-thread) or runtime * reset (timer/tasklet) */ spin_lock_irqsave(&pinstance->resource_lock, lock_flags); list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) list_move_tail(&res->queue, &old_res); for (i = 0; i < le16_to_cpu(pinstance->cfg_table->num_entries); i++) { if (be16_to_cpu(pinstance->inq_data->fw_version) <= PMCRAID_FW_VERSION_1) cfgte = &pinstance->cfg_table->entries[i]; else cfgte = (struct pmcraid_config_table_entry *) &pinstance->cfg_table->entries_ext[i]; if (!pmcraid_expose_resource(fw_version, cfgte)) continue; found = 0; /* If this entry was already detected and initialized */ list_for_each_entry_safe(res, temp, &old_res, queue) { rc = memcmp(&res->cfg_entry.resource_address, &cfgte->resource_address, sizeof(cfgte->resource_address)); if (!rc) { list_move_tail(&res->queue, &pinstance->used_res_q); found = 1; break; } } /* If this is new entry, initialize it and add it the queue */ if (!found) { if (list_empty(&pinstance->free_res_q)) { pmcraid_err("Too many devices attached\n"); break; } found = 1; res = list_entry(pinstance->free_res_q.next, struct pmcraid_resource_entry, queue); res->scsi_dev = NULL; res->change_detected = RES_CHANGE_ADD; res->reset_progress = 0; list_move_tail(&res->queue, &pinstance->used_res_q); } /* copy new configuration table entry details into driver * maintained resource entry */ if (found) { memcpy(&res->cfg_entry, cfgte, pinstance->config_table_entry_size); pmcraid_info("New res type:%x, vset:%x, addr:%x:\n", res->cfg_entry.resource_type, (fw_version <= PMCRAID_FW_VERSION_1 ? res->cfg_entry.unique_flags1 : le16_to_cpu(res->cfg_entry.array_id) & 0xFF), le32_to_cpu(res->cfg_entry.resource_address)); } } /* Detect any deleted entries, mark them for deletion from mid-layer */ list_for_each_entry_safe(res, temp, &old_res, queue) { if (res->scsi_dev) { res->change_detected = RES_CHANGE_DEL; res->cfg_entry.resource_handle = PMCRAID_INVALID_RES_HANDLE; list_move_tail(&res->queue, &pinstance->used_res_q); } else { list_move_tail(&res->queue, &pinstance->free_res_q); } } /* release the resource list lock */ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); pmcraid_set_timestamp(cmd); } /** * pmcraid_querycfg - Send a Query IOA Config to the adapter. * @cmd: pointer pmcraid_cmd struct * * This function sends a Query IOA Configuration command to the adapter to * retrieve the IOA configuration table. 
* * Return value: * none */ static void pmcraid_querycfg(struct pmcraid_cmd *cmd) { struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; struct pmcraid_ioadl_desc *ioadl; struct pmcraid_instance *pinstance = cmd->drv_inst; __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table)); if (be16_to_cpu(pinstance->inq_data->fw_version) <= PMCRAID_FW_VERSION_1) pinstance->config_table_entry_size = sizeof(struct pmcraid_config_table_entry); else pinstance->config_table_entry_size = sizeof(struct pmcraid_config_table_entry_ext); ioarcb->request_type = REQ_TYPE_IOACMD; ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG; /* firmware requires 4-byte length field, specified in B.E format */ memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size)); /* Since entire config table can be described by single IOADL, it can * be part of IOARCB itself */ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL); ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->data_transfer_length = cpu_to_le32(sizeof(struct pmcraid_config_table)); ioadl = &(ioarcb->add_data.u.ioadl[0]); ioadl->flags = IOADL_FLAGS_LAST_DESC; ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr); ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table)); pmcraid_send_cmd(cmd, pmcraid_init_res_table, PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); } /** * pmcraid_probe - PCI probe entry pointer for PMC MaxRAID controller driver * @pdev: pointer to pci device structure * @dev_id: pointer to device ids structure * * Return Value * returns 0 if the device is claimed and successfully configured. * returns non-zero error code in case of any failure */ static int pmcraid_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct pmcraid_instance *pinstance; struct Scsi_Host *host; void __iomem *mapped_pci_addr; int rc = PCIBIOS_SUCCESSFUL; if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) { pmcraid_err ("maximum number(%d) of supported adapters reached\n", atomic_read(&pmcraid_adapter_count)); return -ENOMEM; } atomic_inc(&pmcraid_adapter_count); rc = pci_enable_device(pdev); if (rc) { dev_err(&pdev->dev, "Cannot enable adapter\n"); atomic_dec(&pmcraid_adapter_count); return rc; } dev_info(&pdev->dev, "Found new IOA(%x:%x), Total IOA count: %d\n", pdev->vendor, pdev->device, atomic_read(&pmcraid_adapter_count)); rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME); if (rc < 0) { dev_err(&pdev->dev, "Couldn't register memory range of registers\n"); goto out_disable_device; } mapped_pci_addr = pci_iomap(pdev, 0, 0); if (!mapped_pci_addr) { dev_err(&pdev->dev, "Couldn't map PCI registers memory\n"); rc = -ENOMEM; goto out_release_regions; } pci_set_master(pdev); /* Firmware requires the system bus address of IOARCB to be within * 32-bit addressable range though it has 64-bit IOARRIN register. * However, firmware supports 64-bit streaming DMA buffers, whereas * coherent buffers are to be 32-bit. Since dma_alloc_coherent always * returns memory within 4GB (if not, change this logic), coherent * buffers are within firmware acceptable address ranges. 
*/ if (sizeof(dma_addr_t) == 4 || dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32 * bit mask for dma_alloc_coherent to return addresses within 4GB */ if (rc == 0) rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc != 0) { dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); goto cleanup_nomem; } host = scsi_host_alloc(&pmcraid_host_template, sizeof(struct pmcraid_instance)); if (!host) { dev_err(&pdev->dev, "scsi_host_alloc failed!\n"); rc = -ENOMEM; goto cleanup_nomem; } host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS; host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET; host->unique_id = host->host_no; host->max_channel = PMCRAID_MAX_BUS_TO_SCAN; host->max_cmd_len = PMCRAID_MAX_CDB_LEN; /* zero out entire instance structure */ pinstance = (struct pmcraid_instance *)host->hostdata; memset(pinstance, 0, sizeof(*pinstance)); pinstance->chip_cfg = (struct pmcraid_chip_details *)(dev_id->driver_data); rc = pmcraid_init_instance(pdev, host, mapped_pci_addr); if (rc < 0) { dev_err(&pdev->dev, "failed to initialize adapter instance\n"); goto out_scsi_host_put; } pci_set_drvdata(pdev, pinstance); /* Save PCI config-space for use following the reset */ rc = pci_save_state(pinstance->pdev); if (rc != 0) { dev_err(&pdev->dev, "Failed to save PCI config space\n"); goto out_scsi_host_put; } pmcraid_disable_interrupts(pinstance, ~0); rc = pmcraid_register_interrupt_handler(pinstance); if (rc) { dev_err(&pdev->dev, "couldn't register interrupt handler\n"); goto out_scsi_host_put; } pmcraid_init_tasklets(pinstance); /* allocate various buffers used by LLD. */ rc = pmcraid_init_buffers(pinstance); if (rc) { pmcraid_err("couldn't allocate memory blocks\n"); goto out_unregister_isr; } /* check the reset type required */ pmcraid_reset_type(pinstance); pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS); /* Start IOA firmware initialization and bring card to Operational * state.
*/ pmcraid_info("starting IOA initialization sequence\n"); if (pmcraid_reset_bringup(pinstance)) { dev_err(&pdev->dev, "couldn't initialize IOA\n"); rc = 1; goto out_release_bufs; } /* Add adapter instance into mid-layer list */ rc = scsi_add_host(pinstance->host, &pdev->dev); if (rc != 0) { pmcraid_err("couldn't add host into mid-layer: %d\n", rc); goto out_release_bufs; } scsi_scan_host(pinstance->host); rc = pmcraid_setup_chrdev(pinstance); if (rc != 0) { pmcraid_err("couldn't create mgmt interface, error: %x\n", rc); goto out_remove_host; } /* Schedule worker thread to handle CCN and take care of adding and * removing devices to OS */ atomic_set(&pinstance->expose_resources, 1); schedule_work(&pinstance->worker_q); return rc; out_remove_host: scsi_remove_host(host); out_release_bufs: pmcraid_release_buffers(pinstance); out_unregister_isr: pmcraid_kill_tasklets(pinstance); pmcraid_unregister_interrupt_handler(pinstance); out_scsi_host_put: scsi_host_put(host); cleanup_nomem: iounmap(mapped_pci_addr); out_release_regions: pci_release_regions(pdev); out_disable_device: atomic_dec(&pmcraid_adapter_count); pci_disable_device(pdev); return -ENODEV; } static SIMPLE_DEV_PM_OPS(pmcraid_pm_ops, pmcraid_suspend, pmcraid_resume); /* * PCI driver structure of pmcraid driver */ static struct pci_driver pmcraid_driver = { .name = PMCRAID_DRIVER_NAME, .id_table = pmcraid_pci_table, .probe = pmcraid_probe, .remove = pmcraid_remove, .driver.pm = &pmcraid_pm_ops, .shutdown = pmcraid_shutdown }; /** * pmcraid_init - module load entry point */ static int __init pmcraid_init(void) { dev_t dev; int error; pmcraid_info("%s Device Driver version: %s\n", PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION); error = alloc_chrdev_region(&dev, 0, PMCRAID_MAX_ADAPTERS, PMCRAID_DEVFILE); if (error) { pmcraid_err("failed to get a major number for adapters\n"); goto out_init; } pmcraid_major = MAJOR(dev); pmcraid_class = class_create(PMCRAID_DEVFILE); if (IS_ERR(pmcraid_class)) { error = PTR_ERR(pmcraid_class); pmcraid_err("failed to register with sysfs, error = %x\n", error); goto out_unreg_chrdev; } error = pmcraid_netlink_init(); if (error) { class_destroy(pmcraid_class); goto out_unreg_chrdev; } error = pci_register_driver(&pmcraid_driver); if (error == 0) goto out_init; pmcraid_err("failed to register pmcraid driver, error = %x\n", error); class_destroy(pmcraid_class); pmcraid_netlink_release(); out_unreg_chrdev: unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS); out_init: return error; } /** * pmcraid_exit - module unload entry point */ static void __exit pmcraid_exit(void) { pmcraid_netlink_release(); unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS); pci_unregister_driver(&pmcraid_driver); class_destroy(pmcraid_class); } module_init(pmcraid_init); module_exit(pmcraid_exit);
linux-master
drivers/scsi/pmcraid.c
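Editor's note: the pmcraid management node above hands out per-adapter char-device minors with a first-fit bitmap (pmcraid_get_minor()/pmcraid_release_minor() built on find_first_zero_bit(), bounded by PMCRAID_MAX_ADAPTERS). The following is a minimal, self-contained sketch of that allocation pattern, written against the C standard library so it can be compiled and run outside the kernel; every sketch_* name is hypothetical and not part of the driver.

/* Stand-alone sketch of first-fit minor allocation; illustrative only, not driver code. */
#include <stdio.h>

#define SKETCH_MAX_ADAPTERS 32

static unsigned int sketch_minor_map;	/* one bit per in-use minor */

static int sketch_get_minor(void)
{
	int minor;

	for (minor = 0; minor < SKETCH_MAX_ADAPTERS; minor++) {
		if (!(sketch_minor_map & (1u << minor))) {
			sketch_minor_map |= 1u << minor;	/* claim lowest free bit */
			return minor;
		}
	}
	return -1;	/* all minors in use */
}

static void sketch_release_minor(int minor)
{
	sketch_minor_map &= ~(1u << minor);	/* give the bit back */
}

int main(void)
{
	int first = sketch_get_minor();		/* 0 */
	int second = sketch_get_minor();	/* 1 */

	sketch_release_minor(first);
	printf("recycled minor: %d\n", sketch_get_minor());	/* 0 again */
	printf("second minor still held: %d\n", second);	/* 1 */
	return 0;
}

The kernel version gets the same behaviour from the bitmap helpers (find_first_zero_bit(), __set_bit(), __clear_bit()), which is why releasing an adapter simply clears its bit and a later probe can reuse the lowest free minor.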
// SPDX-License-Identifier: GPL-2.0-only /* * PS3 BD/DVD/CD-ROM Storage Driver * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. */ #include <linux/cdrom.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include <asm/lv1call.h> #include <asm/ps3stor.h> #define DEVICE_NAME "ps3rom" #define BOUNCE_SIZE (64*1024) #define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9) struct ps3rom_private { struct ps3_storage_device *dev; struct scsi_cmnd *curr_cmd; }; #define LV1_STORAGE_SEND_ATAPI_COMMAND (1) struct lv1_atapi_cmnd_block { u8 pkt[32]; /* packet command block */ u32 pktlen; /* should be 12 for ATAPI 8020 */ u32 blocks; u32 block_size; u32 proto; /* transfer mode */ u32 in_out; /* transfer direction */ u64 buffer; /* parameter except command block */ u32 arglen; /* length above */ }; enum lv1_atapi_proto { NON_DATA_PROTO = 0, PIO_DATA_IN_PROTO = 1, PIO_DATA_OUT_PROTO = 2, DMA_PROTO = 3 }; enum lv1_atapi_in_out { DIR_WRITE = 0, /* memory -> device */ DIR_READ = 1 /* device -> memory */ }; static int ps3rom_slave_configure(struct scsi_device *scsi_dev) { struct ps3rom_private *priv = shost_priv(scsi_dev->host); struct ps3_storage_device *dev = priv->dev; dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %llu, channel %u\n", __func__, __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel); /* * ATAPI SFF8020 devices use MODE_SENSE_10, * so we can prohibit MODE_SENSE_6 */ scsi_dev->use_10_for_ms = 1; /* we don't support {READ,WRITE}_6 */ scsi_dev->use_10_for_rw = 1; return 0; } static int ps3rom_atapi_request(struct ps3_storage_device *dev, struct scsi_cmnd *cmd) { struct lv1_atapi_cmnd_block atapi_cmnd; unsigned char opcode = cmd->cmnd[0]; int res; u64 lpar; dev_dbg(&dev->sbd.core, "%s:%u: send ATAPI command 0x%02x\n", __func__, __LINE__, opcode); memset(&atapi_cmnd, 0, sizeof(struct lv1_atapi_cmnd_block)); memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12); atapi_cmnd.pktlen = 12; atapi_cmnd.block_size = 1; /* transfer size is block_size * blocks */ atapi_cmnd.blocks = atapi_cmnd.arglen = scsi_bufflen(cmd); atapi_cmnd.buffer = dev->bounce_lpar; switch (cmd->sc_data_direction) { case DMA_FROM_DEVICE: if (scsi_bufflen(cmd) >= CD_FRAMESIZE) atapi_cmnd.proto = DMA_PROTO; else atapi_cmnd.proto = PIO_DATA_IN_PROTO; atapi_cmnd.in_out = DIR_READ; break; case DMA_TO_DEVICE: if (scsi_bufflen(cmd) >= CD_FRAMESIZE) atapi_cmnd.proto = DMA_PROTO; else atapi_cmnd.proto = PIO_DATA_OUT_PROTO; atapi_cmnd.in_out = DIR_WRITE; scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size); break; default: atapi_cmnd.proto = NON_DATA_PROTO; break; } lpar = ps3_mm_phys_to_lpar(__pa(&atapi_cmnd)); res = lv1_storage_send_device_command(dev->sbd.dev_id, LV1_STORAGE_SEND_ATAPI_COMMAND, lpar, sizeof(atapi_cmnd), atapi_cmnd.buffer, atapi_cmnd.arglen, &dev->tag); if (res == LV1_DENIED_BY_POLICY) { dev_dbg(&dev->sbd.core, "%s:%u: ATAPI command 0x%02x denied by policy\n", __func__, __LINE__, opcode); return DID_ERROR << 16; } if (res) { dev_err(&dev->sbd.core, "%s:%u: ATAPI command 0x%02x failed %d\n", __func__, __LINE__, opcode, res); return DID_ERROR << 16; } return 0; } static inline unsigned int srb10_lba(const struct scsi_cmnd *cmd) { return cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 | cmd->cmnd[4] << 8 | cmd->cmnd[5]; } static inline unsigned int srb10_len(const struct scsi_cmnd *cmd) { return cmd->cmnd[7] << 8 | 
cmd->cmnd[8]; } static int ps3rom_read_request(struct ps3_storage_device *dev, struct scsi_cmnd *cmd, u32 start_sector, u32 sectors) { int res; dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n", __func__, __LINE__, sectors, start_sector); res = lv1_storage_read(dev->sbd.dev_id, dev->regions[dev->region_idx].id, start_sector, sectors, 0, dev->bounce_lpar, &dev->tag); if (res) { dev_err(&dev->sbd.core, "%s:%u: read failed %d\n", __func__, __LINE__, res); return DID_ERROR << 16; } return 0; } static int ps3rom_write_request(struct ps3_storage_device *dev, struct scsi_cmnd *cmd, u32 start_sector, u32 sectors) { int res; dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n", __func__, __LINE__, sectors, start_sector); scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size); res = lv1_storage_write(dev->sbd.dev_id, dev->regions[dev->region_idx].id, start_sector, sectors, 0, dev->bounce_lpar, &dev->tag); if (res) { dev_err(&dev->sbd.core, "%s:%u: write failed %d\n", __func__, __LINE__, res); return DID_ERROR << 16; } return 0; } static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd) { struct ps3rom_private *priv = shost_priv(cmd->device->host); struct ps3_storage_device *dev = priv->dev; unsigned char opcode; int res; priv->curr_cmd = cmd; opcode = cmd->cmnd[0]; /* * While we can submit READ/WRITE SCSI commands as ATAPI commands, * it's recommended for various reasons (performance, error handling, * ...) to use lv1_storage_{read,write}() instead */ switch (opcode) { case READ_10: res = ps3rom_read_request(dev, cmd, srb10_lba(cmd), srb10_len(cmd)); break; case WRITE_10: res = ps3rom_write_request(dev, cmd, srb10_lba(cmd), srb10_len(cmd)); break; default: res = ps3rom_atapi_request(dev, cmd); break; } if (res) { scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0); cmd->result = res; priv->curr_cmd = NULL; scsi_done(cmd); } return 0; } static DEF_SCSI_QCMD(ps3rom_queuecommand) static int decode_lv1_status(u64 status, unsigned char *sense_key, unsigned char *asc, unsigned char *ascq) { if (((status >> 24) & 0xff) != SAM_STAT_CHECK_CONDITION) return -1; *sense_key = (status >> 16) & 0xff; *asc = (status >> 8) & 0xff; *ascq = status & 0xff; return 0; } static irqreturn_t ps3rom_interrupt(int irq, void *data) { struct ps3_storage_device *dev = data; struct Scsi_Host *host; struct ps3rom_private *priv; struct scsi_cmnd *cmd; int res; u64 tag, status; unsigned char sense_key, asc, ascq; res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); /* * status = -1 may mean that ATAPI transport completed OK, but * ATAPI command itself resulted CHECK CONDITION * so, upper layer should issue REQUEST_SENSE to check the sense data */ if (tag != dev->tag) dev_err(&dev->sbd.core, "%s:%u: tag mismatch, got %llx, expected %llx\n", __func__, __LINE__, tag, dev->tag); if (res) { dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", __func__, __LINE__, res, status); return IRQ_HANDLED; } host = ps3_system_bus_get_drvdata(&dev->sbd); priv = shost_priv(host); cmd = priv->curr_cmd; if (!status) { /* OK, completed */ if (cmd->sc_data_direction == DMA_FROM_DEVICE) { int len; len = scsi_sg_copy_from_buffer(cmd, dev->bounce_buf, dev->bounce_size); scsi_set_resid(cmd, scsi_bufflen(cmd) - len); } cmd->result = DID_OK << 16; goto done; } if (cmd->cmnd[0] == REQUEST_SENSE) { /* SCSI spec says request sense should never get error */ dev_err(&dev->sbd.core, "%s:%u: end error without autosense\n", __func__, __LINE__); cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION; goto done; } 
if (decode_lv1_status(status, &sense_key, &asc, &ascq)) { cmd->result = DID_ERROR << 16; goto done; } scsi_build_sense(cmd, 0, sense_key, asc, ascq); done: priv->curr_cmd = NULL; scsi_done(cmd); return IRQ_HANDLED; } static const struct scsi_host_template ps3rom_host_template = { .name = DEVICE_NAME, .slave_configure = ps3rom_slave_configure, .queuecommand = ps3rom_queuecommand, .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, .emulated = 1, /* only sg driver uses this */ .max_sectors = PS3ROM_MAX_SECTORS, .module = THIS_MODULE, }; static int ps3rom_probe(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); int error; struct Scsi_Host *host; struct ps3rom_private *priv; if (dev->blk_size != CD_FRAMESIZE) { dev_err(&dev->sbd.core, "%s:%u: cannot handle block size %llu\n", __func__, __LINE__, dev->blk_size); return -EINVAL; } dev->bounce_size = BOUNCE_SIZE; dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA); if (!dev->bounce_buf) return -ENOMEM; error = ps3stor_setup(dev, ps3rom_interrupt); if (error) goto fail_free_bounce; host = scsi_host_alloc(&ps3rom_host_template, sizeof(struct ps3rom_private)); if (!host) { dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n", __func__, __LINE__); error = -ENOMEM; goto fail_teardown; } priv = shost_priv(host); ps3_system_bus_set_drvdata(&dev->sbd, host); priv->dev = dev; /* One device/LUN per SCSI bus */ host->max_id = 1; host->max_lun = 1; error = scsi_add_host(host, &dev->sbd.core); if (error) { dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed %d\n", __func__, __LINE__, error); error = -ENODEV; goto fail_host_put; } scsi_scan_host(host); return 0; fail_host_put: scsi_host_put(host); ps3_system_bus_set_drvdata(&dev->sbd, NULL); fail_teardown: ps3stor_teardown(dev); fail_free_bounce: kfree(dev->bounce_buf); return error; } static void ps3rom_remove(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd); scsi_remove_host(host); ps3stor_teardown(dev); scsi_host_put(host); ps3_system_bus_set_drvdata(&dev->sbd, NULL); kfree(dev->bounce_buf); } static struct ps3_system_bus_driver ps3rom = { .match_id = PS3_MATCH_ID_STOR_ROM, .core.name = DEVICE_NAME, .core.owner = THIS_MODULE, .probe = ps3rom_probe, .remove = ps3rom_remove }; static int __init ps3rom_init(void) { return ps3_system_bus_driver_register(&ps3rom); } static void __exit ps3rom_exit(void) { ps3_system_bus_driver_unregister(&ps3rom); } module_init(ps3rom_init); module_exit(ps3rom_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PS3 BD/DVD/CD-ROM Storage Driver"); MODULE_AUTHOR("Sony Corporation"); MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_ROM);
linux-master
drivers/scsi/ps3rom.c
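Editor's note: ps3rom_queuecommand_lck() above routes READ(10)/WRITE(10) through srb10_lba()/srb10_len(), which simply unpack big-endian fields from the CDB. Below is a small, self-contained sketch of that unpacking, compilable with any C compiler; the sketch_* helper names and the sample CDB bytes are illustrative only, not part of the driver.

/* Stand-alone sketch of READ(10)/WRITE(10) CDB field extraction; illustrative only, not driver code. */
#include <stdio.h>
#include <stdint.h>

static uint32_t sketch_cdb10_lba(const uint8_t *cdb)
{
	/* bytes 2..5: logical block address, most significant byte first */
	return (uint32_t)cdb[2] << 24 | (uint32_t)cdb[3] << 16 |
	       (uint32_t)cdb[4] << 8 | cdb[5];
}

static uint16_t sketch_cdb10_len(const uint8_t *cdb)
{
	/* bytes 7..8: number of blocks to transfer, most significant byte first */
	return (uint16_t)(cdb[7] << 8 | cdb[8]);
}

int main(void)
{
	/* READ(10) asking for 8 blocks starting at LBA 0x12345678 */
	uint8_t cdb[10] = { 0x28, 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x08, 0x00 };

	printf("lba=0x%08x blocks=%u\n",
	       (unsigned int)sketch_cdb10_lba(cdb),
	       (unsigned int)sketch_cdb10_len(cdb));
	return 0;
}

As the comment in ps3rom_queuecommand_lck() notes, the driver then services these opcodes through lv1_storage_read()/lv1_storage_write() rather than tunnelling them as ATAPI packets.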
/* FlashPoint.c -- FlashPoint SCCB Manager for Linux This file contains the FlashPoint SCCB Manager from BusLogic's FlashPoint Driver Developer's Kit, with minor modifications by Leonard N. Zubkoff for Linux compatibility. It was provided by BusLogic in the form of 16 separate source files, which would have unnecessarily cluttered the scsi directory, so the individual files have been combined into this single file. Copyright 1995-1996 by Mylex Corporation. All Rights Reserved This file is available under both the GNU General Public License and a BSD-style copyright; see LICENSE.FlashPoint for details. */ #ifdef CONFIG_SCSI_FLASHPOINT #define MAX_CARDS 8 #undef BUSTYPE_PCI #define CRCMASK 0xA001 #define FAILURE 0xFFFFFFFFL struct sccb; typedef void (*CALL_BK_FN) (struct sccb *); struct sccb_mgr_info { u32 si_baseaddr; unsigned char si_present; unsigned char si_intvect; unsigned char si_id; unsigned char si_lun; u16 si_fw_revision; u16 si_per_targ_init_sync; u16 si_per_targ_fast_nego; u16 si_per_targ_ultra_nego; u16 si_per_targ_no_disc; u16 si_per_targ_wide_nego; u16 si_mflags; unsigned char si_card_family; unsigned char si_bustype; unsigned char si_card_model[3]; unsigned char si_relative_cardnum; unsigned char si_reserved[4]; u32 si_OS_reserved; unsigned char si_XlatInfo[4]; u32 si_reserved2[5]; u32 si_secondary_range; }; #define SCSI_PARITY_ENA 0x0001 #define LOW_BYTE_TERM 0x0010 #define HIGH_BYTE_TERM 0x0020 #define BUSTYPE_PCI 0x3 #define SUPPORT_16TAR_32LUN 0x0002 #define SOFT_RESET 0x0004 #define EXTENDED_TRANSLATION 0x0008 #define POST_ALL_UNDERRRUNS 0x0040 #define FLAG_SCAM_ENABLED 0x0080 #define FLAG_SCAM_LEVEL2 0x0100 #define HARPOON_FAMILY 0x02 /* SCCB struct used for both SCCB and UCB manager compiles! * The UCB Manager treats the SCCB as it's 'native hardware structure' */ /*#pragma pack(1)*/ struct sccb { unsigned char OperationCode; unsigned char ControlByte; unsigned char CdbLength; unsigned char RequestSenseLength; u32 DataLength; void *DataPointer; unsigned char CcbRes[2]; unsigned char HostStatus; unsigned char TargetStatus; unsigned char TargID; unsigned char Lun; unsigned char Cdb[12]; unsigned char CcbRes1; unsigned char Reserved1; u32 Reserved2; u32 SensePointer; CALL_BK_FN SccbCallback; /* VOID (*SccbCallback)(); */ u32 SccbIOPort; /* Identifies board base port */ unsigned char SccbStatus; unsigned char SCCBRes2; u16 SccbOSFlags; u32 Sccb_XferCnt; /* actual transfer count */ u32 Sccb_ATC; u32 SccbVirtDataPtr; /* virtual addr for OS/2 */ u32 Sccb_res1; u16 Sccb_MGRFlags; u16 Sccb_sgseg; unsigned char Sccb_scsimsg; /* identify msg for selection */ unsigned char Sccb_tag; unsigned char Sccb_scsistat; unsigned char Sccb_idmsg; /* image of last msg in */ struct sccb *Sccb_forwardlink; struct sccb *Sccb_backlink; u32 Sccb_savedATC; unsigned char Save_Cdb[6]; unsigned char Save_CdbLen; unsigned char Sccb_XferState; u32 Sccb_SGoffset; }; #pragma pack() #define SCATTER_GATHER_COMMAND 0x02 #define RESIDUAL_COMMAND 0x03 #define RESIDUAL_SG_COMMAND 0x04 #define RESET_COMMAND 0x81 #define F_USE_CMD_Q 0x20 /*Inidcates TAGGED command. */ #define TAG_TYPE_MASK 0xC0 /*Type of tag msg to send. 
*/ #define SCCB_DATA_XFER_OUT 0x10 /* Write */ #define SCCB_DATA_XFER_IN 0x08 /* Read */ #define NO_AUTO_REQUEST_SENSE 0x01 /* No Request Sense Buffer */ #define BUS_FREE_ST 0 #define SELECT_ST 1 #define SELECT_BDR_ST 2 /* Select w\ Bus Device Reset */ #define SELECT_SN_ST 3 /* Select w\ Sync Nego */ #define SELECT_WN_ST 4 /* Select w\ Wide Data Nego */ #define SELECT_Q_ST 5 /* Select w\ Tagged Q'ing */ #define COMMAND_ST 6 #define DATA_OUT_ST 7 #define DATA_IN_ST 8 #define DISCONNECT_ST 9 #define ABORT_ST 11 #define F_HOST_XFER_DIR 0x01 #define F_ALL_XFERRED 0x02 #define F_SG_XFER 0x04 #define F_AUTO_SENSE 0x08 #define F_ODD_BALL_CNT 0x10 #define F_NO_DATA_YET 0x80 #define F_STATUSLOADED 0x01 #define F_DEV_SELECTED 0x04 #define SCCB_COMPLETE 0x00 /* SCCB completed without error */ #define SCCB_DATA_UNDER_RUN 0x0C #define SCCB_SELECTION_TIMEOUT 0x11 /* Set SCSI selection timed out */ #define SCCB_DATA_OVER_RUN 0x12 #define SCCB_PHASE_SEQUENCE_FAIL 0x14 /* Target bus phase sequence failure */ #define SCCB_GROSS_FW_ERR 0x27 /* Major problem! */ #define SCCB_BM_ERR 0x30 /* BusMaster error. */ #define SCCB_PARITY_ERR 0x34 /* SCSI parity error */ #define SCCB_IN_PROCESS 0x00 #define SCCB_SUCCESS 0x01 #define SCCB_ABORT 0x02 #define SCCB_ERROR 0x04 #define ORION_FW_REV 3110 #define QUEUE_DEPTH 254+1 /*1 for Normal disconnect 32 for Q'ing. */ #define MAX_MB_CARDS 4 /* Max. no of cards suppoerted on Mother Board */ #define MAX_SCSI_TAR 16 #define MAX_LUN 32 #define LUN_MASK 0x1f #define SG_BUF_CNT 16 /*Number of prefetched elements. */ #define SG_ELEMENT_SIZE 8 /*Eight byte per element. */ #define RD_HARPOON(ioport) inb((u32)ioport) #define RDW_HARPOON(ioport) inw((u32)ioport) #define RD_HARP32(ioport,offset,data) (data = inl((u32)(ioport + offset))) #define WR_HARPOON(ioport,val) outb((u8) val, (u32)ioport) #define WRW_HARPOON(ioport,val) outw((u16)val, (u32)ioport) #define WR_HARP32(ioport,offset,data) outl(data, (u32)(ioport + offset)) #define TAR_SYNC_MASK (BIT(7)+BIT(6)) #define SYNC_TRYING BIT(6) #define SYNC_SUPPORTED (BIT(7)+BIT(6)) #define TAR_WIDE_MASK (BIT(5)+BIT(4)) #define WIDE_ENABLED BIT(4) #define WIDE_NEGOCIATED BIT(5) #define TAR_TAG_Q_MASK (BIT(3)+BIT(2)) #define TAG_Q_TRYING BIT(2) #define TAG_Q_REJECT BIT(3) #define TAR_ALLOW_DISC BIT(0) #define EE_SYNC_MASK (BIT(0)+BIT(1)) #define EE_SYNC_5MB BIT(0) #define EE_SYNC_10MB BIT(1) #define EE_SYNC_20MB (BIT(0)+BIT(1)) #define EE_WIDE_SCSI BIT(7) struct sccb_mgr_tar_info { struct sccb *TarSelQ_Head; struct sccb *TarSelQ_Tail; unsigned char TarLUN_CA; /*Contingent Allgiance */ unsigned char TarTagQ_Cnt; unsigned char TarSelQ_Cnt; unsigned char TarStatus; unsigned char TarEEValue; unsigned char TarSyncCtrl; unsigned char TarReserved[2]; /* for alignment */ unsigned char LunDiscQ_Idx[MAX_LUN]; unsigned char TarLUNBusy[MAX_LUN]; }; struct nvram_info { unsigned char niModel; /* Model No. of card */ unsigned char niCardNo; /* Card no. 
*/ u32 niBaseAddr; /* Port Address of card */ unsigned char niSysConf; /* Adapter Configuration byte - Byte 16 of eeprom map */ unsigned char niScsiConf; /* SCSI Configuration byte - Byte 17 of eeprom map */ unsigned char niScamConf; /* SCAM Configuration byte - Byte 20 of eeprom map */ unsigned char niAdapId; /* Host Adapter ID - Byte 24 of eerpom map */ unsigned char niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte of targets */ unsigned char niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name string of Targets */ }; #define MODEL_LT 1 #define MODEL_DL 2 #define MODEL_LW 3 #define MODEL_DW 4 struct sccb_card { struct sccb *currentSCCB; struct sccb_mgr_info *cardInfo; u32 ioPort; unsigned short cmdCounter; unsigned char discQCount; unsigned char tagQ_Lst; unsigned char cardIndex; unsigned char scanIndex; unsigned char globalFlags; unsigned char ourId; struct nvram_info *pNvRamInfo; struct sccb *discQ_Tbl[QUEUE_DEPTH]; }; #define F_TAG_STARTED 0x01 #define F_CONLUN_IO 0x02 #define F_DO_RENEGO 0x04 #define F_NO_FILTER 0x08 #define F_GREEN_PC 0x10 #define F_HOST_XFER_ACT 0x20 #define F_NEW_SCCB_CMD 0x40 #define F_UPDATE_EEPROM 0x80 #define ID_STRING_LENGTH 32 #define TYPE_CODE0 0x63 /*Level2 Mstr (bits 7-6), */ #define SLV_TYPE_CODE0 0xA3 /*Priority Bit set (bits 7-6), */ #define ASSIGN_ID 0x00 #define SET_P_FLAG 0x01 #define CFG_CMPLT 0x03 #define DOM_MSTR 0x0F #define SYNC_PTRN 0x1F #define ID_0_7 0x18 #define ID_8_F 0x11 #define MISC_CODE 0x14 #define CLR_P_FLAG 0x18 #define INIT_SELTD 0x01 #define LEVEL2_TAR 0x02 enum scam_id_st { ID0, ID1, ID2, ID3, ID4, ID5, ID6, ID7, ID8, ID9, ID10, ID11, ID12, ID13, ID14, ID15, ID_UNUSED, ID_UNASSIGNED, ID_ASSIGNED, LEGACY, CLR_PRIORITY, NO_ID_AVAIL }; typedef struct SCCBscam_info { unsigned char id_string[ID_STRING_LENGTH]; enum scam_id_st state; } SCCBSCAM_INFO; #define SMIDENT 0x80 #define DISC_PRIV 0x40 #define SM8BIT 0x00 #define SM16BIT 0x01 #define SIX_BYTE_CMD 0x06 #define TWELVE_BYTE_CMD 0x0C #define ASYNC 0x00 #define MAX_OFFSET 0x0F /* Maxbyteoffset for Sync Xfers */ #define EEPROM_WD_CNT 256 #define EEPROM_CHECK_SUM 0 #define FW_SIGNATURE 2 #define MODEL_NUMB_0 4 #define MODEL_NUMB_2 6 #define MODEL_NUMB_4 8 #define SYSTEM_CONFIG 16 #define SCSI_CONFIG 17 #define BIOS_CONFIG 18 #define SCAM_CONFIG 20 #define ADAPTER_SCSI_ID 24 #define IGNORE_B_SCAN 32 #define SEND_START_ENA 34 #define DEVICE_ENABLE 36 #define SYNC_RATE_TBL 38 #define SYNC_RATE_TBL01 38 #define SYNC_RATE_TBL23 40 #define SYNC_RATE_TBL45 42 #define SYNC_RATE_TBL67 44 #define SYNC_RATE_TBL89 46 #define SYNC_RATE_TBLab 48 #define SYNC_RATE_TBLcd 50 #define SYNC_RATE_TBLef 52 #define EE_SCAMBASE 256 #define SCAM_ENABLED BIT(2) #define SCAM_LEVEL2 BIT(3) #define RENEGO_ENA BIT(10) #define CONNIO_ENA BIT(11) #define GREEN_PC_ENA BIT(12) #define AUTO_RATE_00 00 #define AUTO_RATE_05 01 #define AUTO_RATE_10 02 #define AUTO_RATE_20 03 #define WIDE_NEGO_BIT BIT(7) #define DISC_ENABLE_BIT BIT(6) #define hp_vendor_id_0 0x00 /* LSB */ #define ORION_VEND_0 0x4B #define hp_vendor_id_1 0x01 /* MSB */ #define ORION_VEND_1 0x10 #define hp_device_id_0 0x02 /* LSB */ #define ORION_DEV_0 0x30 #define hp_device_id_1 0x03 /* MSB */ #define ORION_DEV_1 0x81 /* Sub Vendor ID and Sub Device ID only available in Harpoon Version 2 and higher */ #define hp_sub_device_id_0 0x06 /* LSB */ #define hp_semaphore 0x0C #define SCCB_MGR_ACTIVE BIT(0) #define TICKLE_ME BIT(1) #define SCCB_MGR_PRESENT BIT(3) #define BIOS_IN_USE BIT(4) #define hp_sys_ctrl 0x0F #define STOP_CLK BIT(0) /*Turn off BusMaster Clock */ 
#define DRVR_RST BIT(1) /*Firmware Reset to 80C15 chip */ #define HALT_MACH BIT(3) /*Halt State Machine */ #define HARD_ABORT BIT(4) /*Hard Abort */ #define hp_host_blk_cnt 0x13 #define XFER_BLK64 0x06 /* 1 1 0 64 byte per block */ #define BM_THRESHOLD 0x40 /* PCI mode can only xfer 16 bytes */ #define hp_int_mask 0x17 #define INT_CMD_COMPL BIT(0) /* DMA command complete */ #define INT_EXT_STATUS BIT(1) /* Extended Status Set */ #define hp_xfer_cnt_lo 0x18 #define hp_xfer_cnt_hi 0x1A #define hp_xfer_cmd 0x1B #define XFER_HOST_DMA 0x00 /* 0 0 0 Transfer Host -> DMA */ #define XFER_DMA_HOST 0x01 /* 0 0 1 Transfer DMA -> Host */ #define XFER_HOST_AUTO 0x00 /* 0 0 Auto Transfer Size */ #define XFER_DMA_8BIT 0x20 /* 0 1 8 BIT Transfer Size */ #define DISABLE_INT BIT(7) /*Do not interrupt at end of cmd. */ #define HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_8BIT)) #define HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_8BIT)) #define hp_host_addr_lo 0x1C #define hp_host_addr_hmi 0x1E #define hp_ee_ctrl 0x22 #define EXT_ARB_ACK BIT(7) #define SCSI_TERM_ENA_H BIT(6) /* SCSI high byte terminator */ #define SEE_MS BIT(5) #define SEE_CS BIT(3) #define SEE_CLK BIT(2) #define SEE_DO BIT(1) #define SEE_DI BIT(0) #define EE_READ 0x06 #define EE_WRITE 0x05 #define EWEN 0x04 #define EWEN_ADDR 0x03C0 #define EWDS 0x04 #define EWDS_ADDR 0x0000 #define hp_bm_ctrl 0x26 #define SCSI_TERM_ENA_L BIT(0) /*Enable/Disable external terminators */ #define FLUSH_XFER_CNTR BIT(1) /*Flush transfer counter */ #define FORCE1_XFER BIT(5) /*Always xfer one byte in byte mode */ #define FAST_SINGLE BIT(6) /*?? */ #define BMCTRL_DEFAULT (FORCE1_XFER|FAST_SINGLE|SCSI_TERM_ENA_L) #define hp_sg_addr 0x28 #define hp_page_ctrl 0x29 #define SCATTER_EN BIT(0) #define SGRAM_ARAM BIT(1) #define G_INT_DISABLE BIT(3) /* Enable/Disable all Interrupts */ #define NARROW_SCSI_CARD BIT(4) /* NARROW/WIDE SCSI config pin */ #define hp_pci_stat_cfg 0x2D #define REC_MASTER_ABORT BIT(5) /*received Master abort */ #define hp_rev_num 0x33 #define hp_stack_data 0x34 #define hp_stack_addr 0x35 #define hp_ext_status 0x36 #define BM_FORCE_OFF BIT(0) /*Bus Master is forced to get off */ #define PCI_TGT_ABORT BIT(0) /*PCI bus master transaction aborted */ #define PCI_DEV_TMOUT BIT(1) /*PCI Device Time out */ #define CMD_ABORTED BIT(4) /*Command aborted */ #define BM_PARITY_ERR BIT(5) /*parity error on data received */ #define PIO_OVERRUN BIT(6) /*Slave data overrun */ #define BM_CMD_BUSY BIT(7) /*Bus master transfer command busy */ #define BAD_EXT_STATUS (BM_FORCE_OFF | PCI_DEV_TMOUT | CMD_ABORTED | \ BM_PARITY_ERR | PIO_OVERRUN) #define hp_int_status 0x37 #define EXT_STATUS_ON BIT(1) /*Extended status is valid */ #define SCSI_INTERRUPT BIT(2) /*Global indication of a SCSI int. 
*/ #define INT_ASSERTED BIT(5) /* */ #define hp_fifo_cnt 0x38 #define hp_intena 0x40 #define RESET BIT(7) #define PROG_HLT BIT(6) #define PARITY BIT(5) #define FIFO BIT(4) #define SEL BIT(3) #define SCAM_SEL BIT(2) #define RSEL BIT(1) #define TIMEOUT BIT(0) #define BUS_FREE BIT(15) #define XFER_CNT_0 BIT(14) #define PHASE BIT(13) #define IUNKWN BIT(12) #define ICMD_COMP BIT(11) #define ITICKLE BIT(10) #define IDO_STRT BIT(9) #define ITAR_DISC BIT(8) #define AUTO_INT (BIT(12)+BIT(11)+BIT(10)+BIT(9)+BIT(8)) #define CLR_ALL_INT 0xFFFF #define CLR_ALL_INT_1 0xFF00 #define hp_intstat 0x42 #define hp_scsisig 0x44 #define SCSI_SEL BIT(7) #define SCSI_BSY BIT(6) #define SCSI_REQ BIT(5) #define SCSI_ACK BIT(4) #define SCSI_ATN BIT(3) #define SCSI_CD BIT(2) #define SCSI_MSG BIT(1) #define SCSI_IOBIT BIT(0) #define S_SCSI_PHZ (BIT(2)+BIT(1)+BIT(0)) #define S_MSGO_PH (BIT(2)+BIT(1) ) #define S_MSGI_PH (BIT(2)+BIT(1)+BIT(0)) #define S_DATAI_PH ( BIT(0)) #define S_DATAO_PH 0x00 #define S_ILL_PH ( BIT(1) ) #define hp_scsictrl_0 0x45 #define SEL_TAR BIT(6) #define ENA_ATN BIT(4) #define ENA_RESEL BIT(2) #define SCSI_RST BIT(1) #define ENA_SCAM_SEL BIT(0) #define hp_portctrl_0 0x46 #define SCSI_PORT BIT(7) #define SCSI_INBIT BIT(6) #define DMA_PORT BIT(5) #define DMA_RD BIT(4) #define HOST_PORT BIT(3) #define HOST_WRT BIT(2) #define SCSI_BUS_EN BIT(1) #define START_TO BIT(0) #define hp_scsireset 0x47 #define SCSI_INI BIT(6) #define SCAM_EN BIT(5) #define DMA_RESET BIT(3) #define HPSCSI_RESET BIT(2) #define PROG_RESET BIT(1) #define FIFO_CLR BIT(0) #define hp_xfercnt_0 0x48 #define hp_xfercnt_2 0x4A #define hp_fifodata_0 0x4C #define hp_addstat 0x4E #define SCAM_TIMER BIT(7) #define SCSI_MODE8 BIT(3) #define SCSI_PAR_ERR BIT(0) #define hp_prgmcnt_0 0x4F #define hp_selfid_0 0x50 #define hp_selfid_1 0x51 #define hp_arb_id 0x52 #define hp_select_id 0x53 #define hp_synctarg_base 0x54 #define hp_synctarg_12 0x54 #define hp_synctarg_13 0x55 #define hp_synctarg_14 0x56 #define hp_synctarg_15 0x57 #define hp_synctarg_8 0x58 #define hp_synctarg_9 0x59 #define hp_synctarg_10 0x5A #define hp_synctarg_11 0x5B #define hp_synctarg_4 0x5C #define hp_synctarg_5 0x5D #define hp_synctarg_6 0x5E #define hp_synctarg_7 0x5F #define hp_synctarg_0 0x60 #define hp_synctarg_1 0x61 #define hp_synctarg_2 0x62 #define hp_synctarg_3 0x63 #define NARROW_SCSI BIT(4) #define DEFAULT_OFFSET 0x0F #define hp_autostart_0 0x64 #define hp_autostart_1 0x65 #define hp_autostart_3 0x67 #define AUTO_IMMED BIT(5) #define SELECT BIT(6) #define END_DATA (BIT(7)+BIT(6)) #define hp_gp_reg_0 0x68 #define hp_gp_reg_1 0x69 #define hp_gp_reg_3 0x6B #define hp_seltimeout 0x6C #define TO_4ms 0x67 /* 3.9959ms */ #define TO_5ms 0x03 /* 4.9152ms */ #define TO_10ms 0x07 /* 11.xxxms */ #define TO_250ms 0x99 /* 250.68ms */ #define TO_290ms 0xB1 /* 289.99ms */ #define hp_clkctrl_0 0x6D #define PWR_DWN BIT(6) #define ACTdeassert BIT(4) #define CLK_40MHZ (BIT(1) + BIT(0)) #define CLKCTRL_DEFAULT (ACTdeassert | CLK_40MHZ) #define hp_fiforead 0x6E #define hp_fifowrite 0x6F #define hp_offsetctr 0x70 #define hp_xferstat 0x71 #define FIFO_EMPTY BIT(6) #define hp_portctrl_1 0x72 #define CHK_SCSI_P BIT(3) #define HOST_MODE8 BIT(0) #define hp_xfer_pad 0x73 #define ID_UNLOCK BIT(3) #define hp_scsidata_0 0x74 #define hp_scsidata_1 0x75 #define hp_aramBase 0x80 #define BIOS_DATA_OFFSET 0x60 #define BIOS_RELATIVE_CARD 0x64 #define AR3 (BIT(9) + BIT(8)) #define SDATA BIT(10) #define CRD_OP BIT(11) /* Cmp Reg. w/ Data */ #define CRR_OP BIT(12) /* Cmp Reg. w. Reg. 
*/ #define CPE_OP (BIT(14)+BIT(11)) /* Cmp SCSI phs & Branch EQ */ #define CPN_OP (BIT(14)+BIT(12)) /* Cmp SCSI phs & Branch NOT EQ */ #define ADATA_OUT 0x00 #define ADATA_IN BIT(8) #define ACOMMAND BIT(10) #define ASTATUS (BIT(10)+BIT(8)) #define AMSG_OUT (BIT(10)+BIT(9)) #define AMSG_IN (BIT(10)+BIT(9)+BIT(8)) #define BRH_OP BIT(13) /* Branch */ #define ALWAYS 0x00 #define EQUAL BIT(8) #define NOT_EQ BIT(9) #define TCB_OP (BIT(13)+BIT(11)) /* Test condition & branch */ #define FIFO_0 BIT(10) #define MPM_OP BIT(15) /* Match phase and move data */ #define MRR_OP BIT(14) /* Move DReg. to Reg. */ #define S_IDREG (BIT(2)+BIT(1)+BIT(0)) #define D_AR0 0x00 #define D_AR1 BIT(0) #define D_BUCKET (BIT(2) + BIT(1) + BIT(0)) #define RAT_OP (BIT(14)+BIT(13)+BIT(11)) #define SSI_OP (BIT(15)+BIT(11)) #define SSI_ITAR_DISC (ITAR_DISC >> 8) #define SSI_IDO_STRT (IDO_STRT >> 8) #define SSI_ICMD_COMP (ICMD_COMP >> 8) #define SSI_ITICKLE (ITICKLE >> 8) #define SSI_IUNKWN (IUNKWN >> 8) #define SSI_INO_CC (IUNKWN >> 8) #define SSI_IRFAIL (IUNKWN >> 8) #define NP 0x10 /*Next Phase */ #define NTCMD 0x02 /*Non- Tagged Command start */ #define CMDPZ 0x04 /*Command phase */ #define DINT 0x12 /*Data Out/In interrupt */ #define DI 0x13 /*Data Out */ #define DC 0x19 /*Disconnect Message */ #define ST 0x1D /*Status Phase */ #define UNKNWN 0x24 /*Unknown bus action */ #define CC 0x25 /*Command Completion failure */ #define TICK 0x26 /*New target reselected us. */ #define SELCHK 0x28 /*Select & Check SCSI ID latch reg */ #define ID_MSG_STRT hp_aramBase + 0x00 #define NON_TAG_ID_MSG hp_aramBase + 0x06 #define CMD_STRT hp_aramBase + 0x08 #define SYNC_MSGS hp_aramBase + 0x08 #define TAG_STRT 0x00 #define DISCONNECT_START 0x10/2 #define END_DATA_START 0x14/2 #define CMD_ONLY_STRT CMDPZ/2 #define SELCHK_STRT SELCHK/2 #define GET_XFER_CNT(port, xfercnt) {RD_HARP32(port,hp_xfercnt_0,xfercnt); xfercnt &= 0xFFFFFF;} /* #define GET_XFER_CNT(port, xfercnt) (xfercnt = RD_HARPOON(port+hp_xfercnt_2), \ xfercnt <<= 16,\ xfercnt |= RDW_HARPOON((unsigned short)(port+hp_xfercnt_0))) */ #define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((port+hp_host_addr_lo), (unsigned short)(addr & 0x0000FFFFL)),\ addr >>= 16,\ WRW_HARPOON((port+hp_host_addr_hmi), (unsigned short)(addr & 0x0000FFFFL)),\ WR_HARP32(port,hp_xfercnt_0,count),\ WRW_HARPOON((port+hp_xfer_cnt_lo), (unsigned short)(count & 0x0000FFFFL)),\ count >>= 16,\ WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF))) #define ACCEPT_MSG(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\ WR_HARPOON(port+hp_scsisig, S_ILL_PH);} #define ACCEPT_MSG_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\ WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));} #define DISABLE_AUTO(port) (WR_HARPOON(port+hp_scsireset, PROG_RESET),\ WR_HARPOON(port+hp_scsireset, 0x00)) #define ARAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ (RD_HARPOON(p_port+hp_page_ctrl) | SGRAM_ARAM))) #define SGRAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ (RD_HARPOON(p_port+hp_page_ctrl) & ~SGRAM_ARAM))) #define MDISABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE))) #define MENABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE))) static unsigned char FPT_sisyncn(u32 port, unsigned char p_card, unsigned char syncFlag); static void FPT_ssel(u32 port, unsigned char p_card); static void FPT_sres(u32 port, unsigned char p_card, struct sccb_card *pCurrCard); static void FPT_shandem(u32 port, unsigned char 
p_card, struct sccb *pCurrSCCB); static void FPT_stsyncn(u32 port, unsigned char p_card); static void FPT_sisyncr(u32 port, unsigned char sync_pulse, unsigned char offset); static void FPT_sssyncv(u32 p_port, unsigned char p_id, unsigned char p_sync_value, struct sccb_mgr_tar_info *currTar_Info); static void FPT_sresb(u32 port, unsigned char p_card); static void FPT_sxfrp(u32 p_port, unsigned char p_card); static void FPT_schkdd(u32 port, unsigned char p_card); static unsigned char FPT_RdStack(u32 port, unsigned char index); static void FPT_WrStack(u32 portBase, unsigned char index, unsigned char data); static unsigned char FPT_ChkIfChipInitialized(u32 ioPort); static void FPT_SendMsg(u32 port, unsigned char message); static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg, unsigned char error_code); static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card); static void FPT_RNVRamData(struct nvram_info *pNvRamInfo); static unsigned char FPT_siwidn(u32 port, unsigned char p_card); static void FPT_stwidn(u32 port, unsigned char p_card); static void FPT_siwidr(u32 port, unsigned char width); static void FPT_queueSelectFail(struct sccb_card *pCurrCard, unsigned char p_card); static void FPT_queueDisconnect(struct sccb *p_SCCB, unsigned char p_card); static void FPT_queueCmdComplete(struct sccb_card *pCurrCard, struct sccb *p_SCCB, unsigned char p_card); static void FPT_queueSearchSelect(struct sccb_card *pCurrCard, unsigned char p_card); static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code); static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char card); static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB, unsigned char p_card); static void FPT_utilUpdateResidual(struct sccb *p_SCCB); static unsigned short FPT_CalcCrc16(unsigned char buffer[]); static unsigned char FPT_CalcLrc(unsigned char buffer[]); static void FPT_Wait1Second(u32 p_port); static void FPT_Wait(u32 p_port, unsigned char p_delay); static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode); static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data, unsigned short ee_addr); static unsigned short FPT_utilEERead(u32 p_port, unsigned short ee_addr); static unsigned short FPT_utilEEReadOrg(u32 p_port, unsigned short ee_addr); static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd, unsigned short ee_addr); static void FPT_phaseDataOut(u32 port, unsigned char p_card); static void FPT_phaseDataIn(u32 port, unsigned char p_card); static void FPT_phaseCommand(u32 port, unsigned char p_card); static void FPT_phaseStatus(u32 port, unsigned char p_card); static void FPT_phaseMsgOut(u32 port, unsigned char p_card); static void FPT_phaseMsgIn(u32 port, unsigned char p_card); static void FPT_phaseIllegal(u32 port, unsigned char p_card); static void FPT_phaseDecode(u32 port, unsigned char p_card); static void FPT_phaseChkFifo(u32 port, unsigned char p_card); static void FPT_phaseBusFree(u32 p_port, unsigned char p_card); static void FPT_XbowInit(u32 port, unsigned char scamFlg); static void FPT_BusMasterInit(u32 p_port); static void FPT_DiagEEPROM(u32 p_port); static void FPT_dataXferProcessor(u32 port, struct sccb_card *pCurrCard); static void FPT_busMstrSGDataXferStart(u32 port, struct sccb *pCurrSCCB); static void FPT_busMstrDataXferStart(u32 port, struct sccb *pCurrSCCB); static void FPT_hostDataXferAbort(u32 port, unsigned char p_card, struct sccb *pCurrSCCB); static void FPT_hostDataXferRestart(struct sccb *currSCCB); static unsigned char 
FPT_SccbMgr_bad_isr(u32 p_port, unsigned char p_card, struct sccb_card *pCurrCard, unsigned short p_int); static void FPT_SccbMgrTableInitAll(void); static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard, unsigned char p_card); static void FPT_SccbMgrTableInitTarget(unsigned char p_card, unsigned char target); static void FPT_scini(unsigned char p_card, unsigned char p_our_id, unsigned char p_power_up); static int FPT_scarb(u32 p_port, unsigned char p_sel_type); static void FPT_scbusf(u32 p_port); static void FPT_scsel(u32 p_port); static void FPT_scasid(unsigned char p_card, u32 p_port); static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data); static unsigned char FPT_scsendi(u32 p_port, unsigned char p_id_string[]); static unsigned char FPT_sciso(u32 p_port, unsigned char p_id_string[]); static void FPT_scwirod(u32 p_port, unsigned char p_data_bit); static void FPT_scwiros(u32 p_port, unsigned char p_data_bit); static unsigned char FPT_scvalq(unsigned char p_quintet); static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id); static void FPT_scwtsel(u32 p_port); static void FPT_inisci(unsigned char p_card, u32 p_port, unsigned char p_our_id); static void FPT_scsavdi(unsigned char p_card, u32 p_port); static unsigned char FPT_scmachid(unsigned char p_card, unsigned char p_id_string[]); static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card); static void FPT_autoLoadDefaultMap(u32 p_port); static struct sccb_mgr_tar_info FPT_sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] = { {{0}} }; static struct sccb_card FPT_BL_Card[MAX_CARDS] = { {0} }; static SCCBSCAM_INFO FPT_scamInfo[MAX_SCSI_TAR] = { {{0}} }; static struct nvram_info FPT_nvRamInfo[MAX_MB_CARDS] = { {0} }; static unsigned char FPT_mbCards = 0; static unsigned char FPT_scamHAString[] = { 0x63, 0x07, 'B', 'U', 'S', 'L', 'O', 'G', 'I', 'C', ' ', 'B', 'T', '-', '9', '3', '0', 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 }; static unsigned short FPT_default_intena = 0; static void (*FPT_s_PhaseTbl[8]) (u32, unsigned char) = { 0}; /*--------------------------------------------------------------------- * * Function: FlashPoint_ProbeHostAdapter * * Description: Setup and/or Search for cards and return info to caller. * *---------------------------------------------------------------------*/ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) { static unsigned char first_time = 1; unsigned char i, j, id, ScamFlg; unsigned short temp, temp2, temp3, temp4, temp5, temp6; u32 ioport; struct nvram_info *pCurrNvRam; ioport = pCardInfo->si_baseaddr; if (RD_HARPOON(ioport + hp_vendor_id_0) != ORION_VEND_0) return (int)FAILURE; if ((RD_HARPOON(ioport + hp_vendor_id_1) != ORION_VEND_1)) return (int)FAILURE; if ((RD_HARPOON(ioport + hp_device_id_0) != ORION_DEV_0)) return (int)FAILURE; if ((RD_HARPOON(ioport + hp_device_id_1) != ORION_DEV_1)) return (int)FAILURE; if (RD_HARPOON(ioport + hp_rev_num) != 0x0f) { /* For new Harpoon then check for sub_device ID LSB the bits(0-3) must be all ZERO for compatible with current version of SCCBMgr, else skip this Harpoon device. 
*/ if (RD_HARPOON(ioport + hp_sub_device_id_0) & 0x0f) return (int)FAILURE; } if (first_time) { FPT_SccbMgrTableInitAll(); first_time = 0; FPT_mbCards = 0; } if (FPT_RdStack(ioport, 0) != 0x00) { if (FPT_ChkIfChipInitialized(ioport) == 0) { pCurrNvRam = NULL; WR_HARPOON(ioport + hp_semaphore, 0x00); FPT_XbowInit(ioport, 0); /*Must Init the SCSI before attempting */ FPT_DiagEEPROM(ioport); } else { if (FPT_mbCards < MAX_MB_CARDS) { pCurrNvRam = &FPT_nvRamInfo[FPT_mbCards]; FPT_mbCards++; pCurrNvRam->niBaseAddr = ioport; FPT_RNVRamData(pCurrNvRam); } else return (int)FAILURE; } } else pCurrNvRam = NULL; WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT); WR_HARPOON(ioport + hp_sys_ctrl, 0x00); if (pCurrNvRam) pCardInfo->si_id = pCurrNvRam->niAdapId; else pCardInfo->si_id = (unsigned char)(FPT_utilEERead(ioport, (ADAPTER_SCSI_ID / 2)) & (unsigned char)0x0FF); pCardInfo->si_lun = 0x00; pCardInfo->si_fw_revision = ORION_FW_REV; temp2 = 0x0000; temp3 = 0x0000; temp4 = 0x0000; temp5 = 0x0000; temp6 = 0x0000; for (id = 0; id < (16 / 2); id++) { if (pCurrNvRam) { temp = (unsigned short)pCurrNvRam->niSyncTbl[id]; temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) + (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000)); } else temp = FPT_utilEERead(ioport, (unsigned short)((SYNC_RATE_TBL / 2) + id)); for (i = 0; i < 2; temp >>= 8, i++) { temp2 >>= 1; temp3 >>= 1; temp4 >>= 1; temp5 >>= 1; temp6 >>= 1; switch (temp & 0x3) { case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */ temp6 |= 0x8000; fallthrough; case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */ temp5 |= 0x8000; fallthrough; case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */ temp2 |= 0x8000; fallthrough; case AUTO_RATE_00: /* Asynchronous */ break; } if (temp & DISC_ENABLE_BIT) temp3 |= 0x8000; if (temp & WIDE_NEGO_BIT) temp4 |= 0x8000; } } pCardInfo->si_per_targ_init_sync = temp2; pCardInfo->si_per_targ_no_disc = temp3; pCardInfo->si_per_targ_wide_nego = temp4; pCardInfo->si_per_targ_fast_nego = temp5; pCardInfo->si_per_targ_ultra_nego = temp6; if (pCurrNvRam) i = pCurrNvRam->niSysConf; else i = (unsigned char)(FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2))); if (pCurrNvRam) ScamFlg = pCurrNvRam->niScamConf; else ScamFlg = (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2); pCardInfo->si_mflags = 0x0000; if (i & 0x01) pCardInfo->si_mflags |= SCSI_PARITY_ENA; if (!(i & 0x02)) pCardInfo->si_mflags |= SOFT_RESET; if (i & 0x10) pCardInfo->si_mflags |= EXTENDED_TRANSLATION; if (ScamFlg & SCAM_ENABLED) pCardInfo->si_mflags |= FLAG_SCAM_ENABLED; if (ScamFlg & SCAM_LEVEL2) pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2; j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L); if (i & 0x04) { j |= SCSI_TERM_ENA_L; } WR_HARPOON(ioport + hp_bm_ctrl, j); j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H); if (i & 0x08) { j |= SCSI_TERM_ENA_H; } WR_HARPOON(ioport + hp_ee_ctrl, j); if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD)) pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN; pCardInfo->si_card_family = HARPOON_FAMILY; pCardInfo->si_bustype = BUSTYPE_PCI; if (pCurrNvRam) { pCardInfo->si_card_model[0] = '9'; switch (pCurrNvRam->niModel & 0x0f) { case MODEL_LT: pCardInfo->si_card_model[1] = '3'; pCardInfo->si_card_model[2] = '0'; break; case MODEL_LW: pCardInfo->si_card_model[1] = '5'; pCardInfo->si_card_model[2] = '0'; break; case MODEL_DL: pCardInfo->si_card_model[1] = '3'; pCardInfo->si_card_model[2] = '2'; break; case MODEL_DW: pCardInfo->si_card_model[1] = '5'; pCardInfo->si_card_model[2] = '2'; break; } } else { 
temp = FPT_utilEERead(ioport, (MODEL_NUMB_0 / 2)); pCardInfo->si_card_model[0] = (unsigned char)(temp >> 8); temp = FPT_utilEERead(ioport, (MODEL_NUMB_2 / 2)); pCardInfo->si_card_model[1] = (unsigned char)(temp & 0x00FF); pCardInfo->si_card_model[2] = (unsigned char)(temp >> 8); } if (pCardInfo->si_card_model[1] == '3') { if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) pCardInfo->si_mflags |= LOW_BYTE_TERM; } else if (pCardInfo->si_card_model[2] == '0') { temp = RD_HARPOON(ioport + hp_xfer_pad); WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4))); if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) pCardInfo->si_mflags |= LOW_BYTE_TERM; WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4))); if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) pCardInfo->si_mflags |= HIGH_BYTE_TERM; WR_HARPOON(ioport + hp_xfer_pad, temp); } else { temp = RD_HARPOON(ioport + hp_ee_ctrl); temp2 = RD_HARPOON(ioport + hp_xfer_pad); WR_HARPOON(ioport + hp_ee_ctrl, (temp | SEE_CS)); WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4))); temp3 = 0; for (i = 0; i < 8; i++) { temp3 <<= 1; if (!(RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))) temp3 |= 1; WR_HARPOON(ioport + hp_xfer_pad, (temp2 & ~BIT(4))); WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4))); } WR_HARPOON(ioport + hp_ee_ctrl, temp); WR_HARPOON(ioport + hp_xfer_pad, temp2); if (!(temp3 & BIT(7))) pCardInfo->si_mflags |= LOW_BYTE_TERM; if (!(temp3 & BIT(6))) pCardInfo->si_mflags |= HIGH_BYTE_TERM; } ARAM_ACCESS(ioport); for (i = 0; i < 4; i++) { pCardInfo->si_XlatInfo[i] = RD_HARPOON(ioport + hp_aramBase + BIOS_DATA_OFFSET + i); } /* return with -1 if no sort, else return with logical card number sorted by BIOS (zero-based) */ pCardInfo->si_relative_cardnum = (unsigned char)(RD_HARPOON(ioport + hp_aramBase + BIOS_RELATIVE_CARD) - 1); SGRAM_ACCESS(ioport); FPT_s_PhaseTbl[0] = FPT_phaseDataOut; FPT_s_PhaseTbl[1] = FPT_phaseDataIn; FPT_s_PhaseTbl[2] = FPT_phaseIllegal; FPT_s_PhaseTbl[3] = FPT_phaseIllegal; FPT_s_PhaseTbl[4] = FPT_phaseCommand; FPT_s_PhaseTbl[5] = FPT_phaseStatus; FPT_s_PhaseTbl[6] = FPT_phaseMsgOut; FPT_s_PhaseTbl[7] = FPT_phaseMsgIn; pCardInfo->si_present = 0x01; return 0; } /*--------------------------------------------------------------------- * * Function: FlashPoint_HardwareResetHostAdapter * * Description: Setup adapter for normal operation (hard reset). 
* *---------------------------------------------------------------------*/ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info *pCardInfo) { struct sccb_card *CurrCard = NULL; struct nvram_info *pCurrNvRam; unsigned char i, j, thisCard, ScamFlg; unsigned short temp, sync_bit_map, id; u32 ioport; ioport = pCardInfo->si_baseaddr; for (thisCard = 0; thisCard <= MAX_CARDS; thisCard++) { if (thisCard == MAX_CARDS) return (void *)FAILURE; if (FPT_BL_Card[thisCard].ioPort == ioport) { CurrCard = &FPT_BL_Card[thisCard]; FPT_SccbMgrTableInitCard(CurrCard, thisCard); break; } else if (FPT_BL_Card[thisCard].ioPort == 0x00) { FPT_BL_Card[thisCard].ioPort = ioport; CurrCard = &FPT_BL_Card[thisCard]; if (FPT_mbCards) for (i = 0; i < FPT_mbCards; i++) { if (CurrCard->ioPort == FPT_nvRamInfo[i].niBaseAddr) CurrCard->pNvRamInfo = &FPT_nvRamInfo[i]; } FPT_SccbMgrTableInitCard(CurrCard, thisCard); CurrCard->cardIndex = thisCard; CurrCard->cardInfo = pCardInfo; break; } } pCurrNvRam = CurrCard->pNvRamInfo; if (pCurrNvRam) { ScamFlg = pCurrNvRam->niScamConf; } else { ScamFlg = (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2); } FPT_BusMasterInit(ioport); FPT_XbowInit(ioport, ScamFlg); FPT_autoLoadDefaultMap(ioport); for (i = 0, id = 0x01; i != pCardInfo->si_id; i++, id <<= 1) { } WR_HARPOON(ioport + hp_selfid_0, id); WR_HARPOON(ioport + hp_selfid_1, 0x00); WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id); CurrCard->ourId = pCardInfo->si_id; i = (unsigned char)pCardInfo->si_mflags; if (i & SCSI_PARITY_ENA) WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P)); j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L); if (i & LOW_BYTE_TERM) j |= SCSI_TERM_ENA_L; WR_HARPOON(ioport + hp_bm_ctrl, j); j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H); if (i & HIGH_BYTE_TERM) j |= SCSI_TERM_ENA_H; WR_HARPOON(ioport + hp_ee_ctrl, j); if (!(pCardInfo->si_mflags & SOFT_RESET)) { FPT_sresb(ioport, thisCard); FPT_scini(thisCard, pCardInfo->si_id, 0); } if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS) CurrCard->globalFlags |= F_NO_FILTER; if (pCurrNvRam) { if (pCurrNvRam->niSysConf & 0x10) CurrCard->globalFlags |= F_GREEN_PC; } else { if (FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2)) & GREEN_PC_ENA) CurrCard->globalFlags |= F_GREEN_PC; } /* Set global flag to indicate Re-Negotiation to be done on all ckeck condition */ if (pCurrNvRam) { if (pCurrNvRam->niScsiConf & 0x04) CurrCard->globalFlags |= F_DO_RENEGO; } else { if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & RENEGO_ENA) CurrCard->globalFlags |= F_DO_RENEGO; } if (pCurrNvRam) { if (pCurrNvRam->niScsiConf & 0x08) CurrCard->globalFlags |= F_CONLUN_IO; } else { if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & CONNIO_ENA) CurrCard->globalFlags |= F_CONLUN_IO; } temp = pCardInfo->si_per_targ_no_disc; for (i = 0, id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) { if (temp & id) FPT_sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC; } sync_bit_map = 0x0001; for (id = 0; id < (MAX_SCSI_TAR / 2); id++) { if (pCurrNvRam) { temp = (unsigned short)pCurrNvRam->niSyncTbl[id]; temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) + (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000)); } else temp = FPT_utilEERead(ioport, (unsigned short)((SYNC_RATE_TBL / 2) + id)); for (i = 0; i < 2; temp >>= 8, i++) { if (pCardInfo->si_per_targ_init_sync & sync_bit_map) { FPT_sccbMgrTbl[thisCard][id * 2 + i].TarEEValue = (unsigned char)temp; } else { FPT_sccbMgrTbl[thisCard][id * 2 + i].TarStatus |= SYNC_SUPPORTED; FPT_sccbMgrTbl[thisCard][id * 2 + i].TarEEValue = (unsigned 
char)(temp & ~EE_SYNC_MASK); } /* if ((pCardInfo->si_per_targ_wide_nego & sync_bit_map) || (id*2+i >= 8)){ */ if (pCardInfo->si_per_targ_wide_nego & sync_bit_map) { FPT_sccbMgrTbl[thisCard][id * 2 + i].TarEEValue |= EE_WIDE_SCSI; } else { /* NARROW SCSI */ FPT_sccbMgrTbl[thisCard][id * 2 + i].TarStatus |= WIDE_NEGOCIATED; } sync_bit_map <<= 1; } } WR_HARPOON((ioport + hp_semaphore), (unsigned char)(RD_HARPOON((ioport + hp_semaphore)) | SCCB_MGR_PRESENT)); return (void *)CurrCard; } static void FlashPoint_ReleaseHostAdapter(void *pCurrCard) { unsigned char i; u32 portBase; u32 regOffset; u32 scamData; u32 *pScamTbl; struct nvram_info *pCurrNvRam; pCurrNvRam = ((struct sccb_card *)pCurrCard)->pNvRamInfo; if (pCurrNvRam) { FPT_WrStack(pCurrNvRam->niBaseAddr, 0, pCurrNvRam->niModel); FPT_WrStack(pCurrNvRam->niBaseAddr, 1, pCurrNvRam->niSysConf); FPT_WrStack(pCurrNvRam->niBaseAddr, 2, pCurrNvRam->niScsiConf); FPT_WrStack(pCurrNvRam->niBaseAddr, 3, pCurrNvRam->niScamConf); FPT_WrStack(pCurrNvRam->niBaseAddr, 4, pCurrNvRam->niAdapId); for (i = 0; i < MAX_SCSI_TAR / 2; i++) FPT_WrStack(pCurrNvRam->niBaseAddr, (unsigned char)(i + 5), pCurrNvRam->niSyncTbl[i]); portBase = pCurrNvRam->niBaseAddr; for (i = 0; i < MAX_SCSI_TAR; i++) { regOffset = hp_aramBase + 64 + i * 4; pScamTbl = (u32 *)&pCurrNvRam->niScamTbl[i]; scamData = *pScamTbl; WR_HARP32(portBase, regOffset, scamData); } } else { FPT_WrStack(((struct sccb_card *)pCurrCard)->ioPort, 0, 0); } } static void FPT_RNVRamData(struct nvram_info *pNvRamInfo) { unsigned char i; u32 portBase; u32 regOffset; u32 scamData; u32 *pScamTbl; pNvRamInfo->niModel = FPT_RdStack(pNvRamInfo->niBaseAddr, 0); pNvRamInfo->niSysConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 1); pNvRamInfo->niScsiConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 2); pNvRamInfo->niScamConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 3); pNvRamInfo->niAdapId = FPT_RdStack(pNvRamInfo->niBaseAddr, 4); for (i = 0; i < MAX_SCSI_TAR / 2; i++) pNvRamInfo->niSyncTbl[i] = FPT_RdStack(pNvRamInfo->niBaseAddr, (unsigned char)(i + 5)); portBase = pNvRamInfo->niBaseAddr; for (i = 0; i < MAX_SCSI_TAR; i++) { regOffset = hp_aramBase + 64 + i * 4; RD_HARP32(portBase, regOffset, scamData); pScamTbl = (u32 *)&pNvRamInfo->niScamTbl[i]; *pScamTbl = scamData; } } static unsigned char FPT_RdStack(u32 portBase, unsigned char index) { WR_HARPOON(portBase + hp_stack_addr, index); return RD_HARPOON(portBase + hp_stack_data); } static void FPT_WrStack(u32 portBase, unsigned char index, unsigned char data) { WR_HARPOON(portBase + hp_stack_addr, index); WR_HARPOON(portBase + hp_stack_data, data); } static unsigned char FPT_ChkIfChipInitialized(u32 ioPort) { if ((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != FPT_RdStack(ioPort, 4)) return 0; if ((RD_HARPOON(ioPort + hp_clkctrl_0) & CLKCTRL_DEFAULT) != CLKCTRL_DEFAULT) return 0; if ((RD_HARPOON(ioPort + hp_seltimeout) == TO_250ms) || (RD_HARPOON(ioPort + hp_seltimeout) == TO_290ms)) return 1; return 0; } /*--------------------------------------------------------------------- * * Function: FlashPoint_StartCCB * * Description: Start a command pointed to by p_Sccb. When the * command is completed it will be returned via the * callback function. 
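 *              Requests with an out-of-range target or LUN are completed
 *              immediately with SCCB_ERROR through the callback.  Otherwise
 *              the SCCB is either started on the bus right away or queued
 *              if the BIOS owns the card, interrupts are disabled, or the
 *              target/LUN is still busy.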
* *---------------------------------------------------------------------*/ static void FlashPoint_StartCCB(void *curr_card, struct sccb *p_Sccb) { u32 ioport; unsigned char thisCard, lun; struct sccb *pSaveSccb; CALL_BK_FN callback; struct sccb_card *pCurrCard = curr_card; thisCard = pCurrCard->cardIndex; ioport = pCurrCard->ioPort; if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) { p_Sccb->HostStatus = SCCB_COMPLETE; p_Sccb->SccbStatus = SCCB_ERROR; callback = (CALL_BK_FN) p_Sccb->SccbCallback; if (callback) callback(p_Sccb); return; } FPT_sinits(p_Sccb, thisCard); if (!pCurrCard->cmdCounter) { WR_HARPOON(ioport + hp_semaphore, (RD_HARPOON(ioport + hp_semaphore) | SCCB_MGR_ACTIVE)); if (pCurrCard->globalFlags & F_GREEN_PC) { WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT); WR_HARPOON(ioport + hp_sys_ctrl, 0x00); } } pCurrCard->cmdCounter++; if (RD_HARPOON(ioport + hp_semaphore) & BIOS_IN_USE) { WR_HARPOON(ioport + hp_semaphore, (RD_HARPOON(ioport + hp_semaphore) | TICKLE_ME)); if (p_Sccb->OperationCode == RESET_COMMAND) { pSaveSccb = pCurrCard->currentSCCB; pCurrCard->currentSCCB = p_Sccb; FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); pCurrCard->currentSCCB = pSaveSccb; } else { FPT_queueAddSccb(p_Sccb, thisCard); } } else if ((RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) { if (p_Sccb->OperationCode == RESET_COMMAND) { pSaveSccb = pCurrCard->currentSCCB; pCurrCard->currentSCCB = p_Sccb; FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); pCurrCard->currentSCCB = pSaveSccb; } else { FPT_queueAddSccb(p_Sccb, thisCard); } } else { MDISABLE_INT(ioport); if ((pCurrCard->globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[thisCard][p_Sccb->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) lun = p_Sccb->Lun; else lun = 0; if ((pCurrCard->currentSCCB == NULL) && (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0) && (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun] == 0)) { pCurrCard->currentSCCB = p_Sccb; FPT_ssel(p_Sccb->SccbIOPort, thisCard); } else { if (p_Sccb->OperationCode == RESET_COMMAND) { pSaveSccb = pCurrCard->currentSCCB; pCurrCard->currentSCCB = p_Sccb; FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); pCurrCard->currentSCCB = pSaveSccb; } else { FPT_queueAddSccb(p_Sccb, thisCard); } } MENABLE_INT(ioport); } } /*--------------------------------------------------------------------- * * Function: FlashPoint_AbortCCB * * Description: Abort the command pointed to by p_Sccb. When the * command is completed it will be returned via the * callback function. 
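 *              Returns 0 if the SCCB was found (still waiting in a queue,
 *              currently active, or sitting in the disconnect table) and
 *              was marked aborted; returns -1 if it could not be located.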
* *---------------------------------------------------------------------*/ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb) { u32 ioport; unsigned char thisCard; CALL_BK_FN callback; struct sccb *pSaveSCCB; struct sccb_mgr_tar_info *currTar_Info; ioport = ((struct sccb_card *)pCurrCard)->ioPort; thisCard = ((struct sccb_card *)pCurrCard)->cardIndex; if (!(RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) { if (FPT_queueFindSccb(p_Sccb, thisCard)) { ((struct sccb_card *)pCurrCard)->cmdCounter--; if (!((struct sccb_card *)pCurrCard)->cmdCounter) WR_HARPOON(ioport + hp_semaphore, (RD_HARPOON(ioport + hp_semaphore) & (unsigned char)(~(SCCB_MGR_ACTIVE | TICKLE_ME)))); p_Sccb->SccbStatus = SCCB_ABORT; callback = p_Sccb->SccbCallback; callback(p_Sccb); return 0; } else { if (((struct sccb_card *)pCurrCard)->currentSCCB == p_Sccb) { p_Sccb->SccbStatus = SCCB_ABORT; return 0; } else { if (p_Sccb->Sccb_tag) { MDISABLE_INT(ioport); if (((struct sccb_card *)pCurrCard)-> discQ_Tbl[p_Sccb->Sccb_tag] == p_Sccb) { p_Sccb->SccbStatus = SCCB_ABORT; p_Sccb->Sccb_scsistat = ABORT_ST; p_Sccb->Sccb_scsimsg = ABORT_TASK; if (((struct sccb_card *) pCurrCard)->currentSCCB == NULL) { ((struct sccb_card *) pCurrCard)-> currentSCCB = p_Sccb; FPT_ssel(ioport, thisCard); } else { pSaveSCCB = ((struct sccb_card *)pCurrCard)-> currentSCCB; ((struct sccb_card *) pCurrCard)-> currentSCCB = p_Sccb; FPT_queueSelectFail((struct sccb_card *)pCurrCard, thisCard); ((struct sccb_card *) pCurrCard)-> currentSCCB = pSaveSCCB; } } MENABLE_INT(ioport); return 0; } else { currTar_Info = &FPT_sccbMgrTbl[thisCard][p_Sccb-> TargID]; if (FPT_BL_Card[thisCard]. discQ_Tbl[currTar_Info-> LunDiscQ_Idx[p_Sccb->Lun]] == p_Sccb) { p_Sccb->SccbStatus = SCCB_ABORT; return 0; } } } } } return -1; } /*--------------------------------------------------------------------- * * Function: FlashPoint_InterruptPending * * Description: Do a quick check to determine if there is a pending * interrupt for this card and disable the IRQ Pin if so. * *---------------------------------------------------------------------*/ static unsigned char FlashPoint_InterruptPending(void *pCurrCard) { u32 ioport; ioport = ((struct sccb_card *)pCurrCard)->ioPort; if (RD_HARPOON(ioport + hp_int_status) & INT_ASSERTED) { return 1; } else return 0; } /*--------------------------------------------------------------------- * * Function: FlashPoint_HandleInterrupt * * Description: This is our entry point when an interrupt is generated * by the card and the upper level driver passes it on to * us. 
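 *              Decodes the Harpoon interrupt status and dispatches to the
 *              appropriate phase and queue handlers.  Returns 0 normally,
 *              0xFE when a bus-free timing problem requires the host
 *              adapter to be reset, or the value from the bad-ISR handler.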
* *---------------------------------------------------------------------*/ static int FlashPoint_HandleInterrupt(void *pcard) { struct sccb *currSCCB; unsigned char thisCard, result, bm_status; unsigned short hp_int; unsigned char i, target; struct sccb_card *pCurrCard = pcard; u32 ioport; thisCard = pCurrCard->cardIndex; ioport = pCurrCard->ioPort; MDISABLE_INT(ioport); if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON) bm_status = RD_HARPOON(ioport + hp_ext_status) & (unsigned char)BAD_EXT_STATUS; else bm_status = 0; WR_HARPOON(ioport + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT)); while ((hp_int = RDW_HARPOON((ioport + hp_intstat)) & FPT_default_intena) | bm_status) { currSCCB = pCurrCard->currentSCCB; if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) { result = FPT_SccbMgr_bad_isr(ioport, thisCard, pCurrCard, hp_int); WRW_HARPOON((ioport + hp_intstat), (FIFO | TIMEOUT | RESET | SCAM_SEL)); bm_status = 0; if (result) { MENABLE_INT(ioport); return result; } } else if (hp_int & ICMD_COMP) { if (!(hp_int & BUS_FREE)) { /* Wait for the BusFree before starting a new command. We must also check for being reselected since the BusFree may not show up if another device reselects us in 1.5us or less. SRR Wednesday, 3/8/1995. */ while (! (RDW_HARPOON((ioport + hp_intstat)) & (BUS_FREE | RSEL))) ; } if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_phaseChkFifo(ioport, thisCard); /* WRW_HARPOON((ioport+hp_intstat), (BUS_FREE | ICMD_COMP | ITAR_DISC | XFER_CNT_0)); */ WRW_HARPOON((ioport + hp_intstat), CLR_ALL_INT_1); FPT_autoCmdCmplt(ioport, thisCard); } else if (hp_int & ITAR_DISC) { if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_phaseChkFifo(ioport, thisCard); if (RD_HARPOON(ioport + hp_gp_reg_1) == SAVE_POINTERS) { WR_HARPOON(ioport + hp_gp_reg_1, 0x00); currSCCB->Sccb_XferState |= F_NO_DATA_YET; currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC; } currSCCB->Sccb_scsistat = DISCONNECT_ST; FPT_queueDisconnect(currSCCB, thisCard); /* Wait for the BusFree before starting a new command. We must also check for being reselected since the BusFree may not show up if another device reselects us in 1.5us or less. SRR Wednesday, 3/8/1995. */ while (! (RDW_HARPOON((ioport + hp_intstat)) & (BUS_FREE | RSEL)) && !((RDW_HARPOON((ioport + hp_intstat)) & PHASE) && RD_HARPOON((ioport + hp_scsisig)) == (SCSI_BSY | SCSI_REQ | SCSI_CD | SCSI_MSG | SCSI_IOBIT))) ; /* The additional loop exit condition above detects a timing problem with the revision D/E harpoon chips. The caller should reset the host adapter to recover when 0xFE is returned. */ if (! 
(RDW_HARPOON((ioport + hp_intstat)) & (BUS_FREE | RSEL))) { MENABLE_INT(ioport); return 0xFE; } WRW_HARPOON((ioport + hp_intstat), (BUS_FREE | ITAR_DISC)); pCurrCard->globalFlags |= F_NEW_SCCB_CMD; } else if (hp_int & RSEL) { WRW_HARPOON((ioport + hp_intstat), (PROG_HLT | RSEL | PHASE | BUS_FREE)); if (RDW_HARPOON((ioport + hp_intstat)) & ITAR_DISC) { if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_phaseChkFifo(ioport, thisCard); if (RD_HARPOON(ioport + hp_gp_reg_1) == SAVE_POINTERS) { WR_HARPOON(ioport + hp_gp_reg_1, 0x00); currSCCB->Sccb_XferState |= F_NO_DATA_YET; currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC; } WRW_HARPOON((ioport + hp_intstat), (BUS_FREE | ITAR_DISC)); currSCCB->Sccb_scsistat = DISCONNECT_ST; FPT_queueDisconnect(currSCCB, thisCard); } FPT_sres(ioport, thisCard, pCurrCard); FPT_phaseDecode(ioport, thisCard); } else if ((hp_int & IDO_STRT) && (!(hp_int & BUS_FREE))) { WRW_HARPOON((ioport + hp_intstat), (IDO_STRT | XFER_CNT_0)); FPT_phaseDecode(ioport, thisCard); } else if ((hp_int & IUNKWN) || (hp_int & PROG_HLT)) { WRW_HARPOON((ioport + hp_intstat), (PHASE | IUNKWN | PROG_HLT)); if ((RD_HARPOON(ioport + hp_prgmcnt_0) & (unsigned char) 0x3f) < (unsigned char)SELCHK) { FPT_phaseDecode(ioport, thisCard); } else { /* Harpoon problem some SCSI target device respond to selection with short BUSY pulse (<400ns) this will make the Harpoon is not able to latch the correct Target ID into reg. x53. The work around require to correct this reg. But when write to this reg. (0x53) also increment the FIFO write addr reg (0x6f), thus we need to read this reg first then restore it later. After update to 0x53 */ i = (unsigned char)(RD_HARPOON(ioport + hp_fifowrite)); target = (unsigned char)(RD_HARPOON(ioport + hp_gp_reg_3)); WR_HARPOON(ioport + hp_xfer_pad, (unsigned char)ID_UNLOCK); WR_HARPOON(ioport + hp_select_id, (unsigned char)(target | target << 4)); WR_HARPOON(ioport + hp_xfer_pad, (unsigned char)0x00); WR_HARPOON(ioport + hp_fifowrite, i); WR_HARPOON(ioport + hp_autostart_3, (AUTO_IMMED + TAG_STRT)); } } else if (hp_int & XFER_CNT_0) { WRW_HARPOON((ioport + hp_intstat), XFER_CNT_0); FPT_schkdd(ioport, thisCard); } else if (hp_int & BUS_FREE) { WRW_HARPOON((ioport + hp_intstat), BUS_FREE); if (pCurrCard->globalFlags & F_HOST_XFER_ACT) { FPT_hostDataXferAbort(ioport, thisCard, currSCCB); } FPT_phaseBusFree(ioport, thisCard); } else if (hp_int & ITICKLE) { WRW_HARPOON((ioport + hp_intstat), ITICKLE); pCurrCard->globalFlags |= F_NEW_SCCB_CMD; } if (((struct sccb_card *)pCurrCard)-> globalFlags & F_NEW_SCCB_CMD) { pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD; if (pCurrCard->currentSCCB == NULL) FPT_queueSearchSelect(pCurrCard, thisCard); if (pCurrCard->currentSCCB != NULL) { pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD; FPT_ssel(ioport, thisCard); } break; } } /*end while */ MENABLE_INT(ioport); return 0; } /*--------------------------------------------------------------------- * * Function: Sccb_bad_isr * * Description: Some type of interrupt has occurred which is slightly * out of the ordinary. We will now decode it fully, in * this routine. This is broken up in an attempt to save * processing time. 
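 *              Covers bus-master faults, SCSI bus resets, FIFO and
 *              selection time-out conditions, and SCAM selection.
 *              Returns 0xFF when the bus was reset, otherwise 0.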
* *---------------------------------------------------------------------*/ static unsigned char FPT_SccbMgr_bad_isr(u32 p_port, unsigned char p_card, struct sccb_card *pCurrCard, unsigned short p_int) { unsigned char temp, ScamFlg; struct sccb_mgr_tar_info *currTar_Info; struct nvram_info *pCurrNvRam; if (RD_HARPOON(p_port + hp_ext_status) & (BM_FORCE_OFF | PCI_DEV_TMOUT | BM_PARITY_ERR | PIO_OVERRUN)) { if (pCurrCard->globalFlags & F_HOST_XFER_ACT) { FPT_hostDataXferAbort(p_port, p_card, pCurrCard->currentSCCB); } if (RD_HARPOON(p_port + hp_pci_stat_cfg) & REC_MASTER_ABORT) { WR_HARPOON(p_port + hp_pci_stat_cfg, (RD_HARPOON(p_port + hp_pci_stat_cfg) & ~REC_MASTER_ABORT)); WR_HARPOON(p_port + hp_host_blk_cnt, 0x00); } if (pCurrCard->currentSCCB != NULL) { if (!pCurrCard->currentSCCB->HostStatus) pCurrCard->currentSCCB->HostStatus = SCCB_BM_ERR; FPT_sxfrp(p_port, p_card); temp = (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H)); WR_HARPOON(p_port + hp_ee_ctrl, ((unsigned char)temp | SEE_MS | SEE_CS)); WR_HARPOON(p_port + hp_ee_ctrl, temp); if (! (RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) { FPT_phaseDecode(p_port, p_card); } } } else if (p_int & RESET) { WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT); WR_HARPOON(p_port + hp_sys_ctrl, 0x00); if (pCurrCard->currentSCCB != NULL) { if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_hostDataXferAbort(p_port, p_card, pCurrCard->currentSCCB); } DISABLE_AUTO(p_port); FPT_sresb(p_port, p_card); while (RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST) { } pCurrNvRam = pCurrCard->pNvRamInfo; if (pCurrNvRam) { ScamFlg = pCurrNvRam->niScamConf; } else { ScamFlg = (unsigned char)FPT_utilEERead(p_port, SCAM_CONFIG / 2); } FPT_XbowInit(p_port, ScamFlg); FPT_scini(p_card, pCurrCard->ourId, 0); return 0xFF; } else if (p_int & FIFO) { WRW_HARPOON((p_port + hp_intstat), FIFO); if (pCurrCard->currentSCCB != NULL) FPT_sxfrp(p_port, p_card); } else if (p_int & TIMEOUT) { DISABLE_AUTO(p_port); WRW_HARPOON((p_port + hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE | PHASE | IUNKWN)); pCurrCard->currentSCCB->HostStatus = SCCB_SELECTION_TIMEOUT; currTar_Info = &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID]; if ((pCurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) currTar_Info->TarLUNBusy[pCurrCard->currentSCCB->Lun] = 0; else currTar_Info->TarLUNBusy[0] = 0; if (currTar_Info->TarEEValue & EE_SYNC_MASK) { currTar_Info->TarSyncCtrl = 0; currTar_Info->TarStatus &= ~TAR_SYNC_MASK; } if (currTar_Info->TarEEValue & EE_WIDE_SCSI) { currTar_Info->TarStatus &= ~TAR_WIDE_MASK; } FPT_sssyncv(p_port, pCurrCard->currentSCCB->TargID, NARROW_SCSI, currTar_Info); FPT_queueCmdComplete(pCurrCard, pCurrCard->currentSCCB, p_card); } else if (p_int & SCAM_SEL) { FPT_scarb(p_port, LEVEL2_TAR); FPT_scsel(p_port); FPT_scasid(p_card, p_port); FPT_scbusf(p_port); WRW_HARPOON((p_port + hp_intstat), SCAM_SEL); } return 0x00; } /*--------------------------------------------------------------------- * * Function: SccbMgrTableInit * * Description: Initialize all Sccb manager data structures. 
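 *              Clears the I/O port, card info pointer, card index,
 *              adapter ID and NVRAM pointer for every one of the
 *              MAX_CARDS slots.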
 *
 *---------------------------------------------------------------------*/
static void FPT_SccbMgrTableInitAll(void)
{
	unsigned char thisCard;

	for (thisCard = 0; thisCard < MAX_CARDS; thisCard++) {
		FPT_SccbMgrTableInitCard(&FPT_BL_Card[thisCard], thisCard);

		FPT_BL_Card[thisCard].ioPort = 0x00;
		FPT_BL_Card[thisCard].cardInfo = NULL;
		FPT_BL_Card[thisCard].cardIndex = 0xFF;
		FPT_BL_Card[thisCard].ourId = 0x00;
		FPT_BL_Card[thisCard].pNvRamInfo = NULL;
	}
}

/*---------------------------------------------------------------------
 *
 * Function: FPT_SccbMgrTableInitCard
 *
 * Description: Initialize the Sccb manager data structures for one card.
 *
 *---------------------------------------------------------------------*/

static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard,
				     unsigned char p_card)
{
	unsigned char scsiID, qtag;

	for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
		FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
	}

	for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) {
		FPT_sccbMgrTbl[p_card][scsiID].TarStatus = 0;
		FPT_sccbMgrTbl[p_card][scsiID].TarEEValue = 0;
		FPT_SccbMgrTableInitTarget(p_card, scsiID);
	}

	pCurrCard->scanIndex = 0x00;
	pCurrCard->currentSCCB = NULL;
	pCurrCard->globalFlags = 0x00;
	pCurrCard->cmdCounter = 0x00;
	pCurrCard->tagQ_Lst = 0x01;
	pCurrCard->discQCount = 0;
}

/*---------------------------------------------------------------------
 *
 * Function: FPT_SccbMgrTableInitTarget
 *
 * Description: Initialize the Sccb manager data structures for one
 *              target on the given card.
 *
 *---------------------------------------------------------------------*/

static void FPT_SccbMgrTableInitTarget(unsigned char p_card,
				       unsigned char target)
{
	unsigned char lun, qtag;
	struct sccb_mgr_tar_info *currTar_Info;

	currTar_Info = &FPT_sccbMgrTbl[p_card][target];

	currTar_Info->TarSelQ_Cnt = 0;
	currTar_Info->TarSyncCtrl = 0;

	currTar_Info->TarSelQ_Head = NULL;
	currTar_Info->TarSelQ_Tail = NULL;
	currTar_Info->TarTagQ_Cnt = 0;
	currTar_Info->TarLUN_CA = 0;

	for (lun = 0; lun < MAX_LUN; lun++) {
		currTar_Info->TarLUNBusy[lun] = 0;
		currTar_Info->LunDiscQ_Idx[lun] = 0;
	}

	for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
		if (FPT_BL_Card[p_card].discQ_Tbl[qtag] != NULL) {
			if (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID ==
			    target) {
				FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
				FPT_BL_Card[p_card].discQCount--;
			}
		}
	}
}

/*---------------------------------------------------------------------
 *
 * Function: FPT_sfm
 *
 * Description: Read in a message byte from the SCSI bus, and check
 *              for a parity error.
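 *              A REQ time-out forces the returned byte to zero; on a
 *              detected parity error MSG_PARITY_ERROR is noted in the
 *              SCCB and the remaining message bytes are drained until
 *              the target leaves the MESSAGE IN phase.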
* *---------------------------------------------------------------------*/ static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB) { unsigned char message; unsigned short TimeOutLoop; TimeOutLoop = 0; while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && (TimeOutLoop++ < 20000)) { } WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); message = RD_HARPOON(port + hp_scsidata_0); WR_HARPOON(port + hp_scsisig, SCSI_ACK + S_MSGI_PH); if (TimeOutLoop > 20000) message = 0x00; /* force message byte = 0 if Time Out on Req */ if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && (RD_HARPOON(port + hp_addstat) & SCSI_PAR_ERR)) { WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); WR_HARPOON(port + hp_xferstat, 0); WR_HARPOON(port + hp_fiforead, 0); WR_HARPOON(port + hp_fifowrite, 0); if (pCurrSCCB != NULL) { pCurrSCCB->Sccb_scsimsg = MSG_PARITY_ERROR; } message = 0x00; do { ACCEPT_MSG_ATN(port); TimeOutLoop = 0; while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && (TimeOutLoop++ < 20000)) { } if (TimeOutLoop > 20000) { WRW_HARPOON((port + hp_intstat), PARITY); return message; } if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) != S_MSGI_PH) { WRW_HARPOON((port + hp_intstat), PARITY); return message; } WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); RD_HARPOON(port + hp_scsidata_0); WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); } while (1); } WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); WR_HARPOON(port + hp_xferstat, 0); WR_HARPOON(port + hp_fiforead, 0); WR_HARPOON(port + hp_fifowrite, 0); return message; } /*--------------------------------------------------------------------- * * Function: FPT_ssel * * Description: Load up automation and select target device. * *---------------------------------------------------------------------*/ static void FPT_ssel(u32 port, unsigned char p_card) { unsigned char auto_loaded, i, target, *theCCB; u32 cdb_reg; struct sccb_card *CurrCard; struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; unsigned char lastTag, lun; CurrCard = &FPT_BL_Card[p_card]; currSCCB = CurrCard->currentSCCB; target = currSCCB->TargID; currTar_Info = &FPT_sccbMgrTbl[p_card][target]; lastTag = CurrCard->tagQ_Lst; ARAM_ACCESS(port); if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT) currSCCB->ControlByte &= ~F_USE_CMD_Q; if (((CurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) lun = currSCCB->Lun; else lun = 0; if (CurrCard->globalFlags & F_TAG_STARTED) { if (!(currSCCB->ControlByte & F_USE_CMD_Q)) { if ((currTar_Info->TarLUN_CA == 0) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING)) { if (currTar_Info->TarTagQ_Cnt != 0) { currTar_Info->TarLUNBusy[lun] = 1; FPT_queueSelectFail(CurrCard, p_card); SGRAM_ACCESS(port); return; } else { currTar_Info->TarLUNBusy[lun] = 1; } } /*End non-tagged */ else { currTar_Info->TarLUNBusy[lun] = 1; } } /*!Use cmd Q Tagged */ else { if (currTar_Info->TarLUN_CA == 1) { FPT_queueSelectFail(CurrCard, p_card); SGRAM_ACCESS(port); return; } currTar_Info->TarLUNBusy[lun] = 1; } /*else use cmd Q tagged */ } /*if glob tagged started */ else { currTar_Info->TarLUNBusy[lun] = 1; } if ((((CurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) || (!(currSCCB->ControlByte & F_USE_CMD_Q)))) { if (CurrCard->discQCount >= QUEUE_DEPTH) { currTar_Info->TarLUNBusy[lun] = 1; FPT_queueSelectFail(CurrCard, p_card); SGRAM_ACCESS(port); return; } for (i = 1; i < QUEUE_DEPTH; i++) { if (++lastTag >= QUEUE_DEPTH) lastTag = 1; if 
(CurrCard->discQ_Tbl[lastTag] == NULL) { CurrCard->tagQ_Lst = lastTag; currTar_Info->LunDiscQ_Idx[lun] = lastTag; CurrCard->discQ_Tbl[lastTag] = currSCCB; CurrCard->discQCount++; break; } } if (i == QUEUE_DEPTH) { currTar_Info->TarLUNBusy[lun] = 1; FPT_queueSelectFail(CurrCard, p_card); SGRAM_ACCESS(port); return; } } auto_loaded = 0; WR_HARPOON(port + hp_select_id, target); WR_HARPOON(port + hp_gp_reg_3, target); /* Use by new automation logic */ if (currSCCB->OperationCode == RESET_COMMAND) { WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + (currSCCB-> Sccb_idmsg & ~DISC_PRIV))); WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP); currSCCB->Sccb_scsimsg = TARGET_RESET; WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); auto_loaded = 1; currSCCB->Sccb_scsistat = SELECT_BDR_ST; if (currTar_Info->TarEEValue & EE_SYNC_MASK) { currTar_Info->TarSyncCtrl = 0; currTar_Info->TarStatus &= ~TAR_SYNC_MASK; } if (currTar_Info->TarEEValue & EE_WIDE_SCSI) { currTar_Info->TarStatus &= ~TAR_WIDE_MASK; } FPT_sssyncv(port, target, NARROW_SCSI, currTar_Info); FPT_SccbMgrTableInitTarget(p_card, target); } else if (currSCCB->Sccb_scsistat == ABORT_ST) { WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + (currSCCB-> Sccb_idmsg & ~DISC_PRIV))); WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ); WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + (((unsigned char)(currSCCB-> ControlByte & TAG_TYPE_MASK) >> 6) | (unsigned char) 0x20))); WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + currSCCB->Sccb_tag)); WRW_HARPOON((port + SYNC_MSGS + 4), (BRH_OP + ALWAYS + NP)); WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); auto_loaded = 1; } else if (!(currTar_Info->TarStatus & WIDE_NEGOCIATED)) { auto_loaded = FPT_siwidn(port, p_card); currSCCB->Sccb_scsistat = SELECT_WN_ST; } else if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_SUPPORTED)) { auto_loaded = FPT_sisyncn(port, p_card, 0); currSCCB->Sccb_scsistat = SELECT_SN_ST; } if (!auto_loaded) { if (currSCCB->ControlByte & F_USE_CMD_Q) { CurrCard->globalFlags |= F_TAG_STARTED; if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT) { currSCCB->ControlByte &= ~F_USE_CMD_Q; /* Fix up the start instruction with a jump to Non-Tag-CMD handling */ WRW_HARPOON((port + ID_MSG_STRT), BRH_OP + ALWAYS + NTCMD); WRW_HARPOON((port + NON_TAG_ID_MSG), (MPM_OP + AMSG_OUT + currSCCB->Sccb_idmsg)); WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); /* Setup our STATE so we know what happened when the wheels fall off. 
*/ currSCCB->Sccb_scsistat = SELECT_ST; currTar_Info->TarLUNBusy[lun] = 1; } else { WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + currSCCB->Sccb_idmsg)); WRW_HARPOON((port + ID_MSG_STRT + 2), (MPM_OP + AMSG_OUT + (((unsigned char)(currSCCB-> ControlByte & TAG_TYPE_MASK) >> 6) | (unsigned char)0x20))); for (i = 1; i < QUEUE_DEPTH; i++) { if (++lastTag >= QUEUE_DEPTH) lastTag = 1; if (CurrCard->discQ_Tbl[lastTag] == NULL) { WRW_HARPOON((port + ID_MSG_STRT + 6), (MPM_OP + AMSG_OUT + lastTag)); CurrCard->tagQ_Lst = lastTag; currSCCB->Sccb_tag = lastTag; CurrCard->discQ_Tbl[lastTag] = currSCCB; CurrCard->discQCount++; break; } } if (i == QUEUE_DEPTH) { currTar_Info->TarLUNBusy[lun] = 1; FPT_queueSelectFail(CurrCard, p_card); SGRAM_ACCESS(port); return; } currSCCB->Sccb_scsistat = SELECT_Q_ST; WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); } } else { WRW_HARPOON((port + ID_MSG_STRT), BRH_OP + ALWAYS + NTCMD); WRW_HARPOON((port + NON_TAG_ID_MSG), (MPM_OP + AMSG_OUT + currSCCB->Sccb_idmsg)); currSCCB->Sccb_scsistat = SELECT_ST; WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); } theCCB = (unsigned char *)&currSCCB->Cdb[0]; cdb_reg = port + CMD_STRT; for (i = 0; i < currSCCB->CdbLength; i++) { WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + *theCCB)); cdb_reg += 2; theCCB++; } if (currSCCB->CdbLength != TWELVE_BYTE_CMD) WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP)); } /* auto_loaded */ WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); WR_HARPOON(port + hp_xferstat, 0x00); WRW_HARPOON((port + hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE)); WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT)); if (!(currSCCB->Sccb_MGRFlags & F_DEV_SELECTED)) { WR_HARPOON(port + hp_scsictrl_0, (SEL_TAR | ENA_ATN | ENA_RESEL | ENA_SCAM_SEL)); } else { /* auto_loaded = (RD_HARPOON(port+hp_autostart_3) & (unsigned char)0x1F); auto_loaded |= AUTO_IMMED; */ auto_loaded = AUTO_IMMED; DISABLE_AUTO(port); WR_HARPOON(port + hp_autostart_3, auto_loaded); } SGRAM_ACCESS(port); } /*--------------------------------------------------------------------- * * Function: FPT_sres * * Description: Hookup the correct CCB and handle the incoming messages. 
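 *              Any selection that was in progress is backed out first,
 *              then the identify (and optional queue-tag) message bytes
 *              pick which disconnected SCCB becomes current.  A bad
 *              message byte is retried once with MSG_PARITY_ERROR and
 *              then answered with TARGET_RESET.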
* *---------------------------------------------------------------------*/ static void FPT_sres(u32 port, unsigned char p_card, struct sccb_card *pCurrCard) { unsigned char our_target, message, lun = 0, tag, msgRetryCount; struct sccb_mgr_tar_info *currTar_Info; struct sccb *currSCCB; if (pCurrCard->currentSCCB != NULL) { currTar_Info = &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID]; DISABLE_AUTO(port); WR_HARPOON((port + hp_scsictrl_0), (ENA_RESEL | ENA_SCAM_SEL)); currSCCB = pCurrCard->currentSCCB; if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { currTar_Info->TarStatus &= ~TAR_WIDE_MASK; currSCCB->Sccb_scsistat = BUS_FREE_ST; } if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { currTar_Info->TarStatus &= ~TAR_SYNC_MASK; currSCCB->Sccb_scsistat = BUS_FREE_ST; } if (((pCurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { currTar_Info->TarLUNBusy[currSCCB->Lun] = 0; if (currSCCB->Sccb_scsistat != ABORT_ST) { pCurrCard->discQCount--; pCurrCard->discQ_Tbl[currTar_Info-> LunDiscQ_Idx[currSCCB-> Lun]] = NULL; } } else { currTar_Info->TarLUNBusy[0] = 0; if (currSCCB->Sccb_tag) { if (currSCCB->Sccb_scsistat != ABORT_ST) { pCurrCard->discQCount--; pCurrCard->discQ_Tbl[currSCCB-> Sccb_tag] = NULL; } } else { if (currSCCB->Sccb_scsistat != ABORT_ST) { pCurrCard->discQCount--; pCurrCard->discQ_Tbl[currTar_Info-> LunDiscQ_Idx[0]] = NULL; } } } FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card); } WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); our_target = (unsigned char)(RD_HARPOON(port + hp_select_id) >> 4); currTar_Info = &FPT_sccbMgrTbl[p_card][our_target]; msgRetryCount = 0; do { currTar_Info = &FPT_sccbMgrTbl[p_card][our_target]; tag = 0; while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) { if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) { WRW_HARPOON((port + hp_intstat), PHASE); return; } } WRW_HARPOON((port + hp_intstat), PHASE); if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH) { message = FPT_sfm(port, pCurrCard->currentSCCB); if (message) { if (message <= (0x80 | LUN_MASK)) { lun = message & (unsigned char)LUN_MASK; if ((currTar_Info-> TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING) { if (currTar_Info->TarTagQ_Cnt != 0) { if (! (currTar_Info-> TarLUN_CA)) { ACCEPT_MSG(port); /*Release the ACK for ID msg. */ message = FPT_sfm (port, pCurrCard-> currentSCCB); if (message) { ACCEPT_MSG (port); } else message = 0; if (message != 0) { tag = FPT_sfm (port, pCurrCard-> currentSCCB); if (! (tag)) message = 0; } } /*C.A. exists! */ } /*End Q cnt != 0 */ } /*End Tag cmds supported! */ } /*End valid ID message. */ else { ACCEPT_MSG_ATN(port); } } /* End good id message. */ else { message = 0; } } else { ACCEPT_MSG_ATN(port); while (! (RDW_HARPOON((port + hp_intstat)) & (PHASE | RESET)) && !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ) && (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ; return; } if (message == 0) { msgRetryCount++; if (msgRetryCount == 1) { FPT_SendMsg(port, MSG_PARITY_ERROR); } else { FPT_SendMsg(port, TARGET_RESET); FPT_sssyncv(port, our_target, NARROW_SCSI, currTar_Info); if (FPT_sccbMgrTbl[p_card][our_target]. TarEEValue & EE_SYNC_MASK) { FPT_sccbMgrTbl[p_card][our_target]. TarStatus &= ~TAR_SYNC_MASK; } if (FPT_sccbMgrTbl[p_card][our_target]. TarEEValue & EE_WIDE_SCSI) { FPT_sccbMgrTbl[p_card][our_target]. 
TarStatus &= ~TAR_WIDE_MASK; } FPT_queueFlushTargSccb(p_card, our_target, SCCB_COMPLETE); FPT_SccbMgrTableInitTarget(p_card, our_target); return; } } } while (message == 0); if (((pCurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { currTar_Info->TarLUNBusy[lun] = 1; pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[lun]]; if (pCurrCard->currentSCCB != NULL) { ACCEPT_MSG(port); } else { ACCEPT_MSG_ATN(port); } } else { currTar_Info->TarLUNBusy[0] = 1; if (tag) { if (pCurrCard->discQ_Tbl[tag] != NULL) { pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[tag]; currTar_Info->TarTagQ_Cnt--; ACCEPT_MSG(port); } else { ACCEPT_MSG_ATN(port); } } else { pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]]; if (pCurrCard->currentSCCB != NULL) { ACCEPT_MSG(port); } else { ACCEPT_MSG_ATN(port); } } } if (pCurrCard->currentSCCB != NULL) { if (pCurrCard->currentSCCB->Sccb_scsistat == ABORT_ST) { /* During Abort Tag command, the target could have got re-selected and completed the command. Check the select Q and remove the CCB if it is in the Select Q */ FPT_queueFindSccb(pCurrCard->currentSCCB, p_card); } } while (!(RDW_HARPOON((port + hp_intstat)) & (PHASE | RESET)) && !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ) && (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ; } static void FPT_SendMsg(u32 port, unsigned char message) { while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) { if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) { WRW_HARPOON((port + hp_intstat), PHASE); return; } } WRW_HARPOON((port + hp_intstat), PHASE); if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGO_PH) { WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0)); WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN); WR_HARPOON(port + hp_scsidata_0, message); WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); ACCEPT_MSG(port); WR_HARPOON(port + hp_portctrl_0, 0x00); if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) || (message == ABORT_TASK)) { while (! (RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) { } if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { WRW_HARPOON((port + hp_intstat), BUS_FREE); } } } } /*--------------------------------------------------------------------- * * Function: FPT_sdecm * * Description: Determine the proper response to the message from the * target device. 
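 *              Handles restore-pointers, command-complete, message-reject
 *              (backing off sync, wide or tagged negotiation), extended
 *              messages and ignore-wide-residue; anything unrecognised is
 *              answered with MESSAGE_REJECT.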
* *---------------------------------------------------------------------*/ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card) { struct sccb *currSCCB; struct sccb_card *CurrCard; struct sccb_mgr_tar_info *currTar_Info; CurrCard = &FPT_BL_Card[p_card]; currSCCB = CurrCard->currentSCCB; currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; if (message == RESTORE_POINTERS) { if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) { currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC; FPT_hostDataXferRestart(currSCCB); } ACCEPT_MSG(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } else if (message == COMMAND_COMPLETE) { if (currSCCB->Sccb_scsistat == SELECT_Q_ST) { currTar_Info->TarStatus &= ~(unsigned char)TAR_TAG_Q_MASK; currTar_Info->TarStatus |= (unsigned char)TAG_Q_REJECT; } ACCEPT_MSG(port); } else if ((message == NOP) || (message >= IDENTIFY_BASE) || (message == INITIATE_RECOVERY) || (message == RELEASE_RECOVERY)) { ACCEPT_MSG(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } else if (message == MESSAGE_REJECT) { if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) || (currSCCB->Sccb_scsistat == SELECT_WN_ST) || ((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING) || ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING)) { WRW_HARPOON((port + hp_intstat), BUS_FREE); ACCEPT_MSG(port); while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE))) { } if (currSCCB->Lun == 0x00) { if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { currTar_Info->TarStatus |= (unsigned char)SYNC_SUPPORTED; currTar_Info->TarEEValue &= ~EE_SYNC_MASK; } else if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { currTar_Info->TarStatus = (currTar_Info-> TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED; currTar_Info->TarEEValue &= ~EE_WIDE_SCSI; } else if ((currTar_Info-> TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING) { currTar_Info->TarStatus = (currTar_Info-> TarStatus & ~(unsigned char) TAR_TAG_Q_MASK) | TAG_Q_REJECT; currSCCB->ControlByte &= ~F_USE_CMD_Q; CurrCard->discQCount--; CurrCard->discQ_Tbl[currSCCB-> Sccb_tag] = NULL; currSCCB->Sccb_tag = 0x00; } } if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { if (currSCCB->Lun == 0x00) { WRW_HARPOON((port + hp_intstat), BUS_FREE); CurrCard->globalFlags |= F_NEW_SCCB_CMD; } } else { if ((CurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info-> TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) currTar_Info->TarLUNBusy[currSCCB-> Lun] = 1; else currTar_Info->TarLUNBusy[0] = 1; currSCCB->ControlByte &= ~(unsigned char)F_USE_CMD_Q; WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } else { ACCEPT_MSG(port); while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) && (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE))) { } if (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)) { WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } } else if (message == EXTENDED_MESSAGE) { ACCEPT_MSG(port); FPT_shandem(port, p_card, currSCCB); } else if (message == IGNORE_WIDE_RESIDUE) { ACCEPT_MSG(port); /* ACK the RESIDUE MSG */ message = FPT_sfm(port, currSCCB); if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR) ACCEPT_MSG(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } else { currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; currSCCB->Sccb_scsimsg = MESSAGE_REJECT; ACCEPT_MSG_ATN(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } /*--------------------------------------------------------------------- * * 
Function: FPT_shandem * * Description: Decide what to do with the extended message. * *---------------------------------------------------------------------*/ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB) { unsigned char length, message; length = FPT_sfm(port, pCurrSCCB); if (length) { ACCEPT_MSG(port); message = FPT_sfm(port, pCurrSCCB); if (message) { if (message == EXTENDED_SDTR) { if (length == 0x03) { ACCEPT_MSG(port); FPT_stsyncn(port, p_card); } else { pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT; ACCEPT_MSG_ATN(port); } } else if (message == EXTENDED_WDTR) { if (length == 0x02) { ACCEPT_MSG(port); FPT_stwidn(port, p_card); } else { pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT; ACCEPT_MSG_ATN(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } else { pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT; ACCEPT_MSG_ATN(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } else { if (pCurrSCCB->Sccb_scsimsg != MSG_PARITY_ERROR) ACCEPT_MSG(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } else { if (pCurrSCCB->Sccb_scsimsg == MSG_PARITY_ERROR) WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } /*--------------------------------------------------------------------- * * Function: FPT_sisyncn * * Description: Read in a message byte from the SCSI bus, and check * for a parity error. * *---------------------------------------------------------------------*/ static unsigned char FPT_sisyncn(u32 port, unsigned char p_card, unsigned char syncFlag) { struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; currSCCB = FPT_BL_Card[p_card].currentSCCB; currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)) { WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + (currSCCB-> Sccb_idmsg & ~(unsigned char)DISC_PRIV))); WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ); WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03)); WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + EXTENDED_SDTR)); if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB) WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + 12)); else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB) WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + 25)); else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB) WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + 50)); else WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + 00)); WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP)); WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + DEFAULT_OFFSET)); WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP)); if (syncFlag == 0) { WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); currTar_Info->TarStatus = ((currTar_Info-> TarStatus & ~(unsigned char)TAR_SYNC_MASK) | (unsigned char)SYNC_TRYING); } else { WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT)); } return 1; } else { currTar_Info->TarStatus |= (unsigned char)SYNC_SUPPORTED; currTar_Info->TarEEValue &= ~EE_SYNC_MASK; return 0; } } /*--------------------------------------------------------------------- * * Function: FPT_stsyncn * * Description: The has sent us a Sync Nego message so handle it as * necessary. 
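 *              The period and offset received from the target are clamped
 *              to our own limits, the sync register for that ID is
 *              updated, and an SDTR reply is sent if the target initiated
 *              the negotiation.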
* *---------------------------------------------------------------------*/ static void FPT_stsyncn(u32 port, unsigned char p_card) { unsigned char sync_msg, offset, sync_reg, our_sync_msg; struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; currSCCB = FPT_BL_Card[p_card].currentSCCB; currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; sync_msg = FPT_sfm(port, currSCCB); if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) { WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); return; } ACCEPT_MSG(port); offset = FPT_sfm(port, currSCCB); if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) { WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); return; } if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB) our_sync_msg = 12; /* Setup our Message to 20mb/s */ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB) our_sync_msg = 25; /* Setup our Message to 10mb/s */ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB) our_sync_msg = 50; /* Setup our Message to 5mb/s */ else our_sync_msg = 0; /* Message = Async */ if (sync_msg < our_sync_msg) { sync_msg = our_sync_msg; /*if faster, then set to max. */ } if (offset == ASYNC) sync_msg = ASYNC; if (offset > MAX_OFFSET) offset = MAX_OFFSET; sync_reg = 0x00; if (sync_msg > 12) sync_reg = 0x20; /* Use 10MB/s */ if (sync_msg > 25) sync_reg = 0x40; /* Use 6.6MB/s */ if (sync_msg > 38) sync_reg = 0x60; /* Use 5MB/s */ if (sync_msg > 50) sync_reg = 0x80; /* Use 4MB/s */ if (sync_msg > 62) sync_reg = 0xA0; /* Use 3.33MB/s */ if (sync_msg > 75) sync_reg = 0xC0; /* Use 2.85MB/s */ if (sync_msg > 87) sync_reg = 0xE0; /* Use 2.5MB/s */ if (sync_msg > 100) { sync_reg = 0x00; /* Use ASYNC */ offset = 0x00; } if (currTar_Info->TarStatus & WIDE_ENABLED) sync_reg |= offset; else sync_reg |= (offset | NARROW_SCSI); FPT_sssyncv(port, currSCCB->TargID, sync_reg, currTar_Info); if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { ACCEPT_MSG(port); currTar_Info->TarStatus = ((currTar_Info->TarStatus & ~(unsigned char)TAR_SYNC_MASK) | (unsigned char)SYNC_SUPPORTED); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } else { ACCEPT_MSG_ATN(port); FPT_sisyncr(port, sync_msg, offset); currTar_Info->TarStatus = ((currTar_Info->TarStatus & ~(unsigned char)TAR_SYNC_MASK) | (unsigned char)SYNC_SUPPORTED); } } /*--------------------------------------------------------------------- * * Function: FPT_sisyncr * * Description: Answer the targets sync message. 
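 *              The reply is built in the SYNC_MSGS automation area and
 *              sent with a command-only start; the routine then waits for
 *              the exchange to complete.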
* *---------------------------------------------------------------------*/ static void FPT_sisyncr(u32 port, unsigned char sync_pulse, unsigned char offset) { ARAM_ACCESS(port); WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03)); WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + EXTENDED_SDTR)); WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse)); WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP)); WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset)); WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP)); SGRAM_ACCESS(port); WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1); WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT)); while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) { } } /*--------------------------------------------------------------------- * * Function: FPT_siwidn * * Description: Read in a message byte from the SCSI bus, and check * for a parity error. * *---------------------------------------------------------------------*/ static unsigned char FPT_siwidn(u32 port, unsigned char p_card) { struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; currSCCB = FPT_BL_Card[p_card].currentSCCB; currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; if (!((currTar_Info->TarStatus & TAR_WIDE_MASK) == WIDE_NEGOCIATED)) { WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT + (currSCCB-> Sccb_idmsg & ~(unsigned char)DISC_PRIV))); WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ); WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02)); WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + EXTENDED_WDTR)); WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP)); WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + SM16BIT)); WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP)); WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT)); currTar_Info->TarStatus = ((currTar_Info->TarStatus & ~(unsigned char)TAR_WIDE_MASK) | (unsigned char)WIDE_ENABLED); return 1; } else { currTar_Info->TarStatus = ((currTar_Info->TarStatus & ~(unsigned char)TAR_WIDE_MASK) | WIDE_NEGOCIATED); currTar_Info->TarEEValue &= ~EE_WIDE_SCSI; return 0; } } /*--------------------------------------------------------------------- * * Function: FPT_stwidn * * Description: The has sent us a Wide Nego message so handle it as * necessary. * *---------------------------------------------------------------------*/ static void FPT_stwidn(u32 port, unsigned char p_card) { unsigned char width; struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; currSCCB = FPT_BL_Card[p_card].currentSCCB; currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID]; width = FPT_sfm(port, currSCCB); if ((width == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) { WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); return; } if (!(currTar_Info->TarEEValue & EE_WIDE_SCSI)) width = 0; if (width) { currTar_Info->TarStatus |= WIDE_ENABLED; width = 0; } else { width = NARROW_SCSI; currTar_Info->TarStatus &= ~WIDE_ENABLED; } FPT_sssyncv(port, currSCCB->TargID, width, currTar_Info); if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { currTar_Info->TarStatus |= WIDE_NEGOCIATED; if (! 
((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_SUPPORTED)) { ACCEPT_MSG_ATN(port); ARAM_ACCESS(port); FPT_sisyncn(port, p_card, 1); currSCCB->Sccb_scsistat = SELECT_SN_ST; SGRAM_ACCESS(port); } else { ACCEPT_MSG(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } else { ACCEPT_MSG_ATN(port); if (currTar_Info->TarEEValue & EE_WIDE_SCSI) width = SM16BIT; else width = SM8BIT; FPT_siwidr(port, width); currTar_Info->TarStatus |= (WIDE_NEGOCIATED | WIDE_ENABLED); } } /*--------------------------------------------------------------------- * * Function: FPT_siwidr * * Description: Answer the targets Wide nego message. * *---------------------------------------------------------------------*/ static void FPT_siwidr(u32 port, unsigned char width) { ARAM_ACCESS(port); WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE)); WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02)); WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + EXTENDED_WDTR)); WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP)); WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width)); WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP)); SGRAM_ACCESS(port); WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1); WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT)); while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) { } } /*--------------------------------------------------------------------- * * Function: FPT_sssyncv * * Description: Write the desired value to the Sync Register for the * ID specified. * *---------------------------------------------------------------------*/ static void FPT_sssyncv(u32 p_port, unsigned char p_id, unsigned char p_sync_value, struct sccb_mgr_tar_info *currTar_Info) { unsigned char index; index = p_id; switch (index) { case 0: index = 12; /* hp_synctarg_0 */ break; case 1: index = 13; /* hp_synctarg_1 */ break; case 2: index = 14; /* hp_synctarg_2 */ break; case 3: index = 15; /* hp_synctarg_3 */ break; case 4: index = 8; /* hp_synctarg_4 */ break; case 5: index = 9; /* hp_synctarg_5 */ break; case 6: index = 10; /* hp_synctarg_6 */ break; case 7: index = 11; /* hp_synctarg_7 */ break; case 8: index = 4; /* hp_synctarg_8 */ break; case 9: index = 5; /* hp_synctarg_9 */ break; case 10: index = 6; /* hp_synctarg_10 */ break; case 11: index = 7; /* hp_synctarg_11 */ break; case 12: index = 0; /* hp_synctarg_12 */ break; case 13: index = 1; /* hp_synctarg_13 */ break; case 14: index = 2; /* hp_synctarg_14 */ break; case 15: index = 3; /* hp_synctarg_15 */ } WR_HARPOON(p_port + hp_synctarg_base + index, p_sync_value); currTar_Info->TarSyncCtrl = p_sync_value; } /*--------------------------------------------------------------------- * * Function: FPT_sresb * * Description: Reset the desired card's SCSI bus. 
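 *              Also drops every target back to narrow/async, clears the
 *              per-card queues, counters and disconnect table, and clears
 *              the interrupt-disable gate before returning.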
* *---------------------------------------------------------------------*/ static void FPT_sresb(u32 port, unsigned char p_card) { unsigned char scsiID, i; struct sccb_mgr_tar_info *currTar_Info; WR_HARPOON(port + hp_page_ctrl, (RD_HARPOON(port + hp_page_ctrl) | G_INT_DISABLE)); WRW_HARPOON((port + hp_intstat), CLR_ALL_INT); WR_HARPOON(port + hp_scsictrl_0, SCSI_RST); scsiID = RD_HARPOON(port + hp_seltimeout); WR_HARPOON(port + hp_seltimeout, TO_5ms); WRW_HARPOON((port + hp_intstat), TIMEOUT); WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT | START_TO)); while (!(RDW_HARPOON((port + hp_intstat)) & TIMEOUT)) { } WR_HARPOON(port + hp_seltimeout, scsiID); WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL); FPT_Wait(port, TO_5ms); WRW_HARPOON((port + hp_intstat), CLR_ALL_INT); WR_HARPOON(port + hp_int_mask, (RD_HARPOON(port + hp_int_mask) | 0x00)); for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) { currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID]; if (currTar_Info->TarEEValue & EE_SYNC_MASK) { currTar_Info->TarSyncCtrl = 0; currTar_Info->TarStatus &= ~TAR_SYNC_MASK; } if (currTar_Info->TarEEValue & EE_WIDE_SCSI) { currTar_Info->TarStatus &= ~TAR_WIDE_MASK; } FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info); FPT_SccbMgrTableInitTarget(p_card, scsiID); } FPT_BL_Card[p_card].scanIndex = 0x00; FPT_BL_Card[p_card].currentSCCB = NULL; FPT_BL_Card[p_card].globalFlags &= ~(F_TAG_STARTED | F_HOST_XFER_ACT | F_NEW_SCCB_CMD); FPT_BL_Card[p_card].cmdCounter = 0x00; FPT_BL_Card[p_card].discQCount = 0x00; FPT_BL_Card[p_card].tagQ_Lst = 0x01; for (i = 0; i < QUEUE_DEPTH; i++) FPT_BL_Card[p_card].discQ_Tbl[i] = NULL; WR_HARPOON(port + hp_page_ctrl, (RD_HARPOON(port + hp_page_ctrl) & ~G_INT_DISABLE)); } /*--------------------------------------------------------------------- * * Function: FPT_ssenss * * Description: Setup for the Auto Sense command. * *---------------------------------------------------------------------*/ static void FPT_ssenss(struct sccb_card *pCurrCard) { unsigned char i; struct sccb *currSCCB; currSCCB = pCurrCard->currentSCCB; currSCCB->Save_CdbLen = currSCCB->CdbLength; for (i = 0; i < 6; i++) { currSCCB->Save_Cdb[i] = currSCCB->Cdb[i]; } currSCCB->CdbLength = SIX_BYTE_CMD; currSCCB->Cdb[0] = REQUEST_SENSE; currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */ currSCCB->Cdb[2] = 0x00; currSCCB->Cdb[3] = 0x00; currSCCB->Cdb[4] = currSCCB->RequestSenseLength; currSCCB->Cdb[5] = 0x00; currSCCB->Sccb_XferCnt = (u32)currSCCB->RequestSenseLength; currSCCB->Sccb_ATC = 0x00; currSCCB->Sccb_XferState |= F_AUTO_SENSE; currSCCB->Sccb_XferState &= ~F_SG_XFER; currSCCB->Sccb_idmsg = currSCCB->Sccb_idmsg & ~(unsigned char)DISC_PRIV; currSCCB->ControlByte = 0x00; currSCCB->Sccb_MGRFlags &= F_STATUSLOADED; } /*--------------------------------------------------------------------- * * Function: FPT_sxfrp * * Description: Transfer data into the bit bucket until the device * decides to switch phase. * *---------------------------------------------------------------------*/ static void FPT_sxfrp(u32 p_port, unsigned char p_card) { unsigned char curr_phz; DISABLE_AUTO(p_port); if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) { FPT_hostDataXferAbort(p_port, p_card, FPT_BL_Card[p_card].currentSCCB); } /* If the Automation handled the end of the transfer then do not match the phase or we will get out of sync with the ISR. 
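   The BUS_FREE, XFER_CNT_0 and AUTO_INT bits checked below cover that
   case.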
*/ if (RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | XFER_CNT_0 | AUTO_INT)) return; WR_HARPOON(p_port + hp_xfercnt_0, 0x00); curr_phz = RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ; WRW_HARPOON((p_port + hp_intstat), XFER_CNT_0); WR_HARPOON(p_port + hp_scsisig, curr_phz); while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET)) && (curr_phz == (RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ))) { if (curr_phz & (unsigned char)SCSI_IOBIT) { WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT | HOST_PORT | SCSI_INBIT)); if (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) { RD_HARPOON(p_port + hp_fifodata_0); } } else { WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT | HOST_PORT | HOST_WRT)); if (RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY) { WR_HARPOON(p_port + hp_fifodata_0, 0xFA); } } } /* End of While loop for padding data I/O phase */ while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) { if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ) break; } WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT | HOST_PORT | SCSI_INBIT)); while (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) { RD_HARPOON(p_port + hp_fifodata_0); } if (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) { WR_HARPOON(p_port + hp_autostart_0, (AUTO_IMMED + DISCONNECT_START)); while (!(RDW_HARPOON((p_port + hp_intstat)) & AUTO_INT)) { } if (RDW_HARPOON((p_port + hp_intstat)) & (ICMD_COMP | ITAR_DISC)) while (! (RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RSEL))) ; } } /*--------------------------------------------------------------------- * * Function: FPT_schkdd * * Description: Make sure data has been flushed from both FIFOs and abort * the operations if necessary. * *---------------------------------------------------------------------*/ static void FPT_schkdd(u32 port, unsigned char p_card) { unsigned short TimeOutLoop; unsigned char sPhase; struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; if ((currSCCB->Sccb_scsistat != DATA_OUT_ST) && (currSCCB->Sccb_scsistat != DATA_IN_ST)) { return; } if (currSCCB->Sccb_XferState & F_ODD_BALL_CNT) { currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - 1); currSCCB->Sccb_XferCnt = 1; currSCCB->Sccb_XferState &= ~F_ODD_BALL_CNT; WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00); WR_HARPOON(port + hp_xferstat, 0x00); } else { currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt; currSCCB->Sccb_XferCnt = 0; } if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && (currSCCB->HostStatus == SCCB_COMPLETE)) { currSCCB->HostStatus = SCCB_PARITY_ERR; WRW_HARPOON((port + hp_intstat), PARITY); } FPT_hostDataXferAbort(port, p_card, currSCCB); while (RD_HARPOON(port + hp_scsisig) & SCSI_ACK) { } TimeOutLoop = 0; while (RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY) { if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) { return; } if (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) { break; } if (RDW_HARPOON((port + hp_intstat)) & RESET) { return; } if ((RD_HARPOON(port + hp_scsisig) & SCSI_REQ) || (TimeOutLoop++ > 0x3000)) break; } sPhase = RD_HARPOON(port + hp_scsisig) & (SCSI_BSY | S_SCSI_PHZ); if ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) || (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) || (sPhase == (SCSI_BSY | S_DATAO_PH)) || (sPhase == (SCSI_BSY | S_DATAI_PH))) { WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); if (!(currSCCB->Sccb_XferState & F_ALL_XFERRED)) { if (currSCCB->Sccb_XferState & F_HOST_XFER_DIR) { FPT_phaseDataIn(port, p_card); } else { FPT_phaseDataOut(port, p_card); } } else { FPT_sxfrp(port, 
p_card); if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | ICMD_COMP | ITAR_DISC | RESET))) { WRW_HARPOON((port + hp_intstat), AUTO_INT); FPT_phaseDecode(port, p_card); } } } else { WR_HARPOON(port + hp_portctrl_0, 0x00); } } /*--------------------------------------------------------------------- * * Function: FPT_sinits * * Description: Setup SCCB manager fields in this SCCB. * *---------------------------------------------------------------------*/ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card) { struct sccb_mgr_tar_info *currTar_Info; if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) { return; } currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; p_sccb->Sccb_XferState = 0x00; p_sccb->Sccb_XferCnt = p_sccb->DataLength; if ((p_sccb->OperationCode == SCATTER_GATHER_COMMAND) || (p_sccb->OperationCode == RESIDUAL_SG_COMMAND)) { p_sccb->Sccb_SGoffset = 0; p_sccb->Sccb_XferState = F_SG_XFER; p_sccb->Sccb_XferCnt = 0x00; } if (p_sccb->DataLength == 0x00) p_sccb->Sccb_XferState |= F_ALL_XFERRED; if (p_sccb->ControlByte & F_USE_CMD_Q) { if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT) p_sccb->ControlByte &= ~F_USE_CMD_Q; else currTar_Info->TarStatus |= TAG_Q_TRYING; } /* For !single SCSI device in system & device allow Disconnect or command is tag_q type then send Cmd with Disconnect Enable else send Cmd with Disconnect Disable */ /* if (((!(FPT_BL_Card[p_card].globalFlags & F_SINGLE_DEVICE)) && (currTar_Info->TarStatus & TAR_ALLOW_DISC)) || (currTar_Info->TarStatus & TAG_Q_TRYING)) { */ if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) || (currTar_Info->TarStatus & TAG_Q_TRYING)) { p_sccb->Sccb_idmsg = IDENTIFY(true, p_sccb->Lun); } else { p_sccb->Sccb_idmsg = IDENTIFY(false, p_sccb->Lun); } p_sccb->HostStatus = 0x00; p_sccb->TargetStatus = 0x00; p_sccb->Sccb_tag = 0x00; p_sccb->Sccb_MGRFlags = 0x00; p_sccb->Sccb_sgseg = 0x00; p_sccb->Sccb_ATC = 0x00; p_sccb->Sccb_savedATC = 0x00; /* p_sccb->SccbVirtDataPtr = 0x00; p_sccb->Sccb_forwardlink = NULL; p_sccb->Sccb_backlink = NULL; */ p_sccb->Sccb_scsistat = BUS_FREE_ST; p_sccb->SccbStatus = SCCB_IN_PROCESS; p_sccb->Sccb_scsimsg = NOP; } /*--------------------------------------------------------------------- * * Function: Phase Decode * * Description: Determine the phase and call the appropriate function. * *---------------------------------------------------------------------*/ static void FPT_phaseDecode(u32 p_port, unsigned char p_card) { unsigned char phase_ref; void (*phase) (u32, unsigned char); DISABLE_AUTO(p_port); phase_ref = (unsigned char)(RD_HARPOON(p_port + hp_scsisig) & S_SCSI_PHZ); phase = FPT_s_PhaseTbl[phase_ref]; (*phase) (p_port, p_card); /* Call the correct phase func */ } /*--------------------------------------------------------------------- * * Function: Data Out Phase * * Description: Start up both the BusMaster and Xbow. 
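 *              A zero transfer count on a command that expected to move
 *              data is reported as SCCB_DATA_OVER_RUN, and the leftover
 *              transfer is padded/drained by FPT_sxfrp() until the target
 *              changes phase.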
* *---------------------------------------------------------------------*/ static void FPT_phaseDataOut(u32 port, unsigned char p_card) { struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (currSCCB == NULL) { return; /* Exit if No SCCB record */ } currSCCB->Sccb_scsistat = DATA_OUT_ST; currSCCB->Sccb_XferState &= ~(F_HOST_XFER_DIR | F_NO_DATA_YET); WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); WRW_HARPOON((port + hp_intstat), XFER_CNT_0); WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START)); FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]); if (currSCCB->Sccb_XferCnt == 0) { if ((currSCCB->ControlByte & SCCB_DATA_XFER_OUT) && (currSCCB->HostStatus == SCCB_COMPLETE)) currSCCB->HostStatus = SCCB_DATA_OVER_RUN; FPT_sxfrp(port, p_card); if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET))) FPT_phaseDecode(port, p_card); } } /*--------------------------------------------------------------------- * * Function: Data In Phase * * Description: Startup the BusMaster and the XBOW. * *---------------------------------------------------------------------*/ static void FPT_phaseDataIn(u32 port, unsigned char p_card) { struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (currSCCB == NULL) { return; /* Exit if No SCCB record */ } currSCCB->Sccb_scsistat = DATA_IN_ST; currSCCB->Sccb_XferState |= F_HOST_XFER_DIR; currSCCB->Sccb_XferState &= ~F_NO_DATA_YET; WR_HARPOON(port + hp_portctrl_0, SCSI_PORT); WRW_HARPOON((port + hp_intstat), XFER_CNT_0); WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START)); FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]); if (currSCCB->Sccb_XferCnt == 0) { if ((currSCCB->ControlByte & SCCB_DATA_XFER_IN) && (currSCCB->HostStatus == SCCB_COMPLETE)) currSCCB->HostStatus = SCCB_DATA_OVER_RUN; FPT_sxfrp(port, p_card); if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET))) FPT_phaseDecode(port, p_card); } } /*--------------------------------------------------------------------- * * Function: Command Phase * * Description: Load the CDB into the automation and start it up. * *---------------------------------------------------------------------*/ static void FPT_phaseCommand(u32 p_port, unsigned char p_card) { struct sccb *currSCCB; u32 cdb_reg; unsigned char i; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (currSCCB->OperationCode == RESET_COMMAND) { currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; currSCCB->CdbLength = SIX_BYTE_CMD; } WR_HARPOON(p_port + hp_scsisig, 0x00); ARAM_ACCESS(p_port); cdb_reg = p_port + CMD_STRT; for (i = 0; i < currSCCB->CdbLength; i++) { if (currSCCB->OperationCode == RESET_COMMAND) WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + 0x00)); else WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + currSCCB->Cdb[i])); cdb_reg += 2; } if (currSCCB->CdbLength != TWELVE_BYTE_CMD) WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP)); WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT)); currSCCB->Sccb_scsistat = COMMAND_ST; WR_HARPOON(p_port + hp_autostart_3, (AUTO_IMMED | CMD_ONLY_STRT)); SGRAM_ACCESS(p_port); } /*--------------------------------------------------------------------- * * Function: Status phase * * Description: Bring in the status and command complete message bytes * *---------------------------------------------------------------------*/ static void FPT_phaseStatus(u32 port, unsigned char p_card) { /* Start-up the automation to finish off this command and let the isr handle the interrupt for command complete when it comes in. We could wait here for the interrupt to be generated? 
 */
	WR_HARPOON(port + hp_scsisig, 0x00);
	WR_HARPOON(port + hp_autostart_0, (AUTO_IMMED + END_DATA_START));
}

/*---------------------------------------------------------------------
 *
 * Function: Phase Message Out
 *
 * Description: Send out our message (if we have one) and handle whatever
 *              else is involved.
 *
 *---------------------------------------------------------------------*/
static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
{
	unsigned char message, scsiID;
	struct sccb *currSCCB;
	struct sccb_mgr_tar_info *currTar_Info;

	currSCCB = FPT_BL_Card[p_card].currentSCCB;

	if (currSCCB != NULL) {

		message = currSCCB->Sccb_scsimsg;
		scsiID = currSCCB->TargID;

		if (message == TARGET_RESET) {
			currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
			currTar_Info->TarSyncCtrl = 0;
			FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info);

			if (FPT_sccbMgrTbl[p_card][scsiID].
			    TarEEValue & EE_SYNC_MASK) {

				FPT_sccbMgrTbl[p_card][scsiID].TarStatus &=
				    ~TAR_SYNC_MASK;
			}

			if (FPT_sccbMgrTbl[p_card][scsiID].
			    TarEEValue & EE_WIDE_SCSI) {

				FPT_sccbMgrTbl[p_card][scsiID].TarStatus &=
				    ~TAR_WIDE_MASK;
			}

			FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
			FPT_SccbMgrTableInitTarget(p_card, scsiID);
		} else if (currSCCB->Sccb_scsistat == ABORT_ST) {
			currSCCB->HostStatus = SCCB_COMPLETE;
			if (FPT_BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] !=
			    NULL) {
				FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
							      Sccb_tag] = NULL;
				FPT_sccbMgrTbl[p_card][scsiID].TarTagQ_Cnt--;
			}

		}

		else if (currSCCB->Sccb_scsistat < COMMAND_ST) {

			if (message == NOP) {
				currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;

				FPT_ssel(port, p_card);
				return;
			}
		} else {

			if (message == ABORT_TASK_SET)

				FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
		}

	} else {
		message = ABORT_TASK_SET;
	}

	WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));

	WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN);

	WR_HARPOON(port + hp_scsidata_0, message);

	WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));

	ACCEPT_MSG(port);

	WR_HARPOON(port + hp_portctrl_0, 0x00);

	if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
	    (message == ABORT_TASK)) {

		while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
		}

		if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
			WRW_HARPOON((port + hp_intstat), BUS_FREE);

			if (currSCCB != NULL) {

				if ((FPT_BL_Card[p_card].
				     globalFlags & F_CONLUN_IO) &&
				    ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
				      TarStatus & TAR_TAG_Q_MASK) !=
				     TAG_Q_TRYING))
					FPT_sccbMgrTbl[p_card][currSCCB->
							       TargID].
					    TarLUNBusy[currSCCB->Lun] = 0;
				else
					FPT_sccbMgrTbl[p_card][currSCCB->
							       TargID].
					    TarLUNBusy[0] = 0;

				FPT_queueCmdComplete(&FPT_BL_Card[p_card],
						     currSCCB, p_card);
			}

			else {
				FPT_BL_Card[p_card].globalFlags |=
				    F_NEW_SCCB_CMD;
			}
		}

		else {

			FPT_sxfrp(port, p_card);
		}
	}

	else {

		if (message == MSG_PARITY_ERROR) {
			currSCCB->Sccb_scsimsg = NOP;
			WR_HARPOON(port + hp_autostart_1,
				   (AUTO_IMMED + DISCONNECT_START));
		} else {

			FPT_sxfrp(port, p_card);
		}
	}
}

/*---------------------------------------------------------------------
 *
 * Function: Message In phase
 *
 * Description: Bring in the message and determine what to do with it.
* *---------------------------------------------------------------------*/ static void FPT_phaseMsgIn(u32 port, unsigned char p_card) { unsigned char message; struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) { FPT_phaseChkFifo(port, p_card); } message = RD_HARPOON(port + hp_scsidata_0); if ((message == DISCONNECT) || (message == SAVE_POINTERS)) { WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + END_DATA_START)); } else { message = FPT_sfm(port, currSCCB); if (message) { FPT_sdecm(message, port, p_card); } else { if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR) ACCEPT_MSG(port); WR_HARPOON(port + hp_autostart_1, (AUTO_IMMED + DISCONNECT_START)); } } } /*--------------------------------------------------------------------- * * Function: Illegal phase * * Description: Target switched to some illegal phase, so all we can do * is report an error back to the host (if that is possible) * and send an ABORT message to the misbehaving target. * *---------------------------------------------------------------------*/ static void FPT_phaseIllegal(u32 port, unsigned char p_card) { struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; WR_HARPOON(port + hp_scsisig, RD_HARPOON(port + hp_scsisig)); if (currSCCB != NULL) { currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; currSCCB->Sccb_scsistat = ABORT_ST; currSCCB->Sccb_scsimsg = ABORT_TASK_SET; } ACCEPT_MSG_ATN(port); } /*--------------------------------------------------------------------- * * Function: Phase Check FIFO * * Description: Make sure data has been flushed from both FIFOs and abort * the operations if necessary. * *---------------------------------------------------------------------*/ static void FPT_phaseChkFifo(u32 port, unsigned char p_card) { u32 xfercnt; struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (currSCCB->Sccb_scsistat == DATA_IN_ST) { while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) && (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)) { } if (!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) { currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt; currSCCB->Sccb_XferCnt = 0; if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && (currSCCB->HostStatus == SCCB_COMPLETE)) { currSCCB->HostStatus = SCCB_PARITY_ERR; WRW_HARPOON((port + hp_intstat), PARITY); } FPT_hostDataXferAbort(port, p_card, currSCCB); FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]); while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) && (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)) { } } } /*End Data In specific code. */ GET_XFER_CNT(port, xfercnt); WR_HARPOON(port + hp_xfercnt_0, 0x00); WR_HARPOON(port + hp_portctrl_0, 0x00); currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - xfercnt); currSCCB->Sccb_XferCnt = xfercnt; if ((RDW_HARPOON((port + hp_intstat)) & PARITY) && (currSCCB->HostStatus == SCCB_COMPLETE)) { currSCCB->HostStatus = SCCB_PARITY_ERR; WRW_HARPOON((port + hp_intstat), PARITY); } FPT_hostDataXferAbort(port, p_card, currSCCB); WR_HARPOON(port + hp_fifowrite, 0x00); WR_HARPOON(port + hp_fiforead, 0x00); WR_HARPOON(port + hp_xferstat, 0x00); WRW_HARPOON((port + hp_intstat), XFER_CNT_0); } /*--------------------------------------------------------------------- * * Function: Phase Bus Free * * Description: We just went bus free so figure out if it was * because of command complete or from a disconnect. 
* *---------------------------------------------------------------------*/ static void FPT_phaseBusFree(u32 port, unsigned char p_card) { struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (currSCCB != NULL) { DISABLE_AUTO(port); if (currSCCB->OperationCode == RESET_COMMAND) { if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[currSCCB->Lun] = 0; else FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[0] = 0; FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, p_card); FPT_queueSearchSelect(&FPT_BL_Card[p_card], p_card); } else if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |= (unsigned char)SYNC_SUPPORTED; FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_SYNC_MASK; } else if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus = (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED; FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_WIDE_SCSI; } else if (currSCCB->Sccb_scsistat == SELECT_Q_ST) { /* Make sure this is not a phony BUS_FREE. If we were reselected or if BUSY is NOT on then this is a valid BUS FREE. SRR Wednesday, 5/10/1995. */ if ((!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) || (RDW_HARPOON((port + hp_intstat)) & RSEL)) { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus &= ~TAR_TAG_Q_MASK; FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus |= TAG_Q_REJECT; } else { return; } } else { currSCCB->Sccb_scsistat = BUS_FREE_ST; if (!currSCCB->HostStatus) { currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL; } if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[currSCCB->Lun] = 0; else FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[0] = 0; FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, p_card); return; } FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; } /*end if !=null */ } /*--------------------------------------------------------------------- * * Function: Auto Load Default Map * * Description: Load the Automation RAM with the default map values. 
* *---------------------------------------------------------------------*/ static void FPT_autoLoadDefaultMap(u32 p_port) { u32 map_addr; ARAM_ACCESS(p_port); map_addr = p_port + hp_aramBase; WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0xC0)); /*ID MESSAGE */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x20)); /*SIMPLE TAG QUEUEING MSG */ map_addr += 2; WRW_HARPOON(map_addr, RAT_OP); /*RESET ATTENTION */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x00)); /*TAG ID MSG */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 0 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 1 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 2 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 3 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 4 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 5 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 6 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 7 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 8 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 9 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 10 */ map_addr += 2; WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 11 */ map_addr += 2; WRW_HARPOON(map_addr, (CPE_OP + ADATA_OUT + DINT)); /*JUMP IF DATA OUT */ map_addr += 2; WRW_HARPOON(map_addr, (TCB_OP + FIFO_0 + DI)); /*JUMP IF NO DATA IN FIFO */ map_addr += 2; /*This means AYNC DATA IN */ WRW_HARPOON(map_addr, (SSI_OP + SSI_IDO_STRT)); /*STOP AND INTERRUPT */ map_addr += 2; WRW_HARPOON(map_addr, (CPE_OP + ADATA_IN + DINT)); /*JUMP IF NOT DATA IN PHZ */ map_addr += 2; WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK 4 DATA IN */ map_addr += 2; WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x02)); /*SAVE DATA PTR MSG? */ map_addr += 2; WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + DC)); /*GO CHECK FOR DISCONNECT MSG */ map_addr += 2; WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR1)); /*SAVE DATA PTRS MSG */ map_addr += 2; WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK DATA IN */ map_addr += 2; WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x04)); /*DISCONNECT MSG? */ map_addr += 2; WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + UNKNWN)); /*UKNKNOWN MSG */ map_addr += 2; WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*XFER DISCONNECT MSG */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_ITAR_DISC)); /*STOP AND INTERRUPT */ map_addr += 2; WRW_HARPOON(map_addr, (CPN_OP + ASTATUS + UNKNWN)); /*JUMP IF NOT STATUS PHZ. */ map_addr += 2; WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR0)); /*GET STATUS BYTE */ map_addr += 2; WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + CC)); /*ERROR IF NOT MSG IN PHZ */ map_addr += 2; WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x00)); /*CHECK FOR CMD COMPLETE MSG. */ map_addr += 2; WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + CC)); /*ERROR IF NOT CMD COMPLETE MSG. 
*/ map_addr += 2; WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*GET CMD COMPLETE MSG */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_ICMD_COMP)); /*END OF COMMAND */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_IUNKWN)); /*RECEIVED UNKNOWN MSG BYTE */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_ITICKLE)); /*BIOS Tickled the Mgr */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_IRFAIL)); /*EXPECTED ID/TAG MESSAGES AND */ map_addr += 2; /* DIDN'T GET ONE */ WRW_HARPOON(map_addr, (CRR_OP + AR3 + S_IDREG)); /* comp SCSI SEL ID & AR3 */ map_addr += 2; WRW_HARPOON(map_addr, (BRH_OP + EQUAL + 0x00)); /*SEL ID OK then Conti. */ map_addr += 2; WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */ SGRAM_ACCESS(p_port); } /*--------------------------------------------------------------------- * * Function: Auto Command Complete * * Description: Post command back to host and find another command * to execute. * *---------------------------------------------------------------------*/ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card) { struct sccb *currSCCB; unsigned char status_byte; currSCCB = FPT_BL_Card[p_card].currentSCCB; status_byte = RD_HARPOON(p_port + hp_gp_reg_0); FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0; if (status_byte != SAM_STAT_GOOD) { if (status_byte == SAM_STAT_TASK_SET_FULL) { if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[currSCCB->Lun] = 1; if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card].discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. LunDiscQ_Idx[currSCCB->Lun]] = NULL; } else { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[0] = 1; if (currSCCB->Sccb_tag) { if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> Sccb_tag] = NULL; } else { if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. LunDiscQ_Idx[0]] = NULL; } } currSCCB->Sccb_MGRFlags |= F_STATUSLOADED; FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card); return; } if (currSCCB->Sccb_scsistat == SELECT_SN_ST) { FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |= (unsigned char)SYNC_SUPPORTED; FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_SYNC_MASK; FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[currSCCB->Lun] = 1; if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card].discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. LunDiscQ_Idx[currSCCB->Lun]] = NULL; } else { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[0] = 1; if (currSCCB->Sccb_tag) { if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> Sccb_tag] = NULL; } else { if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. 
LunDiscQ_Idx[0]] = NULL; } } return; } if (currSCCB->Sccb_scsistat == SELECT_WN_ST) { FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus = (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED; FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_WIDE_SCSI; FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[currSCCB->Lun] = 1; if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card].discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. LunDiscQ_Idx[currSCCB->Lun]] = NULL; } else { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUNBusy[0] = 1; if (currSCCB->Sccb_tag) { if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card].discQ_Tbl[currSCCB-> Sccb_tag] = NULL; } else { if (FPT_BL_Card[p_card].discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. LunDiscQ_Idx[0]] = NULL; } } return; } if (status_byte == SAM_STAT_CHECK_CONDITION) { if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) { if (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarEEValue & EE_SYNC_MASK) { FPT_sccbMgrTbl[p_card][currSCCB-> TargID]. TarStatus &= ~TAR_SYNC_MASK; } if (FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarEEValue & EE_WIDE_SCSI) { FPT_sccbMgrTbl[p_card][currSCCB-> TargID]. TarStatus &= ~TAR_WIDE_MASK; } } } if (!(currSCCB->Sccb_XferState & F_AUTO_SENSE)) { currSCCB->SccbStatus = SCCB_ERROR; currSCCB->TargetStatus = status_byte; if (status_byte == SAM_STAT_CHECK_CONDITION) { FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarLUN_CA = 1; if (currSCCB->RequestSenseLength != NO_AUTO_REQUEST_SENSE) { if (currSCCB->RequestSenseLength == 0) currSCCB->RequestSenseLength = 14; FPT_ssenss(&FPT_BL_Card[p_card]); FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD; if (((FPT_BL_Card[p_card]. globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. TarLUNBusy[currSCCB->Lun] = 1; if (FPT_BL_Card[p_card]. discQCount != 0) FPT_BL_Card[p_card]. discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[FPT_sccbMgrTbl [p_card] [currSCCB-> TargID]. LunDiscQ_Idx [currSCCB->Lun]] = NULL; } else { FPT_sccbMgrTbl[p_card] [currSCCB->TargID]. TarLUNBusy[0] = 1; if (currSCCB->Sccb_tag) { if (FPT_BL_Card[p_card]. discQCount != 0) FPT_BL_Card [p_card]. discQCount--; FPT_BL_Card[p_card]. discQ_Tbl[currSCCB-> Sccb_tag] = NULL; } else { if (FPT_BL_Card[p_card]. discQCount != 0) FPT_BL_Card [p_card]. discQCount--; FPT_BL_Card[p_card]. discQ_Tbl [FPT_sccbMgrTbl [p_card][currSCCB-> TargID]. LunDiscQ_Idx[0]] = NULL; } } return; } } } } if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[p_card][currSCCB->TargID]. TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB-> Lun] = 0; else FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = 0; FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, p_card); } #define SHORT_WAIT 0x0000000F #define LONG_WAIT 0x0000FFFFL /*--------------------------------------------------------------------- * * Function: Data Transfer Processor * * Description: This routine performs two tasks. * (1) Start data transfer by calling HOST_DATA_XFER_START * function. 
Once data transfer is started, (2) Depends * on the type of data transfer mode Scatter/Gather mode * or NON Scatter/Gather mode. In NON Scatter/Gather mode, * this routine checks Sccb_MGRFlag (F_HOST_XFER_ACT bit) for * data transfer done. In Scatter/Gather mode, this routine * checks bus master command complete and dual rank busy * bit to keep chaining SC transfer command. Similarly, * in Scatter/Gather mode, it checks Sccb_MGRFlag * (F_HOST_XFER_ACT bit) for data transfer done. * *---------------------------------------------------------------------*/ static void FPT_dataXferProcessor(u32 port, struct sccb_card *pCurrCard) { struct sccb *currSCCB; currSCCB = pCurrCard->currentSCCB; if (currSCCB->Sccb_XferState & F_SG_XFER) { if (pCurrCard->globalFlags & F_HOST_XFER_ACT) { currSCCB->Sccb_sgseg += (unsigned char)SG_BUF_CNT; currSCCB->Sccb_SGoffset = 0x00; } pCurrCard->globalFlags |= F_HOST_XFER_ACT; FPT_busMstrSGDataXferStart(port, currSCCB); } else { if (!(pCurrCard->globalFlags & F_HOST_XFER_ACT)) { pCurrCard->globalFlags |= F_HOST_XFER_ACT; FPT_busMstrDataXferStart(port, currSCCB); } } } /*--------------------------------------------------------------------- * * Function: BusMaster Scatter Gather Data Transfer Start * * Description: * *---------------------------------------------------------------------*/ static void FPT_busMstrSGDataXferStart(u32 p_port, struct sccb *pcurrSCCB) { u32 count, addr, tmpSGCnt; unsigned int sg_index; unsigned char sg_count, i; u32 reg_offset; struct blogic_sg_seg *segp; if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) count = ((u32)HOST_RD_CMD) << 24; else count = ((u32)HOST_WRT_CMD) << 24; sg_count = 0; tmpSGCnt = 0; sg_index = pcurrSCCB->Sccb_sgseg; reg_offset = hp_aramBase; i = (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) & ~(SGRAM_ARAM | SCATTER_EN)); WR_HARPOON(p_port + hp_page_ctrl, i); while ((sg_count < (unsigned char)SG_BUF_CNT) && ((sg_index * (unsigned int)SG_ELEMENT_SIZE) < pcurrSCCB->DataLength)) { segp = (struct blogic_sg_seg *)(pcurrSCCB->DataPointer) + sg_index; tmpSGCnt += segp->segbytes; count |= segp->segbytes; addr = segp->segdata; if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) { addr += ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset); count = (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset; tmpSGCnt = count & 0x00FFFFFFL; } WR_HARP32(p_port, reg_offset, addr); reg_offset += 4; WR_HARP32(p_port, reg_offset, count); reg_offset += 4; count &= 0xFF000000L; sg_index++; sg_count++; } /*End While */ pcurrSCCB->Sccb_XferCnt = tmpSGCnt; WR_HARPOON(p_port + hp_sg_addr, (sg_count << 4)); if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) { WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt); WR_HARPOON(p_port + hp_portctrl_0, (DMA_PORT | SCSI_PORT | SCSI_INBIT)); WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH); } else { if ((!(RD_HARPOON(p_port + hp_synctarg_0) & NARROW_SCSI)) && (tmpSGCnt & 0x000000001)) { pcurrSCCB->Sccb_XferState |= F_ODD_BALL_CNT; tmpSGCnt--; } WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt); WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT | DMA_PORT | DMA_RD)); WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH); } WR_HARPOON(p_port + hp_page_ctrl, (unsigned char)(i | SCATTER_EN)); } /*--------------------------------------------------------------------- * * Function: BusMaster Data Transfer Start * * Description: * *---------------------------------------------------------------------*/ static void FPT_busMstrDataXferStart(u32 p_port, struct sccb *pcurrSCCB) { u32 addr, count; if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) { count = 
pcurrSCCB->Sccb_XferCnt;

		addr = (u32)(unsigned long)pcurrSCCB->DataPointer +
		    pcurrSCCB->Sccb_ATC;
	}

	else {
		addr = pcurrSCCB->SensePointer;

		count = pcurrSCCB->RequestSenseLength;
	}

	HP_SETUP_ADDR_CNT(p_port, addr, count);

	if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {

		WR_HARPOON(p_port + hp_portctrl_0,
			   (DMA_PORT | SCSI_PORT | SCSI_INBIT));
		WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH);

		WR_HARPOON(p_port + hp_xfer_cmd,
			   (XFER_DMA_HOST | XFER_HOST_AUTO | XFER_DMA_8BIT));
	}

	else {

		WR_HARPOON(p_port + hp_portctrl_0,
			   (SCSI_PORT | DMA_PORT | DMA_RD));
		WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH);

		WR_HARPOON(p_port + hp_xfer_cmd,
			   (XFER_HOST_DMA | XFER_HOST_AUTO | XFER_DMA_8BIT));
	}
}

/*---------------------------------------------------------------------
 *
 * Function: BusMaster Timeout Handler
 *
 * Description: This function is called after a bus master command busy
 *              timeout is detected. It first halts the state machine and
 *              polls, with a software timeout, for command busy to clear.
 *              If command busy is still asserted at the end of that
 *              timeout, it issues a hard abort with another software
 *              timeout. If command busy is still asserted after the hard
 *              abort times out as well, it gives up.
 *
 *---------------------------------------------------------------------*/
static unsigned char FPT_busMstrTimeOut(u32 p_port)
{
	unsigned long timeout;

	timeout = LONG_WAIT;

	WR_HARPOON(p_port + hp_sys_ctrl, HALT_MACH);

	while ((!(RD_HARPOON(p_port + hp_ext_status) & CMD_ABORTED))
	       && timeout--) {
	}

	if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) {
		WR_HARPOON(p_port + hp_sys_ctrl, HARD_ABORT);

		timeout = LONG_WAIT;
		while ((RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY)
		       && timeout--) {
		}
	}

	RD_HARPOON(p_port + hp_int_status);	/*Clear command complete */

	if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) {
		return 1;
	}

	else {
		return 0;
	}
}

/*---------------------------------------------------------------------
 *
 * Function: Host Data Transfer Abort
 *
 * Description: Abort any in-progress transfer.
* *---------------------------------------------------------------------*/ static void FPT_hostDataXferAbort(u32 port, unsigned char p_card, struct sccb *pCurrSCCB) { unsigned long timeout; unsigned long remain_cnt; u32 sg_ptr; struct blogic_sg_seg *segp; FPT_BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT; if (pCurrSCCB->Sccb_XferState & F_AUTO_SENSE) { if (!(RD_HARPOON(port + hp_int_status) & INT_CMD_COMPL)) { WR_HARPOON(port + hp_bm_ctrl, (RD_HARPOON(port + hp_bm_ctrl) | FLUSH_XFER_CNTR)); timeout = LONG_WAIT; while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) && timeout--) { } WR_HARPOON(port + hp_bm_ctrl, (RD_HARPOON(port + hp_bm_ctrl) & ~FLUSH_XFER_CNTR)); if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { if (FPT_busMstrTimeOut(port)) { if (pCurrSCCB->HostStatus == 0x00) pCurrSCCB->HostStatus = SCCB_BM_ERR; } if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_BM_ERR; } } } } else if (pCurrSCCB->Sccb_XferCnt) { if (pCurrSCCB->Sccb_XferState & F_SG_XFER) { WR_HARPOON(port + hp_page_ctrl, (RD_HARPOON(port + hp_page_ctrl) & ~SCATTER_EN)); WR_HARPOON(port + hp_sg_addr, 0x00); sg_ptr = pCurrSCCB->Sccb_sgseg + SG_BUF_CNT; if (sg_ptr > (unsigned int)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE)) { sg_ptr = (u32)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE); } remain_cnt = pCurrSCCB->Sccb_XferCnt; while (remain_cnt < 0x01000000L) { sg_ptr--; segp = (struct blogic_sg_seg *)(pCurrSCCB-> DataPointer) + (sg_ptr * 2); if (remain_cnt > (unsigned long)segp->segbytes) remain_cnt -= (unsigned long)segp->segbytes; else break; } if (remain_cnt < 0x01000000L) { pCurrSCCB->Sccb_SGoffset = remain_cnt; pCurrSCCB->Sccb_sgseg = (unsigned short)sg_ptr; if ((unsigned long)(sg_ptr * SG_ELEMENT_SIZE) == pCurrSCCB->DataLength && (remain_cnt == 0)) pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; } else { if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_GROSS_FW_ERR; } } } if (!(pCurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)) { if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { FPT_busMstrTimeOut(port); } else { if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) { if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) { if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_BM_ERR; } } } } } else { if ((RD_HARPOON(port + hp_fifo_cnt)) >= BM_THRESHOLD) { timeout = SHORT_WAIT; while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) && ((RD_HARPOON(port + hp_fifo_cnt)) >= BM_THRESHOLD) && timeout--) { } } if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { WR_HARPOON(port + hp_bm_ctrl, (RD_HARPOON(port + hp_bm_ctrl) | FLUSH_XFER_CNTR)); timeout = LONG_WAIT; while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) && timeout--) { } WR_HARPOON(port + hp_bm_ctrl, (RD_HARPOON(port + hp_bm_ctrl) & ~FLUSH_XFER_CNTR)); if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_BM_ERR; } FPT_busMstrTimeOut(port); } } if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) { if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) { if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_BM_ERR; } } } } } else { if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { timeout = LONG_WAIT; while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) && timeout--) { } if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) { if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_BM_ERR; } 
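				/*
				 * Descriptive note (comment added here, not in
				 * the original source): the bus master engine
				 * is still busy after the polling loop above,
				 * so a bus master error has just been recorded
				 * in HostStatus (if it was still clear) and
				 * the halt/hard-abort sequence below is used
				 * to force the engine to stop.
				 */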
FPT_busMstrTimeOut(port); } } if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) { if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) { if (pCurrSCCB->HostStatus == 0x00) { pCurrSCCB->HostStatus = SCCB_BM_ERR; } } } if (pCurrSCCB->Sccb_XferState & F_SG_XFER) { WR_HARPOON(port + hp_page_ctrl, (RD_HARPOON(port + hp_page_ctrl) & ~SCATTER_EN)); WR_HARPOON(port + hp_sg_addr, 0x00); pCurrSCCB->Sccb_sgseg += SG_BUF_CNT; pCurrSCCB->Sccb_SGoffset = 0x00; if ((u32)(pCurrSCCB->Sccb_sgseg * SG_ELEMENT_SIZE) >= pCurrSCCB->DataLength) { pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; pCurrSCCB->Sccb_sgseg = (unsigned short)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE); } } else { if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE)) pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; } } WR_HARPOON(port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT)); } /*--------------------------------------------------------------------- * * Function: Host Data Transfer Restart * * Description: Reset the available count due to a restore data * pointers message. * *---------------------------------------------------------------------*/ static void FPT_hostDataXferRestart(struct sccb *currSCCB) { unsigned long data_count; unsigned int sg_index; struct blogic_sg_seg *segp; if (currSCCB->Sccb_XferState & F_SG_XFER) { currSCCB->Sccb_XferCnt = 0; sg_index = 0xffff; /*Index by long words into sg list. */ data_count = 0; /*Running count of SG xfer counts. */ while (data_count < currSCCB->Sccb_ATC) { sg_index++; segp = (struct blogic_sg_seg *)(currSCCB->DataPointer) + (sg_index * 2); data_count += segp->segbytes; } if (data_count == currSCCB->Sccb_ATC) { currSCCB->Sccb_SGoffset = 0; sg_index++; } else { currSCCB->Sccb_SGoffset = data_count - currSCCB->Sccb_ATC; } currSCCB->Sccb_sgseg = (unsigned short)sg_index; } else { currSCCB->Sccb_XferCnt = currSCCB->DataLength - currSCCB->Sccb_ATC; } } /*--------------------------------------------------------------------- * * Function: FPT_scini * * Description: Setup all data structures necessary for SCAM selection. * *---------------------------------------------------------------------*/ static void FPT_scini(unsigned char p_card, unsigned char p_our_id, unsigned char p_power_up) { unsigned char loser, assigned_id; u32 p_port; unsigned char i, k, ScamFlg; struct sccb_card *currCard; struct nvram_info *pCurrNvRam; currCard = &FPT_BL_Card[p_card]; p_port = currCard->ioPort; pCurrNvRam = currCard->pNvRamInfo; if (pCurrNvRam) { ScamFlg = pCurrNvRam->niScamConf; i = pCurrNvRam->niSysConf; } else { ScamFlg = (unsigned char)FPT_utilEERead(p_port, SCAM_CONFIG / 2); i = (unsigned char)(FPT_utilEERead(p_port, (SYSTEM_CONFIG / 2))); } if (!(i & 0x02)) /* check if reset bus in AutoSCSI parameter set */ return; FPT_inisci(p_card, p_port, p_our_id); /* Force to wait 1 sec after SCSI bus reset. 
Some SCAM device FW too slow to return to SCAM selection */ /* if (p_power_up) FPT_Wait1Second(p_port); else FPT_Wait(p_port, TO_250ms); */ FPT_Wait1Second(p_port); if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2)) { while (!(FPT_scarb(p_port, INIT_SELTD))) { } FPT_scsel(p_port); do { FPT_scxferc(p_port, SYNC_PTRN); FPT_scxferc(p_port, DOM_MSTR); loser = FPT_scsendi(p_port, &FPT_scamInfo[p_our_id].id_string[0]); } while (loser == 0xFF); FPT_scbusf(p_port); if ((p_power_up) && (!loser)) { FPT_sresb(p_port, p_card); FPT_Wait(p_port, TO_250ms); while (!(FPT_scarb(p_port, INIT_SELTD))) { } FPT_scsel(p_port); do { FPT_scxferc(p_port, SYNC_PTRN); FPT_scxferc(p_port, DOM_MSTR); loser = FPT_scsendi(p_port, &FPT_scamInfo[p_our_id]. id_string[0]); } while (loser == 0xFF); FPT_scbusf(p_port); } } else { loser = 0; } if (!loser) { FPT_scamInfo[p_our_id].state = ID_ASSIGNED; if (ScamFlg & SCAM_ENABLED) { for (i = 0; i < MAX_SCSI_TAR; i++) { if ((FPT_scamInfo[i].state == ID_UNASSIGNED) || (FPT_scamInfo[i].state == ID_UNUSED)) { if (FPT_scsell(p_port, i)) { FPT_scamInfo[i].state = LEGACY; if ((FPT_scamInfo[i]. id_string[0] != 0xFF) || (FPT_scamInfo[i]. id_string[1] != 0xFA)) { FPT_scamInfo[i]. id_string[0] = 0xFF; FPT_scamInfo[i]. id_string[1] = 0xFA; if (pCurrNvRam == NULL) currCard-> globalFlags |= F_UPDATE_EEPROM; } } } } FPT_sresb(p_port, p_card); FPT_Wait1Second(p_port); while (!(FPT_scarb(p_port, INIT_SELTD))) { } FPT_scsel(p_port); FPT_scasid(p_card, p_port); } } else if ((loser) && (ScamFlg & SCAM_ENABLED)) { FPT_scamInfo[p_our_id].id_string[0] = SLV_TYPE_CODE0; assigned_id = 0; FPT_scwtsel(p_port); do { while (FPT_scxferc(p_port, 0x00) != SYNC_PTRN) { } i = FPT_scxferc(p_port, 0x00); if (i == ASSIGN_ID) { if (! (FPT_scsendi (p_port, &FPT_scamInfo[p_our_id].id_string[0]))) { i = FPT_scxferc(p_port, 0x00); if (FPT_scvalq(i)) { k = FPT_scxferc(p_port, 0x00); if (FPT_scvalq(k)) { currCard->ourId = ((unsigned char)(i << 3) + (k & (unsigned char)7)) & (unsigned char) 0x3F; FPT_inisci(p_card, p_port, p_our_id); FPT_scamInfo[currCard-> ourId]. state = ID_ASSIGNED; FPT_scamInfo[currCard-> ourId]. id_string[0] = SLV_TYPE_CODE0; assigned_id = 1; } } } } else if (i == SET_P_FLAG) { if (!(FPT_scsendi(p_port, &FPT_scamInfo[p_our_id]. 
id_string[0]))) FPT_scamInfo[p_our_id].id_string[0] |= 0x80; } } while (!assigned_id); while (FPT_scxferc(p_port, 0x00) != CFG_CMPLT) { } } if (ScamFlg & SCAM_ENABLED) { FPT_scbusf(p_port); if (currCard->globalFlags & F_UPDATE_EEPROM) { FPT_scsavdi(p_card, p_port); currCard->globalFlags &= ~F_UPDATE_EEPROM; } } /* for (i=0,k=0; i < MAX_SCSI_TAR; i++) { if ((FPT_scamInfo[i].state == ID_ASSIGNED) || (FPT_scamInfo[i].state == LEGACY)) k++; } if (k==2) currCard->globalFlags |= F_SINGLE_DEVICE; else currCard->globalFlags &= ~F_SINGLE_DEVICE; */ } /*--------------------------------------------------------------------- * * Function: FPT_scarb * * Description: Gain control of the bus and wait SCAM select time (250ms) * *---------------------------------------------------------------------*/ static int FPT_scarb(u32 p_port, unsigned char p_sel_type) { if (p_sel_type == INIT_SELTD) { while (RD_HARPOON(p_port + hp_scsisig) & (SCSI_SEL | SCSI_BSY)) { } if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL) return 0; if (RD_HARPOON(p_port + hp_scsidata_0) != 00) return 0; WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) | SCSI_BSY)); if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL) { WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) & ~SCSI_BSY)); return 0; } WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) | SCSI_SEL)); if (RD_HARPOON(p_port + hp_scsidata_0) != 00) { WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) & ~(SCSI_BSY | SCSI_SEL))); return 0; } } WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0) & ~ACTdeassert)); WR_HARPOON(p_port + hp_scsireset, SCAM_EN); WR_HARPOON(p_port + hp_scsidata_0, 0x00); WR_HARPOON(p_port + hp_scsidata_1, 0x00); WR_HARPOON(p_port + hp_portctrl_0, SCSI_BUS_EN); WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) | SCSI_MSG)); WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig) & ~SCSI_BSY)); FPT_Wait(p_port, TO_250ms); return 1; } /*--------------------------------------------------------------------- * * Function: FPT_scbusf * * Description: Release the SCSI bus and disable SCAM selection. * *---------------------------------------------------------------------*/ static void FPT_scbusf(u32 p_port) { WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE)); WR_HARPOON(p_port + hp_scsidata_0, 0x00); WR_HARPOON(p_port + hp_portctrl_0, (RD_HARPOON(p_port + hp_portctrl_0) & ~SCSI_BUS_EN)); WR_HARPOON(p_port + hp_scsisig, 0x00); WR_HARPOON(p_port + hp_scsireset, (RD_HARPOON(p_port + hp_scsireset) & ~SCAM_EN)); WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0) | ACTdeassert)); WRW_HARPOON((p_port + hp_intstat), (BUS_FREE | AUTO_INT | SCAM_SEL)); WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) & ~G_INT_DISABLE)); } /*--------------------------------------------------------------------- * * Function: FPT_scasid * * Description: Assign an ID to all the SCAM devices. 
* *---------------------------------------------------------------------*/ static void FPT_scasid(unsigned char p_card, u32 p_port) { unsigned char temp_id_string[ID_STRING_LENGTH]; unsigned char i, k, scam_id; unsigned char crcBytes[3]; struct nvram_info *pCurrNvRam; unsigned short *pCrcBytes; pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo; i = 0; while (!i) { for (k = 0; k < ID_STRING_LENGTH; k++) { temp_id_string[k] = (unsigned char)0x00; } FPT_scxferc(p_port, SYNC_PTRN); FPT_scxferc(p_port, ASSIGN_ID); if (!(FPT_sciso(p_port, &temp_id_string[0]))) { if (pCurrNvRam) { pCrcBytes = (unsigned short *)&crcBytes[0]; *pCrcBytes = FPT_CalcCrc16(&temp_id_string[0]); crcBytes[2] = FPT_CalcLrc(&temp_id_string[0]); temp_id_string[1] = crcBytes[2]; temp_id_string[2] = crcBytes[0]; temp_id_string[3] = crcBytes[1]; for (k = 4; k < ID_STRING_LENGTH; k++) temp_id_string[k] = (unsigned char)0x00; } i = FPT_scmachid(p_card, temp_id_string); if (i == CLR_PRIORITY) { FPT_scxferc(p_port, MISC_CODE); FPT_scxferc(p_port, CLR_P_FLAG); i = 0; /*Not the last ID yet. */ } else if (i != NO_ID_AVAIL) { if (i < 8) FPT_scxferc(p_port, ID_0_7); else FPT_scxferc(p_port, ID_8_F); scam_id = (i & (unsigned char)0x07); for (k = 1; k < 0x08; k <<= 1) if (!(k & i)) scam_id += 0x08; /*Count number of zeros in DB0-3. */ FPT_scxferc(p_port, scam_id); i = 0; /*Not the last ID yet. */ } } else { i = 1; } } /*End while */ FPT_scxferc(p_port, SYNC_PTRN); FPT_scxferc(p_port, CFG_CMPLT); } /*--------------------------------------------------------------------- * * Function: FPT_scsel * * Description: Select all the SCAM devices. * *---------------------------------------------------------------------*/ static void FPT_scsel(u32 p_port) { WR_HARPOON(p_port + hp_scsisig, SCSI_SEL); FPT_scwiros(p_port, SCSI_MSG); WR_HARPOON(p_port + hp_scsisig, (SCSI_SEL | SCSI_BSY)); WR_HARPOON(p_port + hp_scsisig, (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD)); WR_HARPOON(p_port + hp_scsidata_0, (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) | (unsigned char)(BIT(7) + BIT(6)))); WR_HARPOON(p_port + hp_scsisig, (SCSI_BSY | SCSI_IOBIT | SCSI_CD)); FPT_scwiros(p_port, SCSI_SEL); WR_HARPOON(p_port + hp_scsidata_0, (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) & ~(unsigned char)BIT(6))); FPT_scwirod(p_port, BIT(6)); WR_HARPOON(p_port + hp_scsisig, (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD)); } /*--------------------------------------------------------------------- * * Function: FPT_scxferc * * Description: Handshake the p_data (DB4-0) across the bus. * *---------------------------------------------------------------------*/ static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data) { unsigned char curr_data, ret_data; curr_data = p_data | BIT(7) | BIT(5); /*Start with DB7 & DB5 asserted. */ WR_HARPOON(p_port + hp_scsidata_0, curr_data); curr_data &= ~BIT(7); WR_HARPOON(p_port + hp_scsidata_0, curr_data); FPT_scwirod(p_port, BIT(7)); /*Wait for DB7 to be released. */ while (!(RD_HARPOON(p_port + hp_scsidata_0) & BIT(5))) ; ret_data = (RD_HARPOON(p_port + hp_scsidata_0) & (unsigned char)0x1F); curr_data |= BIT(6); WR_HARPOON(p_port + hp_scsidata_0, curr_data); curr_data &= ~BIT(5); WR_HARPOON(p_port + hp_scsidata_0, curr_data); FPT_scwirod(p_port, BIT(5)); /*Wait for DB5 to be released. 
*/ curr_data &= ~(BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); /*Release data bits */ curr_data |= BIT(7); WR_HARPOON(p_port + hp_scsidata_0, curr_data); curr_data &= ~BIT(6); WR_HARPOON(p_port + hp_scsidata_0, curr_data); FPT_scwirod(p_port, BIT(6)); /*Wait for DB6 to be released. */ return ret_data; } /*--------------------------------------------------------------------- * * Function: FPT_scsendi * * Description: Transfer our Identification string to determine if we * will be the dominant master. * *---------------------------------------------------------------------*/ static unsigned char FPT_scsendi(u32 p_port, unsigned char p_id_string[]) { unsigned char ret_data, byte_cnt, bit_cnt, defer; defer = 0; for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) { for (bit_cnt = 0x80; bit_cnt != 0; bit_cnt >>= 1) { if (defer) ret_data = FPT_scxferc(p_port, 00); else if (p_id_string[byte_cnt] & bit_cnt) ret_data = FPT_scxferc(p_port, 02); else { ret_data = FPT_scxferc(p_port, 01); if (ret_data & 02) defer = 1; } if ((ret_data & 0x1C) == 0x10) return 0x00; /*End of isolation stage, we won! */ if (ret_data & 0x1C) return 0xFF; if ((defer) && (!(ret_data & 0x1F))) return 0x01; /*End of isolation stage, we lost. */ } /*bit loop */ } /*byte loop */ if (defer) return 0x01; /*We lost */ else return 0; /*We WON! Yeeessss! */ } /*--------------------------------------------------------------------- * * Function: FPT_sciso * * Description: Transfer the Identification string. * *---------------------------------------------------------------------*/ static unsigned char FPT_sciso(u32 p_port, unsigned char p_id_string[]) { unsigned char ret_data, the_data, byte_cnt, bit_cnt; the_data = 0; for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) { for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) { ret_data = FPT_scxferc(p_port, 0); if (ret_data & 0xFC) return 0xFF; else { the_data <<= 1; if (ret_data & BIT(1)) { the_data |= 1; } } if ((ret_data & 0x1F) == 0) { /* if(bit_cnt != 0 || bit_cnt != 8) { byte_cnt = 0; bit_cnt = 0; FPT_scxferc(p_port, SYNC_PTRN); FPT_scxferc(p_port, ASSIGN_ID); continue; } */ if (byte_cnt) return 0x00; else return 0xFF; } } /*bit loop */ p_id_string[byte_cnt] = the_data; } /*byte loop */ return 0; } /*--------------------------------------------------------------------- * * Function: FPT_scwirod * * Description: Sample the SCSI data bus making sure the signal has been * deasserted for the correct number of consecutive samples. * *---------------------------------------------------------------------*/ static void FPT_scwirod(u32 p_port, unsigned char p_data_bit) { unsigned char i; i = 0; while (i < MAX_SCSI_TAR) { if (RD_HARPOON(p_port + hp_scsidata_0) & p_data_bit) i = 0; else i++; } } /*--------------------------------------------------------------------- * * Function: FPT_scwiros * * Description: Sample the SCSI Signal lines making sure the signal has been * deasserted for the correct number of consecutive samples. * *---------------------------------------------------------------------*/ static void FPT_scwiros(u32 p_port, unsigned char p_data_bit) { unsigned char i; i = 0; while (i < MAX_SCSI_TAR) { if (RD_HARPOON(p_port + hp_scsisig) & p_data_bit) i = 0; else i++; } } /*--------------------------------------------------------------------- * * Function: FPT_scvalq * * Description: Make sure we received a valid data byte. 
* *---------------------------------------------------------------------*/ static unsigned char FPT_scvalq(unsigned char p_quintet) { unsigned char count; for (count = 1; count < 0x08; count <<= 1) { if (!(p_quintet & count)) p_quintet -= 0x80; } if (p_quintet & 0x18) return 0; else return 1; } /*--------------------------------------------------------------------- * * Function: FPT_scsell * * Description: Select the specified device ID using a selection timeout * less than 4ms. If somebody responds then it is a legacy * drive and this ID must be marked as such. * *---------------------------------------------------------------------*/ static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id) { unsigned long i; WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE)); ARAM_ACCESS(p_port); WR_HARPOON(p_port + hp_addstat, (RD_HARPOON(p_port + hp_addstat) | SCAM_TIMER)); WR_HARPOON(p_port + hp_seltimeout, TO_4ms); for (i = p_port + CMD_STRT; i < p_port + CMD_STRT + 12; i += 2) { WRW_HARPOON(i, (MPM_OP + ACOMMAND)); } WRW_HARPOON(i, (BRH_OP + ALWAYS + NP)); WRW_HARPOON((p_port + hp_intstat), (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT)); WR_HARPOON(p_port + hp_select_id, targ_id); WR_HARPOON(p_port + hp_portctrl_0, SCSI_PORT); WR_HARPOON(p_port + hp_autostart_3, (SELECT | CMD_ONLY_STRT)); WR_HARPOON(p_port + hp_scsictrl_0, (SEL_TAR | ENA_RESEL)); while (!(RDW_HARPOON((p_port + hp_intstat)) & (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) { } if (RDW_HARPOON((p_port + hp_intstat)) & RESET) FPT_Wait(p_port, TO_250ms); DISABLE_AUTO(p_port); WR_HARPOON(p_port + hp_addstat, (RD_HARPOON(p_port + hp_addstat) & ~SCAM_TIMER)); WR_HARPOON(p_port + hp_seltimeout, TO_290ms); SGRAM_ACCESS(p_port); if (RDW_HARPOON((p_port + hp_intstat)) & (RESET | TIMEOUT)) { WRW_HARPOON((p_port + hp_intstat), (RESET | TIMEOUT | SEL | BUS_FREE | PHASE)); WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) & ~G_INT_DISABLE)); return 0; /*No legacy device */ } else { while (!(RDW_HARPOON((p_port + hp_intstat)) & BUS_FREE)) { if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ) { WR_HARPOON(p_port + hp_scsisig, (SCSI_ACK + S_ILL_PH)); ACCEPT_MSG(p_port); } } WRW_HARPOON((p_port + hp_intstat), CLR_ALL_INT_1); WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) & ~G_INT_DISABLE)); return 1; /*Found one of them oldies! */ } } /*--------------------------------------------------------------------- * * Function: FPT_scwtsel * * Description: Wait to be selected by another SCAM initiator. * *---------------------------------------------------------------------*/ static void FPT_scwtsel(u32 p_port) { while (!(RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) { } } /*--------------------------------------------------------------------- * * Function: FPT_inisci * * Description: Setup the data Structure with the info from the EEPROM. 
* *---------------------------------------------------------------------*/ static void FPT_inisci(unsigned char p_card, u32 p_port, unsigned char p_our_id) { unsigned char i, k, max_id; unsigned short ee_data; struct nvram_info *pCurrNvRam; pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo; if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD) max_id = 0x08; else max_id = 0x10; if (pCurrNvRam) { for (i = 0; i < max_id; i++) { for (k = 0; k < 4; k++) FPT_scamInfo[i].id_string[k] = pCurrNvRam->niScamTbl[i][k]; for (k = 4; k < ID_STRING_LENGTH; k++) FPT_scamInfo[i].id_string[k] = (unsigned char)0x00; if (FPT_scamInfo[i].id_string[0] == 0x00) FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */ else FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */ } } else { for (i = 0; i < max_id; i++) { for (k = 0; k < ID_STRING_LENGTH; k += 2) { ee_data = FPT_utilEERead(p_port, (unsigned short)((EE_SCAMBASE / 2) + (unsigned short)(i * ((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2))); FPT_scamInfo[i].id_string[k] = (unsigned char)ee_data; ee_data >>= 8; FPT_scamInfo[i].id_string[k + 1] = (unsigned char)ee_data; } if ((FPT_scamInfo[i].id_string[0] == 0x00) || (FPT_scamInfo[i].id_string[0] == 0xFF)) FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */ else FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */ } } for (k = 0; k < ID_STRING_LENGTH; k++) FPT_scamInfo[p_our_id].id_string[k] = FPT_scamHAString[k]; } /*--------------------------------------------------------------------- * * Function: FPT_scmachid * * Description: Match the Device ID string with our values stored in * the EEPROM. * *---------------------------------------------------------------------*/ static unsigned char FPT_scmachid(unsigned char p_card, unsigned char p_id_string[]) { unsigned char i, k, match; for (i = 0; i < MAX_SCSI_TAR; i++) { match = 1; for (k = 0; k < ID_STRING_LENGTH; k++) { if (p_id_string[k] != FPT_scamInfo[i].id_string[k]) match = 0; } if (match) { FPT_scamInfo[i].state = ID_ASSIGNED; return i; } } if (p_id_string[0] & BIT(5)) i = 8; else i = MAX_SCSI_TAR; if (((p_id_string[0] & 0x06) == 0x02) || ((p_id_string[0] & 0x06) == 0x04)) match = p_id_string[1] & (unsigned char)0x1F; else match = 7; while (i > 0) { i--; if (FPT_scamInfo[match].state == ID_UNUSED) { for (k = 0; k < ID_STRING_LENGTH; k++) { FPT_scamInfo[match].id_string[k] = p_id_string[k]; } FPT_scamInfo[match].state = ID_ASSIGNED; if (FPT_BL_Card[p_card].pNvRamInfo == NULL) FPT_BL_Card[p_card].globalFlags |= F_UPDATE_EEPROM; return match; } match--; if (match == 0xFF) { if (p_id_string[0] & BIT(5)) match = 7; else match = MAX_SCSI_TAR - 1; } } if (p_id_string[0] & BIT(7)) { return CLR_PRIORITY; } if (p_id_string[0] & BIT(5)) i = 8; else i = MAX_SCSI_TAR; if (((p_id_string[0] & 0x06) == 0x02) || ((p_id_string[0] & 0x06) == 0x04)) match = p_id_string[1] & (unsigned char)0x1F; else match = 7; while (i > 0) { i--; if (FPT_scamInfo[match].state == ID_UNASSIGNED) { for (k = 0; k < ID_STRING_LENGTH; k++) { FPT_scamInfo[match].id_string[k] = p_id_string[k]; } FPT_scamInfo[match].id_string[0] |= BIT(7); FPT_scamInfo[match].state = ID_ASSIGNED; if (FPT_BL_Card[p_card].pNvRamInfo == NULL) FPT_BL_Card[p_card].globalFlags |= F_UPDATE_EEPROM; return match; } match--; if (match == 0xFF) { if (p_id_string[0] & BIT(5)) match = 7; else match = MAX_SCSI_TAR - 1; } } return NO_ID_AVAIL; } /*--------------------------------------------------------------------- * * Function: FPT_scsavdi * * Description: Save 
off the device SCAM ID strings.
 *
 *---------------------------------------------------------------------*/
static void FPT_scsavdi(unsigned char p_card, u32 p_port)
{
	unsigned char i, k, max_id;
	unsigned short ee_data, sum_data;

	sum_data = 0x0000;

	for (i = 1; i < EE_SCAMBASE / 2; i++) {
		sum_data += FPT_utilEERead(p_port, i);
	}

	FPT_utilEEWriteOnOff(p_port, 1);	/* Enable write access to the EEPROM */

	if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
		max_id = 0x08;

	else
		max_id = 0x10;

	for (i = 0; i < max_id; i++) {

		for (k = 0; k < ID_STRING_LENGTH; k += 2) {
			ee_data = FPT_scamInfo[i].id_string[k + 1];
			ee_data <<= 8;
			ee_data |= FPT_scamInfo[i].id_string[k];
			sum_data += ee_data;

			FPT_utilEEWrite(p_port, ee_data,
					(unsigned short)((EE_SCAMBASE / 2) +
							 (unsigned short)(i *
									  ((unsigned short)ID_STRING_LENGTH / 2)) +
							 (unsigned short)(k / 2)));
		}
	}

	FPT_utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM / 2);
	FPT_utilEEWriteOnOff(p_port, 0);	/* Turn off write access */
}

/*---------------------------------------------------------------------
 *
 * Function: FPT_XbowInit
 *
 * Description: Setup the Xbow for normal operation.
 *
 *---------------------------------------------------------------------*/
static void FPT_XbowInit(u32 port, unsigned char ScamFlg)
{
	unsigned char i;

	i = RD_HARPOON(port + hp_page_ctrl);
	WR_HARPOON(port + hp_page_ctrl, (unsigned char)(i | G_INT_DISABLE));

	WR_HARPOON(port + hp_scsireset, 0x00);
	WR_HARPOON(port + hp_portctrl_1, HOST_MODE8);

	WR_HARPOON(port + hp_scsireset,
		   (DMA_RESET | HPSCSI_RESET | PROG_RESET | FIFO_CLR));

	WR_HARPOON(port + hp_scsireset, SCSI_INI);

	WR_HARPOON(port + hp_clkctrl_0, CLKCTRL_DEFAULT);

	WR_HARPOON(port + hp_scsisig, 0x00);	/* Clear any signals we might */
	WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL);

	WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);

	FPT_default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
	    BUS_FREE | XFER_CNT_0 | AUTO_INT;

	if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
		FPT_default_intena |= SCAM_SEL;

	WRW_HARPOON((port + hp_intena), FPT_default_intena);

	WR_HARPOON(port + hp_seltimeout, TO_290ms);

	/* Turn on SCSI_MODE8 for narrow cards to fix the
	   strapping issue with the DUAL CHANNEL card */

	if (RD_HARPOON(port + hp_page_ctrl) & NARROW_SCSI_CARD)
		WR_HARPOON(port + hp_addstat, SCSI_MODE8);

	WR_HARPOON(port + hp_page_ctrl, i);
}

/*---------------------------------------------------------------------
 *
 * Function: FPT_BusMasterInit
 *
 * Description: Initialize the BusMaster for normal operations.
 *
 *---------------------------------------------------------------------*/
static void FPT_BusMasterInit(u32 p_port)
{

	WR_HARPOON(p_port + hp_sys_ctrl, DRVR_RST);
	WR_HARPOON(p_port + hp_sys_ctrl, 0x00);

	WR_HARPOON(p_port + hp_host_blk_cnt, XFER_BLK64);

	WR_HARPOON(p_port + hp_bm_ctrl, (BMCTRL_DEFAULT));

	WR_HARPOON(p_port + hp_ee_ctrl, (SCSI_TERM_ENA_H));

	RD_HARPOON(p_port + hp_int_status);	/*Clear interrupts. */
	WR_HARPOON(p_port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
	WR_HARPOON(p_port + hp_page_ctrl,
		   (RD_HARPOON(p_port + hp_page_ctrl) & ~SCATTER_EN));
}

/*---------------------------------------------------------------------
 *
 * Function: FPT_DiagEEPROM
 *
 * Description: Verify checksum and 'Key' and initialize the EEPROM if
 *              necessary.
* *---------------------------------------------------------------------*/ static void FPT_DiagEEPROM(u32 p_port) { unsigned short index, temp, max_wd_cnt; if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD) max_wd_cnt = EEPROM_WD_CNT; else max_wd_cnt = EEPROM_WD_CNT * 2; temp = FPT_utilEERead(p_port, FW_SIGNATURE / 2); if (temp == 0x4641) { for (index = 2; index < max_wd_cnt; index++) { temp += FPT_utilEERead(p_port, index); } if (temp == FPT_utilEERead(p_port, EEPROM_CHECK_SUM / 2)) { return; /*EEPROM is Okay so return now! */ } } FPT_utilEEWriteOnOff(p_port, (unsigned char)1); for (index = 0; index < max_wd_cnt; index++) { FPT_utilEEWrite(p_port, 0x0000, index); } temp = 0; FPT_utilEEWrite(p_port, 0x4641, FW_SIGNATURE / 2); temp += 0x4641; FPT_utilEEWrite(p_port, 0x3920, MODEL_NUMB_0 / 2); temp += 0x3920; FPT_utilEEWrite(p_port, 0x3033, MODEL_NUMB_2 / 2); temp += 0x3033; FPT_utilEEWrite(p_port, 0x2020, MODEL_NUMB_4 / 2); temp += 0x2020; FPT_utilEEWrite(p_port, 0x70D3, SYSTEM_CONFIG / 2); temp += 0x70D3; FPT_utilEEWrite(p_port, 0x0010, BIOS_CONFIG / 2); temp += 0x0010; FPT_utilEEWrite(p_port, 0x0003, SCAM_CONFIG / 2); temp += 0x0003; FPT_utilEEWrite(p_port, 0x0007, ADAPTER_SCSI_ID / 2); temp += 0x0007; FPT_utilEEWrite(p_port, 0x0000, IGNORE_B_SCAN / 2); temp += 0x0000; FPT_utilEEWrite(p_port, 0x0000, SEND_START_ENA / 2); temp += 0x0000; FPT_utilEEWrite(p_port, 0x0000, DEVICE_ENABLE / 2); temp += 0x0000; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL01 / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL23 / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL45 / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL67 / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL89 / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLab / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLcd / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLef / 2); temp += 0x4242; FPT_utilEEWrite(p_port, 0x6C46, 64 / 2); /*PRODUCT ID */ temp += 0x6C46; FPT_utilEEWrite(p_port, 0x7361, 66 / 2); /* FlashPoint LT */ temp += 0x7361; FPT_utilEEWrite(p_port, 0x5068, 68 / 2); temp += 0x5068; FPT_utilEEWrite(p_port, 0x696F, 70 / 2); temp += 0x696F; FPT_utilEEWrite(p_port, 0x746E, 72 / 2); temp += 0x746E; FPT_utilEEWrite(p_port, 0x4C20, 74 / 2); temp += 0x4C20; FPT_utilEEWrite(p_port, 0x2054, 76 / 2); temp += 0x2054; FPT_utilEEWrite(p_port, 0x2020, 78 / 2); temp += 0x2020; index = ((EE_SCAMBASE / 2) + (7 * 16)); FPT_utilEEWrite(p_port, (0x0700 + TYPE_CODE0), index); temp += (0x0700 + TYPE_CODE0); index++; FPT_utilEEWrite(p_port, 0x5542, index); /*Vendor ID code */ temp += 0x5542; /* BUSLOGIC */ index++; FPT_utilEEWrite(p_port, 0x4C53, index); temp += 0x4C53; index++; FPT_utilEEWrite(p_port, 0x474F, index); temp += 0x474F; index++; FPT_utilEEWrite(p_port, 0x4349, index); temp += 0x4349; index++; FPT_utilEEWrite(p_port, 0x5442, index); /*Vendor unique code */ temp += 0x5442; /* BT- 930 */ index++; FPT_utilEEWrite(p_port, 0x202D, index); temp += 0x202D; index++; FPT_utilEEWrite(p_port, 0x3339, index); temp += 0x3339; index++; /*Serial # */ FPT_utilEEWrite(p_port, 0x2030, index); /* 01234567 */ temp += 0x2030; index++; FPT_utilEEWrite(p_port, 0x5453, index); temp += 0x5453; index++; FPT_utilEEWrite(p_port, 0x5645, index); temp += 0x5645; index++; FPT_utilEEWrite(p_port, 0x2045, index); temp += 0x2045; index++; FPT_utilEEWrite(p_port, 0x202F, index); temp += 0x202F; index++; FPT_utilEEWrite(p_port, 0x4F4A, index); temp += 
0x4F4A; index++; FPT_utilEEWrite(p_port, 0x204E, index); temp += 0x204E; index++; FPT_utilEEWrite(p_port, 0x3539, index); temp += 0x3539; FPT_utilEEWrite(p_port, temp, EEPROM_CHECK_SUM / 2); FPT_utilEEWriteOnOff(p_port, (unsigned char)0); } /*--------------------------------------------------------------------- * * Function: Queue Search Select * * Description: Try to find a new command to execute. * *---------------------------------------------------------------------*/ static void FPT_queueSearchSelect(struct sccb_card *pCurrCard, unsigned char p_card) { unsigned char scan_ptr, lun; struct sccb_mgr_tar_info *currTar_Info; struct sccb *pOldSccb; scan_ptr = pCurrCard->scanIndex; do { currTar_Info = &FPT_sccbMgrTbl[p_card][scan_ptr]; if ((pCurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) { if (currTar_Info->TarSelQ_Cnt != 0) { scan_ptr++; if (scan_ptr == MAX_SCSI_TAR) scan_ptr = 0; for (lun = 0; lun < MAX_LUN; lun++) { if (currTar_Info->TarLUNBusy[lun] == 0) { pCurrCard->currentSCCB = currTar_Info->TarSelQ_Head; pOldSccb = NULL; while ((pCurrCard-> currentSCCB != NULL) && (lun != pCurrCard-> currentSCCB->Lun)) { pOldSccb = pCurrCard-> currentSCCB; pCurrCard->currentSCCB = (struct sccb *)(pCurrCard-> currentSCCB)-> Sccb_forwardlink; } if (pCurrCard->currentSCCB == NULL) continue; if (pOldSccb != NULL) { pOldSccb-> Sccb_forwardlink = (struct sccb *)(pCurrCard-> currentSCCB)-> Sccb_forwardlink; pOldSccb-> Sccb_backlink = (struct sccb *)(pCurrCard-> currentSCCB)-> Sccb_backlink; currTar_Info-> TarSelQ_Cnt--; } else { currTar_Info-> TarSelQ_Head = (struct sccb *)(pCurrCard-> currentSCCB)-> Sccb_forwardlink; if (currTar_Info-> TarSelQ_Head == NULL) { currTar_Info-> TarSelQ_Tail = NULL; currTar_Info-> TarSelQ_Cnt = 0; } else { currTar_Info-> TarSelQ_Cnt--; currTar_Info-> TarSelQ_Head-> Sccb_backlink = (struct sccb *)NULL; } } pCurrCard->scanIndex = scan_ptr; pCurrCard->globalFlags |= F_NEW_SCCB_CMD; break; } } } else { scan_ptr++; if (scan_ptr == MAX_SCSI_TAR) { scan_ptr = 0; } } } else { if ((currTar_Info->TarSelQ_Cnt != 0) && (currTar_Info->TarLUNBusy[0] == 0)) { pCurrCard->currentSCCB = currTar_Info->TarSelQ_Head; currTar_Info->TarSelQ_Head = (struct sccb *)(pCurrCard->currentSCCB)-> Sccb_forwardlink; if (currTar_Info->TarSelQ_Head == NULL) { currTar_Info->TarSelQ_Tail = NULL; currTar_Info->TarSelQ_Cnt = 0; } else { currTar_Info->TarSelQ_Cnt--; currTar_Info->TarSelQ_Head-> Sccb_backlink = (struct sccb *)NULL; } scan_ptr++; if (scan_ptr == MAX_SCSI_TAR) scan_ptr = 0; pCurrCard->scanIndex = scan_ptr; pCurrCard->globalFlags |= F_NEW_SCCB_CMD; break; } else { scan_ptr++; if (scan_ptr == MAX_SCSI_TAR) { scan_ptr = 0; } } } } while (scan_ptr != pCurrCard->scanIndex); } /*--------------------------------------------------------------------- * * Function: Queue Select Fail * * Description: Add the current SCCB to the head of the Queue. 
* *---------------------------------------------------------------------*/ static void FPT_queueSelectFail(struct sccb_card *pCurrCard, unsigned char p_card) { unsigned char thisTarg; struct sccb_mgr_tar_info *currTar_Info; if (pCurrCard->currentSCCB != NULL) { thisTarg = (unsigned char)(((struct sccb *)(pCurrCard->currentSCCB))-> TargID); currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg]; pCurrCard->currentSCCB->Sccb_backlink = (struct sccb *)NULL; pCurrCard->currentSCCB->Sccb_forwardlink = currTar_Info->TarSelQ_Head; if (currTar_Info->TarSelQ_Cnt == 0) { currTar_Info->TarSelQ_Tail = pCurrCard->currentSCCB; } else { currTar_Info->TarSelQ_Head->Sccb_backlink = pCurrCard->currentSCCB; } currTar_Info->TarSelQ_Head = pCurrCard->currentSCCB; pCurrCard->currentSCCB = NULL; currTar_Info->TarSelQ_Cnt++; } } /*--------------------------------------------------------------------- * * Function: Queue Command Complete * * Description: Call the callback function with the current SCCB. * *---------------------------------------------------------------------*/ static void FPT_queueCmdComplete(struct sccb_card *pCurrCard, struct sccb *p_sccb, unsigned char p_card) { unsigned char i, SCSIcmd; CALL_BK_FN callback; struct sccb_mgr_tar_info *currTar_Info; SCSIcmd = p_sccb->Cdb[0]; if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED)) { if ((p_sccb-> ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN)) && (p_sccb->HostStatus == SCCB_COMPLETE) && (p_sccb->TargetStatus != SAM_STAT_CHECK_CONDITION)) if ((SCSIcmd == READ_6) || (SCSIcmd == WRITE_6) || (SCSIcmd == READ_10) || (SCSIcmd == WRITE_10) || (SCSIcmd == WRITE_VERIFY) || (SCSIcmd == START_STOP) || (pCurrCard->globalFlags & F_NO_FILTER) ) p_sccb->HostStatus = SCCB_DATA_UNDER_RUN; } if (p_sccb->SccbStatus == SCCB_IN_PROCESS) { if (p_sccb->HostStatus || p_sccb->TargetStatus) p_sccb->SccbStatus = SCCB_ERROR; else p_sccb->SccbStatus = SCCB_SUCCESS; } if (p_sccb->Sccb_XferState & F_AUTO_SENSE) { p_sccb->CdbLength = p_sccb->Save_CdbLen; for (i = 0; i < 6; i++) { p_sccb->Cdb[i] = p_sccb->Save_Cdb[i]; } } if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) || (p_sccb->OperationCode == RESIDUAL_COMMAND)) { FPT_utilUpdateResidual(p_sccb); } pCurrCard->cmdCounter--; if (!pCurrCard->cmdCounter) { if (pCurrCard->globalFlags & F_GREEN_PC) { WR_HARPOON(pCurrCard->ioPort + hp_clkctrl_0, (PWR_DWN | CLKCTRL_DEFAULT)); WR_HARPOON(pCurrCard->ioPort + hp_sys_ctrl, STOP_CLK); } WR_HARPOON(pCurrCard->ioPort + hp_semaphore, (RD_HARPOON(pCurrCard->ioPort + hp_semaphore) & ~SCCB_MGR_ACTIVE)); } if (pCurrCard->discQCount != 0) { currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; if (((pCurrCard->globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { pCurrCard->discQCount--; pCurrCard->discQ_Tbl[currTar_Info-> LunDiscQ_Idx[p_sccb->Lun]] = NULL; } else { if (p_sccb->Sccb_tag) { pCurrCard->discQCount--; pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL; } else { pCurrCard->discQCount--; pCurrCard->discQ_Tbl[currTar_Info-> LunDiscQ_Idx[0]] = NULL; } } } callback = (CALL_BK_FN) p_sccb->SccbCallback; callback(p_sccb); pCurrCard->globalFlags |= F_NEW_SCCB_CMD; pCurrCard->currentSCCB = NULL; } /*--------------------------------------------------------------------- * * Function: Queue Disconnect * * Description: Add SCCB to our disconnect array. 
* *---------------------------------------------------------------------*/ static void FPT_queueDisconnect(struct sccb *p_sccb, unsigned char p_card) { struct sccb_mgr_tar_info *currTar_Info; currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) { FPT_BL_Card[p_card].discQ_Tbl[currTar_Info-> LunDiscQ_Idx[p_sccb->Lun]] = p_sccb; } else { if (p_sccb->Sccb_tag) { FPT_BL_Card[p_card].discQ_Tbl[p_sccb->Sccb_tag] = p_sccb; FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarLUNBusy[0] = 0; FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarTagQ_Cnt++; } else { FPT_BL_Card[p_card].discQ_Tbl[currTar_Info-> LunDiscQ_Idx[0]] = p_sccb; } } FPT_BL_Card[p_card].currentSCCB = NULL; } /*--------------------------------------------------------------------- * * Function: Queue Flush SCCB * * Description: Flush all SCCB's back to the host driver for this target. * *---------------------------------------------------------------------*/ static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code) { unsigned char qtag, thisTarg; struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; currSCCB = FPT_BL_Card[p_card].currentSCCB; if (currSCCB != NULL) { thisTarg = (unsigned char)currSCCB->TargID; currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg]; for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) { if (FPT_BL_Card[p_card].discQ_Tbl[qtag] && (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg)) { FPT_BL_Card[p_card].discQ_Tbl[qtag]-> HostStatus = (unsigned char)error_code; FPT_queueCmdComplete(&FPT_BL_Card[p_card], FPT_BL_Card[p_card]. discQ_Tbl[qtag], p_card); FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL; currTar_Info->TarTagQ_Cnt--; } } } } /*--------------------------------------------------------------------- * * Function: Queue Flush Target SCCB * * Description: Flush all SCCB's back to the host driver for this target. * *---------------------------------------------------------------------*/ static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg, unsigned char error_code) { unsigned char qtag; struct sccb_mgr_tar_info *currTar_Info; currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg]; for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) { if (FPT_BL_Card[p_card].discQ_Tbl[qtag] && (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg)) { FPT_BL_Card[p_card].discQ_Tbl[qtag]->HostStatus = (unsigned char)error_code; FPT_queueCmdComplete(&FPT_BL_Card[p_card], FPT_BL_Card[p_card]. discQ_Tbl[qtag], p_card); FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL; currTar_Info->TarTagQ_Cnt--; } } } static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char p_card) { struct sccb_mgr_tar_info *currTar_Info; currTar_Info = &FPT_sccbMgrTbl[p_card][p_SCCB->TargID]; p_SCCB->Sccb_forwardlink = NULL; p_SCCB->Sccb_backlink = currTar_Info->TarSelQ_Tail; if (currTar_Info->TarSelQ_Cnt == 0) { currTar_Info->TarSelQ_Head = p_SCCB; } else { currTar_Info->TarSelQ_Tail->Sccb_forwardlink = p_SCCB; } currTar_Info->TarSelQ_Tail = p_SCCB; currTar_Info->TarSelQ_Cnt++; } /*--------------------------------------------------------------------- * * Function: Queue Find SCCB * * Description: Search the target select Queue for this SCCB, and * remove it if found. 
* *---------------------------------------------------------------------*/ static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB, unsigned char p_card) { struct sccb *q_ptr; struct sccb_mgr_tar_info *currTar_Info; currTar_Info = &FPT_sccbMgrTbl[p_card][p_SCCB->TargID]; q_ptr = currTar_Info->TarSelQ_Head; while (q_ptr != NULL) { if (q_ptr == p_SCCB) { if (currTar_Info->TarSelQ_Head == q_ptr) { currTar_Info->TarSelQ_Head = q_ptr->Sccb_forwardlink; } if (currTar_Info->TarSelQ_Tail == q_ptr) { currTar_Info->TarSelQ_Tail = q_ptr->Sccb_backlink; } if (q_ptr->Sccb_forwardlink != NULL) { q_ptr->Sccb_forwardlink->Sccb_backlink = q_ptr->Sccb_backlink; } if (q_ptr->Sccb_backlink != NULL) { q_ptr->Sccb_backlink->Sccb_forwardlink = q_ptr->Sccb_forwardlink; } currTar_Info->TarSelQ_Cnt--; return 1; } else { q_ptr = q_ptr->Sccb_forwardlink; } } return 0; } /*--------------------------------------------------------------------- * * Function: Utility Update Residual Count * * Description: Update the XferCnt to the remaining byte count. * If we transferred all the data then just write zero. * If Non-SG transfer then report Total Cnt - Actual Transfer * Cnt. For SG transfers add the count fields of all * remaining SG elements, as well as any partial remaining * element. * *---------------------------------------------------------------------*/ static void FPT_utilUpdateResidual(struct sccb *p_SCCB) { unsigned long partial_cnt; unsigned int sg_index; struct blogic_sg_seg *segp; if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) { p_SCCB->DataLength = 0x0000; } else if (p_SCCB->Sccb_XferState & F_SG_XFER) { partial_cnt = 0x0000; sg_index = p_SCCB->Sccb_sgseg; if (p_SCCB->Sccb_SGoffset) { partial_cnt = p_SCCB->Sccb_SGoffset; sg_index++; } while (((unsigned long)sg_index * (unsigned long)SG_ELEMENT_SIZE) < p_SCCB->DataLength) { segp = (struct blogic_sg_seg *)(p_SCCB->DataPointer) + (sg_index * 2); partial_cnt += segp->segbytes; sg_index++; } p_SCCB->DataLength = partial_cnt; } else { p_SCCB->DataLength -= p_SCCB->Sccb_ATC; } } /*--------------------------------------------------------------------- * * Function: Wait 1 Second * * Description: Wait for 1 second. * *---------------------------------------------------------------------*/ static void FPT_Wait1Second(u32 p_port) { unsigned char i; for (i = 0; i < 4; i++) { FPT_Wait(p_port, TO_250ms); if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST)) break; if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) break; } } /*--------------------------------------------------------------------- * * Function: FPT_Wait * * Description: Wait the desired delay. 
* *---------------------------------------------------------------------*/ static void FPT_Wait(u32 p_port, unsigned char p_delay) { unsigned char old_timer; unsigned char green_flag; old_timer = RD_HARPOON(p_port + hp_seltimeout); green_flag = RD_HARPOON(p_port + hp_clkctrl_0); WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT); WR_HARPOON(p_port + hp_seltimeout, p_delay); WRW_HARPOON((p_port + hp_intstat), TIMEOUT); WRW_HARPOON((p_port + hp_intena), (FPT_default_intena & ~TIMEOUT)); WR_HARPOON(p_port + hp_portctrl_0, (RD_HARPOON(p_port + hp_portctrl_0) | START_TO)); while (!(RDW_HARPOON((p_port + hp_intstat)) & TIMEOUT)) { if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST)) break; if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) break; } WR_HARPOON(p_port + hp_portctrl_0, (RD_HARPOON(p_port + hp_portctrl_0) & ~START_TO)); WRW_HARPOON((p_port + hp_intstat), TIMEOUT); WRW_HARPOON((p_port + hp_intena), FPT_default_intena); WR_HARPOON(p_port + hp_clkctrl_0, green_flag); WR_HARPOON(p_port + hp_seltimeout, old_timer); } /*--------------------------------------------------------------------- * * Function: Enable/Disable Write to EEPROM * * Description: The EEPROM must first be enabled for writes * A total of 9 clocks are needed. * *---------------------------------------------------------------------*/ static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode) { unsigned char ee_value; ee_value = (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H)); if (p_mode) FPT_utilEESendCmdAddr(p_port, EWEN, EWEN_ADDR); else FPT_utilEESendCmdAddr(p_port, EWDS, EWDS_ADDR); WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */ } /*--------------------------------------------------------------------- * * Function: Write EEPROM * * Description: Write a word to the EEPROM at the specified * address. * *---------------------------------------------------------------------*/ static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data, unsigned short ee_addr) { unsigned char ee_value; unsigned short i; ee_value = (unsigned char)((RD_HARPOON(p_port + hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS)); FPT_utilEESendCmdAddr(p_port, EE_WRITE, ee_addr); ee_value |= (SEE_MS + SEE_CS); for (i = 0x8000; i != 0; i >>= 1) { if (i & ee_data) ee_value |= SEE_DO; else ee_value &= ~SEE_DO; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value |= SEE_CLK; /* Clock data! */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value &= ~SEE_CLK; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); } ee_value &= (EXT_ARB_ACK | SCSI_TERM_ENA_H); WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); FPT_Wait(p_port, TO_10ms); WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS | SEE_CS)); /* Set CS to EEPROM */ WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /* Turn off Master Select */ } /*--------------------------------------------------------------------- * * Function: Read EEPROM * * Description: Read a word from the EEPROM at the desired * address. 
* *---------------------------------------------------------------------*/ static unsigned short FPT_utilEERead(u32 p_port, unsigned short ee_addr) { unsigned short i, ee_data1, ee_data2; i = 0; ee_data1 = FPT_utilEEReadOrg(p_port, ee_addr); do { ee_data2 = FPT_utilEEReadOrg(p_port, ee_addr); if (ee_data1 == ee_data2) return ee_data1; ee_data1 = ee_data2; i++; } while (i < 4); return ee_data1; } /*--------------------------------------------------------------------- * * Function: Read EEPROM Original * * Description: Read a word from the EEPROM at the desired * address. * *---------------------------------------------------------------------*/ static unsigned short FPT_utilEEReadOrg(u32 p_port, unsigned short ee_addr) { unsigned char ee_value; unsigned short i, ee_data; ee_value = (unsigned char)((RD_HARPOON(p_port + hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS)); FPT_utilEESendCmdAddr(p_port, EE_READ, ee_addr); ee_value |= (SEE_MS + SEE_CS); ee_data = 0; for (i = 1; i <= 16; i++) { ee_value |= SEE_CLK; /* Clock data! */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value &= ~SEE_CLK; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_data <<= 1; if (RD_HARPOON(p_port + hp_ee_ctrl) & SEE_DI) ee_data |= 1; } ee_value &= ~(SEE_MS + SEE_CS); WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */ return ee_data; } /*--------------------------------------------------------------------- * * Function: Send EE command and Address to the EEPROM * * Description: Transfers the correct command and sends the address * to the eeprom. * *---------------------------------------------------------------------*/ static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd, unsigned short ee_addr) { unsigned char ee_value; unsigned char narrow_flg; unsigned short i; narrow_flg = (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD); ee_value = SEE_MS; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value |= SEE_CS; /* Set CS to EEPROM */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); for (i = 0x04; i != 0; i >>= 1) { if (i & ee_cmd) ee_value |= SEE_DO; else ee_value &= ~SEE_DO; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value |= SEE_CLK; /* Clock data! */ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value &= ~SEE_CLK; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); } if (narrow_flg) i = 0x0080; else i = 0x0200; while (i != 0) { if (i & ee_addr) ee_value |= SEE_DO; else ee_value &= ~SEE_DO; WR_HARPOON(p_port + hp_ee_ctrl, ee_value); WR_HARPOON(p_port + hp_ee_ctrl, ee_value); ee_value |= SEE_CLK; /* Clock data! 
 */
		WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
		WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
		ee_value &= ~SEE_CLK;
		WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
		WR_HARPOON(p_port + hp_ee_ctrl, ee_value);

		i >>= 1;
	}
}

static unsigned short FPT_CalcCrc16(unsigned char buffer[])
{
	unsigned short crc = 0;
	int i, j;
	unsigned short ch;

	for (i = 0; i < ID_STRING_LENGTH; i++) {
		ch = (unsigned short)buffer[i];
		for (j = 0; j < 8; j++) {
			if ((crc ^ ch) & 1)
				crc = (crc >> 1) ^ CRCMASK;
			else
				crc >>= 1;
			ch >>= 1;
		}
	}
	return crc;
}

static unsigned char FPT_CalcLrc(unsigned char buffer[])
{
	int i;
	unsigned char lrc;

	lrc = 0;
	for (i = 0; i < ID_STRING_LENGTH; i++)
		lrc ^= buffer[i];
	return lrc;
}

/* The following inline definitions avoid type conflicts. */

static inline unsigned char
FlashPoint__ProbeHostAdapter(struct fpoint_info *FlashPointInfo)
{
	return FlashPoint_ProbeHostAdapter((struct sccb_mgr_info *)
					   FlashPointInfo);
}

static inline void *
FlashPoint__HardwareResetHostAdapter(struct fpoint_info *FlashPointInfo)
{
	return FlashPoint_HardwareResetHostAdapter((struct sccb_mgr_info *)
						   FlashPointInfo);
}

static inline void FlashPoint__ReleaseHostAdapter(void *CardHandle)
{
	FlashPoint_ReleaseHostAdapter(CardHandle);
}

static inline void FlashPoint__StartCCB(void *CardHandle, struct blogic_ccb *CCB)
{
	FlashPoint_StartCCB(CardHandle, (struct sccb *)CCB);
}

static inline void FlashPoint__AbortCCB(void *CardHandle, struct blogic_ccb *CCB)
{
	FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB);
}

static inline bool FlashPoint__InterruptPending(void *CardHandle)
{
	return FlashPoint_InterruptPending(CardHandle);
}

static inline int FlashPoint__HandleInterrupt(void *CardHandle)
{
	return FlashPoint_HandleInterrupt(CardHandle);
}

#define FlashPoint_ProbeHostAdapter		FlashPoint__ProbeHostAdapter
#define FlashPoint_HardwareResetHostAdapter	FlashPoint__HardwareResetHostAdapter
#define FlashPoint_ReleaseHostAdapter		FlashPoint__ReleaseHostAdapter
#define FlashPoint_StartCCB			FlashPoint__StartCCB
#define FlashPoint_AbortCCB			FlashPoint__AbortCCB
#define FlashPoint_InterruptPending		FlashPoint__InterruptPending
#define FlashPoint_HandleInterrupt		FlashPoint__HandleInterrupt

#else				/* !CONFIG_SCSI_FLASHPOINT */

/* Define prototypes for the FlashPoint SCCB Manager Functions. */

extern unsigned char FlashPoint_ProbeHostAdapter(struct fpoint_info *);
extern void *FlashPoint_HardwareResetHostAdapter(struct fpoint_info *);
extern void FlashPoint_StartCCB(void *, struct blogic_ccb *);
extern int FlashPoint_AbortCCB(void *, struct blogic_ccb *);
extern bool FlashPoint_InterruptPending(void *);
extern int FlashPoint_HandleInterrupt(void *);
extern void FlashPoint_ReleaseHostAdapter(void *);

#endif				/* CONFIG_SCSI_FLASHPOINT */
linux-master
drivers/scsi/FlashPoint.c
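For illustration only (not part of the FlashPoint.c source above): the SCCB manager protects its EEPROM contents and SCAM ID strings with two simple checks, an XOR longitudinal redundancy check (FPT_CalcLrc) and a bit-serial reflected CRC-16 (FPT_CalcCrc16). The standalone sketch below mirrors those two routines so they can be run in isolation; DEMO_ID_STRING_LENGTH, DEMO_CRCMASK, the sample ID string, and the main() harness are assumptions for this demo, not values taken from the driver, whose real ID_STRING_LENGTH and CRCMASK are defined elsewhere in FlashPoint.c.

/*
 * Standalone sketch of the FlashPoint SCAM ID integrity checks.
 * Assumed demo constants; the driver's real values may differ.
 */
#include <stdio.h>

#define DEMO_ID_STRING_LENGTH	32	/* assumed; driver uses ID_STRING_LENGTH */
#define DEMO_CRCMASK		0xA001	/* assumed reflected CRC-16 polynomial */

/* XOR of every byte in the buffer, as in FPT_CalcLrc(). */
static unsigned char demo_calc_lrc(const unsigned char *buf)
{
	unsigned char lrc = 0;
	int i;

	for (i = 0; i < DEMO_ID_STRING_LENGTH; i++)
		lrc ^= buf[i];
	return lrc;
}

/* Bit-serial reflected CRC-16 over the buffer, as in FPT_CalcCrc16(). */
static unsigned short demo_calc_crc16(const unsigned char *buf)
{
	unsigned short crc = 0;
	int i, j;

	for (i = 0; i < DEMO_ID_STRING_LENGTH; i++) {
		unsigned short ch = buf[i];

		for (j = 0; j < 8; j++) {
			if ((crc ^ ch) & 1)
				crc = (crc >> 1) ^ DEMO_CRCMASK;
			else
				crc >>= 1;
			ch >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	/* Hypothetical ID string; unused trailing bytes stay zero. */
	unsigned char id[DEMO_ID_STRING_LENGTH] = "BUSLOGIC BT-930 01234567";

	printf("lrc=0x%02x crc16=0x%04x\n",
	       demo_calc_lrc(id), demo_calc_crc16(id));
	return 0;
}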
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1997 Wu Ching Chen * 2.1.x update (C) 1998 Krzysztof G. Baranowski * 2.5.x update (C) 2002 Red Hat * 2.6.x update (C) 2004 Red Hat * * Marcelo Tosatti <[email protected]> : SMP fixes * * Wu Ching Chen : NULL pointer fixes 2000/06/02 * support atp876 chip * enable 32 bit fifo transfer * support cdrom & remove device run ultra speed * fix disconnect bug 2000/12/21 * support atp880 chip lvd u160 2001/05/15 * fix prd table bug 2001/09/12 (7.1) * * atp885 support add by ACARD Hao Ping Lian 2005/01/05 */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "atp870u.h" static const struct scsi_host_template atp870u_template; static void send_s870(struct atp_unit *dev,unsigned char c); static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode); static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val) { outb(val, atp->baseport + reg); } static inline void atp_writew_base(struct atp_unit *atp, u8 reg, u16 val) { outw(val, atp->baseport + reg); } static inline void atp_writeb_io(struct atp_unit *atp, u8 channel, u8 reg, u8 val) { outb(val, atp->ioport[channel] + reg); } static inline void atp_writew_io(struct atp_unit *atp, u8 channel, u8 reg, u16 val) { outw(val, atp->ioport[channel] + reg); } static inline void atp_writeb_pci(struct atp_unit *atp, u8 channel, u8 reg, u8 val) { outb(val, atp->pciport[channel] + reg); } static inline void atp_writel_pci(struct atp_unit *atp, u8 channel, u8 reg, u32 val) { outl(val, atp->pciport[channel] + reg); } static inline u8 atp_readb_base(struct atp_unit *atp, u8 reg) { return inb(atp->baseport + reg); } static inline u16 atp_readw_base(struct atp_unit *atp, u8 reg) { return inw(atp->baseport + reg); } static inline u32 atp_readl_base(struct atp_unit *atp, u8 reg) { return inl(atp->baseport + reg); } static inline u8 atp_readb_io(struct atp_unit *atp, u8 channel, u8 reg) { return inb(atp->ioport[channel] + reg); } static inline u16 atp_readw_io(struct atp_unit *atp, u8 channel, u8 reg) { return inw(atp->ioport[channel] + reg); } static inline u8 atp_readb_pci(struct atp_unit *atp, u8 channel, u8 reg) { return inb(atp->pciport[channel] + reg); } static inline bool is880(struct atp_unit *atp) { return atp->pdev->device == ATP880_DEVID1 || atp->pdev->device == ATP880_DEVID2; } static inline bool is885(struct atp_unit *atp) { return atp->pdev->device == ATP885_DEVID; } static irqreturn_t atp870u_intr_handle(int irq, void *dev_id) { unsigned long flags; unsigned short int id; unsigned char i, j, c, target_id, lun,cmdp; unsigned char *prd; struct scsi_cmnd *workreq; unsigned long adrcnt, k; #ifdef ED_DBGP unsigned long l; #endif struct Scsi_Host *host = dev_id; struct atp_unit *dev = (struct atp_unit *)&host->hostdata; for (c = 0; c < 2; c++) { j = atp_readb_io(dev, c, 0x1f); if ((j & 0x80) != 0) break; dev->in_int[c] = 0; } if ((j & 0x80) == 0) return IRQ_NONE; #ifdef ED_DBGP printk("atp870u_intr_handle enter\n"); #endif dev->in_int[c] = 1; cmdp = atp_readb_io(dev, c, 0x10); if (dev->working[c] != 0) { if 
(is885(dev)) { if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0) atp_writeb_io(dev, c, 0x16, (atp_readb_io(dev, c, 0x16) | 0x80)); } if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0) { for (k=0; k < 1000; k++) { if ((atp_readb_pci(dev, c, 2) & 0x08) == 0) break; if ((atp_readb_pci(dev, c, 2) & 0x01) == 0) break; } } atp_writeb_pci(dev, c, 0, 0x00); i = atp_readb_io(dev, c, 0x17); if (is885(dev)) atp_writeb_pci(dev, c, 2, 0x06); target_id = atp_readb_io(dev, c, 0x15); /* * Remap wide devices onto id numbers */ if ((target_id & 0x40) != 0) { target_id = (target_id & 0x07) | 0x08; } else { target_id &= 0x07; } if ((j & 0x40) != 0) { if (dev->last_cmd[c] == 0xff) { dev->last_cmd[c] = target_id; } dev->last_cmd[c] |= 0x40; } if (is885(dev)) dev->r1f[c][target_id] |= j; #ifdef ED_DBGP printk("atp870u_intr_handle status = %x\n",i); #endif if (i == 0x85) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (is885(dev)) { adrcnt = 0; ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); if (dev->id[c][target_id].last_len != adrcnt) { k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; } #ifdef ED_DBGP printk("dev->id[c][target_id].last_len = %d " "dev->id[c][target_id].tran_len = %d\n", dev->id[c][target_id].last_len, dev->id[c][target_id].tran_len); #endif } /* * Flip wide */ if (dev->wide_id[c] != 0) { atp_writeb_io(dev, c, 0x1b, 0x01); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) atp_writeb_io(dev, c, 0x1b, 0x01); } /* * Issue more commands */ spin_lock_irqsave(dev->host->host_lock, flags); if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870\n"); #endif send_s870(dev,c); } spin_unlock_irqrestore(dev->host->host_lock, flags); /* * Done */ dev->in_int[c] = 0; #ifdef ED_DBGP printk("Status 0x85 return\n"); #endif return IRQ_HANDLED; } if (i == 0x40) { dev->last_cmd[c] |= 0x40; dev->in_int[c] = 0; return IRQ_HANDLED; } if (i == 0x21) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } adrcnt = 0; ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; atp_writeb_io(dev, c, 0x10, 0x41); atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; return IRQ_HANDLED; } if (is885(dev)) { if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) { if ((i == 0x4c) || (i == 0x8c)) i=0x48; else i=0x49; } } if ((i == 0x80) || (i == 0x8f)) { #ifdef ED_DBGP printk(KERN_DEBUG "Device reselect\n"); #endif lun = 0; if (cmdp == 0x44 || i == 0x80) lun = atp_readb_io(dev, c, 0x1d) & 0x07; else { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (cmdp == 0x41) { #ifdef ED_DBGP printk("cmdp = 0x41\n"); #endif adrcnt = 0; ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; return IRQ_HANDLED; } else { #ifdef ED_DBGP printk("cmdp 
!= 0x41\n"); #endif atp_writeb_io(dev, c, 0x10, 0x46); dev->id[c][target_id].dirct = 0x00; atp_writeb_io(dev, c, 0x12, 0x00); atp_writeb_io(dev, c, 0x13, 0x00); atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; return IRQ_HANDLED; } } if (dev->last_cmd[c] != 0xff) { dev->last_cmd[c] |= 0x40; } if (is885(dev)) { j = atp_readb_base(dev, 0x29) & 0xfe; atp_writeb_base(dev, 0x29, j); } else atp_writeb_io(dev, c, 0x10, 0x45); target_id = atp_readb_io(dev, c, 0x16); /* * Remap wide identifiers */ if ((target_id & 0x10) != 0) { target_id = (target_id & 0x07) | 0x08; } else { target_id &= 0x07; } if (is885(dev)) atp_writeb_io(dev, c, 0x10, 0x45); workreq = dev->id[c][target_id].curr_req; #ifdef ED_DBGP scmd_printk(KERN_DEBUG, workreq, "CDB"); for (l = 0; l < workreq->cmd_len; l++) printk(KERN_DEBUG " %x",workreq->cmnd[l]); printk("\n"); #endif atp_writeb_io(dev, c, 0x0f, lun); atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); adrcnt = dev->id[c][target_id].tran_len; k = dev->id[c][target_id].last_len; atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]); #ifdef ED_DBGP printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, atp_readb_io(dev, c, 0x14), atp_readb_io(dev, c, 0x13), atp_readb_io(dev, c, 0x12)); #endif /* Remap wide */ j = target_id; if (target_id > 7) { j = (j & 0x07) | 0x40; } /* Add direction */ j |= dev->id[c][target_id].dirct; atp_writeb_io(dev, c, 0x15, j); atp_writeb_io(dev, c, 0x16, 0x80); /* enable 32 bit fifo transfer */ if (is885(dev)) { i = atp_readb_pci(dev, c, 1) & 0xf3; //j=workreq->cmnd[0]; if ((workreq->cmnd[0] == READ_6) || (workreq->cmnd[0] == READ_10) || (workreq->cmnd[0] == WRITE_6) || (workreq->cmnd[0] == WRITE_10)) { i |= 0x0c; } atp_writeb_pci(dev, c, 1, i); } else if (is880(dev)) { if ((workreq->cmnd[0] == READ_6) || (workreq->cmnd[0] == READ_10) || (workreq->cmnd[0] == WRITE_6) || (workreq->cmnd[0] == WRITE_10)) atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); else atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f); } else { if ((workreq->cmnd[0] == READ_6) || (workreq->cmnd[0] == READ_10) || (workreq->cmnd[0] == WRITE_6) || (workreq->cmnd[0] == WRITE_10)) atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); else atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3); } j = 0; id = 1; id = id << target_id; /* * Is this a wide device */ if ((id & dev->wide_id[c]) != 0) { j |= 0x01; } atp_writeb_io(dev, c, 0x1b, j); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) atp_writeb_io(dev, c, 0x1b, j); if (dev->id[c][target_id].last_len == 0) { atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; #ifdef ED_DBGP printk("dev->id[c][target_id].last_len = 0\n"); #endif return IRQ_HANDLED; } #ifdef ED_DBGP printk("target_id = %d adrcnt = %d\n",target_id,adrcnt); #endif prd = dev->id[c][target_id].prd_pos; while (adrcnt != 0) { id = ((unsigned short int *)prd)[2]; if (id == 0) { k = 0x10000; } else { k = id; } if (k > adrcnt) { ((unsigned short int *)prd)[2] = (unsigned short int)(k - adrcnt); ((unsigned long *)prd)[0] += adrcnt; adrcnt = 0; dev->id[c][target_id].prd_pos = prd; } else { adrcnt -= k; dev->id[c][target_id].prdaddr += 0x08; prd += 0x08; if (adrcnt == 0) { dev->id[c][target_id].prd_pos = prd; } } } atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr); #ifdef ED_DBGP printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, 
dev->id[c][target_id].prdaddr); #endif if (!is885(dev)) { atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); } /* * Check transfer direction */ if (dev->id[c][target_id].dirct != 0) { atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x01); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct != 0\n"); #endif return IRQ_HANDLED; } atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x09); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct = 0\n"); #endif return IRQ_HANDLED; } /* * Current scsi request on this target */ workreq = dev->id[c][target_id].curr_req; if (i == 0x42 || i == 0x16) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (i == 0x16) { workreq->result = atp_readb_io(dev, c, 0x0f); if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) { printk(KERN_WARNING "AEC67162 CRC ERROR !\n"); workreq->result = SAM_STAT_CHECK_CONDITION; } } else workreq->result = SAM_STAT_CHECK_CONDITION; if (is885(dev)) { j = atp_readb_base(dev, 0x29) | 0x01; atp_writeb_base(dev, 0x29, j); } /* * Complete the command */ scsi_dma_unmap(workreq); spin_lock_irqsave(dev->host->host_lock, flags); scsi_done(workreq); #ifdef ED_DBGP printk("workreq->scsi_done\n"); #endif /* * Clear it off the queue */ dev->id[c][target_id].curr_req = NULL; dev->working[c]--; spin_unlock_irqrestore(dev->host->host_lock, flags); /* * Take it back wide */ if (dev->wide_id[c] != 0) { atp_writeb_io(dev, c, 0x1b, 0x01); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) atp_writeb_io(dev, c, 0x1b, 0x01); } /* * If there is stuff to send and nothing going then send it */ spin_lock_irqsave(dev->host->host_lock, flags); if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(scsi_done)\n"); #endif send_s870(dev,c); } spin_unlock_irqrestore(dev->host->host_lock, flags); dev->in_int[c] = 0; return IRQ_HANDLED; } if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (i == 0x4f) { i = 0x89; } i &= 0x0f; if (i == 0x09) { atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); atp_writeb_io(dev, c, 0x10, 0x41); if (is885(dev)) { k = dev->id[c][target_id].last_len; atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); dev->id[c][target_id].dirct = 0x00; } else { dev->id[c][target_id].dirct = 0x00; } atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x09); dev->in_int[c] = 0; return IRQ_HANDLED; } if (i == 0x08) { atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); atp_writeb_io(dev, c, 0x10, 0x41); if (is885(dev)) { k = dev->id[c][target_id].last_len; atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); } atp_writeb_io(dev, c, 0x15, atp_readb_io(dev, c, 0x15) | 0x20); dev->id[c][target_id].dirct = 0x20; atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x01); dev->in_int[c] = 0; return IRQ_HANDLED; } if (i == 0x0a) atp_writeb_io(dev, c, 0x10, 0x30); else atp_writeb_io(dev, c, 0x10, 0x46); dev->id[c][target_id].dirct = 0x00; atp_writeb_io(dev, c, 0x12, 0x00); atp_writeb_io(dev, c, 0x13, 0x00); atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 
0x08); } dev->in_int[c] = 0; return IRQ_HANDLED; } /** * atp870u_queuecommand_lck - Queue SCSI command * @req_p: request block * * Queue a command to the ATP queue. Called with the host lock held. */ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p) { void (*done)(struct scsi_cmnd *) = scsi_done; unsigned char c; unsigned int m; struct atp_unit *dev; struct Scsi_Host *host; c = scmd_channel(req_p); req_p->sense_buffer[0]=0; scsi_set_resid(req_p, 0); if (scmd_channel(req_p) > 1) { req_p->result = DID_BAD_TARGET << 16; done(req_p); #ifdef ED_DBGP printk("atp870u_queuecommand : req_p->device->channel > 1\n"); #endif return 0; } host = req_p->device->host; dev = (struct atp_unit *)&host->hostdata; m = 1; m = m << scmd_id(req_p); /* * Fake a timeout for missing targets */ if ((m & dev->active_id[c]) == 0) { req_p->result = DID_BAD_TARGET << 16; done(req_p); return 0; } /* * Count new command */ dev->quend[c]++; if (dev->quend[c] >= qcnt) { dev->quend[c] = 0; } /* * Check queue state */ if (dev->quhd[c] == dev->quend[c]) { if (dev->quend[c] == 0) { dev->quend[c] = qcnt; } #ifdef ED_DBGP printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n"); #endif dev->quend[c]--; req_p->result = DID_BUS_BUSY << 16; done(req_p); return 0; } dev->quereq[c][dev->quend[c]] = req_p; #ifdef ED_DBGP printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x " "dev->in_int[%d] = %d dev->in_snd[%d] = %d\n", dev->ioport[c], atp_readb_io(dev, c, 0x1c), c, dev->in_int[c],c,dev->in_snd[c]); #endif if ((atp_readb_io(dev, c, 0x1c) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(atp870u_queuecommand)\n"); #endif send_s870(dev,c); } #ifdef ED_DBGP printk("atp870u_queuecommand : exit\n"); #endif return 0; } static DEF_SCSI_QCMD(atp870u_queuecommand) /* * send_s870 - send a command to the controller * * On entry there is work queued to be done. We move some of that work to the * controller itself. * * Caller holds the host lock. 
*/ static void send_s870(struct atp_unit *dev, unsigned char c) { struct scsi_cmnd *workreq = NULL; unsigned int i;//,k; unsigned char j, target_id; unsigned char *prd; unsigned short int w; unsigned long l, bttl = 0; unsigned long sg_count; if (dev->in_snd[c] != 0) { #ifdef ED_DBGP printk("cmnd in_snd\n"); #endif return; } #ifdef ED_DBGP printk("Sent_s870 enter\n"); #endif dev->in_snd[c] = 1; if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) { dev->last_cmd[c] &= 0x0f; workreq = dev->id[c][dev->last_cmd[c]].curr_req; if (!workreq) { dev->last_cmd[c] = 0xff; if (dev->quhd[c] == dev->quend[c]) { dev->in_snd[c] = 0; return; } } } if (!workreq) { if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) { dev->in_snd[c] = 0; return; } dev->working[c]++; j = dev->quhd[c]; dev->quhd[c]++; if (dev->quhd[c] >= qcnt) dev->quhd[c] = 0; workreq = dev->quereq[c][dev->quhd[c]]; if (dev->id[c][scmd_id(workreq)].curr_req != NULL) { dev->quhd[c] = j; dev->working[c]--; dev->in_snd[c] = 0; return; } dev->id[c][scmd_id(workreq)].curr_req = workreq; dev->last_cmd[c] = scmd_id(workreq); } if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || atp_readb_io(dev, c, 0x1c) != 0) { #ifdef ED_DBGP printk("Abort to Send\n"); #endif dev->last_cmd[c] |= 0x40; dev->in_snd[c] = 0; return; } #ifdef ED_DBGP printk("OK to Send\n"); scmd_printk(KERN_DEBUG, workreq, "CDB"); for(i=0;i<workreq->cmd_len;i++) { printk(" %x",workreq->cmnd[i]); } printk("\n"); #endif l = scsi_bufflen(workreq); if (is885(dev)) { j = atp_readb_base(dev, 0x29) & 0xfe; atp_writeb_base(dev, 0x29, j); dev->r1f[c][scmd_id(workreq)] = 0; } if (workreq->cmnd[0] == READ_CAPACITY) { if (l > 8) l = 8; } if (workreq->cmnd[0] == TEST_UNIT_READY) { l = 0; } j = 0; target_id = scmd_id(workreq); /* * Wide ? */ w = 1; w = w << target_id; if ((w & dev->wide_id[c]) != 0) { j |= 0x01; } atp_writeb_io(dev, c, 0x1b, j); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) { atp_writeb_pci(dev, c, 0x1b, j); #ifdef ED_DBGP printk("send_s870 while loop 1\n"); #endif } /* * Write the command */ atp_writeb_io(dev, c, 0x00, workreq->cmd_len); atp_writeb_io(dev, c, 0x01, 0x2c); if (is885(dev)) atp_writeb_io(dev, c, 0x02, 0x7f); else atp_writeb_io(dev, c, 0x02, 0xcf); for (i = 0; i < workreq->cmd_len; i++) atp_writeb_io(dev, c, 0x03 + i, workreq->cmnd[i]); atp_writeb_io(dev, c, 0x0f, workreq->device->lun); /* * Write the target */ atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); #ifdef ED_DBGP printk("dev->id[%d][%d].devsp = %2x\n",c,target_id, dev->id[c][target_id].devsp); #endif sg_count = scsi_dma_map(workreq); /* * Write transfer size */ atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]); j = target_id; dev->id[c][j].last_len = l; dev->id[c][j].tran_len = 0; #ifdef ED_DBGP printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len); #endif /* * Flip the wide bits */ if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } /* * Check transfer direction */ if (workreq->sc_data_direction == DMA_TO_DEVICE) atp_writeb_io(dev, c, 0x15, j | 0x20); else atp_writeb_io(dev, c, 0x15, j); atp_writeb_io(dev, c, 0x16, atp_readb_io(dev, c, 0x16) | 0x80); atp_writeb_io(dev, c, 0x16, 0x80); dev->id[c][target_id].dirct = 0; if (l == 0) { if (atp_readb_io(dev, c, 0x1c) == 0) { #ifdef ED_DBGP printk("change SCSI_CMD_REG 0x08\n"); #endif atp_writeb_io(dev, c, 0x18, 0x08); } else dev->last_cmd[c] |= 0x40; dev->in_snd[c] = 0; return; } prd = 
dev->id[c][target_id].prd_table; dev->id[c][target_id].prd_pos = prd; /* * Now write the request list. Either as scatter/gather or as * a linear chain. */ if (l) { struct scatterlist *sgpnt; i = 0; scsi_for_each_sg(workreq, sgpnt, sg_count, j) { bttl = sg_dma_address(sgpnt); l=sg_dma_len(sgpnt); #ifdef ED_DBGP printk("1. bttl %x, l %x\n",bttl, l); #endif while (l > 0x10000) { (((u16 *) (prd))[i + 3]) = 0x0000; (((u16 *) (prd))[i + 2]) = 0x0000; (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); l -= 0x10000; bttl += 0x10000; i += 0x04; } (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); (((u16 *) (prd))[i + 2]) = cpu_to_le16(l); (((u16 *) (prd))[i + 3]) = 0; i += 0x04; } (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000); #ifdef ED_DBGP printk("prd %4x %4x %4x %4x\n", (((unsigned short int *)prd)[0]), (((unsigned short int *)prd)[1]), (((unsigned short int *)prd)[2]), (((unsigned short int *)prd)[3])); printk("2. bttl %x, l %x\n",bttl, l); #endif } #ifdef ED_DBGP printk("send_s870: prdaddr_2 0x%8x target_id %d\n", dev->id[c][target_id].prdaddr,target_id); #endif dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus; atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); if (is885(dev)) { j = atp_readb_pci(dev, c, 1) & 0xf3; if ((workreq->cmnd[0] == READ_6) || (workreq->cmnd[0] == READ_10) || (workreq->cmnd[0] == WRITE_6) || (workreq->cmnd[0] == WRITE_10)) { j |= 0x0c; } atp_writeb_pci(dev, c, 1, j); } else if (is880(dev)) { if ((workreq->cmnd[0] == READ_6) || (workreq->cmnd[0] == READ_10) || (workreq->cmnd[0] == WRITE_6) || (workreq->cmnd[0] == WRITE_10)) atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); else atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f); } else { if ((workreq->cmnd[0] == READ_6) || (workreq->cmnd[0] == READ_10) || (workreq->cmnd[0] == WRITE_6) || (workreq->cmnd[0] == WRITE_10)) atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); else atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3); } if(workreq->sc_data_direction == DMA_TO_DEVICE) { dev->id[c][target_id].dirct = 0x20; if (atp_readb_io(dev, c, 0x1c) == 0) { atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x01); #ifdef ED_DBGP printk( "start DMA(to target)\n"); #endif } else { dev->last_cmd[c] |= 0x40; } dev->in_snd[c] = 0; return; } if (atp_readb_io(dev, c, 0x1c) == 0) { atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x09); #ifdef ED_DBGP printk( "start DMA(to host)\n"); #endif } else { dev->last_cmd[c] |= 0x40; } dev->in_snd[c] = 0; return; } static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val) { unsigned short int i, k; unsigned char j; atp_writew_io(dev, 0, 0x1c, *val); for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ k = atp_readw_io(dev, 0, 0x1c); j = (unsigned char) (k >> 8); if ((k & 0x8000) != 0) /* DB7 all release? */ i = 0; } *val |= 0x4000; /* assert DB6 */ atp_writew_io(dev, 0, 0x1c, *val); *val &= 0xdfff; /* assert DB5 */ atp_writew_io(dev, 0, 0x1c, *val); for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) != 0) /* DB5 all release? */ i = 0; } *val |= 0x8000; /* no DB4-0, assert DB7 */ *val &= 0xe0ff; atp_writew_io(dev, 0, 0x1c, *val); *val &= 0xbfff; /* release DB6 */ atp_writew_io(dev, 0, 0x1c, *val); for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ if ((atp_readw_io(dev, 0, 0x1c) & 0x4000) != 0) /* DB6 all release? 
*/ i = 0; } return j; } static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on) { unsigned char i, j, k; unsigned long n; unsigned short int m, assignid_map, val; unsigned char mbuf[33], quintet[2]; struct atp_unit *dev = (struct atp_unit *)&host->hostdata; static unsigned char g2q_tab[8] = { 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27 }; /* I can't believe we need this before we've even done anything. Remove it * and see if anyone bitches. for (i = 0; i < 0x10; i++) { udelay(0xffff); } */ atp_writeb_io(dev, 0, 1, 0x08); atp_writeb_io(dev, 0, 2, 0x7f); atp_writeb_io(dev, 0, 0x11, 0x20); if ((scam_on & 0x40) == 0) { return; } m = 1; m <<= dev->host_id[0]; j = 16; if (!wide_chip) { m |= 0xff00; j = 8; } assignid_map = m; atp_writeb_io(dev, 0, 0x02, 0x02); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ atp_writeb_io(dev, 0, 0x03, 0); atp_writeb_io(dev, 0, 0x04, 0); atp_writeb_io(dev, 0, 0x05, 0); atp_writeb_io(dev, 0, 0x06, 0); atp_writeb_io(dev, 0, 0x07, 0); atp_writeb_io(dev, 0, 0x08, 0); for (i = 0; i < j; i++) { m = 1; m = m << i; if ((m & assignid_map) != 0) { continue; } atp_writeb_io(dev, 0, 0x0f, 0); atp_writeb_io(dev, 0, 0x12, 0); atp_writeb_io(dev, 0, 0x13, 0); atp_writeb_io(dev, 0, 0x14, 0); if (i > 7) { k = (i & 0x07) | 0x40; } else { k = i; } atp_writeb_io(dev, 0, 0x15, k); if (wide_chip) atp_writeb_io(dev, 0, 0x1b, 0x01); else atp_writeb_io(dev, 0, 0x1b, 0x00); do { atp_writeb_io(dev, 0, 0x18, 0x09); while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0x00) cpu_relax(); k = atp_readb_io(dev, 0, 0x17); if ((k == 0x85) || (k == 0x42)) break; if (k != 0x16) atp_writeb_io(dev, 0, 0x10, 0x41); } while (k != 0x16); if ((k == 0x85) || (k == 0x42)) continue; assignid_map |= m; } atp_writeb_io(dev, 0, 0x02, 0x7f); atp_writeb_io(dev, 0, 0x1b, 0x02); udelay(2); val = 0x0080; /* bsy */ atp_writew_io(dev, 0, 0x1c, val); val |= 0x0040; /* sel */ atp_writew_io(dev, 0, 0x1c, val); val |= 0x0004; /* msg */ atp_writew_io(dev, 0, 0x1c, val); udelay(2); /* 2 deskew delay(45ns*2=90ns) */ val &= 0x007f; /* no bsy */ atp_writew_io(dev, 0, 0x1c, val); msleep(128); val &= 0x00fb; /* after 1ms no msg */ atp_writew_io(dev, 0, 0x1c, val); while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0) ; udelay(2); udelay(100); for (n = 0; n < 0x30000; n++) if ((atp_readb_io(dev, 0, 0x1c) & 0x80) != 0) /* bsy ? */ break; if (n < 0x30000) for (n = 0; n < 0x30000; n++) if ((atp_readb_io(dev, 0, 0x1c) & 0x81) == 0x0081) { udelay(2); val |= 0x8003; /* io,cd,db7 */ atp_writew_io(dev, 0, 0x1c, val); udelay(2); val &= 0x00bf; /* no sel */ atp_writew_io(dev, 0, 0x1c, val); udelay(2); break; } while (1) { /* * The funny division into multiple delays is to accomodate * arches like ARM where udelay() multiplies its argument by * a large number to initialize a loop counter. To avoid * overflow, the maximum supported udelay is 2000 microseconds. * * XXX it would be more polite to find a way to use msleep() */ mdelay(2); udelay(48); if ((atp_readb_io(dev, 0, 0x1c) & 0x80) == 0x00) { /* bsy ? 
*/ atp_writew_io(dev, 0, 0x1c, 0); atp_writeb_io(dev, 0, 0x1b, 0); atp_writeb_io(dev, 0, 0x15, 0); atp_writeb_io(dev, 0, 0x18, 0x09); while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0) cpu_relax(); atp_readb_io(dev, 0, 0x17); return; } val &= 0x00ff; /* synchronization */ val |= 0x3f00; fun_scam(dev, &val); udelay(2); val &= 0x00ff; /* isolation */ val |= 0x2000; fun_scam(dev, &val); udelay(2); i = 8; j = 0; while (1) { if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) == 0) continue; udelay(2); val &= 0x00ff; /* get ID_STRING */ val |= 0x2000; k = fun_scam(dev, &val); if ((k & 0x03) == 0) break; mbuf[j] <<= 0x01; mbuf[j] &= 0xfe; if ((k & 0x02) != 0) mbuf[j] |= 0x01; i--; if (i > 0) continue; j++; i = 8; } /* isolation complete.. */ /* mbuf[32]=0; printk(" \n%x %x %x %s\n ",assignid_map,mbuf[0],mbuf[1],&mbuf[2]); */ i = 15; j = mbuf[0]; if ((j & 0x20) != 0) { /* bit5=1:ID up to 7 */ i = 7; } if ((j & 0x06) != 0) { /* IDvalid? */ k = mbuf[1]; while (1) { m = 1; m <<= k; if ((m & assignid_map) == 0) break; if (k > 0) k--; else break; } } if ((m & assignid_map) != 0) { /* srch from max acceptable ID# */ k = i; /* max acceptable ID# */ while (1) { m = 1; m <<= k; if ((m & assignid_map) == 0) break; if (k > 0) k--; else break; } } /* k=binID#, */ assignid_map |= m; if (k < 8) { quintet[0] = 0x38; /* 1st dft ID<8 */ } else { quintet[0] = 0x31; /* 1st ID>=8 */ } k &= 0x07; quintet[1] = g2q_tab[k]; val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */ m = quintet[0] << 8; val |= m; fun_scam(dev, &val); val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */ m = quintet[1] << 8; val |= m; fun_scam(dev, &val); } } static void atp870u_free_tables(struct Scsi_Host *host) { struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata; int j, k; for (j=0; j < 2; j++) { for (k = 0; k < 16; k++) { if (!atp_dev->id[j][k].prd_table) continue; dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus); atp_dev->id[j][k].prd_table = NULL; } } } static int atp870u_init_tables(struct Scsi_Host *host) { struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata; int c,k; for(c=0;c < 2;c++) { for(k=0;k<16;k++) { atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL); if (!atp_dev->id[c][k].prd_table) { printk("atp870u_init_tables fail\n"); atp870u_free_tables(host); return -ENOMEM; } atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus; atp_dev->id[c][k].devsp=0x20; atp_dev->id[c][k].devtype = 0x7f; atp_dev->id[c][k].curr_req = NULL; } atp_dev->active_id[c] = 0; atp_dev->wide_id[c] = 0; atp_dev->host_id[c] = 0x07; atp_dev->quhd[c] = 0; atp_dev->quend[c] = 0; atp_dev->last_cmd[c] = 0xff; atp_dev->in_snd[c] = 0; atp_dev->in_int[c] = 0; for (k = 0; k < qcnt; k++) { atp_dev->quereq[c][k] = NULL; } for (k = 0; k < 16; k++) { atp_dev->id[c][k].curr_req = NULL; atp_dev->sp[c][k] = 0x04; } } return 0; } static void atp_set_host_id(struct atp_unit *atp, u8 c, u8 host_id) { atp_writeb_io(atp, c, 0, host_id | 0x08); atp_writeb_io(atp, c, 0x18, 0); while ((atp_readb_io(atp, c, 0x1f) & 0x80) == 0) mdelay(1); atp_readb_io(atp, c, 0x17); atp_writeb_io(atp, c, 1, 8); atp_writeb_io(atp, c, 2, 0x7f); atp_writeb_io(atp, c, 0x11, 0x20); } static void atp870_init(struct Scsi_Host *shpnt) { struct atp_unit *atpdev = shost_priv(shpnt); struct pci_dev *pdev = atpdev->pdev; unsigned char k, host_id; u8 scam_on; bool wide_chip = (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision == 4) || (pdev->device == 
PCI_DEVICE_ID_ARTOP_AEC7612UW) || (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612SUW); pci_read_config_byte(pdev, 0x49, &host_id); dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 " "Host Adapter: IO:%lx, IRQ:%d.\n", shpnt->io_port, shpnt->irq); atpdev->ioport[0] = shpnt->io_port; atpdev->pciport[0] = shpnt->io_port + 0x20; host_id &= 0x07; atpdev->host_id[0] = host_id; scam_on = atp_readb_pci(atpdev, 0, 2); atpdev->global_map[0] = atp_readb_base(atpdev, 0x2d); atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x2e); if (atpdev->ultra_map[0] == 0) { scam_on = 0x00; atpdev->global_map[0] = 0x20; atpdev->ultra_map[0] = 0xffff; } if (pdev->revision > 0x07) /* check if atp876 chip */ atp_writeb_base(atpdev, 0x3e, 0x00); /* enable terminator */ k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10; atp_writeb_base(atpdev, 0x3a, k); atp_writeb_base(atpdev, 0x3a, k & 0xdf); msleep(32); atp_writeb_base(atpdev, 0x3a, k); msleep(32); atp_set_host_id(atpdev, 0, host_id); tscam(shpnt, wide_chip, scam_on); atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) | 0x10); atp_is(atpdev, 0, wide_chip, 0); atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) & 0xef); atp_writeb_base(atpdev, 0x3b, atp_readb_base(atpdev, 0x3b) | 0x20); shpnt->max_id = wide_chip ? 16 : 8; shpnt->this_id = host_id; } static void atp880_init(struct Scsi_Host *shpnt) { struct atp_unit *atpdev = shost_priv(shpnt); struct pci_dev *pdev = atpdev->pdev; unsigned char k, m, host_id; unsigned int n; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80); atpdev->ioport[0] = shpnt->io_port + 0x40; atpdev->pciport[0] = shpnt->io_port + 0x28; host_id = atp_readb_base(atpdev, 0x39) >> 4; dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD " "Host Adapter: IO:%lx, IRQ:%d.\n", shpnt->io_port, shpnt->irq); atpdev->host_id[0] = host_id; atpdev->global_map[0] = atp_readb_base(atpdev, 0x35); atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x3c); n = 0x3f09; while (n < 0x4000) { m = 0; atp_writew_base(atpdev, 0x34, n); n += 0x0002; if (atp_readb_base(atpdev, 0x30) == 0xff) break; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); atp_writew_base(atpdev, 0x34, n); n += 0x0002; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); atp_writew_base(atpdev, 0x34, n); n += 0x0002; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); atp_writew_base(atpdev, 0x34, n); n += 0x0002; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); n += 0x0018; } atp_writew_base(atpdev, 0x34, 0); atpdev->ultra_map[0] = 0; atpdev->async[0] = 0; for (k = 0; k < 16; k++) { n = 1 << k; if (atpdev->sp[0][k] > 1) atpdev->ultra_map[0] |= n; else if (atpdev->sp[0][k] == 0) atpdev->async[0] |= n; } atpdev->async[0] = ~(atpdev->async[0]); atp_writeb_base(atpdev, 0x35, atpdev->global_map[0]); k = atp_readb_base(atpdev, 0x38) & 0x80; atp_writeb_base(atpdev, 0x38, k); atp_writeb_base(atpdev, 0x3b, 0x20); msleep(32); atp_writeb_base(atpdev, 0x3b, 0); msleep(32); atp_readb_io(atpdev, 0, 
0x1b); atp_readb_io(atpdev, 0, 0x17); atp_set_host_id(atpdev, 0, host_id); tscam(shpnt, true, atp_readb_base(atpdev, 0x22)); atp_is(atpdev, 0, true, atp_readb_base(atpdev, 0x3f) & 0x40); atp_writeb_base(atpdev, 0x38, 0xb0); shpnt->max_id = 16; shpnt->this_id = host_id; } static void atp885_init(struct Scsi_Host *shpnt) { struct atp_unit *atpdev = shost_priv(shpnt); struct pci_dev *pdev = atpdev->pdev; unsigned char k, m, c; unsigned int n; unsigned char setupdata[2][16]; dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD " "Host Adapter: IO:%lx, IRQ:%d.\n", shpnt->io_port, shpnt->irq); atpdev->ioport[0] = shpnt->io_port + 0x80; atpdev->ioport[1] = shpnt->io_port + 0xc0; atpdev->pciport[0] = shpnt->io_port + 0x40; atpdev->pciport[1] = shpnt->io_port + 0x50; c = atp_readb_base(atpdev, 0x29); atp_writeb_base(atpdev, 0x29, c | 0x04); n = 0x1f80; while (n < 0x2000) { atp_writew_base(atpdev, 0x3c, n); if (atp_readl_base(atpdev, 0x38) == 0xffffffff) break; for (m = 0; m < 2; m++) { atpdev->global_map[m] = 0; for (k = 0; k < 4; k++) { atp_writew_base(atpdev, 0x3c, n++); ((u32 *)&setupdata[m][0])[k] = atp_readl_base(atpdev, 0x38); } for (k = 0; k < 4; k++) { atp_writew_base(atpdev, 0x3c, n++); ((u32 *)&atpdev->sp[m][0])[k] = atp_readl_base(atpdev, 0x38); } n += 8; } } c = atp_readb_base(atpdev, 0x29); atp_writeb_base(atpdev, 0x29, c & 0xfb); for (c = 0; c < 2; c++) { atpdev->ultra_map[c] = 0; atpdev->async[c] = 0; for (k = 0; k < 16; k++) { n = 1 << k; if (atpdev->sp[c][k] > 1) atpdev->ultra_map[c] |= n; else if (atpdev->sp[c][k] == 0) atpdev->async[c] |= n; } atpdev->async[c] = ~(atpdev->async[c]); if (atpdev->global_map[c] == 0) { k = setupdata[c][1]; if ((k & 0x40) != 0) atpdev->global_map[c] |= 0x20; k &= 0x07; atpdev->global_map[c] |= k; if ((setupdata[c][2] & 0x04) != 0) atpdev->global_map[c] |= 0x08; atpdev->host_id[c] = setupdata[c][0] & 0x07; } } k = atp_readb_base(atpdev, 0x28) & 0x8f; k |= 0x10; atp_writeb_base(atpdev, 0x28, k); atp_writeb_pci(atpdev, 0, 1, 0x80); atp_writeb_pci(atpdev, 1, 1, 0x80); msleep(100); atp_writeb_pci(atpdev, 0, 1, 0); atp_writeb_pci(atpdev, 1, 1, 0); msleep(1000); atp_readb_io(atpdev, 0, 0x1b); atp_readb_io(atpdev, 0, 0x17); atp_readb_io(atpdev, 1, 0x1b); atp_readb_io(atpdev, 1, 0x17); k = atpdev->host_id[0]; if (k > 7) k = (k & 0x07) | 0x40; atp_set_host_id(atpdev, 0, k); k = atpdev->host_id[1]; if (k > 7) k = (k & 0x07) | 0x40; atp_set_host_id(atpdev, 1, k); msleep(600); /* this delay used to be called tscam_885() */ dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n"); atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7); atp_writeb_io(atpdev, 0, 0x16, 0x80); dev_info(&pdev->dev, "Scanning Channel B SCSI Device ...\n"); atp_is(atpdev, 1, true, atp_readb_io(atpdev, 1, 0x1b) >> 7); atp_writeb_io(atpdev, 1, 0x16, 0x80); k = atp_readb_base(atpdev, 0x28) & 0xcf; k |= 0xc0; atp_writeb_base(atpdev, 0x28, k); k = atp_readb_base(atpdev, 0x1f) | 0x80; atp_writeb_base(atpdev, 0x1f, k); k = atp_readb_base(atpdev, 0x29) | 0x01; atp_writeb_base(atpdev, 0x29, k); shpnt->max_id = 16; shpnt->max_lun = (atpdev->global_map[0] & 0x07) + 1; shpnt->max_channel = 1; shpnt->this_id = atpdev->host_id[0]; } /* return non-zero on detection */ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *shpnt = NULL; struct atp_unit *atpdev; int err; if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision < 2) { dev_err(&pdev->dev, "ATP850S chips (AEC6710L/F cards) are not supported.\n"); return -ENODEV; } err = 
pci_enable_device(pdev); if (err) goto fail; if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); err = -EIO; goto disable_device; } err = pci_request_regions(pdev, "atp870u"); if (err) goto disable_device; pci_set_master(pdev); err = -ENOMEM; shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); if (!shpnt) goto release_region; atpdev = shost_priv(shpnt); atpdev->host = shpnt; atpdev->pdev = pdev; pci_set_drvdata(pdev, atpdev); shpnt->io_port = pci_resource_start(pdev, 0); shpnt->io_port &= 0xfffffff8; shpnt->n_io_port = pci_resource_len(pdev, 0); atpdev->baseport = shpnt->io_port; shpnt->unique_id = shpnt->io_port; shpnt->irq = pdev->irq; err = atp870u_init_tables(shpnt); if (err) { dev_err(&pdev->dev, "Unable to allocate tables for Acard controller\n"); goto unregister; } if (is880(atpdev)) atp880_init(shpnt); else if (is885(atpdev)) atp885_init(shpnt); else atp870_init(shpnt); err = request_irq(shpnt->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt); if (err) { dev_err(&pdev->dev, "Unable to allocate IRQ %d.\n", shpnt->irq); goto free_tables; } err = scsi_add_host(shpnt, &pdev->dev); if (err) goto scsi_add_fail; scsi_scan_host(shpnt); return 0; scsi_add_fail: free_irq(shpnt->irq, shpnt); free_tables: atp870u_free_tables(shpnt); unregister: scsi_host_put(shpnt); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } /* The abort command does not leave the device in a clean state where it is available to be used again. Until this gets worked out, we will leave it commented out. */ static int atp870u_abort(struct scsi_cmnd * SCpnt) { unsigned char j, k, c; struct scsi_cmnd *workrequ; struct atp_unit *dev; struct Scsi_Host *host; host = SCpnt->device->host; dev = (struct atp_unit *)&host->hostdata; c = scmd_channel(SCpnt); printk(" atp870u: abort Channel = %x \n", c); printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]); printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]); for (j = 0; j < 0x18; j++) { printk(" r%2x=%2x", j, atp_readb_io(dev, c, j)); } printk(" r1c=%2x", atp_readb_io(dev, c, 0x1c)); printk(" r1f=%2x in_snd=%2x ", atp_readb_io(dev, c, 0x1f), dev->in_snd[c]); printk(" d00=%2x", atp_readb_pci(dev, c, 0x00)); printk(" d02=%2x", atp_readb_pci(dev, c, 0x02)); for(j=0;j<16;j++) { if (dev->id[c][j].curr_req != NULL) { workrequ = dev->id[c][j].curr_req; printk("\n que cdb= "); for (k=0; k < workrequ->cmd_len; k++) { printk(" %2x ",workrequ->cmnd[k]); } printk(" last_lenu= %x ",(unsigned int)dev->id[c][j].last_len); } } return SUCCESS; } static const char *atp870u_info(struct Scsi_Host *notused) { static char buffer[128]; strcpy(buffer, "ACARD AEC-6710/6712/67160 PCI Ultra/W/LVD SCSI-3 Adapter Driver V2.6+ac "); return buffer; } static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) { seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n" "Adapter Configuration:\n"); seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port); seq_printf(m, " IRQ: %d\n", HBAptr->irq); return 0; } static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev, sector_t capacity, int *ip) { int heads, sectors, cylinders; heads = 64; sectors = 32; cylinders = (unsigned long)capacity / (heads * sectors); if (cylinders > 1024) { heads = 255; sectors = 63; cylinders = (unsigned long)capacity / (heads * sectors); } ip[0] = heads; ip[1] = sectors; ip[2] = cylinders; return 0; } static void atp870u_remove (struct 
pci_dev *pdev)
{
	struct atp_unit *devext = pci_get_drvdata(pdev);
	struct Scsi_Host *pshost = devext->host;

	scsi_remove_host(pshost);
	free_irq(pshost->irq, pshost);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	atp870u_free_tables(pshost);
	scsi_host_put(pshost);
}

MODULE_LICENSE("GPL");

static const struct scsi_host_template atp870u_template = {
	.module = THIS_MODULE,
	.name = "atp870u" /* name */,
	.proc_name = "atp870u",
	.show_info = atp870u_show_info,
	.info = atp870u_info /* info */,
	.queuecommand = atp870u_queuecommand /* queuecommand */,
	.eh_abort_handler = atp870u_abort /* abort */,
	.bios_param = atp870u_biosparam /* biosparm */,
	.can_queue = qcnt /* can_queue */,
	.this_id = 7 /* SCSI ID */,
	.sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
	.max_sectors = ATP870U_MAX_SECTORS,
};

static struct pci_device_id atp870u_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612S) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612D) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612SUW) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_8060) },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, atp870u_id_table);

static struct pci_driver atp870u_driver = {
	.id_table = atp870u_id_table,
	.name = "atp870u",
	.probe = atp870u_probe,
	.remove = atp870u_remove,
};

module_pci_driver(atp870u_driver);

static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode)
{
	unsigned char i, j, k, rmb, n;
	unsigned short int m;
	static unsigned char mbuf[512];
	static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 };
	static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 };
	static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
	unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e };
	static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
	static unsigned char synw_870[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 };
	unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e };
	static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 };
	static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 };

	for (i = 0; i < 16; i++) {
		if (!wide_chip && (i > 7))
			break;
		m = 1;
		m = m << i;
		if ((m & dev->active_id[c]) != 0) {
			continue;
		}
		if (i == dev->host_id[c]) {
			printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]);
			continue;
		}
		atp_writeb_io(dev, c, 0x1b, wide_chip ?
0x01 : 0x00); atp_writeb_io(dev, c, 1, 0x08); atp_writeb_io(dev, c, 2, 0x7f); atp_writeb_io(dev, c, 3, satn[0]); atp_writeb_io(dev, c, 4, satn[1]); atp_writeb_io(dev, c, 5, satn[2]); atp_writeb_io(dev, c, 6, satn[3]); atp_writeb_io(dev, c, 7, satn[4]); atp_writeb_io(dev, c, 8, satn[5]); atp_writeb_io(dev, c, 0x0f, 0); atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); atp_writeb_io(dev, c, 0x12, 0); atp_writeb_io(dev, c, 0x13, satn[6]); atp_writeb_io(dev, c, 0x14, satn[7]); j = i; if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } atp_writeb_io(dev, c, 0x15, j); atp_writeb_io(dev, c, 0x18, satn[8]); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); dev->active_id[c] |= m; atp_writeb_io(dev, c, 0x10, 0x30); if (is885(dev) || is880(dev)) atp_writeb_io(dev, c, 0x14, 0x00); else /* result of is870() merge - is this a bug? */ atp_writeb_io(dev, c, 0x04, 0x00); phase_cmd: atp_writeb_io(dev, c, 0x18, 0x08); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { atp_writeb_io(dev, c, 0x10, 0x41); goto phase_cmd; } sel_ok: atp_writeb_io(dev, c, 3, inqd[0]); atp_writeb_io(dev, c, 4, inqd[1]); atp_writeb_io(dev, c, 5, inqd[2]); atp_writeb_io(dev, c, 6, inqd[3]); atp_writeb_io(dev, c, 7, inqd[4]); atp_writeb_io(dev, c, 8, inqd[5]); atp_writeb_io(dev, c, 0x0f, 0); atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); atp_writeb_io(dev, c, 0x12, 0); atp_writeb_io(dev, c, 0x13, inqd[6]); atp_writeb_io(dev, c, 0x14, inqd[7]); atp_writeb_io(dev, c, 0x18, inqd[8]); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); if (wide_chip) atp_writeb_io(dev, c, 0x1b, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); j = 0; rd_inq_data: k = atp_readb_io(dev, c, 0x1f); if ((k & 0x01) != 0) { mbuf[j++] = atp_readb_io(dev, c, 0x19); goto rd_inq_data; } if ((k & 0x80) == 0) { goto rd_inq_data; } j = atp_readb_io(dev, c, 0x17); if (j == 0x16) { goto inq_ok; } atp_writeb_io(dev, c, 0x10, 0x46); atp_writeb_io(dev, c, 0x12, 0); atp_writeb_io(dev, c, 0x13, 0); atp_writeb_io(dev, c, 0x14, 0); atp_writeb_io(dev, c, 0x18, 0x08); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); if (atp_readb_io(dev, c, 0x17) != 0x16) goto sel_ok; inq_ok: mbuf[36] = 0; printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); dev->id[c][i].devtype = mbuf[0]; rmb = mbuf[1]; n = mbuf[7]; if (!wide_chip) goto not_wide; if ((mbuf[7] & 0x60) == 0) { goto not_wide; } if (is885(dev) || is880(dev)) { if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) goto not_wide; } else { /* result of is870() merge - is this a bug? 
*/ if ((dev->global_map[c] & 0x20) == 0) goto not_wide; } if (lvdmode == 0) { goto chg_wide; } if (dev->sp[c][i] != 0x04) // force u2 { goto chg_wide; } atp_writeb_io(dev, c, 0x1b, 0x01); atp_writeb_io(dev, c, 3, satn[0]); atp_writeb_io(dev, c, 4, satn[1]); atp_writeb_io(dev, c, 5, satn[2]); atp_writeb_io(dev, c, 6, satn[3]); atp_writeb_io(dev, c, 7, satn[4]); atp_writeb_io(dev, c, 8, satn[5]); atp_writeb_io(dev, c, 0x0f, 0); atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); atp_writeb_io(dev, c, 0x12, 0); atp_writeb_io(dev, c, 0x13, satn[6]); atp_writeb_io(dev, c, 0x14, satn[7]); atp_writeb_io(dev, c, 0x18, satn[8]); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); try_u3: j = 0; atp_writeb_io(dev, c, 0x14, 0x09); atp_writeb_io(dev, c, 0x18, 0x20); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) atp_writeb_io(dev, c, 0x19, u3[j++]); cpu_relax(); } while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) cpu_relax(); j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto try_u3; } continue; u3p_out: atp_writeb_io(dev, c, 0x18, 0x20); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) atp_writeb_io(dev, c, 0x19, 0); cpu_relax(); } j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto u3p_out; } continue; u3p_in: atp_writeb_io(dev, c, 0x14, 0x09); atp_writeb_io(dev, c, 0x18, 0x20); k = 0; u3p_in1: j = atp_readb_io(dev, c, 0x1f); if ((j & 0x01) != 0) { mbuf[k++] = atp_readb_io(dev, c, 0x19); goto u3p_in1; } if ((j & 0x80) == 0x00) { goto u3p_in1; } j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto u3p_out; } continue; u3p_cmd: atp_writeb_io(dev, c, 0x10, 0x30); atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00); j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { if (j == 0x4e) { goto u3p_out; } continue; } if (mbuf[0] != 0x01) { goto chg_wide; } if (mbuf[1] != 0x06) { goto chg_wide; } if (mbuf[2] != 0x04) { goto chg_wide; } if (mbuf[3] == 0x09) { m = 1; m = m << i; dev->wide_id[c] |= m; dev->id[c][i].devsp = 0xce; #ifdef ED_DBGP printk("dev->id[%2d][%2d].devsp = %2x\n", c, i, dev->id[c][i].devsp); #endif continue; } chg_wide: atp_writeb_io(dev, c, 0x1b, 0x01); atp_writeb_io(dev, c, 3, satn[0]); atp_writeb_io(dev, c, 4, satn[1]); atp_writeb_io(dev, c, 5, satn[2]); atp_writeb_io(dev, c, 6, satn[3]); atp_writeb_io(dev, c, 7, satn[4]); atp_writeb_io(dev, c, 8, satn[5]); atp_writeb_io(dev, c, 0x0f, 0); atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); atp_writeb_io(dev, c, 0x12, 0); atp_writeb_io(dev, c, 0x13, satn[6]); atp_writeb_io(dev, c, 0x14, satn[7]); atp_writeb_io(dev, c, 0x18, satn[8]); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); try_wide: j = 0; atp_writeb_io(dev, c, 0x14, 0x05); atp_writeb_io(dev, c, 0x18, 0x20); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) atp_writeb_io(dev, c, 0x19, wide[j++]); cpu_relax(); } while ((atp_readb_io(dev, c, 0x17) & 
0x80) == 0x00) cpu_relax(); j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto try_wide; } continue; widep_out: atp_writeb_io(dev, c, 0x18, 0x20); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) atp_writeb_io(dev, c, 0x19, 0); cpu_relax(); } j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_in: atp_writeb_io(dev, c, 0x14, 0xff); atp_writeb_io(dev, c, 0x18, 0x20); k = 0; widep_in1: j = atp_readb_io(dev, c, 0x1f); if ((j & 0x01) != 0) { mbuf[k++] = atp_readb_io(dev, c, 0x19); goto widep_in1; } if ((j & 0x80) == 0x00) { goto widep_in1; } j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_cmd: atp_writeb_io(dev, c, 0x10, 0x30); atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { if (j == 0x4e) { goto widep_out; } continue; } if (mbuf[0] != 0x01) { goto not_wide; } if (mbuf[1] != 0x02) { goto not_wide; } if (mbuf[2] != 0x03) { goto not_wide; } if (mbuf[3] != 0x01) { goto not_wide; } m = 1; m = m << i; dev->wide_id[c] |= m; not_wide: if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { m = 1; m = m << i; if ((dev->async[c] & m) != 0) { goto set_sync; } } continue; set_sync: if ((!is885(dev) && !is880(dev)) || (dev->sp[c][i] == 0x02)) { synu[4] = 0x0c; synuw[4] = 0x0c; } else { if (dev->sp[c][i] >= 0x03) { synu[4] = 0x0a; synuw[4] = 0x0a; } } j = 0; if ((m & dev->wide_id[c]) != 0) { j |= 0x01; } atp_writeb_io(dev, c, 0x1b, j); atp_writeb_io(dev, c, 3, satn[0]); atp_writeb_io(dev, c, 4, satn[1]); atp_writeb_io(dev, c, 5, satn[2]); atp_writeb_io(dev, c, 6, satn[3]); atp_writeb_io(dev, c, 7, satn[4]); atp_writeb_io(dev, c, 8, satn[5]); atp_writeb_io(dev, c, 0x0f, 0); atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); atp_writeb_io(dev, c, 0x12, 0); atp_writeb_io(dev, c, 0x13, satn[6]); atp_writeb_io(dev, c, 0x14, satn[7]); atp_writeb_io(dev, c, 0x18, satn[8]); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); try_sync: j = 0; atp_writeb_io(dev, c, 0x14, 0x06); atp_writeb_io(dev, c, 0x18, 0x20); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) { if ((m & dev->wide_id[c]) != 0) { if (is885(dev) || is880(dev)) { if ((m & dev->ultra_map[c]) != 0) { atp_writeb_io(dev, c, 0x19, synuw[j++]); } else { atp_writeb_io(dev, c, 0x19, synw[j++]); } } else atp_writeb_io(dev, c, 0x19, synw_870[j++]); } else { if ((m & dev->ultra_map[c]) != 0) { atp_writeb_io(dev, c, 0x19, synu[j++]); } else { atp_writeb_io(dev, c, 0x19, synn[j++]); } } } } while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) cpu_relax(); j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto try_sync; } continue; phase_outs: atp_writeb_io(dev, c, 0x18, 0x20); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) { if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0x00) atp_writeb_io(dev, c, 0x19, 0x00); cpu_relax(); } j = 
atp_readb_io(dev, c, 0x17); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_ins: if (is885(dev) || is880(dev)) atp_writeb_io(dev, c, 0x14, 0x06); else atp_writeb_io(dev, c, 0x14, 0xff); atp_writeb_io(dev, c, 0x18, 0x20); k = 0; phase_ins1: j = atp_readb_io(dev, c, 0x1f); if ((j & 0x01) != 0x00) { mbuf[k++] = atp_readb_io(dev, c, 0x19); goto phase_ins1; } if ((j & 0x80) == 0x00) { goto phase_ins1; } while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00); j = atp_readb_io(dev, c, 0x17); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_cmds: atp_writeb_io(dev, c, 0x10, 0x30); tar_dcons: atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { continue; } if (mbuf[0] != 0x01) { continue; } if (mbuf[1] != 0x03) { continue; } if (mbuf[4] == 0x00) { continue; } if (mbuf[3] > 0x64) { continue; } if (is885(dev) || is880(dev)) { if (mbuf[4] > 0x0e) { mbuf[4] = 0x0e; } } else { if (mbuf[4] > 0x0c) { mbuf[4] = 0x0c; } } dev->id[c][i].devsp = mbuf[4]; if (is885(dev) || is880(dev)) if (mbuf[3] < 0x0c) { j = 0xb0; goto set_syn_ok; } if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; } if (mbuf[3] < 0x1a) { j = 0x20; goto set_syn_ok; } if (mbuf[3] < 0x33) { j = 0x40; goto set_syn_ok; } if (mbuf[3] < 0x4c) { j = 0x50; goto set_syn_ok; } j = 0x60; set_syn_ok: dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j; #ifdef ED_DBGP printk("dev->id[%2d][%2d].devsp = %2x\n", c,i,dev->id[c][i].devsp); #endif } }
linux-master
drivers/scsi/atp870u.c
// SPDX-License-Identifier: GPL-2.0-or-later

/* PARISC LASI driver for the 53c700 chip
 *
 * Copyright (C) 2001 by [email protected]
**-----------------------------------------------------------------------------
**
**
**-----------------------------------------------------------------------------
*/

/*
 * Many thanks to Richard Hirst <[email protected]> for patiently
 * debugging this driver on the parisc architecture and suggesting
 * many improvements and bug fixes.
 *
 * Thanks also go to Linuxcare Inc. for providing several PARISC
 * machines for me to debug the driver on.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
#include <asm/delay.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>

#include "53c700.h"

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("lasi700 SCSI Driver");
MODULE_LICENSE("GPL");

#define LASI_700_SVERSION 0x00071
#define LASI_710_SVERSION 0x00082

#define LASI700_ID_TABLE { \
	.hw_type = HPHW_FIO, \
	.sversion = LASI_700_SVERSION, \
	.hversion = HVERSION_ANY_ID, \
	.hversion_rev = HVERSION_REV_ANY_ID, \
}

#define LASI710_ID_TABLE { \
	.hw_type = HPHW_FIO, \
	.sversion = LASI_710_SVERSION, \
	.hversion = HVERSION_ANY_ID, \
	.hversion_rev = HVERSION_REV_ANY_ID, \
}

#define LASI700_CLOCK 25
#define LASI710_CLOCK 40
#define LASI_SCSI_CORE_OFFSET 0x100

static const struct parisc_device_id lasi700_ids[] __initconst = {
	LASI700_ID_TABLE,
	LASI710_ID_TABLE,
	{ 0 }
};

static struct scsi_host_template lasi700_template = {
	.name = "LASI SCSI 53c700",
	.proc_name = "lasi700",
	.this_id = 7,
	.module = THIS_MODULE,
};

MODULE_DEVICE_TABLE(parisc, lasi700_ids);

static int __init lasi700_probe(struct parisc_device *dev)
{
	unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET;
	struct NCR_700_Host_Parameters *hostdata;
	struct Scsi_Host *host;

	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
	if (!hostdata) {
		dev_printk(KERN_ERR, &dev->dev, "Failed to allocate host data\n");
		return -ENOMEM;
	}

	hostdata->dev = &dev->dev;
	dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
	hostdata->base = ioremap(base, 0x100);
	hostdata->differential = 0;

	if (dev->id.sversion == LASI_700_SVERSION) {
		hostdata->clock = LASI700_CLOCK;
		hostdata->force_le_on_be = 1;
	} else {
		hostdata->clock = LASI710_CLOCK;
		hostdata->force_le_on_be = 0;
		hostdata->chip710 = 1;
		hostdata->dmode_extra = DMODE_FC2;
		hostdata->burst_length = 8;
	}

	host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev);
	if (!host)
		goto out_kfree;
	host->this_id = 7;
	host->base = base;
	host->irq = dev->irq;
	if(request_irq(dev->irq, NCR_700_intr, IRQF_SHARED, "lasi700", host)) {
		printk(KERN_ERR "lasi700: request_irq failed!\n");
		goto out_put_host;
	}

	dev_set_drvdata(&dev->dev, host);
	scsi_scan_host(host);

	return 0;

 out_put_host:
	scsi_host_put(host);
 out_kfree:
	iounmap(hostdata->base);
	kfree(hostdata);
	return -ENODEV;
}

static void __exit lasi700_driver_remove(struct parisc_device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
	struct NCR_700_Host_Parameters *hostdata =
		(struct NCR_700_Host_Parameters *)host->hostdata[0];

	scsi_remove_host(host);
	NCR_700_release(host);
	free_irq(host->irq, host);
	iounmap(hostdata->base);
	kfree(hostdata);
}

static struct parisc_driver lasi700_driver __refdata = {
	.name = "lasi_scsi",
	.id_table = lasi700_ids,
	.probe = lasi700_probe,
	.remove = __exit_p(lasi700_driver_remove),
};

static int __init lasi700_init(void)
{
	return register_parisc_driver(&lasi700_driver);
}

static void __exit lasi700_exit(void)
{
	unregister_parisc_driver(&lasi700_driver);
}

module_init(lasi700_init);
module_exit(lasi700_exit);
linux-master
drivers/scsi/lasi700.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * iSCSI transport class definitions * * Copyright (C) IBM Corporation, 2004 * Copyright (C) Mike Christie, 2004 - 2005 * Copyright (C) Dmitry Yusupov, 2004 - 2005 * Copyright (C) Alex Aizman, 2004 - 2005 */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/bsg-lib.h> #include <linux/idr.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/iscsi_if.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_bsg_iscsi.h> #define ISCSI_TRANSPORT_VERSION "2.0-870" #define ISCSI_SEND_MAX_ALLOWED 10 #define CREATE_TRACE_POINTS #include <trace/events/iscsi.h> /* * Export tracepoint symbols to be used by other modules. */ EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp); static int dbg_session; module_param_named(debug_session, dbg_session, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_session, "Turn on debugging for sessions in scsi_transport_iscsi " "module. Set to 1 to turn on, and zero to turn off. Default " "is off."); static int dbg_conn; module_param_named(debug_conn, dbg_conn, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_conn, "Turn on debugging for connections in scsi_transport_iscsi " "module. Set to 1 to turn on, and zero to turn off. Default " "is off."); #define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...) \ do { \ if (dbg_session) \ iscsi_cls_session_printk(KERN_INFO, _session, \ "%s: " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_trans_session, \ &(_session)->dev, \ "%s " dbg_fmt, __func__, ##arg); \ } while (0); #define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \ do { \ if (dbg_conn) \ iscsi_cls_conn_printk(KERN_INFO, _conn, \ "%s: " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_trans_conn, \ &(_conn)->dev, \ "%s " dbg_fmt, __func__, ##arg); \ } while (0); struct iscsi_internal { struct scsi_transport_template t; struct iscsi_transport *iscsi_transport; struct list_head list; struct device dev; struct transport_container conn_cont; struct transport_container session_cont; }; static DEFINE_IDR(iscsi_ep_idr); static DEFINE_MUTEX(iscsi_ep_idr_mutex); static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_conn_cleanup_workq; static DEFINE_IDA(iscsi_sess_ida); /* * list of registered transports and lock that must * be held while accessing list. The iscsi_transport_lock must * be acquired after the rx_queue_mutex. */ static LIST_HEAD(iscsi_transports); static DEFINE_SPINLOCK(iscsi_transport_lock); #define to_iscsi_internal(tmpl) \ container_of(tmpl, struct iscsi_internal, t) #define dev_to_iscsi_internal(_dev) \ container_of(_dev, struct iscsi_internal, dev) static void iscsi_transport_release(struct device *dev) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); kfree(priv); } /* * iscsi_transport_class represents the iscsi_transports that are * registered. 
*/ static struct class iscsi_transport_class = { .name = "iscsi_transport", .dev_release = iscsi_transport_release, }; static ssize_t show_transport_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); if (!capable(CAP_SYS_ADMIN)) return -EACCES; return sysfs_emit(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); } static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); #define show_transport_attr(name, format) \ static ssize_t \ show_transport_##name(struct device *dev, \ struct device_attribute *attr,char *buf) \ { \ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ } \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); show_transport_attr(caps, "0x%x"); static struct attribute *iscsi_transport_attrs[] = { &dev_attr_handle.attr, &dev_attr_caps.attr, NULL, }; static struct attribute_group iscsi_transport_group = { .attrs = iscsi_transport_attrs, }; /* * iSCSI endpoint attrs */ #define iscsi_dev_to_endpoint(_dev) \ container_of(_dev, struct iscsi_endpoint, dev) #define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) static void iscsi_endpoint_release(struct device *dev) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); mutex_lock(&iscsi_ep_idr_mutex); idr_remove(&iscsi_ep_idr, ep->id); mutex_unlock(&iscsi_ep_idr_mutex); kfree(ep); } static struct class iscsi_endpoint_class = { .name = "iscsi_endpoint", .dev_release = iscsi_endpoint_release, }; static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); return sysfs_emit(buf, "%d\n", ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); static struct attribute *iscsi_endpoint_attrs[] = { &dev_attr_ep_handle.attr, NULL, }; static struct attribute_group iscsi_endpoint_group = { .attrs = iscsi_endpoint_attrs, }; struct iscsi_endpoint * iscsi_create_endpoint(int dd_size) { struct iscsi_endpoint *ep; int err, id; ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); if (!ep) return NULL; mutex_lock(&iscsi_ep_idr_mutex); /* * First endpoint id should be 1 to comply with user space * applications (iscsid). */ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); if (id < 0) { mutex_unlock(&iscsi_ep_idr_mutex); printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", id); goto free_ep; } mutex_unlock(&iscsi_ep_idr_mutex); ep->id = id; ep->dev.class = &iscsi_endpoint_class; dev_set_name(&ep->dev, "ep-%d", id); err = device_register(&ep->dev); if (err) goto put_dev; err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); if (err) goto unregister_dev; if (dd_size) ep->dd_data = &ep[1]; return ep; unregister_dev: device_unregister(&ep->dev); return NULL; put_dev: mutex_lock(&iscsi_ep_idr_mutex); idr_remove(&iscsi_ep_idr, id); mutex_unlock(&iscsi_ep_idr_mutex); put_device(&ep->dev); return NULL; free_ep: kfree(ep); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_endpoint); void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) { sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group); device_unregister(&ep->dev); } EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); void iscsi_put_endpoint(struct iscsi_endpoint *ep) { put_device(&ep->dev); } EXPORT_SYMBOL_GPL(iscsi_put_endpoint); /** * iscsi_lookup_endpoint - get ep from handle * @handle: endpoint handle * * Caller must do a iscsi_put_endpoint. */ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) { struct iscsi_endpoint *ep; mutex_lock(&iscsi_ep_idr_mutex); ep = idr_find(&iscsi_ep_idr, handle); if (!ep) goto unlock; get_device(&ep->dev); unlock: mutex_unlock(&iscsi_ep_idr_mutex); return ep; } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); /* * Interface to display network param to sysfs */ static void iscsi_iface_release(struct device *dev) { struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct device *parent = iface->dev.parent; kfree(iface); put_device(parent); } static struct class iscsi_iface_class = { .name = "iscsi_iface", .dev_release = iscsi_iface_release, }; #define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name, _mode, _show, _store) /* iface attrs show */ #define iscsi_iface_attr_show(type, name, param_type, param) \ static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \ struct iscsi_transport *t = iface->transport; \ return t->get_iface_param(iface, param_type, param, buf); \ } \ #define iscsi_iface_net_attr(type, name, param) \ iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \ static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); #define iscsi_iface_attr(type, name, param) \ iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param) \ static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); /* generic read only ipv4 attribute */ iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR); iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW); iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET); iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO); iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en, ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en, ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN); iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN); iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS); iscsi_iface_net_attr(ipv4_iface, grat_arp_en, ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en, ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id, ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID); iscsi_iface_net_attr(ipv4_iface, 
dhcp_req_vendor_id_en, ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en, ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id, ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID); iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en, ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN); iscsi_iface_net_attr(ipv4_iface, fragment_disable, ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE); iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en, ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN); iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL); /* generic read only ipv6 attribute */ iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL); iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER); iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg, ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_autocfg, ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_state, ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE); iscsi_iface_net_attr(ipv6_iface, router_state, ISCSI_NET_PARAM_IPV6_ROUTER_STATE); iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en, ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN); iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN); iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL); iscsi_iface_net_attr(ipv6_iface, traffic_class, ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS); iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT); iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo, ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO); iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time, ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME); iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo, ISCSI_NET_PARAM_IPV6_ND_STALE_TMO); iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt, ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT); iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu, ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU); /* common read only iface attribute */ iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID); iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY); iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED); iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU); iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE); iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN); iscsi_iface_net_attr(iface, tcp_nagle_disable, ISCSI_NET_PARAM_TCP_NAGLE_DISABLE); iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE); iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF); iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE); iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN); iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID); iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN); /* common iscsi specific settings attributes */ iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO); iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN); iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN); iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN); iscsi_iface_attr(iface, 
initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN); iscsi_iface_attr(iface, data_seq_in_order, ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN); iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN); iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL); iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH); iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST); iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T); iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST); iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN); iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN); iscsi_iface_attr(iface, discovery_auth_optional, ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL); iscsi_iface_attr(iface, discovery_logout, ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN); iscsi_iface_attr(iface, strict_login_comp_en, ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN); iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME); static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; int param = -1; if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) param = ISCSI_IFACE_PARAM_HDRDGST_EN; else if (attr == &dev_attr_iface_data_digest.attr) param = ISCSI_IFACE_PARAM_DATADGST_EN; else if (attr == &dev_attr_iface_immediate_data.attr) param = ISCSI_IFACE_PARAM_IMM_DATA_EN; else if (attr == &dev_attr_iface_initial_r2t.attr) param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN; else if (attr == &dev_attr_iface_data_seq_in_order.attr) param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN; else if (attr == &dev_attr_iface_data_pdu_in_order.attr) param = ISCSI_IFACE_PARAM_PDU_INORDER_EN; else if (attr == &dev_attr_iface_erl.attr) param = ISCSI_IFACE_PARAM_ERL; else if (attr == &dev_attr_iface_max_recv_dlength.attr) param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH; else if (attr == &dev_attr_iface_first_burst_len.attr) param = ISCSI_IFACE_PARAM_FIRST_BURST; else if (attr == &dev_attr_iface_max_outstanding_r2t.attr) param = ISCSI_IFACE_PARAM_MAX_R2T; else if (attr == &dev_attr_iface_max_burst_len.attr) param = ISCSI_IFACE_PARAM_MAX_BURST; else if (attr == &dev_attr_iface_chap_auth.attr) param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN; else if (attr == &dev_attr_iface_bidi_chap.attr) param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN; else if (attr == &dev_attr_iface_discovery_auth_optional.attr) param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL; else if (attr == &dev_attr_iface_discovery_logout.attr) param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN; else if (attr == &dev_attr_iface_strict_login_comp_en.attr) param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; if (param != -1) return t->attr_is_visible(ISCSI_IFACE_PARAM, param); if (attr == &dev_attr_iface_enabled.attr) param = ISCSI_NET_PARAM_IFACE_ENABLE; else if (attr == &dev_attr_iface_vlan_id.attr) param = ISCSI_NET_PARAM_VLAN_ID; else if (attr == &dev_attr_iface_vlan_priority.attr) param = ISCSI_NET_PARAM_VLAN_PRIORITY; else if (attr == &dev_attr_iface_vlan_enabled.attr) param = ISCSI_NET_PARAM_VLAN_ENABLED; else if (attr == &dev_attr_iface_mtu.attr) param = ISCSI_NET_PARAM_MTU; else if (attr == &dev_attr_iface_port.attr) param = 
ISCSI_NET_PARAM_PORT; else if (attr == &dev_attr_iface_ipaddress_state.attr) param = ISCSI_NET_PARAM_IPADDR_STATE; else if (attr == &dev_attr_iface_delayed_ack_en.attr) param = ISCSI_NET_PARAM_DELAYED_ACK_EN; else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; else if (attr == &dev_attr_iface_tcp_wsf.attr) param = ISCSI_NET_PARAM_TCP_WSF; else if (attr == &dev_attr_iface_tcp_timer_scale.attr) param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; else if (attr == &dev_attr_iface_cache_id.attr) param = ISCSI_NET_PARAM_CACHE_ID; else if (attr == &dev_attr_iface_redirect_en.attr) param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; else if (attr == &dev_attr_ipv4_iface_gateway.attr) param = ISCSI_NET_PARAM_IPV4_GW; else if (attr == &dev_attr_ipv4_iface_subnet.attr) param = ISCSI_NET_PARAM_IPV4_SUBNET; else if (attr == &dev_attr_ipv4_iface_bootproto.attr) param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; else if (attr == &dev_attr_ipv4_iface_dhcp_dns_address_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN; else if (attr == &dev_attr_ipv4_iface_tos_en.attr) param = ISCSI_NET_PARAM_IPV4_TOS_EN; else if (attr == &dev_attr_ipv4_iface_tos.attr) param = ISCSI_NET_PARAM_IPV4_TOS; else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr) param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID; else if (attr == &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID; else if (attr == &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN; else if (attr == &dev_attr_ipv4_iface_fragment_disable.attr) param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE; else if (attr == &dev_attr_ipv4_iface_incoming_forwarding_en.attr) param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN; else if (attr == &dev_attr_ipv4_iface_ttl.attr) param = ISCSI_NET_PARAM_IPV4_TTL; else return 0; } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { if (attr == &dev_attr_ipv6_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV6_ADDR; else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL; else if (attr == &dev_attr_ipv6_iface_router_addr.attr) param = ISCSI_NET_PARAM_IPV6_ROUTER; else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_state.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE; else if (attr == &dev_attr_ipv6_iface_router_state.attr) param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE; else if (attr == 
&dev_attr_ipv6_iface_grat_neighbor_adv_en.attr) param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN; else if (attr == &dev_attr_ipv6_iface_mld_en.attr) param = ISCSI_NET_PARAM_IPV6_MLD_EN; else if (attr == &dev_attr_ipv6_iface_flow_label.attr) param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL; else if (attr == &dev_attr_ipv6_iface_traffic_class.attr) param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS; else if (attr == &dev_attr_ipv6_iface_hop_limit.attr) param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT; else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr) param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO; else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr) param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME; else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr) param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO; else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr) param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT; else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr) param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU; else return 0; } else { WARN_ONCE(1, "Invalid iface attr"); return 0; } return t->attr_is_visible(ISCSI_NET_PARAM, param); } static struct attribute *iscsi_iface_attrs[] = { &dev_attr_iface_enabled.attr, &dev_attr_iface_vlan_id.attr, &dev_attr_iface_vlan_priority.attr, &dev_attr_iface_vlan_enabled.attr, &dev_attr_ipv4_iface_ipaddress.attr, &dev_attr_ipv4_iface_gateway.attr, &dev_attr_ipv4_iface_subnet.attr, &dev_attr_ipv4_iface_bootproto.attr, &dev_attr_ipv6_iface_ipaddress.attr, &dev_attr_ipv6_iface_link_local_addr.attr, &dev_attr_ipv6_iface_router_addr.attr, &dev_attr_ipv6_iface_ipaddr_autocfg.attr, &dev_attr_ipv6_iface_link_local_autocfg.attr, &dev_attr_iface_mtu.attr, &dev_attr_iface_port.attr, &dev_attr_iface_ipaddress_state.attr, &dev_attr_iface_delayed_ack_en.attr, &dev_attr_iface_tcp_nagle_disable.attr, &dev_attr_iface_tcp_wsf_disable.attr, &dev_attr_iface_tcp_wsf.attr, &dev_attr_iface_tcp_timer_scale.attr, &dev_attr_iface_tcp_timestamp_en.attr, &dev_attr_iface_cache_id.attr, &dev_attr_iface_redirect_en.attr, &dev_attr_iface_def_taskmgmt_tmo.attr, &dev_attr_iface_header_digest.attr, &dev_attr_iface_data_digest.attr, &dev_attr_iface_immediate_data.attr, &dev_attr_iface_initial_r2t.attr, &dev_attr_iface_data_seq_in_order.attr, &dev_attr_iface_data_pdu_in_order.attr, &dev_attr_iface_erl.attr, &dev_attr_iface_max_recv_dlength.attr, &dev_attr_iface_first_burst_len.attr, &dev_attr_iface_max_outstanding_r2t.attr, &dev_attr_iface_max_burst_len.attr, &dev_attr_iface_chap_auth.attr, &dev_attr_iface_bidi_chap.attr, &dev_attr_iface_discovery_auth_optional.attr, &dev_attr_iface_discovery_logout.attr, &dev_attr_iface_strict_login_comp_en.attr, &dev_attr_iface_initiator_name.attr, &dev_attr_ipv4_iface_dhcp_dns_address_en.attr, &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr, &dev_attr_ipv4_iface_tos_en.attr, &dev_attr_ipv4_iface_tos.attr, &dev_attr_ipv4_iface_grat_arp_en.attr, &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr, &dev_attr_ipv4_iface_dhcp_alt_client_id.attr, &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr, &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr, &dev_attr_ipv4_iface_dhcp_vendor_id.attr, &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr, &dev_attr_ipv4_iface_fragment_disable.attr, &dev_attr_ipv4_iface_incoming_forwarding_en.attr, &dev_attr_ipv4_iface_ttl.attr, &dev_attr_ipv6_iface_link_local_state.attr, &dev_attr_ipv6_iface_router_state.attr, &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr, &dev_attr_ipv6_iface_mld_en.attr, &dev_attr_ipv6_iface_flow_label.attr, 
&dev_attr_ipv6_iface_traffic_class.attr, &dev_attr_ipv6_iface_hop_limit.attr, &dev_attr_ipv6_iface_nd_reachable_tmo.attr, &dev_attr_ipv6_iface_nd_rexmit_time.attr, &dev_attr_ipv6_iface_nd_stale_tmo.attr, &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr, &dev_attr_ipv6_iface_router_adv_link_mtu.attr, NULL, }; static struct attribute_group iscsi_iface_group = { .attrs = iscsi_iface_attrs, .is_visible = iscsi_iface_attr_is_visible, }; /* convert iscsi_ipaddress_state values to ascii string name */ static const struct { enum iscsi_ipaddress_state value; char *name; } iscsi_ipaddress_state_names[] = { {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" }, {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" }, {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" }, {ISCSI_IPDDRESS_STATE_VALID, "Valid" }, {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" }, {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" }, {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" }, }; char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state) { int i; char *state = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) { if (iscsi_ipaddress_state_names[i].value == port_state) { state = iscsi_ipaddress_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name); /* convert iscsi_router_state values to ascii string name */ static const struct { enum iscsi_router_state value; char *name; } iscsi_router_state_names[] = { {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" }, {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" }, {ISCSI_ROUTER_STATE_MANUAL, "Manual" }, {ISCSI_ROUTER_STATE_STALE, "Stale" }, }; char *iscsi_get_router_state_name(enum iscsi_router_state router_state) { int i; char *state = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) { if (iscsi_router_state_names[i].value == router_state) { state = iscsi_router_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_router_state_name); struct iscsi_iface * iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t iface_type, uint32_t iface_num, int dd_size) { struct iscsi_iface *iface; int err; iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL); if (!iface) return NULL; iface->transport = transport; iface->iface_type = iface_type; iface->iface_num = iface_num; iface->dev.release = iscsi_iface_release; iface->dev.class = &iscsi_iface_class; /* parent reference released in iscsi_iface_release */ iface->dev.parent = get_device(&shost->shost_gendev); if (iface_type == ISCSI_IFACE_TYPE_IPV4) dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no, iface_num); else dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no, iface_num); err = device_register(&iface->dev); if (err) goto put_dev; err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); if (err) goto unreg_iface; if (dd_size) iface->dd_data = &iface[1]; return iface; unreg_iface: device_unregister(&iface->dev); return NULL; put_dev: put_device(&iface->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_iface); void iscsi_destroy_iface(struct iscsi_iface *iface) { sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group); device_unregister(&iface->dev); } EXPORT_SYMBOL_GPL(iscsi_destroy_iface); /* * Interface to display flash node params to sysfs */ #define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name, _mode, _show, _store) /* flash node session attrs show */ #define iscsi_flashnode_sess_attr_show(type, name, param) \ 
static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_bus_flash_session *fnode_sess = \ iscsi_dev_to_flash_session(dev);\ struct iscsi_transport *t = fnode_sess->transport; \ return t->get_flashnode_param(fnode_sess, param, buf); \ } \ #define iscsi_flashnode_sess_attr(type, name, param) \ iscsi_flashnode_sess_attr_show(type, name, param) \ static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ show_##type##_##name, NULL); /* Flash node session attributes */ iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable, ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE); iscsi_flashnode_sess_attr(fnode, discovery_session, ISCSI_FLASHNODE_DISCOVERY_SESS); iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE); iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN); iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN); iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN); iscsi_flashnode_sess_attr(fnode, data_seq_in_order, ISCSI_FLASHNODE_DATASEQ_INORDER); iscsi_flashnode_sess_attr(fnode, data_pdu_in_order, ISCSI_FLASHNODE_PDU_INORDER); iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN); iscsi_flashnode_sess_attr(fnode, discovery_logout, ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN); iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN); iscsi_flashnode_sess_attr(fnode, discovery_auth_optional, ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL); iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL); iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST); iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT); iscsi_flashnode_sess_attr(fnode, def_time2retain, ISCSI_FLASHNODE_DEF_TIME2RETAIN); iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T); iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID); iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID); iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST); iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo, ISCSI_FLASHNODE_DEF_TASKMGMT_TMO); iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS); iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME); iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT); iscsi_flashnode_sess_attr(fnode, discovery_parent_idx, ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX); iscsi_flashnode_sess_attr(fnode, discovery_parent_type, ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE); iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX); iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX); iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME); iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN); iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD); iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN); iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT); static struct attribute *iscsi_flashnode_sess_attrs[] = { &dev_attr_fnode_auto_snd_tgt_disable.attr, &dev_attr_fnode_discovery_session.attr, &dev_attr_fnode_portal_type.attr, &dev_attr_fnode_entry_enable.attr, &dev_attr_fnode_immediate_data.attr, &dev_attr_fnode_initial_r2t.attr, &dev_attr_fnode_data_seq_in_order.attr, &dev_attr_fnode_data_pdu_in_order.attr, &dev_attr_fnode_chap_auth.attr, &dev_attr_fnode_discovery_logout.attr, 
&dev_attr_fnode_bidi_chap.attr, &dev_attr_fnode_discovery_auth_optional.attr, &dev_attr_fnode_erl.attr, &dev_attr_fnode_first_burst_len.attr, &dev_attr_fnode_def_time2wait.attr, &dev_attr_fnode_def_time2retain.attr, &dev_attr_fnode_max_outstanding_r2t.attr, &dev_attr_fnode_isid.attr, &dev_attr_fnode_tsid.attr, &dev_attr_fnode_max_burst_len.attr, &dev_attr_fnode_def_taskmgmt_tmo.attr, &dev_attr_fnode_targetalias.attr, &dev_attr_fnode_targetname.attr, &dev_attr_fnode_tpgt.attr, &dev_attr_fnode_discovery_parent_idx.attr, &dev_attr_fnode_discovery_parent_type.attr, &dev_attr_fnode_chap_in_idx.attr, &dev_attr_fnode_chap_out_idx.attr, &dev_attr_fnode_username.attr, &dev_attr_fnode_username_in.attr, &dev_attr_fnode_password.attr, &dev_attr_fnode_password_in.attr, &dev_attr_fnode_is_boot_target.attr, NULL, }; static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_bus_flash_session *fnode_sess = iscsi_dev_to_flash_session(dev); struct iscsi_transport *t = fnode_sess->transport; int param; if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) { param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE; } else if (attr == &dev_attr_fnode_discovery_session.attr) { param = ISCSI_FLASHNODE_DISCOVERY_SESS; } else if (attr == &dev_attr_fnode_portal_type.attr) { param = ISCSI_FLASHNODE_PORTAL_TYPE; } else if (attr == &dev_attr_fnode_entry_enable.attr) { param = ISCSI_FLASHNODE_ENTRY_EN; } else if (attr == &dev_attr_fnode_immediate_data.attr) { param = ISCSI_FLASHNODE_IMM_DATA_EN; } else if (attr == &dev_attr_fnode_initial_r2t.attr) { param = ISCSI_FLASHNODE_INITIAL_R2T_EN; } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) { param = ISCSI_FLASHNODE_DATASEQ_INORDER; } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) { param = ISCSI_FLASHNODE_PDU_INORDER; } else if (attr == &dev_attr_fnode_chap_auth.attr) { param = ISCSI_FLASHNODE_CHAP_AUTH_EN; } else if (attr == &dev_attr_fnode_discovery_logout.attr) { param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN; } else if (attr == &dev_attr_fnode_bidi_chap.attr) { param = ISCSI_FLASHNODE_BIDI_CHAP_EN; } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) { param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL; } else if (attr == &dev_attr_fnode_erl.attr) { param = ISCSI_FLASHNODE_ERL; } else if (attr == &dev_attr_fnode_first_burst_len.attr) { param = ISCSI_FLASHNODE_FIRST_BURST; } else if (attr == &dev_attr_fnode_def_time2wait.attr) { param = ISCSI_FLASHNODE_DEF_TIME2WAIT; } else if (attr == &dev_attr_fnode_def_time2retain.attr) { param = ISCSI_FLASHNODE_DEF_TIME2RETAIN; } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) { param = ISCSI_FLASHNODE_MAX_R2T; } else if (attr == &dev_attr_fnode_isid.attr) { param = ISCSI_FLASHNODE_ISID; } else if (attr == &dev_attr_fnode_tsid.attr) { param = ISCSI_FLASHNODE_TSID; } else if (attr == &dev_attr_fnode_max_burst_len.attr) { param = ISCSI_FLASHNODE_MAX_BURST; } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) { param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO; } else if (attr == &dev_attr_fnode_targetalias.attr) { param = ISCSI_FLASHNODE_ALIAS; } else if (attr == &dev_attr_fnode_targetname.attr) { param = ISCSI_FLASHNODE_NAME; } else if (attr == &dev_attr_fnode_tpgt.attr) { param = ISCSI_FLASHNODE_TPGT; } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) { param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX; } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) { param = 
ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE; } else if (attr == &dev_attr_fnode_chap_in_idx.attr) { param = ISCSI_FLASHNODE_CHAP_IN_IDX; } else if (attr == &dev_attr_fnode_chap_out_idx.attr) { param = ISCSI_FLASHNODE_CHAP_OUT_IDX; } else if (attr == &dev_attr_fnode_username.attr) { param = ISCSI_FLASHNODE_USERNAME; } else if (attr == &dev_attr_fnode_username_in.attr) { param = ISCSI_FLASHNODE_USERNAME_IN; } else if (attr == &dev_attr_fnode_password.attr) { param = ISCSI_FLASHNODE_PASSWORD; } else if (attr == &dev_attr_fnode_password_in.attr) { param = ISCSI_FLASHNODE_PASSWORD_IN; } else if (attr == &dev_attr_fnode_is_boot_target.attr) { param = ISCSI_FLASHNODE_IS_BOOT_TGT; } else { WARN_ONCE(1, "Invalid flashnode session attr"); return 0; } return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); } static struct attribute_group iscsi_flashnode_sess_attr_group = { .attrs = iscsi_flashnode_sess_attrs, .is_visible = iscsi_flashnode_sess_attr_is_visible, }; static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = { &iscsi_flashnode_sess_attr_group, NULL, }; static void iscsi_flashnode_sess_release(struct device *dev) { struct iscsi_bus_flash_session *fnode_sess = iscsi_dev_to_flash_session(dev); kfree(fnode_sess->targetname); kfree(fnode_sess->targetalias); kfree(fnode_sess->portal_type); kfree(fnode_sess); } static const struct device_type iscsi_flashnode_sess_dev_type = { .name = "iscsi_flashnode_sess_dev_type", .groups = iscsi_flashnode_sess_attr_groups, .release = iscsi_flashnode_sess_release, }; /* flash node connection attrs show */ #define iscsi_flashnode_conn_attr_show(type, name, param) \ static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\ struct iscsi_bus_flash_session *fnode_sess = \ iscsi_flash_conn_to_flash_session(fnode_conn);\ struct iscsi_transport *t = fnode_conn->transport; \ return t->get_flashnode_param(fnode_sess, param, buf); \ } \ #define iscsi_flashnode_conn_attr(type, name, param) \ iscsi_flashnode_conn_attr_show(type, name, param) \ static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ show_##type##_##name, NULL); /* Flash node connection attributes */ iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6, ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6); iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN); iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN); iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN); iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat, ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT); iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable, ISCSI_FLASHNODE_TCP_NAGLE_DISABLE); iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable, ISCSI_FLASHNODE_TCP_WSF_DISABLE); iscsi_flashnode_conn_attr(fnode, tcp_timer_scale, ISCSI_FLASHNODE_TCP_TIMER_SCALE); iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable, ISCSI_FLASHNODE_TCP_TIMESTAMP_EN); iscsi_flashnode_conn_attr(fnode, fragment_disable, ISCSI_FLASHNODE_IP_FRAG_DISABLE); iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO); iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT); iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR); iscsi_flashnode_conn_attr(fnode, max_recv_dlength, ISCSI_FLASHNODE_MAX_RECV_DLENGTH); iscsi_flashnode_conn_attr(fnode, max_xmit_dlength, ISCSI_FLASHNODE_MAX_XMIT_DLENGTH); iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT); 
iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS); iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC); iscsi_flashnode_conn_attr(fnode, ipv6_flow_label, ISCSI_FLASHNODE_IPV6_FLOW_LABEL); iscsi_flashnode_conn_attr(fnode, redirect_ipaddr, ISCSI_FLASHNODE_REDIRECT_IPADDR); iscsi_flashnode_conn_attr(fnode, max_segment_size, ISCSI_FLASHNODE_MAX_SEGMENT_SIZE); iscsi_flashnode_conn_attr(fnode, link_local_ipv6, ISCSI_FLASHNODE_LINK_LOCAL_IPV6); iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF); iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF); iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN); iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN); static struct attribute *iscsi_flashnode_conn_attrs[] = { &dev_attr_fnode_is_fw_assigned_ipv6.attr, &dev_attr_fnode_header_digest.attr, &dev_attr_fnode_data_digest.attr, &dev_attr_fnode_snack_req.attr, &dev_attr_fnode_tcp_timestamp_stat.attr, &dev_attr_fnode_tcp_nagle_disable.attr, &dev_attr_fnode_tcp_wsf_disable.attr, &dev_attr_fnode_tcp_timer_scale.attr, &dev_attr_fnode_tcp_timestamp_enable.attr, &dev_attr_fnode_fragment_disable.attr, &dev_attr_fnode_max_recv_dlength.attr, &dev_attr_fnode_max_xmit_dlength.attr, &dev_attr_fnode_keepalive_tmo.attr, &dev_attr_fnode_port.attr, &dev_attr_fnode_ipaddress.attr, &dev_attr_fnode_redirect_ipaddr.attr, &dev_attr_fnode_max_segment_size.attr, &dev_attr_fnode_local_port.attr, &dev_attr_fnode_ipv4_tos.attr, &dev_attr_fnode_ipv6_traffic_class.attr, &dev_attr_fnode_ipv6_flow_label.attr, &dev_attr_fnode_link_local_ipv6.attr, &dev_attr_fnode_tcp_xmit_wsf.attr, &dev_attr_fnode_tcp_recv_wsf.attr, &dev_attr_fnode_statsn.attr, &dev_attr_fnode_exp_statsn.attr, NULL, }; static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); struct iscsi_transport *t = fnode_conn->transport; int param; if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) { param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6; } else if (attr == &dev_attr_fnode_header_digest.attr) { param = ISCSI_FLASHNODE_HDR_DGST_EN; } else if (attr == &dev_attr_fnode_data_digest.attr) { param = ISCSI_FLASHNODE_DATA_DGST_EN; } else if (attr == &dev_attr_fnode_snack_req.attr) { param = ISCSI_FLASHNODE_SNACK_REQ_EN; } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) { param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT; } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) { param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE; } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) { param = ISCSI_FLASHNODE_TCP_WSF_DISABLE; } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) { param = ISCSI_FLASHNODE_TCP_TIMER_SCALE; } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) { param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN; } else if (attr == &dev_attr_fnode_fragment_disable.attr) { param = ISCSI_FLASHNODE_IP_FRAG_DISABLE; } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) { param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH; } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) { param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH; } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) { param = ISCSI_FLASHNODE_KEEPALIVE_TMO; } else if (attr == &dev_attr_fnode_port.attr) { param = ISCSI_FLASHNODE_PORT; } else if (attr == &dev_attr_fnode_ipaddress.attr) { param = 
ISCSI_FLASHNODE_IPADDR; } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) { param = ISCSI_FLASHNODE_REDIRECT_IPADDR; } else if (attr == &dev_attr_fnode_max_segment_size.attr) { param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE; } else if (attr == &dev_attr_fnode_local_port.attr) { param = ISCSI_FLASHNODE_LOCAL_PORT; } else if (attr == &dev_attr_fnode_ipv4_tos.attr) { param = ISCSI_FLASHNODE_IPV4_TOS; } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) { param = ISCSI_FLASHNODE_IPV6_TC; } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) { param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL; } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) { param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6; } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) { param = ISCSI_FLASHNODE_TCP_XMIT_WSF; } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) { param = ISCSI_FLASHNODE_TCP_RECV_WSF; } else if (attr == &dev_attr_fnode_statsn.attr) { param = ISCSI_FLASHNODE_STATSN; } else if (attr == &dev_attr_fnode_exp_statsn.attr) { param = ISCSI_FLASHNODE_EXP_STATSN; } else { WARN_ONCE(1, "Invalid flashnode connection attr"); return 0; } return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); } static struct attribute_group iscsi_flashnode_conn_attr_group = { .attrs = iscsi_flashnode_conn_attrs, .is_visible = iscsi_flashnode_conn_attr_is_visible, }; static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = { &iscsi_flashnode_conn_attr_group, NULL, }; static void iscsi_flashnode_conn_release(struct device *dev) { struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); kfree(fnode_conn->ipaddress); kfree(fnode_conn->redirect_ipaddr); kfree(fnode_conn->link_local_ipv6_addr); kfree(fnode_conn); } static const struct device_type iscsi_flashnode_conn_dev_type = { .name = "iscsi_flashnode_conn_dev_type", .groups = iscsi_flashnode_conn_attr_groups, .release = iscsi_flashnode_conn_release, }; static struct bus_type iscsi_flashnode_bus; int iscsi_flashnode_bus_match(struct device *dev, struct device_driver *drv) { if (dev->bus == &iscsi_flashnode_bus) return 1; return 0; } EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); static struct bus_type iscsi_flashnode_bus = { .name = "iscsi_flashnode", .match = &iscsi_flashnode_bus_match, }; /** * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs * @shost: pointer to host data * @index: index of flashnode to add in sysfs * @transport: pointer to transport data * @dd_size: total size to allocate * * Adds a sysfs entry for the flashnode session attributes * * Returns: * pointer to allocated flashnode sess on success * %NULL on failure */ struct iscsi_bus_flash_session * iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_session *fnode_sess; int err; fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); if (!fnode_sess) return NULL; fnode_sess->transport = transport; fnode_sess->target_id = index; fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; fnode_sess->dev.bus = &iscsi_flashnode_bus; fnode_sess->dev.parent = &shost->shost_gendev; dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", shost->host_no, index); err = device_register(&fnode_sess->dev); if (err) goto put_dev; if (dd_size) fnode_sess->dd_data = &fnode_sess[1]; return fnode_sess; put_dev: put_device(&fnode_sess->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); /** * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs * @shost: pointer to host 
data * @fnode_sess: pointer to the parent flashnode session entry * @transport: pointer to transport data * @dd_size: total size to allocate * * Adds a sysfs entry for the flashnode connection attributes * * Returns: * pointer to allocated flashnode conn on success * %NULL on failure */ struct iscsi_bus_flash_conn * iscsi_create_flashnode_conn(struct Scsi_Host *shost, struct iscsi_bus_flash_session *fnode_sess, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_conn *fnode_conn; int err; fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL); if (!fnode_conn) return NULL; fnode_conn->transport = transport; fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type; fnode_conn->dev.bus = &iscsi_flashnode_bus; fnode_conn->dev.parent = &fnode_sess->dev; dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0", shost->host_no, fnode_sess->target_id); err = device_register(&fnode_conn->dev); if (err) goto put_dev; if (dd_size) fnode_conn->dd_data = &fnode_conn[1]; return fnode_conn; put_dev: put_device(&fnode_conn->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); /** * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn * @dev: device to verify * @data: pointer to data containing value to use for verification * * Verifies if the passed device is flashnode conn device * * Returns: * 1 on success * 0 on failure */ static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) { return dev->bus == &iscsi_flashnode_bus; } static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) { device_unregister(&fnode_conn->dev); return 0; } static int flashnode_match_index(struct device *dev, void *data) { struct iscsi_bus_flash_session *fnode_sess = NULL; int ret = 0; if (!iscsi_flashnode_bus_match(dev, NULL)) goto exit_match_index; fnode_sess = iscsi_dev_to_flash_session(dev); ret = (fnode_sess->target_id == *((int *)data)) ? 
			1 : 0;

exit_match_index:
	return ret;
}

/**
 * iscsi_get_flashnode_by_index - finds flashnode session entry by index
 * @shost: pointer to host data
 * @idx: index to match
 *
 * Finds the flashnode session object for the passed index
 *
 * Returns:
 *  pointer to found flashnode session object on success
 *  %NULL on failure
 */
static struct iscsi_bus_flash_session *
iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	struct device *dev;

	dev = device_find_child(&shost->shost_gendev, &idx,
				flashnode_match_index);
	if (dev)
		fnode_sess = iscsi_dev_to_flash_session(dev);

	return fnode_sess;
}

/**
 * iscsi_find_flashnode_sess - finds flashnode session entry
 * @shost: pointer to host data
 * @data: pointer to data containing value to use for comparison
 * @fn: function pointer that does actual comparison
 *
 * Finds the flashnode session object comparing the data passed using logic
 * defined in passed function pointer
 *
 * Returns:
 *  pointer to found flashnode session device object on success
 *  %NULL on failure
 */
struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
			  int (*fn)(struct device *dev, void *data))
{
	return device_find_child(&shost->shost_gendev, data, fn);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);

/**
 * iscsi_find_flashnode_conn - finds flashnode connection entry
 * @fnode_sess: pointer to parent flashnode session entry
 *
 * Finds the first flashnode connection entry below the given flashnode
 * session entry
 *
 * Returns:
 *  pointer to found flashnode connection device object on success
 *  %NULL on failure
 */
struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
{
	return device_find_child(&fnode_sess->dev, NULL,
				 iscsi_is_flashnode_conn_dev);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);

static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
{
	if (!iscsi_is_flashnode_conn_dev(dev, NULL))
		return 0;

	return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
}

/**
 * iscsi_destroy_flashnode_sess - destroy flashnode session entry
 * @fnode_sess: pointer to flashnode session entry to be destroyed
 *
 * Deletes the flashnode session entry and all children flashnode connection
 * entries from sysfs
 */
void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
{
	int err;

	err = device_for_each_child(&fnode_sess->dev, NULL,
				    iscsi_iter_destroy_flashnode_conn_fn);
	if (err)
		pr_err("Could not delete all connections for %s. Error %d.\n",
		       fnode_sess->dev.kobj.name, err);

	device_unregister(&fnode_sess->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);

static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
{
	if (!iscsi_flashnode_bus_match(dev, NULL))
		return 0;

	iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
	return 0;
}

/**
 * iscsi_destroy_all_flashnode - destroy all flashnode session entries
 * @shost: pointer to host data
 *
 * Destroys all the flashnode session entries and all corresponding children
 * flashnode connection entries from sysfs
 */
void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL,
			      iscsi_iter_destroy_flashnode_fn);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);

/*
 * BSG support
 */

/**
 * iscsi_bsg_host_dispatch - Dispatch command to LLD.
* @job: bsg job to be processed */ static int iscsi_bsg_host_dispatch(struct bsg_job *job) { struct Scsi_Host *shost = iscsi_job_to_shost(job); struct iscsi_bsg_request *req = job->request; struct iscsi_bsg_reply *reply = job->reply; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; /* check if we have the msgcode value at least */ if (job->request_len < sizeof(uint32_t)) { ret = -ENOMSG; goto fail_host_msg; } /* Validate the host command */ switch (req->msgcode) { case ISCSI_BSG_HST_VENDOR: cmdlen += sizeof(struct iscsi_bsg_host_vendor); if ((shost->hostt->vendor_id == 0L) || (req->rqst_data.h_vendor.vendor_id != shost->hostt->vendor_id)) { ret = -ESRCH; goto fail_host_msg; } break; default: ret = -EBADR; goto fail_host_msg; } /* check if we really have all the request data needed */ if (job->request_len < cmdlen) { ret = -ENOMSG; goto fail_host_msg; } ret = i->iscsi_transport->bsg_request(job); if (!ret) return 0; fail_host_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); reply->reply_payload_rcv_len = 0; reply->result = ret; job->reply_len = sizeof(uint32_t); bsg_job_done(job, ret, 0); return 0; } /** * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests * @shost: shost for iscsi_host * @ihost: iscsi_cls_host adding the structures to */ static int iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) { struct device *dev = &shost->shost_gendev; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); struct request_queue *q; char bsg_name[20]; if (!i->iscsi_transport->bsg_request) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0); if (IS_ERR(q)) { shost_printk(KERN_ERR, shost, "bsg interface failed to " "initialize - no request queue\n"); return PTR_ERR(q); } __scsi_init_queue(shost, q); ihost->bsg_q = q; return 0; } static int iscsi_setup_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct iscsi_cls_host *ihost = shost->shost_data; memset(ihost, 0, sizeof(*ihost)); mutex_init(&ihost->mutex); iscsi_bsg_host_add(shost, ihost); /* ignore any bsg add error - we just can't do sgio */ return 0; } static int iscsi_remove_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct iscsi_cls_host *ihost = shost->shost_data; bsg_remove_queue(ihost->bsg_q); return 0; } static DECLARE_TRANSPORT_CLASS(iscsi_host_class, "iscsi_host", iscsi_setup_host, iscsi_remove_host, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_session_class, "iscsi_session", NULL, NULL, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, "iscsi_connection", NULL, NULL, NULL); static struct sock *nls; static DEFINE_MUTEX(rx_queue_mutex); static LIST_HEAD(sesslist); static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); static LIST_HEAD(connlist_err); static DEFINE_SPINLOCK(connlock); static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn) { struct iscsi_cls_session *sess = iscsi_dev_to_session(conn->dev.parent); return sess->sid; } /* * Returns the matching session to a given sid */ static struct iscsi_cls_session *iscsi_session_lookup(uint32_t sid) { unsigned long flags; struct iscsi_cls_session *sess; spin_lock_irqsave(&sesslock, flags); list_for_each_entry(sess, 
&sesslist, sess_list) { if (sess->sid == sid) { spin_unlock_irqrestore(&sesslock, flags); return sess; } } spin_unlock_irqrestore(&sesslock, flags); return NULL; } /* * Returns the matching connection to a given sid / cid tuple */ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid) { unsigned long flags; struct iscsi_cls_conn *conn; spin_lock_irqsave(&connlock, flags); list_for_each_entry(conn, &connlist, conn_list) { if ((conn->cid == cid) && (iscsi_conn_get_sid(conn) == sid)) { spin_unlock_irqrestore(&connlock, flags); return conn; } } spin_unlock_irqrestore(&connlock, flags); return NULL; } /* * The following functions can be used by LLDs that allocate * their own scsi_hosts or by software iscsi LLDs */ static struct { int value; char *name; } iscsi_session_state_names[] = { { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" }, { ISCSI_SESSION_FAILED, "FAILED" }, { ISCSI_SESSION_FREE, "FREE" }, }; static const char *iscsi_session_state_name(int state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) { if (iscsi_session_state_names[i].value == state) { name = iscsi_session_state_names[i].name; break; } } return name; } static char *iscsi_session_target_state_name[] = { [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND", [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED", [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED", [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING", }; int iscsi_session_chkready(struct iscsi_cls_session *session) { int err; switch (session->state) { case ISCSI_SESSION_LOGGED_IN: err = 0; break; case ISCSI_SESSION_FAILED: err = DID_IMM_RETRY << 16; break; case ISCSI_SESSION_FREE: err = DID_TRANSPORT_FAILFAST << 16; break; default: err = DID_NO_CONNECT << 16; break; } return err; } EXPORT_SYMBOL_GPL(iscsi_session_chkready); int iscsi_is_session_online(struct iscsi_cls_session *session) { unsigned long flags; int ret = 0; spin_lock_irqsave(&session->lock, flags); if (session->state == ISCSI_SESSION_LOGGED_IN) ret = 1; spin_unlock_irqrestore(&session->lock, flags); return ret; } EXPORT_SYMBOL_GPL(iscsi_is_session_online); static void iscsi_session_release(struct device *dev) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev); struct Scsi_Host *shost; shost = iscsi_session_to_shost(session); scsi_host_put(shost); ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n"); kfree(session); } int iscsi_is_session_dev(const struct device *dev) { return dev->release == iscsi_session_release; } EXPORT_SYMBOL_GPL(iscsi_is_session_dev); static int iscsi_iter_session_fn(struct device *dev, void *data) { void (* fn) (struct iscsi_cls_session *) = data; if (!iscsi_is_session_dev(dev)) return 0; fn(iscsi_dev_to_session(dev)); return 0; } void iscsi_host_for_each_session(struct Scsi_Host *shost, void (*fn)(struct iscsi_cls_session *)) { device_for_each_child(&shost->shost_gendev, fn, iscsi_iter_session_fn); } EXPORT_SYMBOL_GPL(iscsi_host_for_each_session); struct iscsi_scan_data { unsigned int channel; unsigned int id; u64 lun; enum scsi_scan_mode rescan; }; static int iscsi_user_scan_session(struct device *dev, void *data) { struct iscsi_scan_data *scan_data = data; struct iscsi_cls_session *session; struct Scsi_Host *shost; struct iscsi_cls_host *ihost; unsigned long flags; unsigned int id; if (!iscsi_is_session_dev(dev)) return 0; session = iscsi_dev_to_session(dev); ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n"); shost = iscsi_session_to_shost(session); ihost = shost->shost_data; mutex_lock(&ihost->mutex); 
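	/*
	 * Sample the session state and target id under the session lock:
	 * only sessions that are logged in and already own a valid target
	 * id get scanned, and the scan itself runs with the host's mutex
	 * held so it cannot race with an unbind.
	 */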
	spin_lock_irqsave(&session->lock, flags);
	if (session->state != ISCSI_SESSION_LOGGED_IN) {
		spin_unlock_irqrestore(&session->lock, flags);
		goto user_scan_exit;
	}
	id = session->target_id;
	spin_unlock_irqrestore(&session->lock, flags);

	if (id != ISCSI_MAX_TARGET) {
		if ((scan_data->channel == SCAN_WILD_CARD ||
		     scan_data->channel == 0) &&
		    (scan_data->id == SCAN_WILD_CARD ||
		     scan_data->id == id)) {
			scsi_scan_target(&session->dev, 0, id,
					 scan_data->lun, scan_data->rescan);
			spin_lock_irqsave(&session->lock, flags);
			session->target_state = ISCSI_SESSION_TARGET_SCANNED;
			spin_unlock_irqrestore(&session->lock, flags);
		}
	}

user_scan_exit:
	mutex_unlock(&ihost->mutex);
	ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
	return 0;
}

static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
			   uint id, u64 lun)
{
	struct iscsi_scan_data scan_data;

	scan_data.channel = channel;
	scan_data.id = id;
	scan_data.lun = lun;
	scan_data.rescan = SCSI_SCAN_MANUAL;

	return device_for_each_child(&shost->shost_gendev, &scan_data,
				     iscsi_user_scan_session);
}

static void iscsi_scan_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
			container_of(work, struct iscsi_cls_session, scan_work);
	struct iscsi_scan_data scan_data;

	scan_data.channel = 0;
	scan_data.id = SCAN_WILD_CARD;
	scan_data.lun = SCAN_WILD_CARD;
	scan_data.rescan = SCSI_SCAN_RESCAN;

	iscsi_user_scan_session(&session->dev, &scan_data);
}

/**
 * iscsi_block_scsi_eh - block scsi eh until session state has transitioned
 * @cmd: scsi cmd passed to scsi eh handler
 *
 * If the session is down this function will wait for the recovery
 * timer to fire or for the session to be logged back in. If the
 * recovery timer fires then FAST_IO_FAIL is returned. The caller
 * should pass this error value to the scsi eh.
*/ int iscsi_block_scsi_eh(struct scsi_cmnd *cmd) { struct iscsi_cls_session *session = starget_to_session(scsi_target(cmd->device)); unsigned long flags; int ret = 0; spin_lock_irqsave(&session->lock, flags); while (session->state != ISCSI_SESSION_LOGGED_IN) { if (session->state == ISCSI_SESSION_FREE) { ret = FAST_IO_FAIL; break; } spin_unlock_irqrestore(&session->lock, flags); msleep(1000); spin_lock_irqsave(&session->lock, flags); } spin_unlock_irqrestore(&session->lock, flags); return ret; } EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh); static void session_recovery_timedout(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, recovery_work.work); unsigned long flags; iscsi_cls_session_printk(KERN_INFO, session, "session recovery timed out after %d secs\n", session->recovery_tmo); spin_lock_irqsave(&session->lock, flags); switch (session->state) { case ISCSI_SESSION_FAILED: session->state = ISCSI_SESSION_FREE; break; case ISCSI_SESSION_LOGGED_IN: case ISCSI_SESSION_FREE: /* we raced with the unblock's flush */ spin_unlock_irqrestore(&session->lock, flags); return; } spin_unlock_irqrestore(&session->lock, flags); ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); if (session->transport->session_recovery_timedout) session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, unblock_work); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n"); cancel_delayed_work_sync(&session->recovery_work); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_LOGGED_IN; spin_unlock_irqrestore(&session->lock, flags); /* start IO */ scsi_target_unblock(&session->dev, SDEV_RUNNING); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n"); } /** * iscsi_unblock_session - set a session as logged in and start IO. * @session: iscsi session * * Mark a session as ready to accept IO. */ void iscsi_unblock_session(struct iscsi_cls_session *session) { if (!cancel_work_sync(&session->block_work)) cancel_delayed_work_sync(&session->recovery_work); queue_work(session->workq, &session->unblock_work); /* * Blocking the session can be done from any context so we only * queue the block work. Make sure the unblock work has completed * because it flushes/cancels the other works and updates the state. 
*/ flush_work(&session->unblock_work); } EXPORT_SYMBOL_GPL(iscsi_unblock_session); static void __iscsi_block_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, block_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FAILED; spin_unlock_irqrestore(&session->lock, flags); scsi_block_targets(shost, &session->dev); ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); if (session->recovery_tmo >= 0) queue_delayed_work(session->workq, &session->recovery_work, session->recovery_tmo * HZ); } void iscsi_block_session(struct iscsi_cls_session *session) { queue_work(session->workq, &session->block_work); } EXPORT_SYMBOL_GPL(iscsi_block_session); static void __iscsi_unbind_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, unbind_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; unsigned int target_id; bool remove_target = true; ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); spin_lock_irqsave(&session->lock, flags); if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) { remove_target = false; } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); ISCSI_DBG_TRANS_SESSION(session, "Skipping target unbinding: Session is unbound/unbinding.\n"); return; } session->target_state = ISCSI_SESSION_TARGET_UNBINDING; target_id = session->target_id; session->target_id = ISCSI_MAX_TARGET; spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); if (remove_target) scsi_remove_target(&session->dev); if (session->ida_used) ida_free(&iscsi_sess_ida, target_id); iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_UNBOUND; spin_unlock_irqrestore(&session->lock, flags); } static void __iscsi_destroy_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, destroy_work); session->transport->destroy_session(session); } struct iscsi_cls_session * iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, int dd_size) { struct iscsi_cls_session *session; session = kzalloc(sizeof(*session) + dd_size, GFP_KERNEL); if (!session) return NULL; session->transport = transport; session->creator = -1; session->recovery_tmo = 120; session->recovery_tmo_sysfs_override = false; session->state = ISCSI_SESSION_FREE; INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); INIT_LIST_HEAD(&session->sess_list); INIT_WORK(&session->unblock_work, __iscsi_unblock_session); INIT_WORK(&session->block_work, __iscsi_block_session); INIT_WORK(&session->unbind_work, __iscsi_unbind_session); INIT_WORK(&session->scan_work, iscsi_scan_session); INIT_WORK(&session->destroy_work, __iscsi_destroy_session); spin_lock_init(&session->lock); /* this is released in the dev's release function */ scsi_host_get(shost); session->dev.parent = &shost->shost_gendev; session->dev.release = iscsi_session_release; 
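	/*
	 * device_initialize() prepares the embedded struct device for the
	 * later device_add() in iscsi_add_session(). The driver-private
	 * area, if any, was co-allocated right behind the session object,
	 * so dd_data simply points just past the iscsi_cls_session
	 * structure.
	 */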
device_initialize(&session->dev); if (dd_size) session->dd_data = &session[1]; ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n"); return session; } EXPORT_SYMBOL_GPL(iscsi_alloc_session); int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) { struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; int id = 0; int err; session->sid = atomic_add_return(1, &iscsi_session_nr); session->workq = alloc_workqueue("iscsi_ctrl_%d:%d", WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, shost->host_no, session->sid); if (!session->workq) return -ENOMEM; if (target_id == ISCSI_MAX_TARGET) { id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL); if (id < 0) { iscsi_cls_session_printk(KERN_ERR, session, "Failure in Target ID Allocation\n"); err = id; goto destroy_wq; } session->target_id = (unsigned int)id; session->ida_used = true; } else session->target_id = target_id; spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_ALLOCATED; spin_unlock_irqrestore(&session->lock, flags); dev_set_name(&session->dev, "session%u", session->sid); err = device_add(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register session's dev\n"); goto release_ida; } err = transport_register_device(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register transport's dev\n"); goto release_dev; } spin_lock_irqsave(&sesslock, flags); list_add(&session->sess_list, &sesslist); spin_unlock_irqrestore(&sesslock, flags); iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; release_dev: device_del(&session->dev); release_ida: if (session->ida_used) ida_free(&iscsi_sess_ida, session->target_id); destroy_wq: destroy_workqueue(session->workq); return err; } EXPORT_SYMBOL_GPL(iscsi_add_session); /** * iscsi_create_session - create iscsi class session * @shost: scsi host * @transport: iscsi transport * @dd_size: private driver data size * @target_id: which target * * This can be called from a LLD or iscsi_transport. */ struct iscsi_cls_session * iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport, int dd_size, unsigned int target_id) { struct iscsi_cls_session *session; session = iscsi_alloc_session(shost, transport, dd_size); if (!session) return NULL; if (iscsi_add_session(session, target_id)) { iscsi_free_session(session); return NULL; } return session; } EXPORT_SYMBOL_GPL(iscsi_create_session); static void iscsi_conn_release(struct device *dev) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); struct device *parent = conn->dev.parent; ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n"); kfree(conn); put_device(parent); } static int iscsi_is_conn_dev(const struct device *dev) { return dev->release == iscsi_conn_release; } static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) { if (!iscsi_is_conn_dev(dev)) return 0; iscsi_remove_conn(iscsi_dev_to_conn(dev)); return 0; } void iscsi_remove_session(struct iscsi_cls_session *session) { unsigned long flags; int err; ISCSI_DBG_TRANS_SESSION(session, "Removing session\n"); spin_lock_irqsave(&sesslock, flags); if (!list_empty(&session->sess_list)) list_del(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); if (!cancel_work_sync(&session->block_work)) cancel_delayed_work_sync(&session->recovery_work); cancel_work_sync(&session->unblock_work); /* * If we are blocked let commands flow again. 
	 * The lld or iscsi layer should set up the queuecommand to fail
	 * commands. We assume that LLD will not be calling block/unblock
	 * while removing the session.
	 */
	spin_lock_irqsave(&session->lock, flags);
	session->state = ISCSI_SESSION_FREE;
	spin_unlock_irqrestore(&session->lock, flags);

	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);

	/*
	 * qla4xxx can perform its own scans when it runs in kernel only
	 * mode. Make sure to flush those scans.
	 */
	flush_work(&session->scan_work);
	/* flush running unbind operations */
	flush_work(&session->unbind_work);
	__iscsi_unbind_session(&session->unbind_work);

	/* hw iscsi may not have removed all connections from session */
	err = device_for_each_child(&session->dev, NULL,
				    iscsi_iter_destroy_conn_fn);
	if (err)
		iscsi_cls_session_printk(KERN_ERR, session,
					 "Could not delete all connections "
					 "for session. Error %d.\n", err);

	transport_unregister_device(&session->dev);

	destroy_workqueue(session->workq);

	ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
	device_del(&session->dev);
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);

static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
{
	ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");

	switch (flag) {
	case STOP_CONN_RECOVER:
		WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
		break;
	case STOP_CONN_TERM:
		WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
		break;
	default:
		iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
				      flag);
		return;
	}

	conn->transport->stop_conn(conn, flag);
	ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}

static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
{
	struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
	struct iscsi_endpoint *ep;

	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
	WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);

	if (!conn->ep || !session->transport->ep_disconnect)
		return;

	ep = conn->ep;
	conn->ep = NULL;

	session->transport->unbind_conn(conn, is_active);
	session->transport->ep_disconnect(ep);
	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
}

static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
					 struct iscsi_endpoint *ep,
					 bool is_active)
{
	/* Check if this was a conn error and the kernel took ownership */
	spin_lock_irq(&conn->lock);
	if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
		spin_unlock_irq(&conn->lock);
		iscsi_ep_disconnect(conn, is_active);
	} else {
		spin_unlock_irq(&conn->lock);
		ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
		mutex_unlock(&conn->ep_mutex);

		flush_work(&conn->cleanup_work);
		/*
		 * Userspace is now done with the EP so we can release the ref
		 * iscsi_cleanup_conn_work_fn took.
		 */
		iscsi_put_endpoint(ep);
		mutex_lock(&conn->ep_mutex);
	}
}

static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
{
	ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
	/*
	 * For offload, iscsid may not know about the ep like when iscsid is
	 * restarted or for kernel based session shutdown iscsid is not even
	 * up. For these cases, we do the disconnect now.
	 */
	mutex_lock(&conn->ep_mutex);
	if (conn->ep)
		iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
	mutex_unlock(&conn->ep_mutex);

	/*
	 * If this is a termination we have to call stop_conn with that flag
	 * so the correct states get set. If we haven't run the work yet try to
	 * avoid the extra run.
	 */
	if (flag == STOP_CONN_TERM) {
		cancel_work_sync(&conn->cleanup_work);
		iscsi_stop_conn(conn, flag);
	} else {
		/*
		 * Figure out if it was the kernel or userspace initiating this.
*/ spin_lock_irq(&conn->lock); if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); iscsi_stop_conn(conn, flag); } else { spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); flush_work(&conn->cleanup_work); } /* * Only clear for recovery to avoid extra cleanup runs during * termination. */ spin_lock_irq(&conn->lock); clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); spin_unlock_irq(&conn->lock); } ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); return 0; } static void iscsi_cleanup_conn_work_fn(struct work_struct *work) { struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, cleanup_work); struct iscsi_cls_session *session = iscsi_conn_to_session(conn); mutex_lock(&conn->ep_mutex); /* * Get a ref to the ep, so we don't release its ID until after * userspace is done referencing it in iscsi_if_disconnect_bound_ep. */ if (conn->ep) get_device(&conn->ep->dev); iscsi_ep_disconnect(conn, false); if (system_state != SYSTEM_RUNNING) { /* * If the user has set up for the session to never timeout * then hang like they wanted. For all other cases fail right * away since userspace is not going to relogin. */ if (session->recovery_tmo > 0) session->recovery_tmo = 0; } iscsi_stop_conn(conn, STOP_CONN_RECOVER); mutex_unlock(&conn->ep_mutex); ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); } static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data) { struct iscsi_transport *transport; struct iscsi_cls_conn *conn; if (!iscsi_is_conn_dev(dev)) return 0; conn = iscsi_dev_to_conn(dev); transport = conn->transport; if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN) iscsi_if_stop_conn(conn, STOP_CONN_TERM); transport->destroy_conn(conn); return 0; } /** * iscsi_force_destroy_session - destroy a session from the kernel * @session: session to destroy * * Force the destruction of a session from the kernel. This should only be * used when userspace is no longer running during system shutdown. */ void iscsi_force_destroy_session(struct iscsi_cls_session *session) { struct iscsi_transport *transport = session->transport; unsigned long flags; WARN_ON_ONCE(system_state == SYSTEM_RUNNING); spin_lock_irqsave(&sesslock, flags); if (list_empty(&session->sess_list)) { spin_unlock_irqrestore(&sesslock, flags); /* * Conn/ep is already freed. Session is being torn down via * async path. For shutdown we don't care about it so return. 
*/ return; } spin_unlock_irqrestore(&sesslock, flags); device_for_each_child(&session->dev, NULL, iscsi_iter_force_destroy_conn_fn); transport->destroy_session(session); } EXPORT_SYMBOL_GPL(iscsi_force_destroy_session); void iscsi_free_session(struct iscsi_cls_session *session) { ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION); put_device(&session->dev); } EXPORT_SYMBOL_GPL(iscsi_free_session); /** * iscsi_alloc_conn - alloc iscsi class connection * @session: iscsi cls session * @dd_size: private driver data size * @cid: connection id */ struct iscsi_cls_conn * iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) { struct iscsi_transport *transport = session->transport; struct iscsi_cls_conn *conn; conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); if (!conn) return NULL; if (dd_size) conn->dd_data = &conn[1]; mutex_init(&conn->ep_mutex); spin_lock_init(&conn->lock); INIT_LIST_HEAD(&conn->conn_list); INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) goto free_conn; dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid); device_initialize(&conn->dev); conn->dev.parent = &session->dev; conn->dev.release = iscsi_conn_release; return conn; free_conn: kfree(conn); return NULL; } EXPORT_SYMBOL_GPL(iscsi_alloc_conn); /** * iscsi_add_conn - add iscsi class connection * @conn: iscsi cls connection * * This will expose iscsi_cls_conn to sysfs so make sure the related * resources for sysfs attributes are initialized before calling this. */ int iscsi_add_conn(struct iscsi_cls_conn *conn) { int err; unsigned long flags; struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent); err = device_add(&conn->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register connection's dev\n"); return err; } err = transport_register_device(&conn->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register transport's dev\n"); device_del(&conn->dev); return err; } spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); spin_unlock_irqrestore(&connlock, flags); return 0; } EXPORT_SYMBOL_GPL(iscsi_add_conn); /** * iscsi_remove_conn - remove iscsi class connection from sysfs * @conn: iscsi cls connection * * Remove iscsi_cls_conn from sysfs, and wait for previous * read/write of iscsi_cls_conn's attributes in sysfs to finish. 
*/ void iscsi_remove_conn(struct iscsi_cls_conn *conn) { unsigned long flags; spin_lock_irqsave(&connlock, flags); list_del(&conn->conn_list); spin_unlock_irqrestore(&connlock, flags); transport_unregister_device(&conn->dev); device_del(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_remove_conn); void iscsi_put_conn(struct iscsi_cls_conn *conn) { put_device(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_put_conn); void iscsi_get_conn(struct iscsi_cls_conn *conn) { get_device(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_get_conn); /* * iscsi interface functions */ static struct iscsi_internal * iscsi_if_transport_lookup(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; spin_lock_irqsave(&iscsi_transport_lock, flags); list_for_each_entry(priv, &iscsi_transports, list) { if (tt == priv->iscsi_transport) { spin_unlock_irqrestore(&iscsi_transport_lock, flags); return priv; } } spin_unlock_irqrestore(&iscsi_transport_lock, flags); return NULL; } static int iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) { return nlmsg_multicast(nls, skb, 0, group, gfp); } static int iscsi_unicast_skb(struct sk_buff *skb, u32 portid) { return nlmsg_unicast(nls, skb, portid); } int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; char *pdu; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) + data_size); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return -EINVAL; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED); iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " "control PDU: OOM\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_RECV_PDU; ev->r.recv_req.cid = conn->cid; ev->r.recv_req.sid = iscsi_conn_get_sid(conn); pdu = (char*)ev + sizeof(*ev); memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_recv_pdu); int iscsi_offload_mesg(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t type, char *data, uint16_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->type = type; ev->transport_handle = iscsi_handle(transport); switch (type) { case ISCSI_KEVENT_PATH_REQ: ev->r.req_path.host_no = shost->host_no; break; case ISCSI_KEVENT_IF_DOWN: ev->r.notify_if_down.host_no = shost->host_no; break; } memcpy((char *)ev + sizeof(*ev), data, data_size); return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_offload_mesg); void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); unsigned long flags; int state; spin_lock_irqsave(&conn->lock, flags); /* * Userspace will only do a stop call if we are at least bound. 
And, we * only need to do the in kernel cleanup if in the UP state so cmds can * be released to upper layers. If in other states just wait for * userspace to avoid races that can leave the cleanup_work queued. */ state = READ_ONCE(conn->state); switch (state) { case ISCSI_CONN_BOUND: case ISCSI_CONN_UP: if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work); } break; default: ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", state); break; } spin_unlock_irqrestore(&conn->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " "conn error (%d)\n", error); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; ev->r.connerror.error = error; ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", error); } EXPORT_SYMBOL_GPL(iscsi_conn_error_event); void iscsi_conn_login_event(struct iscsi_cls_conn *conn, enum iscsi_conn_state state) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " "conn login (%d)\n", state); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; ev->r.conn_login.state = state; ev->r.conn_login.cid = conn->cid; ev->r.conn_login.sid = iscsi_conn_get_sid(conn); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n", state); } EXPORT_SYMBOL_GPL(iscsi_conn_login_event); void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, enum iscsi_host_event_code code, uint32_t data_size, uint8_t *data) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", host_no, code); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_HOST_EVENT; ev->r.host_event.host_no = host_no; ev->r.host_event.code = code; ev->r.host_event.data_size = data_size; if (data_size) memcpy((char *)ev + sizeof(*ev), data, data_size); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_post_host_event); void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, uint32_t status, uint32_t pid, uint32_t data_size, uint8_t *data) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_PING_COMP; 
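	/*
	 * The ping-completion payload is laid out as the iscsi_uevent header
	 * followed immediately by the raw ping data; the finished skb is then
	 * multicast to the iscsid netlink group.
	 */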
ev->r.ping_comp.host_no = host_no; ev->r.ping_comp.status = status; ev->r.ping_comp.pid = pid; ev->r.ping_comp.data_size = data_size; memcpy((char *)ev + sizeof(*ev), data, data_size); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); static int iscsi_if_send_reply(u32 portid, int type, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; int len = nlmsg_total_size(size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "Could not allocate skb to send reply.\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); memcpy(nlmsg_data(nlh), payload, size); return iscsi_unicast_skb(skb, portid); } static int iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_stats *stats; struct sk_buff *skbstat; struct iscsi_cls_conn *conn; struct nlmsghdr *nlhstat; struct iscsi_uevent *evstat; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * ISCSI_STATS_CUSTOM_MAX); int err = 0; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; conn = iscsi_conn_lookup(ev->u.get_stats.sid, ev->u.get_stats.cid); if (!conn) return -EEXIST; do { int actual_size; skbstat = alloc_skb(len, GFP_ATOMIC); if (!skbstat) { iscsi_cls_conn_printk(KERN_ERR, conn, "can not " "deliver stats: OOM\n"); return -ENOMEM; } nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); evstat = nlmsg_data(nlhstat); memset(evstat, 0, sizeof(*evstat)); evstat->transport_handle = iscsi_handle(conn->transport); evstat->type = nlh->nlmsg_type; evstat->u.get_stats.cid = ev->u.get_stats.cid; evstat->u.get_stats.sid = ev->u.get_stats.sid; stats = (struct iscsi_stats *) ((char*)evstat + sizeof(*evstat)); memset(stats, 0, sizeof(*stats)); transport->get_stats(conn, stats); actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * stats->custom_length); actual_size -= sizeof(*nlhstat); actual_size = nlmsg_msg_size(actual_size); skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } while (err < 0 && err != -ECONNREFUSED); return err; } /** * iscsi_session_event - send session destr. 
completion event * @session: iscsi class session * @event: type of event */ int iscsi_session_event(struct iscsi_cls_session *session, enum iscsi_uevent_e event) { struct iscsi_internal *priv; struct Scsi_Host *shost; struct iscsi_uevent *ev; struct sk_buff *skb; struct nlmsghdr *nlh; int rc, len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(session->transport); if (!priv) return -EINVAL; shost = iscsi_session_to_shost(session); skb = alloc_skb(len, GFP_KERNEL); if (!skb) { iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " "event %u\n", event); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(session->transport); ev->type = event; switch (event) { case ISCSI_KEVENT_DESTROY_SESSION: ev->r.d_session.host_no = shost->host_no; ev->r.d_session.sid = session->sid; break; case ISCSI_KEVENT_CREATE_SESSION: ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; break; case ISCSI_KEVENT_UNBIND_SESSION: ev->r.unbind_session.host_no = shost->host_no; ev->r.unbind_session.sid = session->sid; break; default: iscsi_cls_session_printk(KERN_ERR, session, "Invalid event " "%u.\n", event); kfree_skb(skb); return -EINVAL; } /* * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); if (rc == -ESRCH) iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " "event %u. Check iscsi daemon\n", event); ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n", event, rc); return rc; } EXPORT_SYMBOL_GPL(iscsi_session_event); static int iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, struct iscsi_uevent *ev, pid_t pid, uint32_t initial_cmdsn, uint16_t cmds_max, uint16_t queue_depth) { struct iscsi_transport *transport = priv->iscsi_transport; struct iscsi_cls_session *session; struct Scsi_Host *shost; session = transport->create_session(ep, cmds_max, queue_depth, initial_cmdsn); if (!session) return -ENOMEM; session->creator = pid; shost = iscsi_session_to_shost(session); ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; ISCSI_DBG_TRANS_SESSION(session, "Completed creating transport session\n"); return 0; } static int iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; session = iscsi_session_lookup(ev->u.c_conn.sid); if (!session) { printk(KERN_ERR "iscsi: invalid session %d.\n", ev->u.c_conn.sid); return -EINVAL; } conn = transport->create_conn(session, ev->u.c_conn.cid); if (!conn) { iscsi_cls_session_printk(KERN_ERR, session, "couldn't create a new connection."); return -ENOMEM; } ev->r.c_conn_ret.sid = session->sid; ev->r.c_conn_ret.cid = conn->cid; ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n"); return 0; } static int iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); if (!conn) return -EINVAL; ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n"); flush_work(&conn->cleanup_work); ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); if (transport->destroy_conn) transport->destroy_conn(conn); return 0; } static int iscsi_if_set_param(struct iscsi_transport 
*transport, struct iscsi_uevent *ev, u32 rlen) { char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; int err = 0, value = 0, state; if (ev->u.set_param.len > rlen || ev->u.set_param.len > PAGE_SIZE) return -EINVAL; session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) return -EINVAL; /* data will be regarded as NULL-ended string, do length check */ if (strlen(data) > ev->u.set_param.len) return -EINVAL; switch (ev->u.set_param.param) { case ISCSI_PARAM_SESS_RECOVERY_TMO: sscanf(data, "%d", &value); if (!session->recovery_tmo_sysfs_override) session->recovery_tmo = value; break; default: state = READ_ONCE(conn->state); if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { return -ENOTCONN; } } return err; } static int iscsi_if_ep_connect(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) { struct iscsi_endpoint *ep; struct sockaddr *dst_addr; struct Scsi_Host *shost = NULL; int non_blocking, err = 0; if (!transport->ep_connect) return -EINVAL; if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) { shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no); if (!shost) { printk(KERN_ERR "ep connect failed. Could not find " "host no %u\n", ev->u.ep_connect_through_host.host_no); return -ENODEV; } non_blocking = ev->u.ep_connect_through_host.non_blocking; } else non_blocking = ev->u.ep_connect.non_blocking; dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); ep = transport->ep_connect(shost, dst_addr, non_blocking); if (IS_ERR(ep)) { err = PTR_ERR(ep); goto release_host; } ev->r.ep_connect_ret.handle = ep->id; release_host: if (shost) scsi_host_put(shost); return err; } static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, u64 ep_handle) { struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep; if (!transport->ep_disconnect) return -EINVAL; ep = iscsi_lookup_endpoint(ep_handle); if (!ep) return -EINVAL; conn = ep->conn; if (!conn) { /* * conn was not even bound yet, so we can't get iscsi conn * failures yet. 
*/ transport->ep_disconnect(ep); goto put_ep; } mutex_lock(&conn->ep_mutex); iscsi_if_disconnect_bound_ep(conn, ep, false); mutex_unlock(&conn->ep_mutex); put_ep: iscsi_put_endpoint(ep); return 0; } static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type, u32 rlen) { struct iscsi_endpoint *ep; int rc = 0; switch (msg_type) { case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: if (rlen < sizeof(struct sockaddr)) rc = -EINVAL; else rc = iscsi_if_ep_connect(transport, ev, msg_type); break; case ISCSI_UEVENT_TRANSPORT_EP_POLL: if (!transport->ep_poll) return -EINVAL; ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle); if (!ep) return -EINVAL; ev->r.retcode = transport->ep_poll(ep, ev->u.ep_poll.timeout_ms); iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: rc = iscsi_if_ep_disconnect(transport, ev->u.ep_disconnect.ep_handle); break; } return rc; } static int iscsi_tgt_dscvr(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; if (rlen < sizeof(*dst_addr)) return -EINVAL; if (!transport->tgt_dscvr) return -EINVAL; shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); if (!shost) { printk(KERN_ERR "target discovery could not find host no %u\n", ev->u.tgt_dscvr.host_no); return -ENODEV; } dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type, ev->u.tgt_dscvr.enable, dst_addr); scsi_host_put(shost); return err; } static int iscsi_set_host_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { char *data = (char*)ev + sizeof(*ev); struct Scsi_Host *shost; int err; if (!transport->set_host_param) return -ENOSYS; if (ev->u.set_host_param.len > rlen || ev->u.set_host_param.len > PAGE_SIZE) return -EINVAL; shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", ev->u.set_host_param.host_no); return -ENODEV; } /* see similar check in iscsi_if_set_param() */ if (strlen(data) > ev->u.set_host_param.len) return -EINVAL; err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); scsi_host_put(shost); return err; } static int iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct iscsi_path *params; int err; if (rlen < sizeof(*params)) return -EINVAL; if (!transport->set_path) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_path.host_no); if (!shost) { printk(KERN_ERR "set path could not find host no %u\n", ev->u.set_path.host_no); return -ENODEV; } params = (struct iscsi_path *)((char *)ev + sizeof(*ev)); err = transport->set_path(shost, params); scsi_host_put(shost); return err; } static int iscsi_session_has_conns(int sid) { struct iscsi_cls_conn *conn; unsigned long flags; int found = 0; spin_lock_irqsave(&connlock, flags); list_for_each_entry(conn, &connlist, conn_list) { if (iscsi_conn_get_sid(conn) == sid) { found = 1; break; } } spin_unlock_irqrestore(&connlock, flags); return found; } static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int err; if (!transport->set_iface_param) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_iface_params.host_no); if (!shost) { printk(KERN_ERR "set_iface_params could not find host no %u\n", 
ev->u.set_iface_params.host_no); return -ENODEV; } err = transport->set_iface_param(shost, data, len); scsi_host_put(shost); return err; } static int iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; if (rlen < sizeof(*dst_addr)) return -EINVAL; if (!transport->send_ping) return -ENOSYS; shost = scsi_host_lookup(ev->u.iscsi_ping.host_no); if (!shost) { printk(KERN_ERR "iscsi_ping could not find host no %u\n", ev->u.iscsi_ping.host_no); return -ENODEV; } dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev)); err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num, ev->u.iscsi_ping.iface_type, ev->u.iscsi_ping.payload_size, ev->u.iscsi_ping.pid, dst_addr); scsi_host_put(shost); return err; } static int iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_chap_rec *chap_rec; struct iscsi_internal *priv; struct sk_buff *skbchap; struct nlmsghdr *nlhchap; struct iscsi_uevent *evchap; uint32_t chap_buf_size; int len, err = 0; char *buf; if (!transport->get_chap) return -EINVAL; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); len = nlmsg_total_size(sizeof(*ev) + chap_buf_size); shost = scsi_host_lookup(ev->u.get_chap.host_no); if (!shost) { printk(KERN_ERR "%s: failed. Could not find host no %u\n", __func__, ev->u.get_chap.host_no); return -ENODEV; } do { int actual_size; skbchap = alloc_skb(len, GFP_KERNEL); if (!skbchap) { printk(KERN_ERR "can not deliver chap: OOM\n"); err = -ENOMEM; goto exit_get_chap; } nlhchap = __nlmsg_put(skbchap, 0, 0, 0, (len - sizeof(*nlhchap)), 0); evchap = nlmsg_data(nlhchap); memset(evchap, 0, sizeof(*evchap)); evchap->transport_handle = iscsi_handle(transport); evchap->type = nlh->nlmsg_type; evchap->u.get_chap.host_no = ev->u.get_chap.host_no; evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx; evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries; buf = (char *)evchap + sizeof(*evchap); memset(buf, 0, chap_buf_size); err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, &evchap->u.get_chap.num_entries, buf); actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size); skb_trim(skbchap, NLMSG_ALIGN(actual_size)); nlhchap->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); } while (err < 0 && err != -ECONNREFUSED); exit_get_chap: scsi_host_put(shost); return err; } static int iscsi_set_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int err = 0; if (!transport->set_chap) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_path.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.set_path.host_no); return -ENODEV; } err = transport->set_chap(shost, data, len); scsi_host_put(shost); return err; } static int iscsi_delete_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; int err = 0; if (!transport->delete_chap) return -ENOSYS; shost = scsi_host_lookup(ev->u.delete_chap.host_no); if (!shost) { printk(KERN_ERR "%s could not find host no %u\n", __func__, ev->u.delete_chap.host_no); return -ENODEV; } err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx); scsi_host_put(shost); return err; } static const struct { enum 
iscsi_discovery_parent_type value; char *name; } iscsi_discovery_parent_names[] = { {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" }, {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" }, {ISCSI_DISC_PARENT_ISNS, "isns" }, }; char *iscsi_get_discovery_parent_name(int parent_type) { int i; char *state = "Unknown!"; for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) { if (iscsi_discovery_parent_names[i].value & parent_type) { state = iscsi_discovery_parent_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name); static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { err = -ENOSYS; goto exit_set_fnode; } shost = scsi_host_lookup(ev->u.set_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_set_fnode: return err; } static int iscsi_new_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int index; int err = 0; if (!transport->new_flashnode) { err = -ENOSYS; goto exit_new_fnode; } shost = scsi_host_lookup(ev->u.new_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.new_flashnode.host_no); err = -ENODEV; goto put_host; } index = transport->new_flashnode(shost, data, len); if (index >= 0) ev->r.new_flashnode_ret.flashnode_idx = index; else err = -EIO; put_host: scsi_host_put(shost); exit_new_fnode: return err; } static int iscsi_del_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; uint32_t idx; int err = 0; if (!transport->del_flashnode) { err = -ENOSYS; goto exit_del_fnode; } shost = scsi_host_lookup(ev->u.del_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } idx = ev->u.del_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_del_fnode: return err; } static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->login_flashnode) { err = -ENOSYS; goto exit_login_fnode; } shost = scsi_host_lookup(ev->u.login_flashnode.host_no); if 
(!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } idx = ev->u.login_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_login_fnode: return err; } static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->logout_flashnode) { err = -ENOSYS; goto exit_logout_fnode; } shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } idx = ev->u.logout_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_logout_fnode: return err; } static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_cls_session *session; int err = 0; if (!transport->logout_flashnode_sid) { err = -ENOSYS; goto exit_logout_sid; } shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); if (!session) { pr_err("%s could not find session id %u\n", __func__, ev->u.logout_flashnode_sid.sid); err = -EINVAL; goto put_host; } err = transport->logout_flashnode_sid(session); put_host: scsi_host_put(shost); exit_logout_sid: return err; } static int iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_internal *priv; struct sk_buff *skbhost_stats; struct nlmsghdr *nlhhost_stats; struct iscsi_uevent *evhost_stats; int host_stats_size = 0; int len, err = 0; char *buf; if (!transport->get_host_stats) return -ENOSYS; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; host_stats_size = sizeof(struct iscsi_offload_host_stats); len = nlmsg_total_size(sizeof(*ev) + host_stats_size); shost = scsi_host_lookup(ev->u.get_host_stats.host_no); if (!shost) { pr_err("%s: failed. 
Could not find host no %u\n", __func__, ev->u.get_host_stats.host_no); return -ENODEV; } do { int actual_size; skbhost_stats = alloc_skb(len, GFP_KERNEL); if (!skbhost_stats) { pr_err("cannot deliver host stats: OOM\n"); err = -ENOMEM; goto exit_host_stats; } nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0, (len - sizeof(*nlhhost_stats)), 0); evhost_stats = nlmsg_data(nlhhost_stats); memset(evhost_stats, 0, sizeof(*evhost_stats)); evhost_stats->transport_handle = iscsi_handle(transport); evhost_stats->type = nlh->nlmsg_type; evhost_stats->u.get_host_stats.host_no = ev->u.get_host_stats.host_no; buf = (char *)evhost_stats + sizeof(*evhost_stats); memset(buf, 0, host_stats_size); err = transport->get_host_stats(shost, buf, host_stats_size); if (err) { kfree_skb(skbhost_stats); goto exit_host_stats; } actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); nlhhost_stats->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); } while (err < 0 && err != -ECONNREFUSED); exit_host_stats: scsi_host_put(shost); return err; } static int iscsi_if_transport_conn(struct iscsi_transport *transport, struct nlmsghdr *nlh, u32 pdu_len) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_cls_session *session; struct iscsi_cls_conn *conn = NULL; struct iscsi_endpoint *ep; int err = 0; switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_CONN: return iscsi_if_create_conn(transport, ev); case ISCSI_UEVENT_DESTROY_CONN: return iscsi_if_destroy_conn(transport, ev); case ISCSI_UEVENT_STOP_CONN: conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); if (!conn) return -EINVAL; return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); } /* * The following cmds need to be run under the ep_mutex so in kernel * conn cleanup (ep_disconnect + unbind and conn) is not done while * these are running. They also must not run if we have just run a conn * cleanup because they would set the state in a way that might allow * IO or send IO themselves. 
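* Taking ep_mutex for the rest of this function and re-checking
* ISCSI_CLS_CONN_BIT_CLEANUP under conn->lock below closes that race.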
*/ switch (nlh->nlmsg_type) { case ISCSI_UEVENT_START_CONN: conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid); break; case ISCSI_UEVENT_BIND_CONN: conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); break; case ISCSI_UEVENT_SEND_PDU: conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); break; } if (!conn) return -EINVAL; mutex_lock(&conn->ep_mutex); spin_lock_irq(&conn->lock); if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); mutex_unlock(&conn->ep_mutex); ev->r.retcode = -ENOTCONN; return 0; } spin_unlock_irq(&conn->lock); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_BIND_CONN: session = iscsi_session_lookup(ev->u.b_conn.sid); if (!session) { err = -EINVAL; break; } ev->r.retcode = transport->bind_conn(session, conn, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); if (!ev->r.retcode) WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); if (ev->r.retcode || !transport->ep_connect) break; ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); if (ep) { ep->conn = conn; conn->ep = ep; iscsi_put_endpoint(ep); } else { err = -ENOTCONN; iscsi_cls_conn_printk(KERN_ERR, conn, "Could not set ep conn binding\n"); } break; case ISCSI_UEVENT_START_CONN: ev->r.retcode = transport->start_conn(conn); if (!ev->r.retcode) WRITE_ONCE(conn->state, ISCSI_CONN_UP); break; case ISCSI_UEVENT_SEND_PDU: if ((ev->u.send_pdu.hdr_size > pdu_len) || (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { err = -EINVAL; break; } ev->r.retcode = transport->send_pdu(conn, (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, ev->u.send_pdu.data_size); break; default: err = -ENOSYS; } mutex_unlock(&conn->ep_mutex); return err; } static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; struct iscsi_endpoint *ep = NULL; u32 rlen; if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else *group = ISCSI_NL_GRP_ISCSID; priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) return -EINVAL; transport = priv->iscsi_transport; if (!try_module_get(transport->owner)) return -EINVAL; portid = NETLINK_CB(skb).portid; /* * Even though the remaining payload may not be regarded as nlattr, * (like address or something else), calculate the remaining length * here to ease following length checks. 
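* rlen is therefore the number of payload bytes that follow the
* iscsi_uevent header in this message.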
*/ rlen = nlmsg_attrlen(nlh, sizeof(*ev)); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, portid, ev->u.c_session.initial_cmdsn, ev->u.c_session.cmds_max, ev->u.c_session.queue_depth); break; case ISCSI_UEVENT_CREATE_BOUND_SESSION: ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle); if (!ep) { err = -EINVAL; break; } err = iscsi_if_create_session(priv, ep, ev, portid, ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); if (!session) err = -EINVAL; else if (iscsi_session_has_conns(ev->u.d_session.sid)) err = -EBUSY; else transport->destroy_session(session); break; case ISCSI_UEVENT_DESTROY_SESSION_ASYNC: session = iscsi_session_lookup(ev->u.d_session.sid); if (!session) err = -EINVAL; else if (iscsi_session_has_conns(ev->u.d_session.sid)) err = -EBUSY; else { unsigned long flags; /* Prevent this session from being found again */ spin_lock_irqsave(&sesslock, flags); list_del_init(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); queue_work(system_unbound_wq, &session->destroy_work); } break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); if (session) queue_work(session->workq, &session->unbind_work); else err = -EINVAL; break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_if_set_param(transport, ev, rlen); break; case ISCSI_UEVENT_CREATE_CONN: case ISCSI_UEVENT_DESTROY_CONN: case ISCSI_UEVENT_STOP_CONN: case ISCSI_UEVENT_START_CONN: case ISCSI_UEVENT_BIND_CONN: case ISCSI_UEVENT_SEND_PDU: err = iscsi_if_transport_conn(transport, nlh, rlen); break; case ISCSI_UEVENT_GET_STATS: err = iscsi_if_get_stats(transport, nlh); break; case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: case ISCSI_UEVENT_TRANSPORT_EP_POLL: case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen); break; case ISCSI_UEVENT_TGT_DSCVR: err = iscsi_tgt_dscvr(transport, ev, rlen); break; case ISCSI_UEVENT_SET_HOST_PARAM: err = iscsi_set_host_param(transport, ev, rlen); break; case ISCSI_UEVENT_PATH_UPDATE: err = iscsi_set_path(transport, ev, rlen); break; case ISCSI_UEVENT_SET_IFACE_PARAMS: err = iscsi_set_iface_params(transport, ev, rlen); break; case ISCSI_UEVENT_PING: err = iscsi_send_ping(transport, ev, rlen); break; case ISCSI_UEVENT_GET_CHAP: err = iscsi_get_chap(transport, nlh); break; case ISCSI_UEVENT_DELETE_CHAP: err = iscsi_delete_chap(transport, ev); break; case ISCSI_UEVENT_SET_FLASHNODE_PARAMS: err = iscsi_set_flashnode_param(transport, ev, rlen); break; case ISCSI_UEVENT_NEW_FLASHNODE: err = iscsi_new_flashnode(transport, ev, rlen); break; case ISCSI_UEVENT_DEL_FLASHNODE: err = iscsi_del_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGIN_FLASHNODE: err = iscsi_login_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGOUT_FLASHNODE: err = iscsi_logout_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: err = iscsi_logout_flashnode_sid(transport, ev); break; case ISCSI_UEVENT_SET_CHAP: err = iscsi_set_chap(transport, ev, rlen); break; case ISCSI_UEVENT_GET_HOST_STATS: err = iscsi_get_host_stats(transport, nlh); break; default: err = -ENOSYS; break; } module_put(transport->owner); return err; } /* * Get message from skb. Each message is processed by iscsi_if_recv_msg. 
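* A reply is sent back to the requesting portid unless the handler already
* multicast one itself (GET_STATS, GET_CHAP).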
* Malformed skbs with wrong lengths or invalid creds are not processed. */ static void iscsi_if_rx(struct sk_buff *skb) { u32 portid = NETLINK_CB(skb).portid; mutex_lock(&rx_queue_mutex); while (skb->len >= NLMSG_HDRLEN) { int err; uint32_t rlen; struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } ev = nlmsg_data(nlh); rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; err = iscsi_if_recv_msg(skb, nlh, &group); if (err) { ev->type = ISCSI_KEVENT_IF_ERROR; ev->iferror = err; } do { /* * special case for GET_STATS: * on success - sending reply and stats from * inside of if_recv_msg(), * on error - fall through. */ if (ev->type == ISCSI_UEVENT_GET_STATS && !err) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); if (err == -EAGAIN && --retries < 0) { printk(KERN_WARNING "Send reply failed, error %d\n", err); break; } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } mutex_unlock(&rx_queue_mutex); } #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) /* * iSCSI connection attrs */ #define iscsi_conn_attr_show(param) \ static ssize_t \ show_conn_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ struct iscsi_transport *t = conn->transport; \ return t->get_conn_param(conn, param, buf); \ } #define iscsi_conn_attr(field, param) \ iscsi_conn_attr_show(param) \ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \ NULL); iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH); iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH); iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN); iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN); iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN); iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN); iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT); iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT); iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN); iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO); iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE); iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT); iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE); iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE); iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE); iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN); iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE); iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS); iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC); iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL); iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6); iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF); iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); static const char *const connection_state_names[] 
= { [ISCSI_CONN_UP] = "up", [ISCSI_CONN_DOWN] = "down", [ISCSI_CONN_FAILED] = "failed", [ISCSI_CONN_BOUND] = "bound" }; static ssize_t show_conn_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; int conn_state = READ_ONCE(conn->state); if (conn_state >= 0 && conn_state < ARRAY_SIZE(connection_state_names)) state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state, NULL); #define iscsi_conn_ep_attr_show(param) \ static ssize_t show_conn_ep_param_##param(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ struct iscsi_transport *t = conn->transport; \ struct iscsi_endpoint *ep; \ ssize_t rc; \ \ /* \ * Need to make sure ep_disconnect does not free the LLD's \ * interconnect resources while we are trying to read them. \ */ \ mutex_lock(&conn->ep_mutex); \ ep = conn->ep; \ if (!ep && t->ep_connect) { \ mutex_unlock(&conn->ep_mutex); \ return -ENOTCONN; \ } \ \ if (ep) \ rc = t->get_ep_param(ep, param, buf); \ else \ rc = t->get_conn_param(conn, param, buf); \ mutex_unlock(&conn->ep_mutex); \ return rc; \ } #define iscsi_conn_ep_attr(field, param) \ iscsi_conn_ep_attr_show(param) \ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ show_conn_ep_param_##param, NULL); iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); static struct attribute *iscsi_conn_attrs[] = { &dev_attr_conn_max_recv_dlength.attr, &dev_attr_conn_max_xmit_dlength.attr, &dev_attr_conn_header_digest.attr, &dev_attr_conn_data_digest.attr, &dev_attr_conn_ifmarker.attr, &dev_attr_conn_ofmarker.attr, &dev_attr_conn_address.attr, &dev_attr_conn_port.attr, &dev_attr_conn_exp_statsn.attr, &dev_attr_conn_persistent_address.attr, &dev_attr_conn_persistent_port.attr, &dev_attr_conn_ping_tmo.attr, &dev_attr_conn_recv_tmo.attr, &dev_attr_conn_local_port.attr, &dev_attr_conn_statsn.attr, &dev_attr_conn_keepalive_tmo.attr, &dev_attr_conn_max_segment_size.attr, &dev_attr_conn_tcp_timestamp_stat.attr, &dev_attr_conn_tcp_wsf_disable.attr, &dev_attr_conn_tcp_nagle_disable.attr, &dev_attr_conn_tcp_timer_scale.attr, &dev_attr_conn_tcp_timestamp_enable.attr, &dev_attr_conn_fragment_disable.attr, &dev_attr_conn_ipv4_tos.attr, &dev_attr_conn_ipv6_traffic_class.attr, &dev_attr_conn_ipv6_flow_label.attr, &dev_attr_conn_is_fw_assigned_ipv6.attr, &dev_attr_conn_tcp_xmit_wsf.attr, &dev_attr_conn_tcp_recv_wsf.attr, &dev_attr_conn_local_ipaddr.attr, &dev_attr_conn_state.attr, NULL, }; static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct iscsi_cls_conn *conn = transport_class_to_conn(cdev); struct iscsi_transport *t = conn->transport; int param; if (attr == &dev_attr_conn_max_recv_dlength.attr) param = ISCSI_PARAM_MAX_RECV_DLENGTH; else if (attr == &dev_attr_conn_max_xmit_dlength.attr) param = ISCSI_PARAM_MAX_XMIT_DLENGTH; else if (attr == &dev_attr_conn_header_digest.attr) param = ISCSI_PARAM_HDRDGST_EN; else if (attr == &dev_attr_conn_data_digest.attr) param = ISCSI_PARAM_DATADGST_EN; else if (attr == &dev_attr_conn_ifmarker.attr) param = ISCSI_PARAM_IFMARKER_EN; else if (attr == &dev_attr_conn_ofmarker.attr) param = ISCSI_PARAM_OFMARKER_EN; else if (attr == &dev_attr_conn_address.attr) param = 
ISCSI_PARAM_CONN_ADDRESS; else if (attr == &dev_attr_conn_port.attr) param = ISCSI_PARAM_CONN_PORT; else if (attr == &dev_attr_conn_exp_statsn.attr) param = ISCSI_PARAM_EXP_STATSN; else if (attr == &dev_attr_conn_persistent_address.attr) param = ISCSI_PARAM_PERSISTENT_ADDRESS; else if (attr == &dev_attr_conn_persistent_port.attr) param = ISCSI_PARAM_PERSISTENT_PORT; else if (attr == &dev_attr_conn_ping_tmo.attr) param = ISCSI_PARAM_PING_TMO; else if (attr == &dev_attr_conn_recv_tmo.attr) param = ISCSI_PARAM_RECV_TMO; else if (attr == &dev_attr_conn_local_port.attr) param = ISCSI_PARAM_LOCAL_PORT; else if (attr == &dev_attr_conn_statsn.attr) param = ISCSI_PARAM_STATSN; else if (attr == &dev_attr_conn_keepalive_tmo.attr) param = ISCSI_PARAM_KEEPALIVE_TMO; else if (attr == &dev_attr_conn_max_segment_size.attr) param = ISCSI_PARAM_MAX_SEGMENT_SIZE; else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr) param = ISCSI_PARAM_TCP_TIMESTAMP_STAT; else if (attr == &dev_attr_conn_tcp_wsf_disable.attr) param = ISCSI_PARAM_TCP_WSF_DISABLE; else if (attr == &dev_attr_conn_tcp_nagle_disable.attr) param = ISCSI_PARAM_TCP_NAGLE_DISABLE; else if (attr == &dev_attr_conn_tcp_timer_scale.attr) param = ISCSI_PARAM_TCP_TIMER_SCALE; else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr) param = ISCSI_PARAM_TCP_TIMESTAMP_EN; else if (attr == &dev_attr_conn_fragment_disable.attr) param = ISCSI_PARAM_IP_FRAGMENT_DISABLE; else if (attr == &dev_attr_conn_ipv4_tos.attr) param = ISCSI_PARAM_IPV4_TOS; else if (attr == &dev_attr_conn_ipv6_traffic_class.attr) param = ISCSI_PARAM_IPV6_TC; else if (attr == &dev_attr_conn_ipv6_flow_label.attr) param = ISCSI_PARAM_IPV6_FLOW_LABEL; else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr) param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6; else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr) param = ISCSI_PARAM_TCP_XMIT_WSF; else if (attr == &dev_attr_conn_tcp_recv_wsf.attr) param = ISCSI_PARAM_TCP_RECV_WSF; else if (attr == &dev_attr_conn_local_ipaddr.attr) param = ISCSI_PARAM_LOCAL_IPADDR; else if (attr == &dev_attr_conn_state.attr) return S_IRUGO; else { WARN_ONCE(1, "Invalid conn attr"); return 0; } return t->attr_is_visible(ISCSI_PARAM, param); } static struct attribute_group iscsi_conn_group = { .attrs = iscsi_conn_attrs, .is_visible = iscsi_conn_attr_is_visible, }; /* * iSCSI session attrs */ #define iscsi_session_attr_show(param, perm) \ static ssize_t \ show_session_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ struct iscsi_transport *t = session->transport; \ \ if (perm && !capable(CAP_SYS_ADMIN)) \ return -EACCES; \ return t->get_session_param(session, param, buf); \ } #define iscsi_session_attr(field, param, perm) \ iscsi_session_attr_show(param, perm) \ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ NULL); iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0); iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0); iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0); iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0); iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0); iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0); iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0); iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0); iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0); iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0); 
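/* The credential attributes below pass perm = 1, so reading them requires CAP_SYS_ADMIN (see iscsi_session_attr_show). */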
iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1); iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1); iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1); iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1); iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1); iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0); iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0); iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0); iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0); iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0); iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0); iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0); iscsi_session_attr(discovery_auth_optional, ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0); iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0); iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0); iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0); iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0); iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); static ssize_t show_priv_session_target_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%s\n", iscsi_session_target_state_name[session->target_state]); } static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO, show_priv_session_target_state, NULL); static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); static ssize_t show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); static ssize_t show_priv_session_target_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); #define iscsi_priv_session_attr_show(field, format) \ static ssize_t \ show_priv_session_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if (session->field == -1) \ return sysfs_emit(buf, "off\n"); \ 
return sysfs_emit(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ static ssize_t \ store_priv_session_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ char *cp; \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if ((session->state == ISCSI_SESSION_FREE) || \ (session->state == ISCSI_SESSION_FAILED)) \ return -EBUSY; \ if (strncmp(buf, "off", 3) == 0) { \ session->field = -1; \ session->field##_sysfs_override = true; \ } else { \ val = simple_strtoul(buf, &cp, 0); \ if (*cp != '\0' && *cp != '\n') \ return -EINVAL; \ session->field = val; \ session->field##_sysfs_override = true; \ } \ return count; \ } #define iscsi_priv_session_rw_attr(field, format) \ iscsi_priv_session_attr_show(field, format) \ iscsi_priv_session_attr_store(field) \ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ show_priv_session_##field, \ store_priv_session_##field) iscsi_priv_session_rw_attr(recovery_tmo, "%d"); static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_initial_r2t.attr, &dev_attr_sess_max_outstanding_r2t.attr, &dev_attr_sess_immediate_data.attr, &dev_attr_sess_first_burst_len.attr, &dev_attr_sess_max_burst_len.attr, &dev_attr_sess_data_pdu_in_order.attr, &dev_attr_sess_data_seq_in_order.attr, &dev_attr_sess_erl.attr, &dev_attr_sess_targetname.attr, &dev_attr_sess_tpgt.attr, &dev_attr_sess_password.attr, &dev_attr_sess_password_in.attr, &dev_attr_sess_username.attr, &dev_attr_sess_username_in.attr, &dev_attr_sess_fast_abort.attr, &dev_attr_sess_abort_tmo.attr, &dev_attr_sess_lu_reset_tmo.attr, &dev_attr_sess_tgt_reset_tmo.attr, &dev_attr_sess_ifacename.attr, &dev_attr_sess_initiatorname.attr, &dev_attr_sess_targetalias.attr, &dev_attr_sess_boot_root.attr, &dev_attr_sess_boot_nic.attr, &dev_attr_sess_boot_target.attr, &dev_attr_priv_sess_recovery_tmo.attr, &dev_attr_priv_sess_state.attr, &dev_attr_priv_sess_target_state.attr, &dev_attr_priv_sess_creator.attr, &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, &dev_attr_priv_sess_target_id.attr, &dev_attr_sess_auto_snd_tgt_disable.attr, &dev_attr_sess_discovery_session.attr, &dev_attr_sess_portal_type.attr, &dev_attr_sess_chap_auth.attr, &dev_attr_sess_discovery_logout.attr, &dev_attr_sess_bidi_chap.attr, &dev_attr_sess_discovery_auth_optional.attr, &dev_attr_sess_def_time2wait.attr, &dev_attr_sess_def_time2retain.attr, &dev_attr_sess_isid.attr, &dev_attr_sess_tsid.attr, &dev_attr_sess_def_taskmgmt_tmo.attr, &dev_attr_sess_discovery_parent_idx.attr, &dev_attr_sess_discovery_parent_type.attr, NULL, }; static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct iscsi_cls_session *session = transport_class_to_session(cdev); struct iscsi_transport *t = session->transport; int param; if (attr == &dev_attr_sess_initial_r2t.attr) param = ISCSI_PARAM_INITIAL_R2T_EN; else if (attr == &dev_attr_sess_max_outstanding_r2t.attr) param = ISCSI_PARAM_MAX_R2T; else if (attr == &dev_attr_sess_immediate_data.attr) param = ISCSI_PARAM_IMM_DATA_EN; else if (attr == &dev_attr_sess_first_burst_len.attr) param = ISCSI_PARAM_FIRST_BURST; else if (attr == &dev_attr_sess_max_burst_len.attr) param = ISCSI_PARAM_MAX_BURST; else if (attr == &dev_attr_sess_data_pdu_in_order.attr) param = ISCSI_PARAM_PDU_INORDER_EN; else if (attr == &dev_attr_sess_data_seq_in_order.attr) param = ISCSI_PARAM_DATASEQ_INORDER_EN; 
else if (attr == &dev_attr_sess_erl.attr) param = ISCSI_PARAM_ERL; else if (attr == &dev_attr_sess_targetname.attr) param = ISCSI_PARAM_TARGET_NAME; else if (attr == &dev_attr_sess_tpgt.attr) param = ISCSI_PARAM_TPGT; else if (attr == &dev_attr_sess_chap_in_idx.attr) param = ISCSI_PARAM_CHAP_IN_IDX; else if (attr == &dev_attr_sess_chap_out_idx.attr) param = ISCSI_PARAM_CHAP_OUT_IDX; else if (attr == &dev_attr_sess_password.attr) param = ISCSI_PARAM_USERNAME; else if (attr == &dev_attr_sess_password_in.attr) param = ISCSI_PARAM_USERNAME_IN; else if (attr == &dev_attr_sess_username.attr) param = ISCSI_PARAM_PASSWORD; else if (attr == &dev_attr_sess_username_in.attr) param = ISCSI_PARAM_PASSWORD_IN; else if (attr == &dev_attr_sess_fast_abort.attr) param = ISCSI_PARAM_FAST_ABORT; else if (attr == &dev_attr_sess_abort_tmo.attr) param = ISCSI_PARAM_ABORT_TMO; else if (attr == &dev_attr_sess_lu_reset_tmo.attr) param = ISCSI_PARAM_LU_RESET_TMO; else if (attr == &dev_attr_sess_tgt_reset_tmo.attr) param = ISCSI_PARAM_TGT_RESET_TMO; else if (attr == &dev_attr_sess_ifacename.attr) param = ISCSI_PARAM_IFACE_NAME; else if (attr == &dev_attr_sess_initiatorname.attr) param = ISCSI_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_sess_targetalias.attr) param = ISCSI_PARAM_TARGET_ALIAS; else if (attr == &dev_attr_sess_boot_root.attr) param = ISCSI_PARAM_BOOT_ROOT; else if (attr == &dev_attr_sess_boot_nic.attr) param = ISCSI_PARAM_BOOT_NIC; else if (attr == &dev_attr_sess_boot_target.attr) param = ISCSI_PARAM_BOOT_TARGET; else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr) param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE; else if (attr == &dev_attr_sess_discovery_session.attr) param = ISCSI_PARAM_DISCOVERY_SESS; else if (attr == &dev_attr_sess_portal_type.attr) param = ISCSI_PARAM_PORTAL_TYPE; else if (attr == &dev_attr_sess_chap_auth.attr) param = ISCSI_PARAM_CHAP_AUTH_EN; else if (attr == &dev_attr_sess_discovery_logout.attr) param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN; else if (attr == &dev_attr_sess_bidi_chap.attr) param = ISCSI_PARAM_BIDI_CHAP_EN; else if (attr == &dev_attr_sess_discovery_auth_optional.attr) param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL; else if (attr == &dev_attr_sess_def_time2wait.attr) param = ISCSI_PARAM_DEF_TIME2WAIT; else if (attr == &dev_attr_sess_def_time2retain.attr) param = ISCSI_PARAM_DEF_TIME2RETAIN; else if (attr == &dev_attr_sess_isid.attr) param = ISCSI_PARAM_ISID; else if (attr == &dev_attr_sess_tsid.attr) param = ISCSI_PARAM_TSID; else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr) param = ISCSI_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_sess_discovery_parent_idx.attr) param = ISCSI_PARAM_DISCOVERY_PARENT_IDX; else if (attr == &dev_attr_sess_discovery_parent_type.attr) param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE; else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_state.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_creator.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_id.attr) return S_IRUGO; else { WARN_ONCE(1, "Invalid session attr"); return 0; } return t->attr_is_visible(ISCSI_PARAM, param); } static struct attribute_group iscsi_session_group = { .attrs = iscsi_session_attrs, .is_visible = iscsi_session_attr_is_visible, }; /* * iSCSI host attrs */ #define iscsi_host_attr_show(param) \ static ssize_t \ show_host_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ 
struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \ return priv->iscsi_transport->get_host_param(shost, param, buf); \ } #define iscsi_host_attr(field, param) \ iscsi_host_attr_show(param) \ static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \ NULL); iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME); iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS); iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS); iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME); iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE); iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED); static struct attribute *iscsi_host_attrs[] = { &dev_attr_host_netdev.attr, &dev_attr_host_hwaddress.attr, &dev_attr_host_ipaddress.attr, &dev_attr_host_initiatorname.attr, &dev_attr_host_port_state.attr, &dev_attr_host_port_speed.attr, NULL, }; static umode_t iscsi_host_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = transport_class_to_shost(cdev); struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); int param; if (attr == &dev_attr_host_netdev.attr) param = ISCSI_HOST_PARAM_NETDEV_NAME; else if (attr == &dev_attr_host_hwaddress.attr) param = ISCSI_HOST_PARAM_HWADDRESS; else if (attr == &dev_attr_host_ipaddress.attr) param = ISCSI_HOST_PARAM_IPADDRESS; else if (attr == &dev_attr_host_initiatorname.attr) param = ISCSI_HOST_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_host_port_state.attr) param = ISCSI_HOST_PARAM_PORT_STATE; else if (attr == &dev_attr_host_port_speed.attr) param = ISCSI_HOST_PARAM_PORT_SPEED; else { WARN_ONCE(1, "Invalid host attr"); return 0; } return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param); } static struct attribute_group iscsi_host_group = { .attrs = iscsi_host_attrs, .is_visible = iscsi_host_attr_is_visible, }; /* convert iscsi_port_speed values to ascii string name */ static const struct { enum iscsi_port_speed value; char *name; } iscsi_port_speed_names[] = { {ISCSI_PORT_SPEED_UNKNOWN, "Unknown" }, {ISCSI_PORT_SPEED_10MBPS, "10 Mbps" }, {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" }, {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" }, {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" }, {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" }, {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" }, }; char *iscsi_get_port_speed_name(struct Scsi_Host *shost) { int i; char *speed = "Unknown!"; struct iscsi_cls_host *ihost = shost->shost_data; uint32_t port_speed = ihost->port_speed; for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) { if (iscsi_port_speed_names[i].value & port_speed) { speed = iscsi_port_speed_names[i].name; break; } } return speed; } EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name); /* convert iscsi_port_state values to ascii string name */ static const struct { enum iscsi_port_state value; char *name; } iscsi_port_state_names[] = { {ISCSI_PORT_STATE_DOWN, "LINK DOWN" }, {ISCSI_PORT_STATE_UP, "LINK UP" }, }; char *iscsi_get_port_state_name(struct Scsi_Host *shost) { int i; char *state = "Unknown!"; struct iscsi_cls_host *ihost = shost->shost_data; uint32_t port_state = ihost->port_state; for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) { if (iscsi_port_state_names[i].value & port_state) { state = iscsi_port_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_port_state_name); static int iscsi_session_match(struct attribute_container *cont, struct device 
*dev) { struct iscsi_cls_session *session; struct Scsi_Host *shost; struct iscsi_internal *priv; if (!iscsi_is_session_dev(dev)) return 0; session = iscsi_dev_to_session(dev); shost = iscsi_session_to_shost(session); if (!shost->transportt) return 0; priv = to_iscsi_internal(shost->transportt); if (priv->session_cont.ac.class != &iscsi_session_class.class) return 0; return &priv->session_cont.ac == cont; } static int iscsi_conn_match(struct attribute_container *cont, struct device *dev) { struct iscsi_cls_session *session; struct iscsi_cls_conn *conn; struct Scsi_Host *shost; struct iscsi_internal *priv; if (!iscsi_is_conn_dev(dev)) return 0; conn = iscsi_dev_to_conn(dev); session = iscsi_dev_to_session(conn->dev.parent); shost = iscsi_session_to_shost(session); if (!shost->transportt) return 0; priv = to_iscsi_internal(shost->transportt); if (priv->conn_cont.ac.class != &iscsi_connection_class.class) return 0; return &priv->conn_cont.ac == cont; } static int iscsi_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct iscsi_internal *priv; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &iscsi_host_class.class) return 0; priv = to_iscsi_internal(shost->transportt); return &priv->t.host_attrs.ac == cont; } struct scsi_transport_template * iscsi_register_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; int err; BUG_ON(!tt); WARN_ON(tt->ep_disconnect && !tt->unbind_conn); priv = iscsi_if_transport_lookup(tt); if (priv) return NULL; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return NULL; INIT_LIST_HEAD(&priv->list); priv->iscsi_transport = tt; priv->t.user_scan = iscsi_user_scan; priv->dev.class = &iscsi_transport_class; dev_set_name(&priv->dev, "%s", tt->name); err = device_register(&priv->dev); if (err) goto put_dev; err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group); if (err) goto unregister_dev; /* host parameters */ priv->t.host_attrs.ac.class = &iscsi_host_class.class; priv->t.host_attrs.ac.match = iscsi_host_match; priv->t.host_attrs.ac.grp = &iscsi_host_group; priv->t.host_size = sizeof(struct iscsi_cls_host); transport_container_register(&priv->t.host_attrs); /* connection parameters */ priv->conn_cont.ac.class = &iscsi_connection_class.class; priv->conn_cont.ac.match = iscsi_conn_match; priv->conn_cont.ac.grp = &iscsi_conn_group; transport_container_register(&priv->conn_cont); /* session parameters */ priv->session_cont.ac.class = &iscsi_session_class.class; priv->session_cont.ac.match = iscsi_session_match; priv->session_cont.ac.grp = &iscsi_session_group; transport_container_register(&priv->session_cont); spin_lock_irqsave(&iscsi_transport_lock, flags); list_add(&priv->list, &iscsi_transports); spin_unlock_irqrestore(&iscsi_transport_lock, flags); printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name); return &priv->t; unregister_dev: device_unregister(&priv->dev); return NULL; put_dev: put_device(&priv->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_register_transport); void iscsi_unregister_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; BUG_ON(!tt); mutex_lock(&rx_queue_mutex); priv = iscsi_if_transport_lookup(tt); BUG_ON (!priv); spin_lock_irqsave(&iscsi_transport_lock, flags); list_del(&priv->list); spin_unlock_irqrestore(&iscsi_transport_lock, flags); transport_container_unregister(&priv->conn_cont); 
transport_container_unregister(&priv->session_cont); transport_container_unregister(&priv->t.host_attrs); sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group); device_unregister(&priv->dev); mutex_unlock(&rx_queue_mutex); } EXPORT_SYMBOL_GPL(iscsi_unregister_transport); void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *), struct device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; trace(dev, &vaf); va_end(args); } EXPORT_SYMBOL_GPL(iscsi_dbg_trace); static __init int iscsi_transport_init(void) { int err; struct netlink_kernel_cfg cfg = { .groups = 1, .input = iscsi_if_rx, }; printk(KERN_INFO "Loading iSCSI transport class v%s.\n", ISCSI_TRANSPORT_VERSION); atomic_set(&iscsi_session_nr, 0); err = class_register(&iscsi_transport_class); if (err) return err; err = class_register(&iscsi_endpoint_class); if (err) goto unregister_transport_class; err = class_register(&iscsi_iface_class); if (err) goto unregister_endpoint_class; err = transport_class_register(&iscsi_host_class); if (err) goto unregister_iface_class; err = transport_class_register(&iscsi_connection_class); if (err) goto unregister_host_class; err = transport_class_register(&iscsi_session_class); if (err) goto unregister_conn_class; err = bus_register(&iscsi_flashnode_bus); if (err) goto unregister_session_class; nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg); if (!nls) { err = -ENOBUFS; goto unregister_flashnode_bus; } iscsi_conn_cleanup_workq = alloc_workqueue("%s", WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, "iscsi_conn_cleanup"); if (!iscsi_conn_cleanup_workq) { err = -ENOMEM; goto release_nls; } return 0; release_nls: netlink_kernel_release(nls); unregister_flashnode_bus: bus_unregister(&iscsi_flashnode_bus); unregister_session_class: transport_class_unregister(&iscsi_session_class); unregister_conn_class: transport_class_unregister(&iscsi_connection_class); unregister_host_class: transport_class_unregister(&iscsi_host_class); unregister_iface_class: class_unregister(&iscsi_iface_class); unregister_endpoint_class: class_unregister(&iscsi_endpoint_class); unregister_transport_class: class_unregister(&iscsi_transport_class); return err; } static void __exit iscsi_transport_exit(void) { destroy_workqueue(iscsi_conn_cleanup_workq); netlink_kernel_release(nls); bus_unregister(&iscsi_flashnode_bus); transport_class_unregister(&iscsi_connection_class); transport_class_unregister(&iscsi_session_class); transport_class_unregister(&iscsi_host_class); class_unregister(&iscsi_endpoint_class); class_unregister(&iscsi_iface_class); class_unregister(&iscsi_transport_class); } module_init(iscsi_transport_init); module_exit(iscsi_transport_exit); MODULE_AUTHOR("Mike Christie <[email protected]>, " "Dmitry Yusupov <[email protected]>, " "Alex Aizman <[email protected]>"); MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); MODULE_VERSION(ISCSI_TRANSPORT_VERSION); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI);
linux-master
drivers/scsi/scsi_transport_iscsi.c
// SPDX-License-Identifier: GPL-2.0-only /* esp_scsi.c: ESP SCSI driver. * * Copyright (C) 2007 David S. Miller ([email protected]) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/completion.h> #include <linux/kallsyms.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/irqreturn.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/dma.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> #include "esp_scsi.h" #define DRV_MODULE_NAME "esp" #define PFX DRV_MODULE_NAME ": " #define DRV_VERSION "2.000" #define DRV_MODULE_RELDATE "April 19, 2007" /* SCSI bus reset settle time in seconds. */ static int esp_bus_reset_settle = 3; static u32 esp_debug; #define ESP_DEBUG_INTR 0x00000001 #define ESP_DEBUG_SCSICMD 0x00000002 #define ESP_DEBUG_RESET 0x00000004 #define ESP_DEBUG_MSGIN 0x00000008 #define ESP_DEBUG_MSGOUT 0x00000010 #define ESP_DEBUG_CMDDONE 0x00000020 #define ESP_DEBUG_DISCONNECT 0x00000040 #define ESP_DEBUG_DATASTART 0x00000080 #define ESP_DEBUG_DATADONE 0x00000100 #define ESP_DEBUG_RECONNECT 0x00000200 #define ESP_DEBUG_AUTOSENSE 0x00000400 #define ESP_DEBUG_EVENT 0x00000800 #define ESP_DEBUG_COMMAND 0x00001000 #define esp_log_intr(f, a...) \ do { if (esp_debug & ESP_DEBUG_INTR) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_reset(f, a...) \ do { if (esp_debug & ESP_DEBUG_RESET) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_msgin(f, a...) \ do { if (esp_debug & ESP_DEBUG_MSGIN) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_msgout(f, a...) \ do { if (esp_debug & ESP_DEBUG_MSGOUT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_cmddone(f, a...) \ do { if (esp_debug & ESP_DEBUG_CMDDONE) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_disconnect(f, a...) \ do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_datastart(f, a...) \ do { if (esp_debug & ESP_DEBUG_DATASTART) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_datadone(f, a...) \ do { if (esp_debug & ESP_DEBUG_DATADONE) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_reconnect(f, a...) \ do { if (esp_debug & ESP_DEBUG_RECONNECT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_autosense(f, a...) \ do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_event(f, a...) \ do { if (esp_debug & ESP_DEBUG_EVENT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_command(f, a...) 
\ do { if (esp_debug & ESP_DEBUG_COMMAND) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_read8(REG) esp->ops->esp_read8(esp, REG) #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) static void esp_log_fill_regs(struct esp *esp, struct esp_event_ent *p) { p->sreg = esp->sreg; p->seqreg = esp->seqreg; p->sreg2 = esp->sreg2; p->ireg = esp->ireg; p->select_state = esp->select_state; p->event = esp->event; } void scsi_esp_cmd(struct esp *esp, u8 val) { struct esp_event_ent *p; int idx = esp->esp_event_cur; p = &esp->esp_event_log[idx]; p->type = ESP_EVENT_TYPE_CMD; p->val = val; esp_log_fill_regs(esp, p); esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); esp_log_command("cmd[%02x]\n", val); esp_write8(val, ESP_CMD); } EXPORT_SYMBOL(scsi_esp_cmd); static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd) { if (esp->flags & ESP_FLAG_USE_FIFO) { int i; scsi_esp_cmd(esp, ESP_CMD_FLUSH); for (i = 0; i < len; i++) esp_write8(esp->command_block[i], ESP_FDATA); scsi_esp_cmd(esp, cmd); } else { if (esp->rev == FASHME) scsi_esp_cmd(esp, ESP_CMD_FLUSH); cmd |= ESP_CMD_DMA; esp->ops->send_dma_cmd(esp, esp->command_block_dma, len, max_len, 0, cmd); } } static void esp_event(struct esp *esp, u8 val) { struct esp_event_ent *p; int idx = esp->esp_event_cur; p = &esp->esp_event_log[idx]; p->type = ESP_EVENT_TYPE_EVENT; p->val = val; esp_log_fill_regs(esp, p); esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); esp->event = val; } static void esp_dump_cmd_log(struct esp *esp) { int idx = esp->esp_event_cur; int stop = idx; shost_printk(KERN_INFO, esp->host, "Dumping command log\n"); do { struct esp_event_ent *p = &esp->esp_event_log[idx]; shost_printk(KERN_INFO, esp->host, "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] " "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", idx, p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT", p->val, p->sreg, p->seqreg, p->sreg2, p->ireg, p->select_state, p->event); idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); } while (idx != stop); } static void esp_flush_fifo(struct esp *esp) { scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp->rev == ESP236) { int lim = 1000; while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { if (--lim == 0) { shost_printk(KERN_ALERT, esp->host, "ESP_FF_BYTES will not clear!\n"); break; } udelay(1); } } } static void hme_read_fifo(struct esp *esp) { int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; int idx = 0; while (fcnt--) { esp->fifo[idx++] = esp_read8(ESP_FDATA); esp->fifo[idx++] = esp_read8(ESP_FDATA); } if (esp->sreg2 & ESP_STAT2_F1BYTE) { esp_write8(0, ESP_FDATA); esp->fifo[idx++] = esp_read8(ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_FLUSH); } esp->fifo_cnt = idx; } static void esp_set_all_config3(struct esp *esp, u8 val) { int i; for (i = 0; i < ESP_MAX_TARGET; i++) esp->target[i].esp_config3 = val; } /* Reset the ESP chip, _not_ the SCSI bus. */ static void esp_reset_esp(struct esp *esp) { /* Now reset the ESP chip */ scsi_esp_cmd(esp, ESP_CMD_RC); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); if (esp->rev == FAST) esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); /* This is the only point at which it is reliable to read * the ID-code for a fast ESP chip variants. */ esp->max_period = ((35 * esp->ccycle) / 1000); if (esp->rev == FAST) { u8 family_code = ESP_FAMILY(esp_read8(ESP_UID)); if (family_code == ESP_UID_F236) { esp->rev = FAS236; } else if (family_code == ESP_UID_HME) { esp->rev = FASHME; /* Version is usually '5'. 
*/ } else if (family_code == ESP_UID_FSC) { esp->rev = FSC; /* Enable Active Negation */ esp_write8(ESP_CONFIG4_RADE, ESP_CFG4); } else { esp->rev = FAS100A; } esp->min_period = ((4 * esp->ccycle) / 1000); } else { esp->min_period = ((5 * esp->ccycle) / 1000); } if (esp->rev == FAS236) { /* * The AM53c974 chip returns the same ID as FAS236; * try to configure glitch eater. */ u8 config4 = ESP_CONFIG4_GE1; esp_write8(config4, ESP_CFG4); config4 = esp_read8(ESP_CFG4); if (config4 & ESP_CONFIG4_GE1) { esp->rev = PCSCSI; esp_write8(esp->config4, ESP_CFG4); } } esp->max_period = (esp->max_period + 3)>>2; esp->min_period = (esp->min_period + 3)>>2; esp_write8(esp->config1, ESP_CFG1); switch (esp->rev) { case ESP100: /* nothing to do */ break; case ESP100A: esp_write8(esp->config2, ESP_CFG2); break; case ESP236: /* Slow 236 */ esp_write8(esp->config2, ESP_CFG2); esp->prev_cfg3 = esp->target[0].esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); break; case FASHME: esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); fallthrough; case FAS236: case PCSCSI: case FSC: esp_write8(esp->config2, ESP_CFG2); if (esp->rev == FASHME) { u8 cfg3 = esp->target[0].esp_config3; cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; if (esp->scsi_id >= 8) cfg3 |= ESP_CONFIG3_IDBIT3; esp_set_all_config3(esp, cfg3); } else { u32 cfg3 = esp->target[0].esp_config3; cfg3 |= ESP_CONFIG3_FCLK; esp_set_all_config3(esp, cfg3); } esp->prev_cfg3 = esp->target[0].esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); if (esp->rev == FASHME) { esp->radelay = 80; } else { if (esp->flags & ESP_FLAG_DIFFERENTIAL) esp->radelay = 0; else esp->radelay = 96; } break; case FAS100A: /* Fast 100a */ esp_write8(esp->config2, ESP_CFG2); esp_set_all_config3(esp, (esp->target[0].esp_config3 | ESP_CONFIG3_FCLOCK)); esp->prev_cfg3 = esp->target[0].esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); esp->radelay = 32; break; default: break; } /* Reload the configuration registers */ esp_write8(esp->cfact, ESP_CFACT); esp->prev_stp = 0; esp_write8(esp->prev_stp, ESP_STP); esp->prev_soff = 0; esp_write8(esp->prev_soff, ESP_SOFF); esp_write8(esp->neg_defp, ESP_TIMEO); /* Eat any bitrot in the chip */ esp_read8(ESP_INTRPT); udelay(100); } static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) { struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); struct scatterlist *sg = scsi_sglist(cmd); int total = 0, i; struct scatterlist *s; if (cmd->sc_data_direction == DMA_NONE) return; if (esp->flags & ESP_FLAG_NO_DMA_MAP) { /* * For pseudo DMA and PIO we need the virtual address instead of * a dma address, so perform an identity mapping. 
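* sg_virt() is stored in dma_address so the data transfer code can treat
* the PIO and DMA cases the same way.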
*/ spriv->num_sg = scsi_sg_count(cmd); scsi_for_each_sg(cmd, s, spriv->num_sg, i) { s->dma_address = (uintptr_t)sg_virt(s); total += sg_dma_len(s); } } else { spriv->num_sg = scsi_dma_map(cmd); scsi_for_each_sg(cmd, s, spriv->num_sg, i) total += sg_dma_len(s); } spriv->cur_residue = sg_dma_len(sg); spriv->prv_sg = NULL; spriv->cur_sg = sg; spriv->tot_residue = total; } static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, struct scsi_cmnd *cmd) { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { return ent->sense_dma + (ent->sense_ptr - cmd->sense_buffer); } return sg_dma_address(p->cur_sg) + (sg_dma_len(p->cur_sg) - p->cur_residue); } static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, struct scsi_cmnd *cmd) { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { return SCSI_SENSE_BUFFERSIZE - (ent->sense_ptr - cmd->sense_buffer); } return p->cur_residue; } static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, struct scsi_cmnd *cmd, unsigned int len) { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->sense_ptr += len; return; } p->cur_residue -= len; p->tot_residue -= len; if (p->cur_residue < 0 || p->tot_residue < 0) { shost_printk(KERN_ERR, esp->host, "Data transfer overflow.\n"); shost_printk(KERN_ERR, esp->host, "cur_residue[%d] tot_residue[%d] len[%u]\n", p->cur_residue, p->tot_residue, len); p->cur_residue = 0; p->tot_residue = 0; } if (!p->cur_residue && p->tot_residue) { p->prv_sg = p->cur_sg; p->cur_sg = sg_next(p->cur_sg); p->cur_residue = sg_dma_len(p->cur_sg); } } static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) { if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) scsi_dma_unmap(cmd); } static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->saved_sense_ptr = ent->sense_ptr; return; } ent->saved_cur_residue = spriv->cur_residue; ent->saved_prv_sg = spriv->prv_sg; ent->saved_cur_sg = spriv->cur_sg; ent->saved_tot_residue = spriv->tot_residue; } static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->sense_ptr = ent->saved_sense_ptr; return; } spriv->cur_residue = ent->saved_cur_residue; spriv->prv_sg = ent->saved_prv_sg; spriv->cur_sg = ent->saved_cur_sg; spriv->tot_residue = ent->saved_tot_residue; } static void esp_write_tgt_config3(struct esp *esp, int tgt) { if (esp->rev > ESP100A) { u8 val = esp->target[tgt].esp_config3; if (val != esp->prev_cfg3) { esp->prev_cfg3 = val; esp_write8(val, ESP_CFG3); } } } static void esp_write_tgt_sync(struct esp *esp, int tgt) { u8 off = esp->target[tgt].esp_offset; u8 per = esp->target[tgt].esp_period; if (off != esp->prev_soff) { esp->prev_soff = off; esp_write8(off, ESP_SOFF); } if (per != esp->prev_stp) { esp->prev_stp = per; esp_write8(per, ESP_STP); } } static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { if (esp->rev == FASHME) { /* Arbitrary segment boundaries, 24-bit counts. */ if (dma_len > (1U << 24)) dma_len = (1U << 24); } else { u32 base, end; /* ESP chip limits other variants by 16-bits of transfer * count. 
Actually on FAS100A and FAS236 we could get * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB * in the ESP_CFG2 register but that causes other unwanted * changes so we don't use it currently. */ if (dma_len > (1U << 16)) dma_len = (1U << 16); /* All of the DMA variants hooked up to these chips * cannot handle crossing a 24-bit address boundary. */ base = dma_addr & ((1U << 24) - 1U); end = base + dma_len; if (end > (1U << 24)) end = (1U <<24); dma_len = end - base; } return dma_len; } static int esp_need_to_nego_wide(struct esp_target_data *tp) { struct scsi_target *target = tp->starget; return spi_width(target) != tp->nego_goal_width; } static int esp_need_to_nego_sync(struct esp_target_data *tp) { struct scsi_target *target = tp->starget; /* When offset is zero, period is "don't care". */ if (!spi_offset(target) && !tp->nego_goal_offset) return 0; if (spi_offset(target) == tp->nego_goal_offset && spi_period(target) == tp->nego_goal_period) return 0; return 1; } static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { if (!ent->orig_tag[0]) { /* Non-tagged, slot already taken? */ if (lp->non_tagged_cmd) return -EBUSY; if (lp->hold) { /* We are being held by active tagged * commands. */ if (lp->num_tagged) return -EBUSY; /* Tagged commands completed, we can unplug * the queue and run this untagged command. */ lp->hold = 0; } else if (lp->num_tagged) { /* Plug the queue until num_tagged decreases * to zero in esp_free_lun_tag. */ lp->hold = 1; return -EBUSY; } lp->non_tagged_cmd = ent; return 0; } /* Tagged command. Check that it isn't blocked by a non-tagged one. */ if (lp->non_tagged_cmd || lp->hold) return -EBUSY; BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); lp->tagged_cmds[ent->orig_tag[1]] = ent; lp->num_tagged++; return 0; } static void esp_free_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { if (ent->orig_tag[0]) { BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); lp->tagged_cmds[ent->orig_tag[1]] = NULL; lp->num_tagged--; } else { BUG_ON(lp->non_tagged_cmd != ent); lp->non_tagged_cmd = NULL; } } static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent) { ent->sense_ptr = ent->cmd->sense_buffer; if (esp->flags & ESP_FLAG_NO_DMA_MAP) { ent->sense_dma = (uintptr_t)ent->sense_ptr; return; } ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); } static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent) { if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) dma_unmap_single(esp->dev, ent->sense_dma, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); ent->sense_ptr = NULL; } /* When a contingent allegiance condition is created, we force feed a * REQUEST_SENSE command to the device to fetch the sense data. I * tried many other schemes, relying on the scsi error handling layer * to send out the REQUEST_SENSE automatically, but this was difficult * to get right especially in the presence of applications like smartd * which use SG_IO to send out their own REQUEST_SENSE commands. */ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; struct scsi_device *dev = cmd->device; int tgt, lun; u8 *p, val; tgt = dev->id; lun = dev->lun; if (!ent->sense_ptr) { esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n", tgt, lun); esp_map_sense(esp, ent); } ent->saved_sense_ptr = ent->sense_ptr; esp->active_cmd = ent; p = esp->command_block; esp->msg_out_len = 0; *p++ = IDENTIFY(0, lun); *p++ = REQUEST_SENSE; *p++ = ((dev->scsi_level <= SCSI_2) ? 
(lun << 5) : 0); *p++ = 0; *p++ = 0; *p++ = SCSI_SENSE_BUFFERSIZE; *p++ = 0; esp->select_state = ESP_SELECT_BASIC; val = tgt; if (esp->rev == FASHME) val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; esp_write8(val, ESP_BUSID); esp_write_tgt_sync(esp, tgt); esp_write_tgt_config3(esp, tgt); val = (p - esp->command_block); esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA); } static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) { struct esp_cmd_entry *ent; list_for_each_entry(ent, &esp->queued_cmds, list) { struct scsi_cmnd *cmd = ent->cmd; struct scsi_device *dev = cmd->device; struct esp_lun_data *lp = dev->hostdata; if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->tag[0] = 0; ent->tag[1] = 0; return ent; } if (!spi_populate_tag_msg(&ent->tag[0], cmd)) { ent->tag[0] = 0; ent->tag[1] = 0; } ent->orig_tag[0] = ent->tag[0]; ent->orig_tag[1] = ent->tag[1]; if (esp_alloc_lun_tag(ent, lp) < 0) continue; return ent; } return NULL; } static void esp_maybe_execute_command(struct esp *esp) { struct esp_target_data *tp; struct scsi_device *dev; struct scsi_cmnd *cmd; struct esp_cmd_entry *ent; bool select_and_stop = false; int tgt, lun, i; u32 val, start_cmd; u8 *p; if (esp->active_cmd || (esp->flags & ESP_FLAG_RESETTING)) return; ent = find_and_prep_issuable_command(esp); if (!ent) return; if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { esp_autosense(esp, ent); return; } cmd = ent->cmd; dev = cmd->device; tgt = dev->id; lun = dev->lun; tp = &esp->target[tgt]; list_move(&ent->list, &esp->active_cmds); esp->active_cmd = ent; esp_map_dma(esp, cmd); esp_save_pointers(esp, ent); if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12)) select_and_stop = true; p = esp->command_block; esp->msg_out_len = 0; if (tp->flags & ESP_TGT_CHECK_NEGO) { /* Need to negotiate. If the target is broken * go for synchronous transfers and non-wide. */ if (tp->flags & ESP_TGT_BROKEN) { tp->flags &= ~ESP_TGT_DISCONNECT; tp->nego_goal_period = 0; tp->nego_goal_offset = 0; tp->nego_goal_width = 0; tp->nego_goal_tags = 0; } /* If the settings are not changing, skip this. */ if (spi_width(tp->starget) == tp->nego_goal_width && spi_period(tp->starget) == tp->nego_goal_period && spi_offset(tp->starget) == tp->nego_goal_offset) { tp->flags &= ~ESP_TGT_CHECK_NEGO; goto build_identify; } if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { esp->msg_out_len = spi_populate_width_msg(&esp->msg_out[0], (tp->nego_goal_width ? 1 : 0)); tp->flags |= ESP_TGT_NEGO_WIDE; } else if (esp_need_to_nego_sync(tp)) { esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); tp->flags |= ESP_TGT_NEGO_SYNC; } else { tp->flags &= ~ESP_TGT_CHECK_NEGO; } /* If there are multiple message bytes, use Select and Stop */ if (esp->msg_out_len) select_and_stop = true; } build_identify: *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun); if (ent->tag[0] && esp->rev == ESP100) { /* ESP100 lacks select w/atn3 command, use select * and stop instead. 
*/ select_and_stop = true; } if (select_and_stop) { esp->cmd_bytes_left = cmd->cmd_len; esp->cmd_bytes_ptr = &cmd->cmnd[0]; if (ent->tag[0]) { for (i = esp->msg_out_len - 1; i >= 0; i--) esp->msg_out[i + 2] = esp->msg_out[i]; esp->msg_out[0] = ent->tag[0]; esp->msg_out[1] = ent->tag[1]; esp->msg_out_len += 2; } start_cmd = ESP_CMD_SELAS; esp->select_state = ESP_SELECT_MSGOUT; } else { start_cmd = ESP_CMD_SELA; if (ent->tag[0]) { *p++ = ent->tag[0]; *p++ = ent->tag[1]; start_cmd = ESP_CMD_SA3; } for (i = 0; i < cmd->cmd_len; i++) *p++ = cmd->cmnd[i]; esp->select_state = ESP_SELECT_BASIC; } val = tgt; if (esp->rev == FASHME) val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; esp_write8(val, ESP_BUSID); esp_write_tgt_sync(esp, tgt); esp_write_tgt_config3(esp, tgt); val = (p - esp->command_block); if (esp_debug & ESP_DEBUG_SCSICMD) { printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); for (i = 0; i < cmd->cmd_len; i++) printk("%02x ", cmd->cmnd[i]); printk("]\n"); } esp_send_dma_cmd(esp, val, 16, start_cmd); } static struct esp_cmd_entry *esp_get_ent(struct esp *esp) { struct list_head *head = &esp->esp_cmd_pool; struct esp_cmd_entry *ret; if (list_empty(head)) { ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); } else { ret = list_entry(head->next, struct esp_cmd_entry, list); list_del(&ret->list); memset(ret, 0, sizeof(*ret)); } return ret; } static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) { list_add(&ent->list, &esp->esp_cmd_pool); } static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, struct scsi_cmnd *cmd, unsigned char host_byte) { struct scsi_device *dev = cmd->device; int tgt = dev->id; int lun = dev->lun; esp->active_cmd = NULL; esp_unmap_dma(esp, cmd); esp_free_lun_tag(ent, dev->hostdata); cmd->result = 0; set_host_byte(cmd, host_byte); if (host_byte == DID_OK) set_status_byte(cmd, ent->status); if (ent->eh_done) { complete(ent->eh_done); ent->eh_done = NULL; } if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { esp_unmap_sense(esp, ent); /* Restore the message/status bytes to what we actually * saw originally. Also, report that we are providing * the sense data. */ cmd->result = SAM_STAT_CHECK_CONDITION; ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; if (esp_debug & ESP_DEBUG_AUTOSENSE) { int i; printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", esp->host->unique_id, tgt, lun); for (i = 0; i < 18; i++) printk("%02x ", cmd->sense_buffer[i]); printk("]\n"); } } scsi_done(cmd); list_del(&ent->list); esp_put_ent(esp, ent); esp_maybe_execute_command(esp); } static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_device *dev = ent->cmd->device; struct esp_lun_data *lp = dev->hostdata; scsi_track_queue_full(dev, lp->num_tagged - 1); } static int esp_queuecommand_lck(struct scsi_cmnd *cmd) { struct scsi_device *dev = cmd->device; struct esp *esp = shost_priv(dev->host); struct esp_cmd_priv *spriv; struct esp_cmd_entry *ent; ent = esp_get_ent(esp); if (!ent) return SCSI_MLQUEUE_HOST_BUSY; ent->cmd = cmd; spriv = ESP_CMD_PRIV(cmd); spriv->num_sg = 0; list_add_tail(&ent->list, &esp->queued_cmds); esp_maybe_execute_command(esp); return 0; } static DEF_SCSI_QCMD(esp_queuecommand) static int esp_check_gross_error(struct esp *esp) { if (esp->sreg & ESP_STAT_SPAM) { /* Gross Error, could be one of: * - top of fifo overwritten * - top of command register overwritten * - DMA programmed with wrong direction * - improper phase change */ shost_printk(KERN_ERR, esp->host, "Gross error sreg[%02x]\n", esp->sreg); /* XXX Reset the chip. 
XXX */ return 1; } return 0; } static int esp_check_spur_intr(struct esp *esp) { switch (esp->rev) { case ESP100: case ESP100A: /* The interrupt pending bit of the status register cannot * be trusted on these revisions. */ esp->sreg &= ~ESP_STAT_INTR; break; default: if (!(esp->sreg & ESP_STAT_INTR)) { if (esp->ireg & ESP_INTR_SR) return 1; /* If the DMA is indicating interrupt pending and the * ESP is not, the only possibility is a DMA error. */ if (!esp->ops->dma_error(esp)) { shost_printk(KERN_ERR, esp->host, "Spurious irq, sreg=%02x.\n", esp->sreg); return -1; } shost_printk(KERN_ERR, esp->host, "DMA error\n"); /* XXX Reset the chip. XXX */ return -1; } break; } return 0; } static void esp_schedule_reset(struct esp *esp) { esp_log_reset("esp_schedule_reset() from %ps\n", __builtin_return_address(0)); esp->flags |= ESP_FLAG_RESETTING; esp_event(esp, ESP_EVENT_RESET); } /* In order to avoid having to add a special half-reconnected state * into the driver we just sit here and poll through the rest of * the reselection process to get the tag message bytes. */ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, struct esp_lun_data *lp) { struct esp_cmd_entry *ent; int i; if (!lp->num_tagged) { shost_printk(KERN_ERR, esp->host, "Reconnect w/num_tagged==0\n"); return NULL; } esp_log_reconnect("reconnect tag, "); for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { if (esp->ops->irq_pending(esp)) break; } if (i == ESP_QUICKIRQ_LIMIT) { shost_printk(KERN_ERR, esp->host, "Reconnect IRQ1 timeout\n"); return NULL; } esp->sreg = esp_read8(ESP_STATUS); esp->ireg = esp_read8(ESP_INTRPT); esp_log_reconnect("IRQ(%d:%x:%x), ", i, esp->ireg, esp->sreg); if (esp->ireg & ESP_INTR_DC) { shost_printk(KERN_ERR, esp->host, "Reconnect, got disconnect.\n"); return NULL; } if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { shost_printk(KERN_ERR, esp->host, "Reconnect, not MIP sreg[%02x].\n", esp->sreg); return NULL; } /* DMA in the tag bytes... */ esp->command_block[0] = 0xff; esp->command_block[1] = 0xff; esp->ops->send_dma_cmd(esp, esp->command_block_dma, 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); /* ACK the message. */ scsi_esp_cmd(esp, ESP_CMD_MOK); for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { if (esp->ops->irq_pending(esp)) { esp->sreg = esp_read8(ESP_STATUS); esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & ESP_INTR_FDONE) break; } udelay(1); } if (i == ESP_RESELECT_TAG_LIMIT) { shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n"); return NULL; } esp->ops->dma_drain(esp); esp->ops->dma_invalidate(esp); esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", i, esp->ireg, esp->sreg, esp->command_block[0], esp->command_block[1]); if (esp->command_block[0] < SIMPLE_QUEUE_TAG || esp->command_block[0] > ORDERED_QUEUE_TAG) { shost_printk(KERN_ERR, esp->host, "Reconnect, bad tag type %02x.\n", esp->command_block[0]); return NULL; } ent = lp->tagged_cmds[esp->command_block[1]]; if (!ent) { shost_printk(KERN_ERR, esp->host, "Reconnect, no entry for tag %02x.\n", esp->command_block[1]); return NULL; } return ent; } static int esp_reconnect(struct esp *esp) { struct esp_cmd_entry *ent; struct esp_target_data *tp; struct esp_lun_data *lp; struct scsi_device *dev; int target, lun; BUG_ON(esp->active_cmd); if (esp->rev == FASHME) { /* FASHME puts the target and lun numbers directly * into the fifo. 
*/ target = esp->fifo[0]; lun = esp->fifo[1] & 0x7; } else { u8 bits = esp_read8(ESP_FDATA); /* Older chips put the lun directly into the fifo, but * the target is given as a sample of the arbitration * lines on the bus at reselection time. So we should * see the ID of the ESP and the one reconnecting target * set in the bitmap. */ if (!(bits & esp->scsi_id_mask)) goto do_reset; bits &= ~esp->scsi_id_mask; if (!bits || (bits & (bits - 1))) goto do_reset; target = ffs(bits) - 1; lun = (esp_read8(ESP_FDATA) & 0x7); scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp->rev == ESP100) { u8 ireg = esp_read8(ESP_INTRPT); /* This chip has a bug during reselection that can * cause a spurious illegal-command interrupt, which * we simply ACK here. Another possibility is a bus * reset so we must check for that. */ if (ireg & ESP_INTR_SR) goto do_reset; } scsi_esp_cmd(esp, ESP_CMD_NULL); } esp_write_tgt_sync(esp, target); esp_write_tgt_config3(esp, target); scsi_esp_cmd(esp, ESP_CMD_MOK); if (esp->rev == FASHME) esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, ESP_BUSID); tp = &esp->target[target]; dev = __scsi_device_lookup_by_target(tp->starget, lun); if (!dev) { shost_printk(KERN_ERR, esp->host, "Reconnect, no lp tgt[%u] lun[%u]\n", target, lun); goto do_reset; } lp = dev->hostdata; ent = lp->non_tagged_cmd; if (!ent) { ent = esp_reconnect_with_tag(esp, lp); if (!ent) goto do_reset; } esp->active_cmd = ent; esp_event(esp, ESP_EVENT_CHECK_PHASE); esp_restore_pointers(esp, ent); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 1; do_reset: esp_schedule_reset(esp); return 0; } static int esp_finish_select(struct esp *esp) { struct esp_cmd_entry *ent; struct scsi_cmnd *cmd; /* No longer selecting. */ esp->select_state = ESP_SELECT_NONE; esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; ent = esp->active_cmd; cmd = ent->cmd; if (esp->ops->dma_error(esp)) { /* If we see a DMA error during or as a result of selection, * all bets are off. */ esp_schedule_reset(esp); esp_cmd_is_done(esp, ent, cmd, DID_ERROR); return 0; } esp->ops->dma_invalidate(esp); if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { struct esp_target_data *tp = &esp->target[cmd->device->id]; /* Carefully back out of the selection attempt. Release * resources (such as DMA mapping & TAG) and reset state (such * as message out and command delivery variables). */ if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { esp_unmap_dma(esp, cmd); esp_free_lun_tag(ent, cmd->device->hostdata); tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); esp->cmd_bytes_ptr = NULL; esp->cmd_bytes_left = 0; } else { esp_unmap_sense(esp, ent); } /* Now that the state is unwound properly, put back onto * the issue queue. This command is no longer active. */ list_move(&ent->list, &esp->queued_cmds); esp->active_cmd = NULL; /* Return value ignored by caller, it directly invokes * esp_reconnect(). */ return 0; } if (esp->ireg == ESP_INTR_DC) { struct scsi_device *dev = cmd->device; /* Disconnect. Make sure we re-negotiate sync and * wide parameters if this target starts responding * again in the future. */ esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; scsi_esp_cmd(esp, ESP_CMD_ESEL); esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET); return 1; } if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { /* Selection successful. On pre-FAST chips we have * to do a NOP and possibly clean out the FIFO. 
*/ if (esp->rev <= ESP236) { int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; scsi_esp_cmd(esp, ESP_CMD_NULL); if (!fcnt && (!esp->prev_soff || ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) esp_flush_fifo(esp); } /* If we are doing a Select And Stop command, negotiation, etc. * we'll do the right thing as we transition to the next phase. */ esp_event(esp, ESP_EVENT_CHECK_PHASE); return 0; } shost_printk(KERN_INFO, esp->host, "Unexpected selection completion ireg[%x]\n", esp->ireg); esp_schedule_reset(esp); return 0; } static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, struct scsi_cmnd *cmd) { int fifo_cnt, ecount, bytes_sent, flush_fifo; fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) fifo_cnt <<= 1; ecount = 0; if (!(esp->sreg & ESP_STAT_TCNT)) { ecount = ((unsigned int)esp_read8(ESP_TCLOW) | (((unsigned int)esp_read8(ESP_TCMED)) << 8)); if (esp->rev == FASHME) ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB)) ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16; } bytes_sent = esp->data_dma_len; bytes_sent -= ecount; bytes_sent -= esp->send_cmd_residual; /* * The am53c974 has a DMA 'peculiarity'. The doc states: * In some odd byte conditions, one residual byte will * be left in the SCSI FIFO, and the FIFO Flags will * never count to '0 '. When this happens, the residual * byte should be retrieved via PIO following completion * of the BLAST operation. */ if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) { size_t count = 1; size_t offset = bytes_sent; u8 bval = esp_read8(ESP_FDATA); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) ent->sense_ptr[bytes_sent] = bval; else { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); u8 *ptr; ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg, &offset, &count); if (likely(ptr)) { *(ptr + offset) = bval; scsi_kunmap_atomic_sg(ptr); } } bytes_sent += fifo_cnt; ent->flags &= ~ESP_CMD_FLAG_RESIDUAL; } if (!(ent->flags & ESP_CMD_FLAG_WRITE)) bytes_sent -= fifo_cnt; flush_fifo = 0; if (!esp->prev_soff) { /* Synchronous data transfer, always flush fifo. */ flush_fifo = 1; } else { if (esp->rev == ESP100) { u32 fflags, phase; /* ESP100 has a chip bug where in the synchronous data * phase it can mistake a final long REQ pulse from the * target as an extra data byte. Fun. * * To detect this case we resample the status register * and fifo flags. If we're still in a data phase and * we see spurious chunks in the fifo, we return error * to the caller which should reset and set things up * such that we only try future transfers to this * target in synchronous mode. */ esp->sreg = esp_read8(ESP_STATUS); phase = esp->sreg & ESP_STAT_PMASK; fflags = esp_read8(ESP_FFLAGS); if ((phase == ESP_DOP && (fflags & ESP_FF_ONOTZERO)) || (phase == ESP_DIP && (fflags & ESP_FF_FBYTES))) return -1; } if (!(ent->flags & ESP_CMD_FLAG_WRITE)) flush_fifo = 1; } if (flush_fifo) esp_flush_fifo(esp); return bytes_sent; } static void esp_setsync(struct esp *esp, struct esp_target_data *tp, u8 scsi_period, u8 scsi_offset, u8 esp_stp, u8 esp_soff) { spi_period(tp->starget) = scsi_period; spi_offset(tp->starget) = scsi_offset; spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 
1 : 0; if (esp_soff) { esp_stp &= 0x1f; esp_soff |= esp->radelay; if (esp->rev >= FAS236) { u8 bit = ESP_CONFIG3_FSCSI; if (esp->rev >= FAS100A) bit = ESP_CONFIG3_FAST; if (scsi_period < 50) { if (esp->rev == FASHME) esp_soff &= ~esp->radelay; tp->esp_config3 |= bit; } else { tp->esp_config3 &= ~bit; } esp->prev_cfg3 = tp->esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); } } tp->esp_period = esp->prev_stp = esp_stp; tp->esp_offset = esp->prev_soff = esp_soff; esp_write8(esp_soff, ESP_SOFF); esp_write8(esp_stp, ESP_STP); tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); spi_display_xfer_agreement(tp->starget); } static void esp_msgin_reject(struct esp *esp) { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; struct esp_target_data *tp; int tgt; tgt = cmd->device->id; tp = &esp->target[tgt]; if (tp->flags & ESP_TGT_NEGO_WIDE) { tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); if (!esp_need_to_nego_sync(tp)) { tp->flags &= ~ESP_TGT_CHECK_NEGO; scsi_esp_cmd(esp, ESP_CMD_RATN); } else { esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); tp->flags |= ESP_TGT_NEGO_SYNC; scsi_esp_cmd(esp, ESP_CMD_SATN); } return; } if (tp->flags & ESP_TGT_NEGO_SYNC) { tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); tp->esp_period = 0; tp->esp_offset = 0; esp_setsync(esp, tp, 0, 0, 0, 0); scsi_esp_cmd(esp, ESP_CMD_RATN); return; } shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n"); esp_schedule_reset(esp); } static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) { u8 period = esp->msg_in[3]; u8 offset = esp->msg_in[4]; u8 stp; if (!(tp->flags & ESP_TGT_NEGO_SYNC)) goto do_reject; if (offset > 15) goto do_reject; if (offset) { int one_clock; if (period > esp->max_period) { period = offset = 0; goto do_sdtr; } if (period < esp->min_period) goto do_reject; one_clock = esp->ccycle / 1000; stp = DIV_ROUND_UP(period << 2, one_clock); if (stp && esp->rev >= FAS236) { if (stp >= 50) stp--; } } else { stp = 0; } esp_setsync(esp, tp, period, offset, stp, offset); return; do_reject: esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); return; do_sdtr: tp->nego_goal_period = period; tp->nego_goal_offset = offset; esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); scsi_esp_cmd(esp, ESP_CMD_SATN); } static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) { int size = 8 << esp->msg_in[3]; u8 cfg3; if (esp->rev != FASHME) goto do_reject; if (size != 8 && size != 16) goto do_reject; if (!(tp->flags & ESP_TGT_NEGO_WIDE)) goto do_reject; cfg3 = tp->esp_config3; if (size == 16) { tp->flags |= ESP_TGT_WIDE; cfg3 |= ESP_CONFIG3_EWIDE; } else { tp->flags &= ~ESP_TGT_WIDE; cfg3 &= ~ESP_CONFIG3_EWIDE; } tp->esp_config3 = cfg3; esp->prev_cfg3 = cfg3; esp_write8(cfg3, ESP_CFG3); tp->flags &= ~ESP_TGT_NEGO_WIDE; spi_period(tp->starget) = 0; spi_offset(tp->starget) = 0; if (!esp_need_to_nego_sync(tp)) { tp->flags &= ~ESP_TGT_CHECK_NEGO; scsi_esp_cmd(esp, ESP_CMD_RATN); } else { esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); tp->flags |= ESP_TGT_NEGO_SYNC; scsi_esp_cmd(esp, ESP_CMD_SATN); } return; do_reject: esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); } static void esp_msgin_extended(struct esp *esp) { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; struct esp_target_data *tp; int 
tgt = cmd->device->id; tp = &esp->target[tgt]; if (esp->msg_in[2] == EXTENDED_SDTR) { esp_msgin_sdtr(esp, tp); return; } if (esp->msg_in[2] == EXTENDED_WDTR) { esp_msgin_wdtr(esp, tp); return; } shost_printk(KERN_INFO, esp->host, "Unexpected extended msg type %x\n", esp->msg_in[2]); esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); } /* Analyze msgin bytes received from target so far. Return non-zero * if there are more bytes needed to complete the message. */ static int esp_msgin_process(struct esp *esp) { u8 msg0 = esp->msg_in[0]; int len = esp->msg_in_len; if (msg0 & 0x80) { /* Identify */ shost_printk(KERN_INFO, esp->host, "Unexpected msgin identify\n"); return 0; } switch (msg0) { case EXTENDED_MESSAGE: if (len == 1) return 1; if (len < esp->msg_in[1] + 2) return 1; esp_msgin_extended(esp); return 0; case IGNORE_WIDE_RESIDUE: { struct esp_cmd_entry *ent; struct esp_cmd_priv *spriv; if (len == 1) return 1; if (esp->msg_in[1] != 1) goto do_reject; ent = esp->active_cmd; spriv = ESP_CMD_PRIV(ent->cmd); if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { spriv->cur_sg = spriv->prv_sg; spriv->cur_residue = 1; } else spriv->cur_residue++; spriv->tot_residue++; return 0; } case NOP: return 0; case RESTORE_POINTERS: esp_restore_pointers(esp, esp->active_cmd); return 0; case SAVE_POINTERS: esp_save_pointers(esp, esp->active_cmd); return 0; case COMMAND_COMPLETE: case DISCONNECT: { struct esp_cmd_entry *ent = esp->active_cmd; ent->message = msg0; esp_event(esp, ESP_EVENT_FREE_BUS); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 0; } case MESSAGE_REJECT: esp_msgin_reject(esp); return 0; default: do_reject: esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); return 0; } } static int esp_process_event(struct esp *esp) { int write, i; again: write = 0; esp_log_event("process event %d phase %x\n", esp->event, esp->sreg & ESP_STAT_PMASK); switch (esp->event) { case ESP_EVENT_CHECK_PHASE: switch (esp->sreg & ESP_STAT_PMASK) { case ESP_DOP: esp_event(esp, ESP_EVENT_DATA_OUT); break; case ESP_DIP: esp_event(esp, ESP_EVENT_DATA_IN); break; case ESP_STATP: esp_flush_fifo(esp); scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); esp_event(esp, ESP_EVENT_STATUS); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 1; case ESP_MOP: esp_event(esp, ESP_EVENT_MSGOUT); break; case ESP_MIP: esp_event(esp, ESP_EVENT_MSGIN); break; case ESP_CMDP: esp_event(esp, ESP_EVENT_CMD_START); break; default: shost_printk(KERN_INFO, esp->host, "Unexpected phase, sreg=%02x\n", esp->sreg); esp_schedule_reset(esp); return 0; } goto again; case ESP_EVENT_DATA_IN: write = 1; fallthrough; case ESP_EVENT_DATA_OUT: { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); unsigned int dma_len = esp_cur_dma_len(ent, cmd); if (esp->rev == ESP100) scsi_esp_cmd(esp, ESP_CMD_NULL); if (write) ent->flags |= ESP_CMD_FLAG_WRITE; else ent->flags &= ~ESP_CMD_FLAG_WRITE; if (esp->ops->dma_length_limit) dma_len = esp->ops->dma_length_limit(esp, dma_addr, dma_len); else dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); esp->data_dma_len = dma_len; if (!dma_len) { shost_printk(KERN_ERR, esp->host, "DMA length is zero!\n"); shost_printk(KERN_ERR, esp->host, "cur adr[%08llx] len[%08x]\n", (unsigned long long)esp_cur_dma_addr(ent, cmd), esp_cur_dma_len(ent, cmd)); esp_schedule_reset(esp); return 0; } esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n", (unsigned long long)dma_addr, dma_len, write); 
esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, write, ESP_CMD_DMA | ESP_CMD_TI); esp_event(esp, ESP_EVENT_DATA_DONE); break; } case ESP_EVENT_DATA_DONE: { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; int bytes_sent; if (esp->ops->dma_error(esp)) { shost_printk(KERN_INFO, esp->host, "data done, DMA error, resetting\n"); esp_schedule_reset(esp); return 0; } if (ent->flags & ESP_CMD_FLAG_WRITE) { /* XXX parity errors, etc. XXX */ esp->ops->dma_drain(esp); } esp->ops->dma_invalidate(esp); if (esp->ireg != ESP_INTR_BSERV) { /* We should always see exactly a bus-service * interrupt at the end of a successful transfer. */ shost_printk(KERN_INFO, esp->host, "data done, not BSERV, resetting\n"); esp_schedule_reset(esp); return 0; } bytes_sent = esp_data_bytes_sent(esp, ent, cmd); esp_log_datadone("data done flgs[%x] sent[%d]\n", ent->flags, bytes_sent); if (bytes_sent < 0) { /* XXX force sync mode for this target XXX */ esp_schedule_reset(esp); return 0; } esp_advance_dma(esp, ent, cmd, bytes_sent); esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; } case ESP_EVENT_STATUS: { struct esp_cmd_entry *ent = esp->active_cmd; if (esp->ireg & ESP_INTR_FDONE) { ent->status = esp_read8(ESP_FDATA); ent->message = esp_read8(ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_MOK); } else if (esp->ireg == ESP_INTR_BSERV) { ent->status = esp_read8(ESP_FDATA); ent->message = 0xff; esp_event(esp, ESP_EVENT_MSGIN); return 0; } if (ent->message != COMMAND_COMPLETE) { shost_printk(KERN_INFO, esp->host, "Unexpected message %x in status\n", ent->message); esp_schedule_reset(esp); return 0; } esp_event(esp, ESP_EVENT_FREE_BUS); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; } case ESP_EVENT_FREE_BUS: { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; if (ent->message == COMMAND_COMPLETE || ent->message == DISCONNECT) scsi_esp_cmd(esp, ESP_CMD_ESEL); if (ent->message == COMMAND_COMPLETE) { esp_log_cmddone("Command done status[%x] message[%x]\n", ent->status, ent->message); if (ent->status == SAM_STAT_TASK_SET_FULL) esp_event_queue_full(esp, ent); if (ent->status == SAM_STAT_CHECK_CONDITION && !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { ent->flags |= ESP_CMD_FLAG_AUTOSENSE; esp_autosense(esp, ent); } else { esp_cmd_is_done(esp, ent, cmd, DID_OK); } } else if (ent->message == DISCONNECT) { esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n", cmd->device->id, ent->tag[0], ent->tag[1]); esp->active_cmd = NULL; esp_maybe_execute_command(esp); } else { shost_printk(KERN_INFO, esp->host, "Unexpected message %x in freebus\n", ent->message); esp_schedule_reset(esp); return 0; } if (esp->active_cmd) esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; } case ESP_EVENT_MSGOUT: { scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp_debug & ESP_DEBUG_MSGOUT) { int i; printk("ESP: Sending message [ "); for (i = 0; i < esp->msg_out_len; i++) printk("%02x ", esp->msg_out[i]); printk("]\n"); } if (esp->rev == FASHME) { int i; /* Always use the fifo. */ for (i = 0; i < esp->msg_out_len; i++) { esp_write8(esp->msg_out[i], ESP_FDATA); esp_write8(0, ESP_FDATA); } scsi_esp_cmd(esp, ESP_CMD_TI); } else { if (esp->msg_out_len == 1) { esp_write8(esp->msg_out[0], ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_TI); } else if (esp->flags & ESP_FLAG_USE_FIFO) { for (i = 0; i < esp->msg_out_len; i++) esp_write8(esp->msg_out[i], ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_TI); } else { /* Use DMA. 
*/ memcpy(esp->command_block, esp->msg_out, esp->msg_out_len); esp->ops->send_dma_cmd(esp, esp->command_block_dma, esp->msg_out_len, esp->msg_out_len, 0, ESP_CMD_DMA|ESP_CMD_TI); } } esp_event(esp, ESP_EVENT_MSGOUT_DONE); break; } case ESP_EVENT_MSGOUT_DONE: if (esp->rev == FASHME) { scsi_esp_cmd(esp, ESP_CMD_FLUSH); } else { if (esp->msg_out_len > 1) esp->ops->dma_invalidate(esp); /* XXX if the chip went into disconnected mode, * we can't run the phase state machine anyway. */ if (!(esp->ireg & ESP_INTR_DC)) scsi_esp_cmd(esp, ESP_CMD_NULL); } esp->msg_out_len = 0; esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; case ESP_EVENT_MSGIN: if (esp->ireg & ESP_INTR_BSERV) { if (esp->rev == FASHME) { if (!(esp_read8(ESP_STATUS2) & ESP_STAT2_FEMPTY)) scsi_esp_cmd(esp, ESP_CMD_FLUSH); } else { scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp->rev == ESP100) scsi_esp_cmd(esp, ESP_CMD_NULL); } scsi_esp_cmd(esp, ESP_CMD_TI); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 1; } if (esp->ireg & ESP_INTR_FDONE) { u8 val; if (esp->rev == FASHME) val = esp->fifo[0]; else val = esp_read8(ESP_FDATA); esp->msg_in[esp->msg_in_len++] = val; esp_log_msgin("Got msgin byte %x\n", val); if (!esp_msgin_process(esp)) esp->msg_in_len = 0; if (esp->rev == FASHME) scsi_esp_cmd(esp, ESP_CMD_FLUSH); scsi_esp_cmd(esp, ESP_CMD_MOK); /* Check whether a bus reset is to be done next */ if (esp->event == ESP_EVENT_RESET) return 0; if (esp->event != ESP_EVENT_FREE_BUS) esp_event(esp, ESP_EVENT_CHECK_PHASE); } else { shost_printk(KERN_INFO, esp->host, "MSGIN neither BSERV not FDON, resetting"); esp_schedule_reset(esp); return 0; } break; case ESP_EVENT_CMD_START: memcpy(esp->command_block, esp->cmd_bytes_ptr, esp->cmd_bytes_left); esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI); esp_event(esp, ESP_EVENT_CMD_DONE); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; case ESP_EVENT_CMD_DONE: esp->ops->dma_invalidate(esp); if (esp->ireg & ESP_INTR_BSERV) { esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; } esp_schedule_reset(esp); return 0; case ESP_EVENT_RESET: scsi_esp_cmd(esp, ESP_CMD_RS); break; default: shost_printk(KERN_INFO, esp->host, "Unexpected event %x, resetting\n", esp->event); esp_schedule_reset(esp); return 0; } return 1; } static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; esp_unmap_dma(esp, cmd); esp_free_lun_tag(ent, cmd->device->hostdata); cmd->result = DID_RESET << 16; if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) esp_unmap_sense(esp, ent); scsi_done(cmd); list_del(&ent->list); esp_put_ent(esp, ent); } static void esp_clear_hold(struct scsi_device *dev, void *data) { struct esp_lun_data *lp = dev->hostdata; BUG_ON(lp->num_tagged); lp->hold = 0; } static void esp_reset_cleanup(struct esp *esp) { struct esp_cmd_entry *ent, *tmp; int i; list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { struct scsi_cmnd *cmd = ent->cmd; list_del(&ent->list); cmd->result = DID_RESET << 16; scsi_done(cmd); esp_put_ent(esp, ent); } list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { if (ent == esp->active_cmd) esp->active_cmd = NULL; esp_reset_cleanup_one(esp, ent); } BUG_ON(esp->active_cmd != NULL); /* Force renegotiation of sync/wide transfers. 
*/ for (i = 0; i < ESP_MAX_TARGET; i++) { struct esp_target_data *tp = &esp->target[i]; tp->esp_period = 0; tp->esp_offset = 0; tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | ESP_CONFIG3_FSCSI | ESP_CONFIG3_FAST); tp->flags &= ~ESP_TGT_WIDE; tp->flags |= ESP_TGT_CHECK_NEGO; if (tp->starget) __starget_for_each_device(tp->starget, NULL, esp_clear_hold); } esp->flags &= ~ESP_FLAG_RESETTING; } /* Runs under host->lock */ static void __esp_interrupt(struct esp *esp) { int finish_reset, intr_done; u8 phase; /* * Once INTRPT is read STATUS and SSTEP are cleared. */ esp->sreg = esp_read8(ESP_STATUS); esp->seqreg = esp_read8(ESP_SSTEP); esp->ireg = esp_read8(ESP_INTRPT); if (esp->flags & ESP_FLAG_RESETTING) { finish_reset = 1; } else { if (esp_check_gross_error(esp)) return; finish_reset = esp_check_spur_intr(esp); if (finish_reset < 0) return; } if (esp->ireg & ESP_INTR_SR) finish_reset = 1; if (finish_reset) { esp_reset_cleanup(esp); if (esp->eh_reset) { complete(esp->eh_reset); esp->eh_reset = NULL; } return; } phase = (esp->sreg & ESP_STAT_PMASK); if (esp->rev == FASHME) { if (((phase != ESP_DIP && phase != ESP_DOP) && esp->select_state == ESP_SELECT_NONE && esp->event != ESP_EVENT_STATUS && esp->event != ESP_EVENT_DATA_DONE) || (esp->ireg & ESP_INTR_RSEL)) { esp->sreg2 = esp_read8(ESP_STATUS2); if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || (esp->sreg2 & ESP_STAT2_F1BYTE)) hme_read_fifo(esp); } } esp_log_intr("intr sreg[%02x] seqreg[%02x] " "sreg2[%02x] ireg[%02x]\n", esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); intr_done = 0; if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { shost_printk(KERN_INFO, esp->host, "unexpected IREG %02x\n", esp->ireg); if (esp->ireg & ESP_INTR_IC) esp_dump_cmd_log(esp); esp_schedule_reset(esp); } else { if (esp->ireg & ESP_INTR_RSEL) { if (esp->active_cmd) (void) esp_finish_select(esp); intr_done = esp_reconnect(esp); } else { /* Some combination of FDONE, BSERV, DC. */ if (esp->select_state != ESP_SELECT_NONE) intr_done = esp_finish_select(esp); } } while (!intr_done) intr_done = esp_process_event(esp); } irqreturn_t scsi_esp_intr(int irq, void *dev_id) { struct esp *esp = dev_id; unsigned long flags; irqreturn_t ret; spin_lock_irqsave(esp->host->host_lock, flags); ret = IRQ_NONE; if (esp->ops->irq_pending(esp)) { ret = IRQ_HANDLED; for (;;) { int i; __esp_interrupt(esp); if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) break; esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { if (esp->ops->irq_pending(esp)) break; } if (i == ESP_QUICKIRQ_LIMIT) break; } } spin_unlock_irqrestore(esp->host->host_lock, flags); return ret; } EXPORT_SYMBOL(scsi_esp_intr); static void esp_get_revision(struct esp *esp) { u8 val; esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); if (esp->config2 == 0) { esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); esp_write8(esp->config2, ESP_CFG2); val = esp_read8(ESP_CFG2); val &= ~ESP_CONFIG2_MAGIC; esp->config2 = 0; if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { /* * If what we write to cfg2 does not come back, * cfg2 is not implemented. * Therefore this must be a plain esp100. */ esp->rev = ESP100; return; } } esp_set_all_config3(esp, 5); esp->prev_cfg3 = 5; esp_write8(esp->config2, ESP_CFG2); esp_write8(0, ESP_CFG3); esp_write8(esp->prev_cfg3, ESP_CFG3); val = esp_read8(ESP_CFG3); if (val != 5) { /* The cfg2 register is implemented, however * cfg3 is not, must be esp100a. 
*/ esp->rev = ESP100A; } else { esp_set_all_config3(esp, 0); esp->prev_cfg3 = 0; esp_write8(esp->prev_cfg3, ESP_CFG3); /* All of cfg{1,2,3} implemented, must be one of * the fas variants, figure out which one. */ if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { esp->rev = FAST; esp->sync_defp = SYNC_DEFP_FAST; } else { esp->rev = ESP236; } } } static void esp_init_swstate(struct esp *esp) { int i; INIT_LIST_HEAD(&esp->queued_cmds); INIT_LIST_HEAD(&esp->active_cmds); INIT_LIST_HEAD(&esp->esp_cmd_pool); /* Start with a clear state, domain validation (via ->slave_configure, * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged * commands. */ for (i = 0 ; i < ESP_MAX_TARGET; i++) { esp->target[i].flags = 0; esp->target[i].nego_goal_period = 0; esp->target[i].nego_goal_offset = 0; esp->target[i].nego_goal_width = 0; esp->target[i].nego_goal_tags = 0; } } /* This places the ESP into a known state at boot time. */ static void esp_bootup_reset(struct esp *esp) { u8 val; /* Reset the DMA */ esp->ops->reset_dma(esp); /* Reset the ESP */ esp_reset_esp(esp); /* Reset the SCSI bus, but tell ESP not to generate an irq */ val = esp_read8(ESP_CFG1); val |= ESP_CONFIG1_SRRDISAB; esp_write8(val, ESP_CFG1); scsi_esp_cmd(esp, ESP_CMD_RS); udelay(400); esp_write8(esp->config1, ESP_CFG1); /* Eat any bitrot in the chip and we are done... */ esp_read8(ESP_INTRPT); } static void esp_set_clock_params(struct esp *esp) { int fhz; u8 ccf; /* This is getting messy but it has to be done correctly or else * you get weird behavior all over the place. We are trying to * basically figure out three pieces of information. * * a) Clock Conversion Factor * * This is a representation of the input crystal clock frequency * going into the ESP on this machine. Any operation whose timing * is longer than 400ns depends on this value being correct. For * example, you'll get blips for arbitration/selection during high * load or with multiple targets if this is not set correctly. * * b) Selection Time-Out * * The ESP isn't very bright and will arbitrate for the bus and try * to select a target forever if you let it. This value tells the * ESP when it has taken too long to negotiate and that it should * interrupt the CPU so we can see what happened. The value is * computed as follows (from NCR/Symbios chip docs). * * (Time Out Period) * (Input Clock) * STO = ---------------------------------- * (8192) * (Clock Conversion Factor) * * We use a time out period of 250ms (ESP_BUS_TIMEOUT). * * c) Imperical constants for synchronous offset and transfer period * register values * * This entails the smallest and largest sync period we could ever * handle on this ESP. */ fhz = esp->cfreq; ccf = ((fhz / 1000000) + 4) / 5; if (ccf == 1) ccf = 2; /* If we can't find anything reasonable, just assume 20MHZ. * This is the clock frequency of the older sun4c's where I've * been unable to find the clock-frequency PROM property. All * other machines provide useful values it seems. */ if (fhz <= 5000000 || ccf < 1 || ccf > 8) { fhz = 20000000; ccf = 4; } esp->cfact = (ccf == 8 ? 
0 : ccf); esp->cfreq = fhz; esp->ccycle = ESP_HZ_TO_CYCLE(fhz); esp->ctick = ESP_TICK(ccf, esp->ccycle); esp->neg_defp = ESP_NEG_DEFP(fhz, ccf); esp->sync_defp = SYNC_DEFP_SLOW; } static const char *esp_chip_names[] = { "ESP100", "ESP100A", "ESP236", "FAS236", "AM53C974", "53CF9x-2", "FAS100A", "FAST", "FASHME", }; static struct scsi_transport_template *esp_transport_template; int scsi_esp_register(struct esp *esp) { static int instance; int err; if (!esp->num_tags) esp->num_tags = ESP_DEFAULT_TAGS; esp->host->transportt = esp_transport_template; esp->host->max_lun = ESP_MAX_LUN; esp->host->cmd_per_lun = 2; esp->host->unique_id = instance; esp_set_clock_params(esp); esp_get_revision(esp); esp_init_swstate(esp); esp_bootup_reset(esp); dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n", esp->host->unique_id, esp->regs, esp->dma_regs, esp->host->irq); dev_printk(KERN_INFO, esp->dev, "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n", esp->host->unique_id, esp_chip_names[esp->rev], esp->cfreq / 1000000, esp->cfact, esp->scsi_id); /* Let the SCSI bus reset settle. */ ssleep(esp_bus_reset_settle); err = scsi_add_host(esp->host, esp->dev); if (err) return err; instance++; scsi_scan_host(esp->host); return 0; } EXPORT_SYMBOL(scsi_esp_register); void scsi_esp_unregister(struct esp *esp) { scsi_remove_host(esp->host); } EXPORT_SYMBOL(scsi_esp_unregister); static int esp_target_alloc(struct scsi_target *starget) { struct esp *esp = shost_priv(dev_to_shost(&starget->dev)); struct esp_target_data *tp = &esp->target[starget->id]; tp->starget = starget; return 0; } static void esp_target_destroy(struct scsi_target *starget) { struct esp *esp = shost_priv(dev_to_shost(&starget->dev)); struct esp_target_data *tp = &esp->target[starget->id]; tp->starget = NULL; } static int esp_slave_alloc(struct scsi_device *dev) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; struct esp_lun_data *lp; lp = kzalloc(sizeof(*lp), GFP_KERNEL); if (!lp) return -ENOMEM; dev->hostdata = lp; spi_min_period(tp->starget) = esp->min_period; spi_max_offset(tp->starget) = 15; if (esp->flags & ESP_FLAG_WIDE_CAPABLE) spi_max_width(tp->starget) = 1; else spi_max_width(tp->starget) = 0; return 0; } static int esp_slave_configure(struct scsi_device *dev) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; if (dev->tagged_supported) scsi_change_queue_depth(dev, esp->num_tags); tp->flags |= ESP_TGT_DISCONNECT; if (!spi_initial_dv(dev->sdev_target)) spi_dv_device(dev); return 0; } static void esp_slave_destroy(struct scsi_device *dev) { struct esp_lun_data *lp = dev->hostdata; kfree(lp); dev->hostdata = NULL; } static int esp_eh_abort_handler(struct scsi_cmnd *cmd) { struct esp *esp = shost_priv(cmd->device->host); struct esp_cmd_entry *ent, *tmp; struct completion eh_done; unsigned long flags; /* XXX This helps a lot with debugging but might be a bit * XXX much for the final driver. 
*/ spin_lock_irqsave(esp->host->host_lock, flags); shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n", cmd, cmd->cmnd[0]); ent = esp->active_cmd; if (ent) shost_printk(KERN_ERR, esp->host, "Current command [%p:%02x]\n", ent->cmd, ent->cmd->cmnd[0]); list_for_each_entry(ent, &esp->queued_cmds, list) { shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n", ent->cmd, ent->cmd->cmnd[0]); } list_for_each_entry(ent, &esp->active_cmds, list) { shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n", ent->cmd, ent->cmd->cmnd[0]); } esp_dump_cmd_log(esp); spin_unlock_irqrestore(esp->host->host_lock, flags); spin_lock_irqsave(esp->host->host_lock, flags); ent = NULL; list_for_each_entry(tmp, &esp->queued_cmds, list) { if (tmp->cmd == cmd) { ent = tmp; break; } } if (ent) { /* Easiest case, we didn't even issue the command * yet so it is trivial to abort. */ list_del(&ent->list); cmd->result = DID_ABORT << 16; scsi_done(cmd); esp_put_ent(esp, ent); goto out_success; } init_completion(&eh_done); ent = esp->active_cmd; if (ent && ent->cmd == cmd) { /* Command is the currently active command on * the bus. If we already have an output message * pending, no dice. */ if (esp->msg_out_len) goto out_failure; /* Send out an abort, encouraging the target to * go to MSGOUT phase by asserting ATN. */ esp->msg_out[0] = ABORT_TASK_SET; esp->msg_out_len = 1; ent->eh_done = &eh_done; scsi_esp_cmd(esp, ESP_CMD_SATN); } else { /* The command is disconnected. This is not easy to * abort. For now we fail and let the scsi error * handling layer go try a scsi bus reset or host * reset. * * What we could do is put together a scsi command * solely for the purpose of sending an abort message * to the target. Coming up with all the code to * cook up scsi commands, special case them everywhere, * etc. is for questionable gain and it would be better * if the generic scsi error handling layer could do at * least some of that for us. * * Anyways this is an area for potential future improvement * in this driver. */ goto out_failure; } spin_unlock_irqrestore(esp->host->host_lock, flags); if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { spin_lock_irqsave(esp->host->host_lock, flags); ent->eh_done = NULL; spin_unlock_irqrestore(esp->host->host_lock, flags); return FAILED; } return SUCCESS; out_success: spin_unlock_irqrestore(esp->host->host_lock, flags); return SUCCESS; out_failure: /* XXX This might be a good location to set ESP_TGT_BROKEN * XXX since we know which target/lun in particular is * XXX causing trouble. */ spin_unlock_irqrestore(esp->host->host_lock, flags); return FAILED; } static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) { struct esp *esp = shost_priv(cmd->device->host); struct completion eh_reset; unsigned long flags; init_completion(&eh_reset); spin_lock_irqsave(esp->host->host_lock, flags); esp->eh_reset = &eh_reset; /* XXX This is too simple... We should add lots of * XXX checks here so that if we find that the chip is * XXX very wedged we return failure immediately so * XXX that we can perform a full chip reset. */ esp->flags |= ESP_FLAG_RESETTING; scsi_esp_cmd(esp, ESP_CMD_RS); spin_unlock_irqrestore(esp->host->host_lock, flags); ssleep(esp_bus_reset_settle); if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { spin_lock_irqsave(esp->host->host_lock, flags); esp->eh_reset = NULL; spin_unlock_irqrestore(esp->host->host_lock, flags); return FAILED; } return SUCCESS; } /* All bets are off, reset the entire device. 
*/ static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) { struct esp *esp = shost_priv(cmd->device->host); unsigned long flags; spin_lock_irqsave(esp->host->host_lock, flags); esp_bootup_reset(esp); esp_reset_cleanup(esp); spin_unlock_irqrestore(esp->host->host_lock, flags); ssleep(esp_bus_reset_settle); return SUCCESS; } static const char *esp_info(struct Scsi_Host *host) { return "esp"; } const struct scsi_host_template scsi_esp_template = { .module = THIS_MODULE, .name = "esp", .info = esp_info, .queuecommand = esp_queuecommand, .target_alloc = esp_target_alloc, .target_destroy = esp_target_destroy, .slave_alloc = esp_slave_alloc, .slave_configure = esp_slave_configure, .slave_destroy = esp_slave_destroy, .eh_abort_handler = esp_eh_abort_handler, .eh_bus_reset_handler = esp_eh_bus_reset_handler, .eh_host_reset_handler = esp_eh_host_reset_handler, .can_queue = 7, .this_id = 7, .sg_tablesize = SG_ALL, .max_sectors = 0xffff, .skip_settle_delay = 1, .cmd_size = sizeof(struct esp_cmd_priv), }; EXPORT_SYMBOL(scsi_esp_template); static void esp_get_signalling(struct Scsi_Host *host) { struct esp *esp = shost_priv(host); enum spi_signal_type type; if (esp->flags & ESP_FLAG_DIFFERENTIAL) type = SPI_SIGNAL_HVD; else type = SPI_SIGNAL_SE; spi_signalling(host) = type; } static void esp_set_offset(struct scsi_target *target, int offset) { struct Scsi_Host *host = dev_to_shost(target->dev.parent); struct esp *esp = shost_priv(host); struct esp_target_data *tp = &esp->target[target->id]; if (esp->flags & ESP_FLAG_DISABLE_SYNC) tp->nego_goal_offset = 0; else tp->nego_goal_offset = offset; tp->flags |= ESP_TGT_CHECK_NEGO; } static void esp_set_period(struct scsi_target *target, int period) { struct Scsi_Host *host = dev_to_shost(target->dev.parent); struct esp *esp = shost_priv(host); struct esp_target_data *tp = &esp->target[target->id]; tp->nego_goal_period = period; tp->flags |= ESP_TGT_CHECK_NEGO; } static void esp_set_width(struct scsi_target *target, int width) { struct Scsi_Host *host = dev_to_shost(target->dev.parent); struct esp *esp = shost_priv(host); struct esp_target_data *tp = &esp->target[target->id]; tp->nego_goal_width = (width ? 1 : 0); tp->flags |= ESP_TGT_CHECK_NEGO; } static struct spi_function_template esp_transport_ops = { .set_offset = esp_set_offset, .show_offset = 1, .set_period = esp_set_period, .show_period = 1, .set_width = esp_set_width, .show_width = 1, .get_signalling = esp_get_signalling, }; static int __init esp_init(void) { esp_transport_template = spi_attach_transport(&esp_transport_ops); if (!esp_transport_template) return -ENODEV; return 0; } static void __exit esp_exit(void) { spi_release_transport(esp_transport_template); } MODULE_DESCRIPTION("ESP SCSI driver core"); MODULE_AUTHOR("David S. 
Miller ([email protected])"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_param(esp_bus_reset_settle, int, 0); MODULE_PARM_DESC(esp_bus_reset_settle, "ESP scsi bus reset delay in seconds"); module_param(esp_debug, int, 0); MODULE_PARM_DESC(esp_debug, "ESP bitmapped debugging message enable value:\n" " 0x00000001 Log interrupt events\n" " 0x00000002 Log scsi commands\n" " 0x00000004 Log resets\n" " 0x00000008 Log message in events\n" " 0x00000010 Log message out events\n" " 0x00000020 Log command completion\n" " 0x00000040 Log disconnects\n" " 0x00000080 Log data start\n" " 0x00000100 Log data done\n" " 0x00000200 Log reconnects\n" " 0x00000400 Log auto-sense data\n" ); module_init(esp_init); module_exit(esp_exit); #ifdef CONFIG_SCSI_ESP_PIO static inline unsigned int esp_wait_for_fifo(struct esp *esp) { int i = 500000; do { unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; if (fbytes) return fbytes; udelay(1); } while (--i); shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n", esp_read8(ESP_STATUS)); return 0; } static inline int esp_wait_for_intr(struct esp *esp) { int i = 500000; do { esp->sreg = esp_read8(ESP_STATUS); if (esp->sreg & ESP_STAT_INTR) return 0; udelay(1); } while (--i); shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n", esp->sreg); return 1; } #define ESP_FIFO_SIZE 16 void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { u8 phase = esp->sreg & ESP_STAT_PMASK; cmd &= ~ESP_CMD_DMA; esp->send_cmd_error = 0; if (write) { u8 *dst = (u8 *)addr; u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV); scsi_esp_cmd(esp, cmd); while (1) { if (!esp_wait_for_fifo(esp)) break; *dst++ = readb(esp->fifo_reg); --esp_count; if (!esp_count) break; if (esp_wait_for_intr(esp)) { esp->send_cmd_error = 1; break; } if ((esp->sreg & ESP_STAT_PMASK) != phase) break; esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & mask) { esp->send_cmd_error = 1; break; } if (phase == ESP_MIP) esp_write8(ESP_CMD_MOK, ESP_CMD); esp_write8(ESP_CMD_TI, ESP_CMD); } } else { unsigned int n = ESP_FIFO_SIZE; u8 *src = (u8 *)addr; scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (n > esp_count) n = esp_count; writesb(esp->fifo_reg, src, n); src += n; esp_count -= n; scsi_esp_cmd(esp, cmd); while (esp_count) { if (esp_wait_for_intr(esp)) { esp->send_cmd_error = 1; break; } if ((esp->sreg & ESP_STAT_PMASK) != phase) break; esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & ~ESP_INTR_BSERV) { esp->send_cmd_error = 1; break; } n = ESP_FIFO_SIZE - (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES); if (n > esp_count) n = esp_count; writesb(esp->fifo_reg, src, n); src += n; esp_count -= n; esp_write8(ESP_CMD_TI, ESP_CMD); } } esp->send_cmd_residual = esp_count; } EXPORT_SYMBOL(esp_send_pio_cmd); #endif
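/*
 * esp_set_clock_params() above documents the NCR/Symbios selection
 * time-out formula,
 *
 *         (Time Out Period) * (Input Clock)
 *   STO = ----------------------------------
 *         (8192) * (Clock Conversion Factor)
 *
 * The standalone sketch below only works that arithmetic through for a
 * 40 MHz crystal and the 250 ms time-out the driver uses; the exact
 * rounding performed by the kernel's ESP_NEG_DEFP() macro is an
 * assumption here, not taken from the source.
 */
#include <stdio.h>

int main(void)
{
	unsigned int fhz = 40000000;			/* input clock in Hz */
	unsigned int ccf = ((fhz / 1000000) + 4) / 5;	/* clock conversion factor: (40 + 4) / 5 = 8 */
	unsigned int cfact = (ccf == 8) ? 0 : ccf;	/* a CCF of 8 is encoded as 0 in the register */

	/* STO for a 250 ms time-out; truncating division, the real macro may round */
	unsigned int sto = (unsigned int)((0.250 * fhz) / (8192 * ccf));

	printf("ccf=%u cfact=%u sto=%u (0x%02x)\n", ccf, cfact, sto, sto);
	/* prints: ccf=8 cfact=0 sto=152 (0x98) */
	return 0;
}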
linux-master
drivers/scsi/esp_scsi.c
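/*
 * The esp_scsi.c core above traces every chip command and state-machine
 * event through esp_event_log[], advancing the cursor with
 * "(idx + 1) & (ESP_EVENT_LOG_SZ - 1)".  The user-space sketch below shows
 * that power-of-two ring-buffer pattern in isolation; every name in it is
 * illustrative and none is taken from the driver.
 */
#include <stdio.h>

#define LOG_SZ 32			/* must be a power of two for the mask to wrap */

struct log_ent {
	unsigned char type;		/* 0 = command, 1 = event */
	unsigned char val;
};

static struct log_ent ev_log[LOG_SZ];
static unsigned int ev_log_cur;

static void log_add(unsigned char type, unsigned char val)
{
	struct log_ent *p = &ev_log[ev_log_cur];

	p->type = type;
	p->val = val;
	/* The AND replaces a modulo because LOG_SZ is a power of two. */
	ev_log_cur = (ev_log_cur + 1) & (LOG_SZ - 1);
}

static void log_dump(void)
{
	unsigned int idx = ev_log_cur, stop = idx;

	/* Once the log has wrapped, the cursor points at the oldest entry. */
	do {
		printf("ent[%u] %s val[%02x]\n", idx,
		       ev_log[idx].type ? "EVENT" : "CMD", ev_log[idx].val);
		idx = (idx + 1) & (LOG_SZ - 1);
	} while (idx != stop);
}

int main(void)
{
	for (unsigned int i = 0; i < 40; i++)	/* more entries than slots, so it wraps */
		log_add(i & 1, (unsigned char)i);
	log_dump();
	return 0;
}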
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/io.h> #include <linux/isa.h> #include <scsi/scsi_host.h> #include "fdomain.h" #define MAXBOARDS_PARAM 4 static int io[MAXBOARDS_PARAM] = { 0, 0, 0, 0 }; module_param_hw_array(io, int, ioport, NULL, 0); MODULE_PARM_DESC(io, "base I/O address of controller (0x140, 0x150, 0x160, 0x170)"); static int irq[MAXBOARDS_PARAM] = { 0, 0, 0, 0 }; module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ of controller (0=auto [default])"); static int scsi_id[MAXBOARDS_PARAM] = { 0, 0, 0, 0 }; module_param_hw_array(scsi_id, int, other, NULL, 0); MODULE_PARM_DESC(scsi_id, "SCSI ID of controller (default = 7)"); static unsigned long addresses[] = { 0xc8000, 0xca000, 0xce000, 0xde000, }; #define ADDRESS_COUNT ARRAY_SIZE(addresses) static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 }; #define PORT_COUNT ARRAY_SIZE(ports) static unsigned short irqs[] = { 3, 5, 10, 11, 12, 14, 15, 0 }; /* This driver works *ONLY* for Future Domain cards using the TMC-1800, * TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670, * and 1680. These are all 16-bit cards. * BIOS versions prior to 3.2 assigned SCSI ID 6 to SCSI adapter. * * The following BIOS signature signatures are for boards which do *NOT* * work with this driver (these TMC-8xx and TMC-9xx boards may work with the * Seagate driver): * * FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88 * FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89 * FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89 * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90 * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90 * FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90 * FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92 * * (The cards which do *NOT* work are all 8-bit cards -- although some of * them have a 16-bit form-factor, the upper 8-bits are used only for IRQs * and are *NOT* used for data. You can tell the difference by following * the tracings on the circuit board -- if only the IRQ lines are involved, * you have a "8-bit" card, and should *NOT* use this driver.) */ static struct signature { const char *signature; int offset; int length; int this_id; int base_offset; } signatures[] = { /* 1 2 3 4 5 6 */ /* 123456789012345678901234567890123456789012345678901234567890 */ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 6, 0x1fcc }, { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 6, 0x1fcc }, { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 6, 0x1fa2 }, { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 6, 0x1fa2 }, { "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 6, 0x1fa3 }, { "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 6, 0 }, { "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 7, 0 }, { "IBM F1 P2 BIOS v1.0011/09/92", 5, 28, 7, 0x1ff3 }, { "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 7, 0 }, { "Future Domain Corp. V1.0008/18/93", 5, 33, 7, 0 }, { "Future Domain Corp. V2.0108/18/93", 5, 33, 7, 0 }, { "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 7, 0 }, { "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 7, 0 }, { "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 7, 0 }, { "FUTURE DOMAIN CORP. 
V3.6108/18/93", 5, 34, 7, 0 }, }; #define SIGNATURE_COUNT ARRAY_SIZE(signatures) static int fdomain_isa_match(struct device *dev, unsigned int ndev) { struct Scsi_Host *sh; int i, base = 0, irq = 0; unsigned long bios_base = 0; struct signature *sig = NULL; void __iomem *p; static struct signature *saved_sig; int this_id = 7; if (ndev < ADDRESS_COUNT) { /* scan supported ISA BIOS addresses */ p = ioremap(addresses[ndev], FDOMAIN_BIOS_SIZE); if (!p) return 0; for (i = 0; i < SIGNATURE_COUNT; i++) if (check_signature(p + signatures[i].offset, signatures[i].signature, signatures[i].length)) break; if (i == SIGNATURE_COUNT) /* no signature found */ goto fail_unmap; sig = &signatures[i]; bios_base = addresses[ndev]; /* read I/O base from BIOS area */ if (sig->base_offset) base = readb(p + sig->base_offset) + (readb(p + sig->base_offset + 1) << 8); iounmap(p); if (base) { dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n", bios_base, base); } else { /* no I/O base in BIOS area */ dev_info(dev, "BIOS at 0x%lx\n", bios_base); /* save BIOS signature for later use in port probing */ saved_sig = sig; return 0; } } else /* scan supported I/O ports */ base = ports[ndev - ADDRESS_COUNT]; /* use saved BIOS signature if present */ if (!sig && saved_sig) sig = saved_sig; if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa")) return 0; irq = irqs[(inb(base + REG_CFG1) & CFG1_IRQ_MASK) >> 1]; if (sig) this_id = sig->this_id; sh = fdomain_create(base, irq, this_id, dev); if (!sh) { release_region(base, FDOMAIN_REGION_SIZE); return 0; } dev_set_drvdata(dev, sh); return 1; fail_unmap: iounmap(p); return 0; } static int fdomain_isa_param_match(struct device *dev, unsigned int ndev) { struct Scsi_Host *sh; int irq_ = irq[ndev]; if (!io[ndev]) return 0; if (!request_region(io[ndev], FDOMAIN_REGION_SIZE, "fdomain_isa")) { dev_err(dev, "base 0x%x already in use", io[ndev]); return 0; } if (irq_ <= 0) irq_ = irqs[(inb(io[ndev] + REG_CFG1) & CFG1_IRQ_MASK) >> 1]; sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev); if (!sh) { dev_err(dev, "controller not found at base 0x%x", io[ndev]); release_region(io[ndev], FDOMAIN_REGION_SIZE); return 0; } dev_set_drvdata(dev, sh); return 1; } static void fdomain_isa_remove(struct device *dev, unsigned int ndev) { struct Scsi_Host *sh = dev_get_drvdata(dev); int base = sh->io_port; fdomain_destroy(sh); release_region(base, FDOMAIN_REGION_SIZE); dev_set_drvdata(dev, NULL); } static struct isa_driver fdomain_isa_driver = { .match = fdomain_isa_match, .remove = fdomain_isa_remove, .driver = { .name = "fdomain_isa", .pm = FDOMAIN_PM_OPS, }, }; static int __init fdomain_isa_init(void) { int isa_probe_count = ADDRESS_COUNT + PORT_COUNT; if (io[0]) { /* use module parameters if present */ fdomain_isa_driver.match = fdomain_isa_param_match; isa_probe_count = MAXBOARDS_PARAM; } return isa_register_driver(&fdomain_isa_driver, isa_probe_count); } static void __exit fdomain_isa_exit(void) { isa_unregister_driver(&fdomain_isa_driver); } module_init(fdomain_isa_init); module_exit(fdomain_isa_exit); MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith"); MODULE_DESCRIPTION("Future Domain TMC-16x0 ISA SCSI driver"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/fdomain_isa.c
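fdomain_isa.c above probes by scanning fixed BIOS addresses for known signature strings and, when the matched entry records a base_offset, reading the controller's I/O base as a little-endian 16-bit value out of the BIOS image. The following is a minimal userspace sketch of that lookup under made-up data: the signature entry, the bios_image[] buffer and its contents are hypothetical, standing in for the ioremap()ed BIOS window and the driver's real signatures[] table.

/*
 * Userspace sketch: match a signature at a fixed offset in a BIOS image,
 * then decode the I/O base stored as two little-endian bytes.
 */
#include <stdio.h>
#include <string.h>

struct signature {
	const char *signature;
	int offset;
	int length;
	int base_offset;
};

static const struct signature sigs[] = {
	{ "EXAMPLE BIOS V1.0", 5, 17, 0x20 },	/* made-up entry */
};

int main(void)
{
	unsigned char bios_image[64] = { 0 };
	size_t i;

	/* fake BIOS contents: signature at offset 5, I/O base 0x140 at 0x20 */
	memcpy(bios_image + 5, "EXAMPLE BIOS V1.0", 17);
	bios_image[0x20] = 0x40;
	bios_image[0x21] = 0x01;

	for (i = 0; i < sizeof(sigs) / sizeof(sigs[0]); i++) {
		const struct signature *sig = &sigs[i];

		if (memcmp(bios_image + sig->offset, sig->signature,
			   sig->length))
			continue;
		if (sig->base_offset) {
			unsigned int base = bios_image[sig->base_offset] |
				(bios_image[sig->base_offset + 1] << 8);
			printf("signature matched, I/O base 0x%x\n", base);
		} else {
			printf("signature matched, no I/O base recorded\n");
		}
		return 0;
	}
	printf("no signature found\n");
	return 0;
}

In the driver, a match without a recorded I/O base is remembered (saved_sig) so a later port-probe pass can still use the board's SCSI ID from the signature table.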
/* * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver * Debug routine * * This software may be used and distributed according to the terms of * the GNU General Public License. */ /* * Show the command data of a command */ static const char unknown[] = "UNKNOWN"; static const char * group_0_commands[] = { /* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense", /* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks", /* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown, /* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry", /* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve", /* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit", /* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", unknown, }; static const char *group_1_commands[] = { /* 20-22 */ unknown, unknown, unknown, /* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)", /* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown, /* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", /* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", /* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", /* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer", /* 3d-3f */ "Update Block", "Read Long", "Write Long", }; static const char *group_2_commands[] = { /* 40-41 */ "Change Definition", "Write Same", /* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)", /* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown, /* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)", /* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown, /* 5c-5f */ unknown, unknown, unknown, }; #define group(opcode) (((opcode) >> 5) & 7) #define RESERVED_GROUP 0 #define VENDOR_GROUP 1 #define NOTEXT_GROUP 2 static const char **commands[] = { group_0_commands, group_1_commands, group_2_commands, (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP, (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP, (const char **) VENDOR_GROUP }; static const char reserved[] = "RESERVED"; static const char vendor[] = "VENDOR SPECIFIC"; static void print_opcodek(unsigned char opcode) { const char **table = commands[ group(opcode) ]; switch ((unsigned long) table) { case RESERVED_GROUP: printk("%s[%02x] ", reserved, opcode); break; case NOTEXT_GROUP: printk("%s(notext)[%02x] ", unknown, opcode); break; case VENDOR_GROUP: printk("%s[%02x] ", vendor, opcode); break; default: if (table[opcode & 0x1f] != unknown) printk("%s[%02x] ", table[opcode & 0x1f], opcode); else printk("%s[%02x] ", unknown, opcode); break; } } static void print_commandk (unsigned char *command) { int i,s; // printk(KERN_DEBUG); print_opcodek(command[0]); /*printk(KERN_DEBUG "%s ", __func__);*/ if ((command[0] >> 5) == 6 || (command[0] >> 5) == 7 ) { s = 12; /* vender specific */ } else { s = COMMAND_SIZE(command[0]); } for ( i = 1; i < s; ++i) { printk("%02x ", command[i]); } switch (s) { case 6: printk("LBA=%d len=%d", (((unsigned int)command[1] & 0x0f) << 16) | ( (unsigned int)command[2] << 8) | ( (unsigned int)command[3] ), (unsigned int)command[4] ); break; case 10: printk("LBA=%d len=%d", ((unsigned int)command[2] << 24) | ((unsigned int)command[3] 
<< 16) | ((unsigned int)command[4] << 8) | ((unsigned int)command[5] ), ((unsigned int)command[7] << 8) | ((unsigned int)command[8] ) ); break; case 12: printk("LBA=%d len=%d", ((unsigned int)command[2] << 24) | ((unsigned int)command[3] << 16) | ((unsigned int)command[4] << 8) | ((unsigned int)command[5] ), ((unsigned int)command[6] << 24) | ((unsigned int)command[7] << 16) | ((unsigned int)command[8] << 8) | ((unsigned int)command[9] ) ); break; default: break; } printk("\n"); } static void show_command(struct scsi_cmnd *SCpnt) { print_commandk(SCpnt->cmnd); } static void show_busphase(unsigned char stat) { switch(stat) { case BUSPHASE_COMMAND: printk( "BUSPHASE_COMMAND\n"); break; case BUSPHASE_MESSAGE_IN: printk( "BUSPHASE_MESSAGE_IN\n"); break; case BUSPHASE_MESSAGE_OUT: printk( "BUSPHASE_MESSAGE_OUT\n"); break; case BUSPHASE_DATA_IN: printk( "BUSPHASE_DATA_IN\n"); break; case BUSPHASE_DATA_OUT: printk( "BUSPHASE_DATA_OUT\n"); break; case BUSPHASE_STATUS: printk( "BUSPHASE_STATUS\n"); break; case BUSPHASE_SELECT: printk( "BUSPHASE_SELECT\n"); break; default: printk( "BUSPHASE_other: 0x%x\n", stat); break; } } static void show_autophase(unsigned short i) { printk("auto: 0x%x,", i); if(i & COMMAND_PHASE) { printk(" cmd"); } if(i & DATA_IN_PHASE) { printk(" din"); } if(i & DATA_OUT_PHASE) { printk(" dout"); } if(i & MSGOUT_PHASE) { printk(" mout"); } if(i & STATUS_PHASE) { printk(" stat"); } if(i & ILLEGAL_PHASE) { printk(" ill"); } if(i & BUS_FREE_OCCUER) { printk(" bfree-o"); } if(i & MSG_IN_OCCUER) { printk(" min-o"); } if(i & MSG_OUT_OCCUER) { printk(" mout-o"); } if(i & SELECTION_TIMEOUT) { printk(" sel"); } if(i & MSGIN_00_VALID) { printk(" m0"); } if(i & MSGIN_02_VALID) { printk(" m2"); } if(i & MSGIN_03_VALID) { printk(" m3"); } if(i & MSGIN_04_VALID) { printk(" m4"); } if(i & AUTOSCSI_BUSY) { printk(" busy"); } printk("\n"); } static void nsp32_print_register(int base) { if (!(NSP32_DEBUG_MASK & NSP32_SPECIAL_PRINT_REGISTER)) return; printk("Phase=0x%x, ", nsp32_read1(base, SCSI_BUS_MONITOR)); printk("OldPhase=0x%x, ", nsp32_index_read1(base, OLD_SCSI_PHASE)); printk("syncreg=0x%x, ", nsp32_read1(base, SYNC_REG)); printk("ackwidth=0x%x, ", nsp32_read1(base, ACK_WIDTH)); printk("sgtpaddr=0x%lx, ", nsp32_read4(base, SGT_ADR)); printk("scsioutlatch=0x%x, ", nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID)); printk("msgout=0x%lx, ", nsp32_read4(base, SCSI_MSG_OUT)); printk("miscrd=0x%x, ", nsp32_index_read2(base, MISC_WR)); printk("seltimeout=0x%x, ", nsp32_read2(base, SEL_TIME_OUT)); printk("sreqrate=0x%x, ", nsp32_read1(base, SREQ_SMPL_RATE)); printk("transStatus=0x%x, ", nsp32_read2(base, TRANSFER_STATUS)); printk("reselectid=0x%x, ", nsp32_read2(base, COMMAND_CONTROL)); printk("arbit=0x%x, ", nsp32_read1(base, ARBIT_STATUS)); printk("BmStart=0x%lx, ", nsp32_read4(base, BM_START_ADR)); printk("BmCount=0x%lx, ", nsp32_read4(base, BM_CNT)); printk("SackCnt=0x%lx, ", nsp32_read4(base, SACK_CNT)); printk("SReqCnt=0x%lx, ", nsp32_read4(base, SREQ_CNT)); printk("SavedSackCnt=0x%lx, ", nsp32_read4(base, SAVED_SACK_CNT)); printk("ScsiBusControl=0x%x, ", nsp32_read1(base, SCSI_BUS_CONTROL)); printk("FifoRestCnt=0x%x, ", nsp32_read2(base, FIFO_REST_CNT)); printk("CdbIn=0x%x, ", nsp32_read1(base, SCSI_CSB_IN)); printk("\n"); if (0) { printk("execph=0x%x, ", nsp32_read2(base, SCSI_EXECUTE_PHASE)); printk("IrqStatus=0x%x, ", nsp32_read2(base, IRQ_STATUS)); printk("\n"); } } /* end */
linux-master
drivers/scsi/nsp32_debug.c
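nsp32_debug.c above decodes SCSI CDBs for logging: the opcode group is bits 7..5 of byte 0, and for 6- and 10-byte CDBs the LBA and transfer length are assembled from fixed byte positions. The short, self-contained sketch below (plain userspace C, not driver code) reproduces that bit assembly on two sample commands; decode_cdb() and the sample CDBs are illustrative only.

/*
 * Userspace sketch of CDB field decoding as done in print_commandk().
 */
#include <stdio.h>

static void decode_cdb(const unsigned char *cdb, int len)
{
	unsigned int group = (cdb[0] >> 5) & 7;
	unsigned int lba = 0, tlen = 0;

	printf("opcode 0x%02x (group %u): ", cdb[0], group);
	switch (len) {
	case 6:		/* 21-bit LBA, 8-bit length */
		lba  = ((cdb[1] & 0x0f) << 16) | (cdb[2] << 8) | cdb[3];
		tlen = cdb[4];
		break;
	case 10:	/* 32-bit LBA, 16-bit length */
		lba  = ((unsigned int)cdb[2] << 24) | (cdb[3] << 16) |
		       (cdb[4] << 8) | cdb[5];
		tlen = (cdb[7] << 8) | cdb[8];
		break;
	default:
		printf("not decoded here\n");
		return;
	}
	printf("LBA=%u len=%u\n", lba, tlen);
}

int main(void)
{
	/* READ(6): LBA 0x1234, 8 blocks */
	unsigned char read6[6]   = { 0x08, 0x00, 0x12, 0x34, 0x08, 0x00 };
	/* READ(10): LBA 0x00010000, 128 blocks */
	unsigned char read10[10] = { 0x28, 0x00, 0x00, 0x01, 0x00, 0x00,
				     0x00, 0x00, 0x80, 0x00 };

	decode_cdb(read6, 6);
	decode_cdb(read10, 10);
	return 0;
}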
// SPDX-License-Identifier: GPL-2.0-or-later /* NCR (or Symbios) 53c700 and 53c700-66 Driver * * Copyright (C) 2001 by [email protected] **----------------------------------------------------------------------------- ** ** **----------------------------------------------------------------------------- */ /* Notes: * * This driver is designed exclusively for these chips (virtually the * earliest of the scripts engine chips). They need their own drivers * because they are missing so many of the scripts and snazzy register * features of their elder brothers (the 710, 720 and 770). * * The 700 is the lowliest of the line, it can only do async SCSI. * The 700-66 can at least do synchronous SCSI up to 10MHz. * * The 700 chip has no host bus interface logic of its own. However, * it is usually mapped to a location with well defined register * offsets. Therefore, if you can determine the base address and the * irq your board incorporating this chip uses, you can probably use * this driver to run it (although you'll probably have to write a * minimal wrapper for the purpose---see the NCR_D700 driver for * details about how to do this). * * * TODO List: * * 1. Better statistics in the proc fs * * 2. Implement message queue (queues SCSI messages like commands) and make * the abort and device reset functions use them. * */ /* CHANGELOG * * Version 2.8 * * Fixed bad bug affecting tag starvation processing (previously the * driver would hang the system if too many tags starved. Also fixed * bad bug having to do with 10 byte command processing and REQUEST * SENSE (the command would loop forever getting a transfer length * mismatch in the CMD phase). * * Version 2.7 * * Fixed scripts problem which caused certain devices (notably CDRWs) * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use * __raw_readl/writel for parisc compatibility (Thomas * Bogendoerfer). Added missing SCp->request_bufflen initialisation * for sense requests (Ryan Bradetich). * * Version 2.6 * * Following test of the 64 bit parisc kernel by Richard Hirst, * several problems have now been corrected. Also adds support for * consistent memory allocation. * * Version 2.5 * * More Compatibility changes for 710 (now actually works). Enhanced * support for odd clock speeds which constrain SDTR negotiations. * correct cacheline separation for scsi messages and status for * incoherent architectures. Use of the pci mapping functions on * buffers to begin support for 64 bit drivers. * * Version 2.4 * * Added support for the 53c710 chip (in 53c700 emulation mode only---no * special 53c710 instructions or registers are used). * * Version 2.3 * * More endianness/cache coherency changes. * * Better bad device handling (handles devices lying about tag * queueing support and devices which fail to provide sense data on * contingent allegiance conditions) * * Many thanks to Richard Hirst <[email protected]> for patiently * debugging this driver on the parisc architecture and suggesting * many improvements and bug fixes. * * Thanks also go to Linuxcare Inc. for providing several PARISC * machines for me to debug the driver on. * * Version 2.2 * * Made the driver mem or io mapped; added endian invariance; added * dma cache flushing operations for architectures which need it; * added support for more varied clocking speeds. * * Version 2.1 * * Initial modularisation from the D700. See NCR_D700.c for the rest of * the changelog. 
* */ #define NCR_700_VERSION "2.8" #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/pgtable.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/byteorder.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" /* NOTE: For 64 bit drivers there are points in the code where we use * a non dereferenceable pointer to point to a structure in dma-able * memory (which is 32 bits) so that we can use all of the structure * operations but take the address at the end. This macro allows us * to truncate the 64 bit pointer down to 32 bits without the compiler * complaining */ #define to32bit(x) ((__u32)((unsigned long)(x))) #ifdef NCR_700_DEBUG #define STATIC #else #define STATIC static #endif MODULE_AUTHOR("James Bottomley"); MODULE_DESCRIPTION("53c700 and 53c700-66 Driver"); MODULE_LICENSE("GPL"); /* This is the script */ #include "53c700_d.h" STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); STATIC void NCR_700_chip_setup(struct Scsi_Host *host); STATIC void NCR_700_chip_reset(struct Scsi_Host *host); STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); STATIC const struct attribute_group *NCR_700_dev_groups[]; STATIC struct scsi_transport_template *NCR_700_transport_template = NULL; static char *NCR_700_phase[] = { "", "after selection", "before command phase", "after command phase", "after status phase", "after data in phase", "after data out phase", "during data phase", }; static char *NCR_700_condition[] = { "", "NOT MSG_OUT", "UNEXPECTED PHASE", "NOT MSG_IN", "UNEXPECTED MSG", "MSG_IN", "SDTR_MSG RECEIVED", "REJECT_MSG RECEIVED", "DISCONNECT_MSG RECEIVED", "MSG_OUT", "DATA_IN", }; static char *NCR_700_fatal_messages[] = { "unexpected message after reselection", "still MSG_OUT after message injection", "not MSG_IN after selection", "Illegal message length received", }; static char *NCR_700_SBCL_bits[] = { "IO ", "CD ", "MSG ", "ATN ", "SEL ", "BSY ", "ACK ", "REQ ", }; static char *NCR_700_SBCL_to_phase[] = { "DATA_OUT", "DATA_IN", "CMD_OUT", "STATE", "ILLEGAL PHASE", "ILLEGAL PHASE", "MSG OUT", "MSG IN", }; /* This translates the SDTR message offset and period to a value * which can be loaded into the SXFER_REG. * * NOTE: According to SCSI-2, the true transfer period (in ns) is * actually four times this period value */ static inline __u8 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata, __u8 offset, __u8 period) { int XFERP; __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); __u8 max_offset = (hostdata->chip710 ? 
NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET); if(offset == 0) return 0; if(period < hostdata->min_period) { printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4); period = hostdata->min_period; } XFERP = (period*4 * hostdata->sync_clock)/1000 - 4; if(offset > max_offset) { printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n", offset, max_offset); offset = max_offset; } if(XFERP < min_xferp) { XFERP = min_xferp; } return (offset & 0x0f) | (XFERP & 0x07)<<4; } static inline __u8 NCR_700_get_SXFER(struct scsi_device *SDp) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; return NCR_700_offset_period_to_sxfer(hostdata, spi_offset(SDp->sdev_target), spi_period(SDp->sdev_target)); } static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p) { return h->pScript + ((uintptr_t)p - (uintptr_t)h->script); } static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h, void *addr, size_t size) { if (h->noncoherent) dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), size, DMA_BIDIRECTIONAL); } static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h, void *addr, size_t size) { if (h->noncoherent) dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), size, DMA_BIDIRECTIONAL); } struct Scsi_Host * NCR_700_detect(struct scsi_host_template *tpnt, struct NCR_700_Host_Parameters *hostdata, struct device *dev) { dma_addr_t pScript, pSlots; __u8 *memory; __u32 *script; struct Scsi_Host *host; static int banner = 0; int j; if (tpnt->sdev_groups == NULL) tpnt->sdev_groups = NCR_700_dev_groups; memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL); if (!memory) { hostdata->noncoherent = 1; memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript, DMA_BIDIRECTIONAL, GFP_KERNEL); } if (!memory) { printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); return NULL; } script = (__u32 *)memory; hostdata->msgin = memory + MSGIN_OFFSET; hostdata->msgout = memory + MSGOUT_OFFSET; hostdata->status = memory + STATUS_OFFSET; hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET); hostdata->dev = dev; pSlots = pScript + SLOTS_OFFSET; /* Fill in the missing routines from the host template */ tpnt->queuecommand = NCR_700_queuecommand; tpnt->eh_abort_handler = NCR_700_abort; tpnt->eh_host_reset_handler = NCR_700_host_reset; tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST; tpnt->sg_tablesize = NCR_700_SG_SEGMENTS; tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN; tpnt->slave_configure = NCR_700_slave_configure; tpnt->slave_destroy = NCR_700_slave_destroy; tpnt->slave_alloc = NCR_700_slave_alloc; tpnt->change_queue_depth = NCR_700_change_queue_depth; if(tpnt->name == NULL) tpnt->name = "53c700"; if(tpnt->proc_name == NULL) tpnt->proc_name = "53c700"; host = scsi_host_alloc(tpnt, 4); if (!host) return NULL; memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot) * NCR_700_COMMAND_SLOTS_PER_HOST); for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) { dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0] - (unsigned long)&hostdata->slots[0].SG[0]); hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset)); if(j == 0) hostdata->free_list = &hostdata->slots[j]; else hostdata->slots[j-1].ITL_forw = &hostdata->slots[j]; hostdata->slots[j].state = NCR_700_SLOT_FREE; } for (j = 0; j < ARRAY_SIZE(SCRIPT); j++) script[j] = 
bS_to_host(SCRIPT[j]); /* adjust all labels to be bus physical */ for (j = 0; j < PATCHES; j++) script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]); /* now patch up fixed addresses. */ script_patch_32(hostdata, script, MessageLocation, pScript + MSGOUT_OFFSET); script_patch_32(hostdata, script, StatusAddress, pScript + STATUS_OFFSET); script_patch_32(hostdata, script, ReceiveMsgAddress, pScript + MSGIN_OFFSET); hostdata->script = script; hostdata->pScript = pScript; dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE); hostdata->state = NCR_700_HOST_FREE; hostdata->cmd = NULL; host->max_id = 8; host->max_lun = NCR_700_MAX_LUNS; BUG_ON(NCR_700_transport_template == NULL); host->transportt = NCR_700_transport_template; host->unique_id = (unsigned long)hostdata->base; hostdata->eh_complete = NULL; host->hostdata[0] = (unsigned long)hostdata; /* kick the chip */ NCR_700_writeb(0xff, host, CTEST9_REG); if (hostdata->chip710) hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f; else hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f; hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0); if (banner == 0) { printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By [email protected]\n"); banner = 1; } printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no, hostdata->chip710 ? "53c710" : (hostdata->fast ? "53c700-66" : "53c700"), hostdata->rev, hostdata->differential ? "(Differential)" : ""); /* reset the chip */ NCR_700_chip_reset(host); if (scsi_add_host(host, dev)) { dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n"); scsi_host_put(host); return NULL; } spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD : SPI_SIGNAL_SE; return host; } int NCR_700_release(struct Scsi_Host *host) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; if (hostdata->noncoherent) dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script, hostdata->pScript, DMA_BIDIRECTIONAL); else dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script, hostdata->pScript); return 1; } static inline __u8 NCR_700_identify(int can_disconnect, __u8 lun) { return IDENTIFY_BASE | ((can_disconnect) ? 0x40 : 0) | (lun & NCR_700_LUN_MASK); } /* * Function : static int data_residual (Scsi_Host *host) * * Purpose : return residual data count of what's in the chip. If you * really want to know what this function is doing, it's almost a * direct transcription of the algorithm described in the 53c710 * guide, except that the DBC and DFIFO registers are only 6 bits * wide on a 53c700. 
* * Inputs : host - SCSI host */ static inline int NCR_700_data_residual (struct Scsi_Host *host) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; int count, synchronous = 0; unsigned int ddir; if(hostdata->chip710) { count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) - (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f; } else { count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) - (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f; } if(hostdata->fast) synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f; /* get the data direction */ ddir = NCR_700_readb(host, CTEST0_REG) & 0x01; if (ddir) { /* Receive */ if (synchronous) count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4; else if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL) ++count; } else { /* Send */ __u8 sstat = NCR_700_readb(host, SSTAT1_REG); if (sstat & SODL_REG_FULL) ++count; if (synchronous && (sstat & SODR_REG_FULL)) ++count; } #ifdef NCR_700_DEBUG if(count) printk("RESIDUAL IS %d (ddir %d)\n", count, ddir); #endif return count; } /* print out the SCSI wires and corresponding phase from the SBCL register * in the chip */ static inline char * sbcl_to_string(__u8 sbcl) { int i; static char ret[256]; ret[0]='\0'; for(i=0; i<8; i++) { if((1<<i) & sbcl) strcat(ret, NCR_700_SBCL_bits[i]); } strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]); return ret; } static inline __u8 bitmap_to_number(__u8 bitmap) { __u8 i; for(i=0; i<8 && !(bitmap &(1<<i)); i++) ; return i; } /* Pull a slot off the free list */ STATIC struct NCR_700_command_slot * find_empty_slot(struct NCR_700_Host_Parameters *hostdata) { struct NCR_700_command_slot *slot = hostdata->free_list; if(slot == NULL) { /* sanity check */ if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST) printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST); return NULL; } if(slot->state != NCR_700_SLOT_FREE) /* should panic! */ printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n"); hostdata->free_list = slot->ITL_forw; slot->ITL_forw = NULL; /* NOTE: set the state to busy here, not queued, since this * indicates the slot is in use and cannot be run by the IRQ * finish routine. If we cannot queue the command when it * is properly build, we then change to NCR_700_SLOT_QUEUED */ slot->state = NCR_700_SLOT_BUSY; slot->flags = 0; hostdata->command_slot_count++; return slot; } STATIC void free_slot(struct NCR_700_command_slot *slot, struct NCR_700_Host_Parameters *hostdata) { if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) { printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot); } if(slot->state == NCR_700_SLOT_FREE) { printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot); } slot->resume_offset = 0; slot->cmnd = NULL; slot->state = NCR_700_SLOT_FREE; slot->ITL_forw = hostdata->free_list; hostdata->free_list = slot; hostdata->command_slot_count--; } /* This routine really does very little. 
The command is indexed on the ITL and (if tagged) the ITLQ lists in _queuecommand */ STATIC void save_for_reselection(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp, __u32 dsp) { /* Its just possible that this gets executed twice */ if(SCp != NULL) { struct NCR_700_command_slot *slot = (struct NCR_700_command_slot *)SCp->host_scribble; slot->resume_offset = dsp; } hostdata->state = NCR_700_HOST_FREE; hostdata->cmd = NULL; } STATIC inline void NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp, struct NCR_700_command_slot *slot) { if(SCp->sc_data_direction != DMA_NONE && SCp->sc_data_direction != DMA_BIDIRECTIONAL) scsi_dma_unmap(SCp); } STATIC inline void NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp, int result) { hostdata->state = NCR_700_HOST_FREE; hostdata->cmd = NULL; if(SCp != NULL) { struct NCR_700_command_slot *slot = (struct NCR_700_command_slot *)SCp->host_scribble; dma_unmap_single(hostdata->dev, slot->pCmd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); if (slot->flags == NCR_700_FLAG_AUTOSENSE) { char *cmnd = NCR_700_get_sense_cmnd(SCp->device); dma_unmap_single(hostdata->dev, slot->dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* restore the old result if the request sense was * successful */ if (result == 0) result = cmnd[7]; /* restore the original length */ SCp->cmd_len = cmnd[8]; } else NCR_700_unmap(hostdata, SCp, slot); free_slot(slot, hostdata); #ifdef NCR_700_DEBUG if(NCR_700_get_depth(SCp->device) == 0 || NCR_700_get_depth(SCp->device) > SCp->device->queue_depth) printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n", NCR_700_get_depth(SCp->device)); #endif /* NCR_700_DEBUG */ NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1); SCp->host_scribble = NULL; SCp->result = result; scsi_done(SCp); } else { printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n"); } } STATIC void NCR_700_internal_bus_reset(struct Scsi_Host *host) { /* Bus reset */ NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG); udelay(50); NCR_700_writeb(0, host, SCNTL1_REG); } STATIC void NCR_700_chip_setup(struct Scsi_Host *host) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; __u8 min_period; __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); if(hostdata->chip710) { __u8 burst_disable = 0; __u8 burst_length = 0; switch (hostdata->burst_length) { case 1: burst_length = BURST_LENGTH_1; break; case 2: burst_length = BURST_LENGTH_2; break; case 4: burst_length = BURST_LENGTH_4; break; case 8: burst_length = BURST_LENGTH_8; break; default: burst_disable = BURST_DISABLE; break; } hostdata->dcntl_extra |= COMPAT_700_MODE; NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG); NCR_700_writeb(burst_length | hostdata->dmode_extra, host, DMODE_710_REG); NCR_700_writeb(burst_disable | hostdata->ctest7_extra | (hostdata->differential ? DIFF : 0), host, CTEST7_REG); NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG); NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY | AUTO_ATN, host, SCNTL0_REG); } else { NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra, host, DMODE_700_REG); NCR_700_writeb(hostdata->differential ? 
DIFF : 0, host, CTEST7_REG); if(hostdata->fast) { /* this is for 700-66, does nothing on 700 */ NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION | GENERATE_RECEIVE_PARITY, host, CTEST8_REG); } else { NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY | AUTO_ATN, host, SCNTL0_REG); } } NCR_700_writeb(1 << host->this_id, host, SCID_REG); NCR_700_writeb(0, host, SBCL_REG); NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG); NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG); NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG); NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG); if(hostdata->clock > 75) { printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock); /* do the best we can, but the async clock will be out * of spec: sync divider 2, async divider 3 */ DEBUG(("53c700: sync 2 async 3\n")); NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG); NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG); hostdata->sync_clock = hostdata->clock/2; } else if(hostdata->clock > 50 && hostdata->clock <= 75) { /* sync divider 1.5, async divider 3 */ DEBUG(("53c700: sync 1.5 async 3\n")); NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG); NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG); hostdata->sync_clock = hostdata->clock*2; hostdata->sync_clock /= 3; } else if(hostdata->clock > 37 && hostdata->clock <= 50) { /* sync divider 1, async divider 2 */ DEBUG(("53c700: sync 1 async 2\n")); NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG); hostdata->sync_clock = hostdata->clock; } else if(hostdata->clock > 25 && hostdata->clock <=37) { /* sync divider 1, async divider 1.5 */ DEBUG(("53c700: sync 1 async 1.5\n")); NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG); hostdata->sync_clock = hostdata->clock; } else { DEBUG(("53c700: sync 1 async 1\n")); NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG); NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG); /* sync divider 1, async divider 1 */ hostdata->sync_clock = hostdata->clock; } /* Calculate the actual minimum period that can be supported * by our synchronous clock speed. See the 710 manual for * exact details of this calculation which is based on a * setting of the SXFER register */ min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock); hostdata->min_period = NCR_700_MIN_PERIOD; if(min_period > NCR_700_MIN_PERIOD) hostdata->min_period = min_period; } STATIC void NCR_700_chip_reset(struct Scsi_Host *host) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; if(hostdata->chip710) { NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG); udelay(100); NCR_700_writeb(0, host, ISTAT_REG); } else { NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG); udelay(100); NCR_700_writeb(0, host, DCNTL_REG); } mdelay(1000); NCR_700_chip_setup(host); } /* The heart of the message processing engine is that the instruction * immediately after the INT is the normal case (and so must be CLEAR * ACK). 
If we want to do something else, we call that routine in * scripts and set temp to be the normal case + 8 (skipping the CLEAR * ACK) so that the routine returns correctly to resume its activity * */ STATIC __u32 process_extended_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps) { __u32 resume_offset = dsp, temp = dsp + 8; __u8 pun = 0xff, lun = 0xff; if(SCp != NULL) { pun = SCp->device->id; lun = SCp->device->lun; } switch(hostdata->msgin[2]) { case A_SDTR_MSG: if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) { struct scsi_target *starget = SCp->device->sdev_target; __u8 period = hostdata->msgin[3]; __u8 offset = hostdata->msgin[4]; if(offset == 0 || period == 0) { offset = 0; period = 0; } spi_offset(starget) = offset; spi_period(starget) = period; if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) { spi_display_xfer_agreement(starget); NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION); } NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC); NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); NCR_700_writeb(NCR_700_get_SXFER(SCp->device), host, SXFER_REG); } else { /* SDTR message out of the blue, reject it */ shost_printk(KERN_WARNING, host, "Unexpected SDTR msg\n"); hostdata->msgout[0] = A_REJECT_MSG; dma_sync_to_dev(hostdata, hostdata->msgout, 1); script_patch_16(hostdata, hostdata->script, MessageCount, 1); /* SendMsgOut returns, so set up the return * address */ resume_offset = hostdata->pScript + Ent_SendMessageWithATN; } break; case A_WDTR_MSG: printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n", host->host_no, pun, lun); hostdata->msgout[0] = A_REJECT_MSG; dma_sync_to_dev(hostdata, hostdata->msgout, 1); script_patch_16(hostdata, hostdata->script, MessageCount, 1); resume_offset = hostdata->pScript + Ent_SendMessageWithATN; break; default: printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ", host->host_no, pun, lun, NCR_700_phase[(dsps & 0xf00) >> 8]); spi_print_msg(hostdata->msgin); printk("\n"); /* just reject it */ hostdata->msgout[0] = A_REJECT_MSG; dma_sync_to_dev(hostdata, hostdata->msgout, 1); script_patch_16(hostdata, hostdata->script, MessageCount, 1); /* SendMsgOut returns, so set up the return * address */ resume_offset = hostdata->pScript + Ent_SendMessageWithATN; } NCR_700_writel(temp, host, TEMP_REG); return resume_offset; } STATIC __u32 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps) { /* work out where to return to */ __u32 temp = dsp + 8, resume_offset = dsp; __u8 pun = 0xff, lun = 0xff; if(SCp != NULL) { pun = SCp->device->id; lun = SCp->device->lun; } #ifdef NCR_700_DEBUG printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun, NCR_700_phase[(dsps & 0xf00) >> 8]); spi_print_msg(hostdata->msgin); printk("\n"); #endif switch(hostdata->msgin[0]) { case A_EXTENDED_MSG: resume_offset = process_extended_message(host, hostdata, SCp, dsp, dsps); break; case A_REJECT_MSG: if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) { /* Rejected our sync negotiation attempt */ spi_period(SCp->device->sdev_target) = spi_offset(SCp->device->sdev_target) = 0; NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC); NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == 
NCR_700_DURING_TAG_NEGOTIATION) { /* rejected our first simple tag message */ scmd_printk(KERN_WARNING, SCp, "Rejected first tag queue attempt, turning off tag queueing\n"); /* we're done negotiating */ NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION); hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); SCp->device->tagged_supported = 0; SCp->device->simple_tags = 0; scsi_change_queue_depth(SCp->device, host->cmd_per_lun); } else { shost_printk(KERN_WARNING, host, "(%d:%d) Unexpected REJECT Message %s\n", pun, lun, NCR_700_phase[(dsps & 0xf00) >> 8]); /* however, just ignore it */ } break; case A_PARITY_ERROR_MSG: printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no, pun, lun); NCR_700_internal_bus_reset(host); break; case A_SIMPLE_TAG_MSG: printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no, pun, lun, hostdata->msgin[1], NCR_700_phase[(dsps & 0xf00) >> 8]); /* just ignore it */ break; default: printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ", host->host_no, pun, lun, NCR_700_phase[(dsps & 0xf00) >> 8]); spi_print_msg(hostdata->msgin); printk("\n"); /* just reject it */ hostdata->msgout[0] = A_REJECT_MSG; dma_sync_to_dev(hostdata, hostdata->msgout, 1); script_patch_16(hostdata, hostdata->script, MessageCount, 1); /* SendMsgOut returns, so set up the return * address */ resume_offset = hostdata->pScript + Ent_SendMessageWithATN; break; } NCR_700_writel(temp, host, TEMP_REG); /* set us up to receive another message */ dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); return resume_offset; } STATIC __u32 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata) { __u32 resume_offset = 0; __u8 pun = 0xff, lun=0xff; if(SCp != NULL) { pun = SCp->device->id; lun = SCp->device->lun; } if(dsps == A_GOOD_STATUS_AFTER_STATUS) { DEBUG((" COMMAND COMPLETE, status=%02x\n", hostdata->status[0])); /* OK, if TCQ still under negotiation, we now know it works */ if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION); /* check for contingent allegiance conditions */ if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION || hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) { struct NCR_700_command_slot *slot = (struct NCR_700_command_slot *)SCp->host_scribble; if(slot->flags == NCR_700_FLAG_AUTOSENSE) { /* OOPS: bad device, returning another * contingent allegiance condition */ scmd_printk(KERN_ERR, SCp, "broken device is looping in contingent allegiance: ignoring\n"); NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]); } else { char *cmnd = NCR_700_get_sense_cmnd(SCp->device); #ifdef NCR_DEBUG scsi_print_command(SCp); printk(" cmd %p has status %d, requesting sense\n", SCp, hostdata->status[0]); #endif /* we can destroy the command here * because the contingent allegiance * condition will cause a retry which * will re-copy the command from the * saved data_cmnd. 
We also unmap any * data associated with the command * here */ NCR_700_unmap(hostdata, SCp, slot); dma_unmap_single(hostdata->dev, slot->pCmd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); cmnd[0] = REQUEST_SENSE; cmnd[1] = (lun & 0x7) << 5; cmnd[2] = 0; cmnd[3] = 0; cmnd[4] = SCSI_SENSE_BUFFERSIZE; cmnd[5] = 0; /* Here's a quiet hack: the * REQUEST_SENSE command is six bytes, * so store a flag indicating that * this was an internal sense request * and the original status at the end * of the command */ cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC; cmnd[7] = hostdata->status[0]; cmnd[8] = SCp->cmd_len; SCp->cmd_len = 6; /* command length for * REQUEST_SENSE */ slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE); slot->SG[0].pAddr = bS_to_host(slot->dma_handle); slot->SG[1].ins = bS_to_host(SCRIPT_RETURN); slot->SG[1].pAddr = 0; slot->resume_offset = hostdata->pScript; dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2); dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE); /* queue the command for reissue */ slot->state = NCR_700_SLOT_QUEUED; slot->flags = NCR_700_FLAG_AUTOSENSE; hostdata->state = NCR_700_HOST_FREE; hostdata->cmd = NULL; } } else { // Currently rely on the mid layer evaluation // of the tag queuing capability // //if(status_byte(hostdata->status[0]) == GOOD && // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) { // /* Piggy back the tag queueing support // * on this command */ // dma_sync_single_for_cpu(hostdata->dev, // slot->dma_handle, // SCp->request_bufflen, // DMA_FROM_DEVICE); // if(((char *)SCp->request_buffer)[7] & 0x02) { // scmd_printk(KERN_INFO, SCp, // "Enabling Tag Command Queuing\n"); // hostdata->tag_negotiated |= (1<<scmd_id(SCp)); // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING); // } else { // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING); // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); // } //} NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]); } } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) { __u8 i = (dsps & 0xf00) >> 8; scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n", NCR_700_phase[i], sbcl_to_string(NCR_700_readb(host, SBCL_REG))); scmd_printk(KERN_ERR, SCp, " len = %d, cmd =", SCp->cmd_len); scsi_print_command(SCp); NCR_700_internal_bus_reset(host); } else if((dsps & 0xfffff000) == A_FATAL) { int i = (dsps & 0xfff); printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n", host->host_no, pun, lun, NCR_700_fatal_messages[i]); if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) { printk(KERN_ERR " msg begins %02x %02x\n", hostdata->msgin[0], hostdata->msgin[1]); } NCR_700_internal_bus_reset(host); } else if((dsps & 0xfffff0f0) == A_DISCONNECT) { #ifdef NCR_700_DEBUG __u8 i = (dsps & 0xf00) >> 8; printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n", host->host_no, pun, lun, i, NCR_700_phase[i]); #endif save_for_reselection(hostdata, SCp, dsp); } else if(dsps == A_RESELECTION_IDENTIFIED) { __u8 lun; struct NCR_700_command_slot *slot; __u8 reselection_id = hostdata->reselection_id; struct scsi_device *SDp; lun = hostdata->msgin[0] & 0x1f; hostdata->reselection_id = 0xff; DEBUG(("scsi%d: (%d:%d) RESELECTED!\n", host->host_no, reselection_id, lun)); /* clear the reselection indicator */ SDp = __scsi_device_lookup(host, 0, reselection_id, lun); if(unlikely(SDp == NULL)) { printk(KERN_ERR "scsi%d: (%d:%d) HAS NO 
device\n", host->host_no, reselection_id, lun); BUG(); } if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) { struct scsi_cmnd *SCp; SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]); if(unlikely(SCp == NULL)) { printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", host->host_no, reselection_id, lun, hostdata->msgin[2]); BUG(); } slot = (struct NCR_700_command_slot *)SCp->host_scribble; DDEBUG(KERN_DEBUG, SDp, "reselection is tag %d, slot %p(%d)\n", hostdata->msgin[2], slot, slot->tag); } else { struct NCR_700_Device_Parameters *p = SDp->hostdata; struct scsi_cmnd *SCp = p->current_cmnd; if(unlikely(SCp == NULL)) { sdev_printk(KERN_ERR, SDp, "no saved request for untagged cmd\n"); BUG(); } slot = (struct NCR_700_command_slot *)SCp->host_scribble; } if(slot == NULL) { printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n", host->host_no, reselection_id, lun, hostdata->msgin[0], hostdata->msgin[1], hostdata->msgin[2]); } else { if(hostdata->state != NCR_700_HOST_BUSY) printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n", host->host_no); resume_offset = slot->resume_offset; hostdata->cmd = slot->cmnd; /* re-patch for this command */ script_patch_32_abs(hostdata, hostdata->script, CommandAddress, slot->pCmd); script_patch_16(hostdata, hostdata->script, CommandCount, slot->cmnd->cmd_len); script_patch_32_abs(hostdata, hostdata->script, SGScriptStartAddress, to32bit(&slot->pSG[0].ins)); /* Note: setting SXFER only works if we're * still in the MESSAGE phase, so it is vital * that ACK is still asserted when we process * the reselection message. The resume offset * should therefore always clear ACK */ NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device), host, SXFER_REG); dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); dma_sync_to_dev(hostdata, hostdata->msgout, MSG_ARRAY_SIZE); /* I'm just being paranoid here, the command should * already have been flushed from the cache */ dma_sync_to_dev(hostdata, slot->cmnd->cmnd, slot->cmnd->cmd_len); } } else if(dsps == A_RESELECTED_DURING_SELECTION) { /* This section is full of debugging code because I've * never managed to reach it. 
I think what happens is * that, because the 700 runs with selection * interrupts enabled the whole time that we take a * selection interrupt before we manage to get to the * reselected script interrupt */ __u8 reselection_id = NCR_700_readb(host, SFBR_REG); struct NCR_700_command_slot *slot; /* Take out our own ID */ reselection_id &= ~(1<<host->this_id); /* I've never seen this happen, so keep this as a printk rather * than a debug */ printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n", host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count); { /* FIXME: DEBUGGING CODE */ __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]); int i; for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) { if(SG >= to32bit(&hostdata->slots[i].pSG[0]) && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS])) break; } printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset); SCp = hostdata->slots[i].cmnd; } if(SCp != NULL) { slot = (struct NCR_700_command_slot *)SCp->host_scribble; /* change slot from busy to queued to redo command */ slot->state = NCR_700_SLOT_QUEUED; } hostdata->cmd = NULL; if(reselection_id == 0) { if(hostdata->reselection_id == 0xff) { printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no); return 0; } else { printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n", host->host_no); reselection_id = hostdata->reselection_id; } } else { /* convert to real ID */ reselection_id = bitmap_to_number(reselection_id); } hostdata->reselection_id = reselection_id; /* just in case we have a stale simple tag message, clear it */ hostdata->msgin[1] = 0; dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); if(hostdata->tag_negotiated & (1<<reselection_id)) { resume_offset = hostdata->pScript + Ent_GetReselectionWithTag; } else { resume_offset = hostdata->pScript + Ent_GetReselectionData; } } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) { /* we've just disconnected from the bus, do nothing since * a return here will re-run the queued command slot * that may have been interrupted by the initial selection */ DEBUG((" SELECTION COMPLETED\n")); } else if((dsps & 0xfffff0f0) == A_MSG_IN) { resume_offset = process_message(host, hostdata, SCp, dsp, dsps); } else if((dsps & 0xfffff000) == 0) { __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8; printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n", host->host_no, pun, lun, NCR_700_condition[i], NCR_700_phase[j], dsp - hostdata->pScript); if(SCp != NULL) { struct scatterlist *sg; scsi_print_command(SCp); scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) { printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr); } } NCR_700_internal_bus_reset(host); } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) { printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n", host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript); resume_offset = dsp; } else { printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n", host->host_no, pun, lun, dsps, dsp - hostdata->pScript); NCR_700_internal_bus_reset(host); } return 
resume_offset; } /* We run the 53c700 with selection interrupts always enabled. This * means that the chip may be selected as soon as the bus frees. On a * busy bus, this can be before the scripts engine finishes its * processing. Therefore, part of the selection processing has to be * to find out what the scripts engine is doing and complete the * function if necessary (i.e. process the pending disconnect or save * the interrupted initial selection */ STATIC inline __u32 process_selection(struct Scsi_Host *host, __u32 dsp) { __u8 id = 0; /* Squash compiler warning */ int count = 0; __u32 resume_offset = 0; struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; struct scsi_cmnd *SCp = hostdata->cmd; __u8 sbcl; for(count = 0; count < 5; count++) { id = NCR_700_readb(host, hostdata->chip710 ? CTEST9_REG : SFBR_REG); /* Take out our own ID */ id &= ~(1<<host->this_id); if(id != 0) break; udelay(5); } sbcl = NCR_700_readb(host, SBCL_REG); if((sbcl & SBCL_IO) == 0) { /* mark as having been selected rather than reselected */ id = 0xff; } else { /* convert to real ID */ hostdata->reselection_id = id = bitmap_to_number(id); DEBUG(("scsi%d: Reselected by %d\n", host->host_no, id)); } if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) { struct NCR_700_command_slot *slot = (struct NCR_700_command_slot *)SCp->host_scribble; DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset)); switch(dsp - hostdata->pScript) { case Ent_Disconnect1: case Ent_Disconnect2: save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript); break; case Ent_Disconnect3: case Ent_Disconnect4: save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript); break; case Ent_Disconnect5: case Ent_Disconnect6: save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript); break; case Ent_Disconnect7: case Ent_Disconnect8: save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript); break; case Ent_Finish1: case Ent_Finish2: process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata); break; default: slot->state = NCR_700_SLOT_QUEUED; break; } } hostdata->state = NCR_700_HOST_BUSY; hostdata->cmd = NULL; /* clear any stale simple tag message */ hostdata->msgin[1] = 0; dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); if(id == 0xff) { /* Selected as target, Ignore */ resume_offset = hostdata->pScript + Ent_SelectedAsTarget; } else if(hostdata->tag_negotiated & (1<<id)) { resume_offset = hostdata->pScript + Ent_GetReselectionWithTag; } else { resume_offset = hostdata->pScript + Ent_GetReselectionData; } return resume_offset; } static inline void NCR_700_clear_fifo(struct Scsi_Host *host) { const struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; if(hostdata->chip710) { NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG); } else { NCR_700_writeb(CLR_FIFO, host, DFIFO_REG); } } static inline void NCR_700_flush_fifo(struct Scsi_Host *host) { const struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; if(hostdata->chip710) { NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG); udelay(10); NCR_700_writeb(0, host, CTEST8_REG); } else { NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG); udelay(10); NCR_700_writeb(0, host, DFIFO_REG); } } /* The queue lock with interrupts disabled must be held on entry to * this function */ 
STATIC int NCR_700_start_command(struct scsi_cmnd *SCp) { struct NCR_700_command_slot *slot = (struct NCR_700_command_slot *)SCp->host_scribble; struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; __u16 count = 1; /* for IDENTIFY message */ u8 lun = SCp->device->lun; if(hostdata->state != NCR_700_HOST_FREE) { /* keep this inside the lock to close the race window where * the running command finishes on another CPU while we don't * change the state to queued on this one */ slot->state = NCR_700_SLOT_QUEUED; DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n", SCp->device->host->host_no, slot->cmnd, slot)); return 0; } hostdata->state = NCR_700_HOST_BUSY; hostdata->cmd = SCp; slot->state = NCR_700_SLOT_BUSY; /* keep interrupts disabled until we have the command correctly * set up so we cannot take a selection interrupt */ hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE && slot->flags != NCR_700_FLAG_AUTOSENSE), lun); /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure * if the negotiated transfer parameters still hold, so * always renegotiate them */ if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE || slot->flags == NCR_700_FLAG_AUTOSENSE) { NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC); } /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status. * If a contingent allegiance condition exists, the device * will refuse all tags, so send the request sense as untagged * */ if((hostdata->tag_negotiated & (1<<scmd_id(SCp))) && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE && slot->flags != NCR_700_FLAG_AUTOSENSE)) { count += spi_populate_tag_msg(&hostdata->msgout[count], SCp); } if(hostdata->fast && NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) { count += spi_populate_sync_msg(&hostdata->msgout[count], spi_period(SCp->device->sdev_target), spi_offset(SCp->device->sdev_target)); NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); } script_patch_16(hostdata, hostdata->script, MessageCount, count); script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp)); script_patch_32_abs(hostdata, hostdata->script, CommandAddress, slot->pCmd); script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len); /* finally plumb the beginning of the SG list into the script * */ script_patch_32_abs(hostdata, hostdata->script, SGScriptStartAddress, to32bit(&slot->pSG[0].ins)); NCR_700_clear_fifo(SCp->device->host); if(slot->resume_offset == 0) slot->resume_offset = hostdata->pScript; /* now perform all the writebacks and invalidates */ dma_sync_to_dev(hostdata, hostdata->msgout, count); dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE); dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len); dma_sync_from_dev(hostdata, hostdata->status, 1); /* set the synchronous period/offset */ NCR_700_writeb(NCR_700_get_SXFER(SCp->device), SCp->device->host, SXFER_REG); NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG); NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG); return 1; } irqreturn_t NCR_700_intr(int irq, void *dev_id) { struct Scsi_Host *host = (struct Scsi_Host *)dev_id; struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; __u8 istat; __u32 resume_offset = 0; __u8 pun = 0xff, lun = 0xff; unsigned long flags; int handled = 0; /* Use the host lock to serialise access to the 53c700 * hardware. 
Note: In future, we may need to take the queue * lock to enter the done routines. When that happens, we * need to ensure that for this driver, the host lock and the * queue lock point to the same thing. */ spin_lock_irqsave(host->host_lock, flags); if((istat = NCR_700_readb(host, ISTAT_REG)) & (SCSI_INT_PENDING | DMA_INT_PENDING)) { __u32 dsps; __u8 sstat0 = 0, dstat = 0; __u32 dsp; struct scsi_cmnd *SCp = hostdata->cmd; handled = 1; if(istat & SCSI_INT_PENDING) { udelay(10); sstat0 = NCR_700_readb(host, SSTAT0_REG); } if(istat & DMA_INT_PENDING) { udelay(10); dstat = NCR_700_readb(host, DSTAT_REG); } dsps = NCR_700_readl(host, DSPS_REG); dsp = NCR_700_readl(host, DSP_REG); DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n", host->host_no, istat, sstat0, dstat, (dsp - (__u32)(hostdata->pScript))/4, dsp, dsps)); if(SCp != NULL) { pun = SCp->device->id; lun = SCp->device->lun; } if(sstat0 & SCSI_RESET_DETECTED) { struct scsi_device *SDp; int i; hostdata->state = NCR_700_HOST_BUSY; printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n", host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript); scsi_report_bus_reset(host, 0); /* clear all the negotiated parameters */ __shost_for_each_device(SDp, host) NCR_700_clear_flag(SDp, ~0); /* clear all the slots and their pending commands */ for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) { struct scsi_cmnd *SCp; struct NCR_700_command_slot *slot = &hostdata->slots[i]; if(slot->state == NCR_700_SLOT_FREE) continue; SCp = slot->cmnd; printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n", slot, SCp); free_slot(slot, hostdata); SCp->host_scribble = NULL; NCR_700_set_depth(SCp->device, 0); /* NOTE: deadlock potential here: we * rely on mid-layer guarantees that * scsi_done won't try to issue the * command again otherwise we'll * deadlock on the * hostdata->state_lock */ SCp->result = DID_RESET << 16; scsi_done(SCp); } mdelay(25); NCR_700_chip_setup(host); hostdata->state = NCR_700_HOST_FREE; hostdata->cmd = NULL; /* signal back if this was an eh induced reset */ if(hostdata->eh_complete != NULL) complete(hostdata->eh_complete); goto out_unlock; } else if(sstat0 & SELECTION_TIMEOUT) { DEBUG(("scsi%d: (%d:%d) selection timeout\n", host->host_no, pun, lun)); NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16); } else if(sstat0 & PHASE_MISMATCH) { struct NCR_700_command_slot *slot = (SCp == NULL) ? 
NULL : (struct NCR_700_command_slot *)SCp->host_scribble; if(dsp == Ent_SendMessage + 8 + hostdata->pScript) { /* It wants to reply to some part of * our message */ #ifdef NCR_700_DEBUG __u32 temp = NCR_700_readl(host, TEMP_REG); int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host)); printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); #endif resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); int residual = NCR_700_data_residual(host); int i; #ifdef NCR_700_DEBUG __u32 naddr = NCR_700_readl(host, DNAD_REG); printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n", host->host_no, pun, lun, SGcount, data_transfer); scsi_print_command(SCp); if(residual) { printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n", host->host_no, pun, lun, SGcount, data_transfer, residual); } #endif data_transfer += residual; if(data_transfer != 0) { int count; __u32 pAddr; SGcount--; count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff); DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer)); slot->SG[SGcount].ins &= bS_to_host(0xff000000); slot->SG[SGcount].ins |= bS_to_host(data_transfer); pAddr = bS_to_cpu(slot->SG[SGcount].pAddr); pAddr += (count - data_transfer); #ifdef NCR_700_DEBUG if(pAddr != naddr) { printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual); } #endif slot->SG[SGcount].pAddr = bS_to_host(pAddr); } /* set the executed moves to nops */ for(i=0; i<SGcount; i++) { slot->SG[i].ins = bS_to_host(SCRIPT_NOP); slot->SG[i].pAddr = 0; } dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG)); /* and pretend we disconnected after * the command phase */ resume_offset = hostdata->pScript + Ent_MsgInDuringData; /* make sure all the data is flushed */ NCR_700_flush_fifo(host); } else { __u8 sbcl = NCR_700_readb(host, SBCL_REG); printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n", host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl)); NCR_700_internal_bus_reset(host); } } else if(sstat0 & SCSI_GROSS_ERROR) { printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n", host->host_no, pun, lun); NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); } else if(sstat0 & PARITY_ERROR) { printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n", host->host_no, pun, lun); NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); } else if(dstat & SCRIPT_INT_RECEIVED) { DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n", host->host_no, pun, lun)); resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata); } else if(dstat & (ILGL_INST_DETECTED)) { printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n" " Please email [email protected] with the details\n", host->host_no, pun, lun, dsp, dsp - hostdata->pScript); NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) { printk(KERN_ERR 
"scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n", host->host_no, pun, lun, dstat); NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16); } /* NOTE: selection interrupt processing MUST occur * after script interrupt processing to correctly cope * with the case where we process a disconnect and * then get reselected before we process the * disconnection */ if(sstat0 & SELECTED) { /* FIXME: It currently takes at least FOUR * interrupts to complete a command that * disconnects: one for the disconnect, one * for the reselection, one to get the * reselection data and one to complete the * command. If we guess the reselected * command here and prepare it, we only need * to get a reselection data interrupt if we * guessed wrongly. Since the interrupt * overhead is much greater than the command * setup, this would be an efficient * optimisation particularly as we probably * only have one outstanding command on a * target most of the time */ resume_offset = process_selection(host, dsp); } } if(resume_offset) { if(hostdata->state != NCR_700_HOST_BUSY) { printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n", host->host_no, resume_offset, resume_offset - hostdata->pScript); hostdata->state = NCR_700_HOST_BUSY; } DEBUG(("Attempting to resume at %x\n", resume_offset)); NCR_700_clear_fifo(host); NCR_700_writel(resume_offset, host, DSP_REG); } /* There is probably a technical no-no about this: If we're a * shared interrupt and we got this interrupt because the * other device needs servicing not us, we're still going to * check our queued commands here---of course, there shouldn't * be any outstanding.... */ if(hostdata->state == NCR_700_HOST_FREE) { int i; for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) { /* fairness: always run the queue from the last * position we left off */ int j = (i + hostdata->saved_slot_position) % NCR_700_COMMAND_SLOTS_PER_HOST; if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED) continue; if(NCR_700_start_command(hostdata->slots[j].cmnd)) { DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n", host->host_no, &hostdata->slots[j], hostdata->slots[j].cmnd)); hostdata->saved_slot_position = j + 1; } break; } } out_unlock: spin_unlock_irqrestore(host->host_lock, flags); return IRQ_RETVAL(handled); } static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; __u32 move_ins; struct NCR_700_command_slot *slot; if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) { /* We're over our allocation, this should never happen * since we report the max allocation to the mid layer */ printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no); return 1; } /* check for untagged commands. We cannot have any outstanding * commands if we accept them. 
Commands could be untagged because: * * - The tag negotiated bitmap is clear * - The blk layer sent and untagged command */ if(NCR_700_get_depth(SCp->device) != 0 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp))) || !(SCp->flags & SCMD_TAGGED))) { CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n", NCR_700_get_depth(SCp->device)); return SCSI_MLQUEUE_DEVICE_BUSY; } if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) { CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n", NCR_700_get_depth(SCp->device)); return SCSI_MLQUEUE_DEVICE_BUSY; } NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1); /* begin the command here */ /* no need to check for NULL, test for command_slot_count above * ensures a slot is free */ slot = find_empty_slot(hostdata); slot->cmnd = SCp; SCp->host_scribble = (unsigned char *)slot; #ifdef NCR_700_DEBUG printk("53c700: scsi%d, command ", SCp->device->host->host_no); scsi_print_command(SCp); #endif if ((SCp->flags & SCMD_TAGGED) && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) { scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n"); hostdata->tag_negotiated |= (1<<scmd_id(SCp)); NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION); } /* here we may have to process an untagged command. The gate * above ensures that this will be the only one outstanding, * so clear the tag negotiated bit. * * FIXME: This will royally screw up on multiple LUN devices * */ if (!(SCp->flags & SCMD_TAGGED) && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) { scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n"); hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); } if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) && SCp->device->simple_tags) { slot->tag = scsi_cmd_to_rq(SCp)->tag; CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", slot->tag, slot); } else { struct NCR_700_Device_Parameters *p = SCp->device->hostdata; slot->tag = SCSI_NO_TAG; /* save current command for reselection */ p->current_cmnd = SCp; } /* sanity check: some of the commands generated by the mid-layer * have an eccentric idea of their sc_data_direction */ if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) && SCp->sc_data_direction != DMA_NONE) { #ifdef NCR_700_DEBUG printk("53c700: Command"); scsi_print_command(SCp); printk("Has wrong data direction %d\n", SCp->sc_data_direction); #endif SCp->sc_data_direction = DMA_NONE; } switch (SCp->cmnd[0]) { case REQUEST_SENSE: /* clear the internal sense magic */ SCp->cmnd[6] = 0; fallthrough; default: /* OK, get it from the command */ switch(SCp->sc_data_direction) { case DMA_BIDIRECTIONAL: default: printk(KERN_ERR "53c700: Unknown command for data direction "); scsi_print_command(SCp); move_ins = 0; break; case DMA_NONE: move_ins = 0; break; case DMA_FROM_DEVICE: move_ins = SCRIPT_MOVE_DATA_IN; break; case DMA_TO_DEVICE: move_ins = SCRIPT_MOVE_DATA_OUT; break; } } /* now build the scatter gather list */ if(move_ins != 0) { int i; int sg_count; dma_addr_t vPtr = 0; struct scatterlist *sg; __u32 count = 0; sg_count = scsi_dma_map(SCp); BUG_ON(sg_count < 0); scsi_for_each_sg(SCp, sg, sg_count, i) { vPtr = sg_dma_address(sg); count = sg_dma_len(sg); slot->SG[i].ins = bS_to_host(move_ins | count); DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n", i, count, slot->SG[i].ins, (unsigned long)vPtr)); slot->SG[i].pAddr = bS_to_host(vPtr); } slot->SG[i].ins = bS_to_host(SCRIPT_RETURN); slot->SG[i].pAddr = 0; dma_sync_to_dev(hostdata, slot->SG, 
sizeof(slot->SG)); DEBUG((" SETTING %p to %x\n", (&slot->pSG[i].ins), slot->SG[i].ins)); } slot->resume_offset = 0; slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); NCR_700_start_command(SCp); return 0; } STATIC DEF_SCSI_QCMD(NCR_700_queuecommand) STATIC int NCR_700_abort(struct scsi_cmnd * SCp) { struct NCR_700_command_slot *slot; scmd_printk(KERN_INFO, SCp, "abort command\n"); slot = (struct NCR_700_command_slot *)SCp->host_scribble; if(slot == NULL) /* no outstanding command to abort */ return SUCCESS; if(SCp->cmnd[0] == TEST_UNIT_READY) { /* FIXME: This is because of a problem in the new * error handler. When it is in error recovery, it * will send a TUR to a device it thinks may still be * showing a problem. If the TUR isn't responded to, * it will abort it and mark the device off line. * Unfortunately, it does no other error recovery, so * this would leave us with an outstanding command * occupying a slot. Rather than allow this to * happen, we issue a bus reset to force all * outstanding commands to terminate here. */ NCR_700_internal_bus_reset(SCp->device->host); /* still drop through and return failed */ } return FAILED; } STATIC int NCR_700_host_reset(struct scsi_cmnd * SCp) { DECLARE_COMPLETION_ONSTACK(complete); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset, cmd %p\n\t", SCp); scsi_print_command(SCp); /* In theory, eh_complete should always be null because the * eh is single threaded, but just in case we're handling a * reset via sg or something */ spin_lock_irq(SCp->device->host->host_lock); while (hostdata->eh_complete != NULL) { spin_unlock_irq(SCp->device->host->host_lock); msleep_interruptible(100); spin_lock_irq(SCp->device->host->host_lock); } hostdata->eh_complete = &complete; NCR_700_internal_bus_reset(SCp->device->host); NCR_700_chip_reset(SCp->device->host); spin_unlock_irq(SCp->device->host->host_lock); wait_for_completion(&complete); spin_lock_irq(SCp->device->host->host_lock); hostdata->eh_complete = NULL; /* Revalidate the transport parameters of the failing device */ if(hostdata->fast) spi_schedule_dv_device(SCp->device); spin_unlock_irq(SCp->device->host->host_lock); return SUCCESS; } STATIC void NCR_700_set_period(struct scsi_target *STp, int period) { struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SHp->hostdata[0]; if(!hostdata->fast) return; if(period < hostdata->min_period) period = hostdata->min_period; spi_period(STp) = period; spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC | NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION; } STATIC void NCR_700_set_offset(struct scsi_target *STp, int offset) { struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SHp->hostdata[0]; int max_offset = hostdata->chip710 ? 
NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET; if(!hostdata->fast) return; if(offset > max_offset) offset = max_offset; /* if we're currently async, make sure the period is reasonable */ if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period || spi_period(STp) > 0xff)) spi_period(STp) = hostdata->min_period; spi_offset(STp) = offset; spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC | NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION; } STATIC int NCR_700_slave_alloc(struct scsi_device *SDp) { SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters), GFP_KERNEL); if (!SDp->hostdata) return -ENOMEM; return 0; } STATIC int NCR_700_slave_configure(struct scsi_device *SDp) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; /* to do here: allocate memory; build a queue_full list */ if(SDp->tagged_supported) { scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS); NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); } if(hostdata->fast) { /* Find the correct offset and period via domain validation */ if (!spi_initial_dv(SDp->sdev_target)) spi_dv_device(SDp); } else { spi_offset(SDp->sdev_target) = 0; spi_period(SDp->sdev_target) = 0; } return 0; } STATIC void NCR_700_slave_destroy(struct scsi_device *SDp) { kfree(SDp->hostdata); SDp->hostdata = NULL; } static int NCR_700_change_queue_depth(struct scsi_device *SDp, int depth) { if (depth > NCR_700_MAX_TAGS) depth = NCR_700_MAX_TAGS; return scsi_change_queue_depth(SDp, depth); } static ssize_t NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *SDp = to_scsi_device(dev); return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp)); } static struct device_attribute NCR_700_active_tags_attr = { .attr = { .name = "active_tags", .mode = S_IRUGO, }, .show = NCR_700_show_active_tags, }; STATIC struct attribute *NCR_700_dev_attrs[] = { &NCR_700_active_tags_attr.attr, NULL, }; ATTRIBUTE_GROUPS(NCR_700_dev); EXPORT_SYMBOL(NCR_700_detect); EXPORT_SYMBOL(NCR_700_release); EXPORT_SYMBOL(NCR_700_intr); static struct spi_function_template NCR_700_transport_functions = { .set_period = NCR_700_set_period, .show_period = 1, .set_offset = NCR_700_set_offset, .show_offset = 1, }; static int __init NCR_700_init(void) { NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions); if(!NCR_700_transport_template) return -ENODEV; return 0; } static void __exit NCR_700_exit(void) { spi_release_transport(NCR_700_transport_template); } module_init(NCR_700_init); module_exit(NCR_700_exit);
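/*
 * Usage sketch (illustrative only -- field and argument names here are an
 * approximation; see a real board front end such as lasi700 or sni_53c700
 * for the exact sequence).  The exported symbols above are the whole
 * interface a bus-specific front end needs:
 *
 *	hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
 *	hostdata->base  = ioremap(...);		// chip register window
 *	hostdata->clock = ...;			// SCSI clock in MHz
 *	host = NCR_700_detect(&my_template, hostdata, dev);
 *	request_irq(irq, NCR_700_intr, IRQF_SHARED, "my-53c700", host);
 *	...
 *	NCR_700_release(host);			// on teardown
 */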
linux-master
drivers/scsi/53c700.c
// SPDX-License-Identifier: GPL-2.0-or-later /****************************************************************************** ** Device driver for the PCI-SCSI NCR538XX controller family. ** ** Copyright (C) 1994 Wolfgang Stanglmeier ** ** **----------------------------------------------------------------------------- ** ** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver ** and is currently maintained by ** ** Gerard Roudier <[email protected]> ** ** Being given that this driver originates from the FreeBSD version, and ** in order to keep synergy on both, any suggested enhancements and corrections ** received on Linux are automatically a potential candidate for the FreeBSD ** version. ** ** The original driver has been written for 386bsd and FreeBSD by ** Wolfgang Stanglmeier <[email protected]> ** Stefan Esser <[email protected]> ** ** And has been ported to NetBSD by ** Charles M. Hannum <[email protected]> ** **----------------------------------------------------------------------------- ** ** Brief history ** ** December 10 1995 by Gerard Roudier: ** Initial port to Linux. ** ** June 23 1996 by Gerard Roudier: ** Support for 64 bits architectures (Alpha). ** ** November 30 1996 by Gerard Roudier: ** Support for Fast-20 scsi. ** Support for large DMA fifo and 128 dwords bursting. ** ** February 27 1997 by Gerard Roudier: ** Support for Fast-40 scsi. ** Support for on-Board RAM. ** ** May 3 1997 by Gerard Roudier: ** Full support for scsi scripts instructions pre-fetching. ** ** May 19 1997 by Richard Waltham <[email protected]>: ** Support for NvRAM detection and reading. ** ** August 18 1997 by Cort <[email protected]>: ** Support for Power/PC (Big Endian). ** ** June 20 1998 by Gerard Roudier ** Support for up to 64 tags per lun. ** O(1) everywhere (C and SCRIPTS) for normal cases. ** Low PCI traffic for command handling when on-chip RAM is present. ** Aggressive SCSI SCRIPTS optimizations. ** ** 2005 by Matthew Wilcox and James Bottomley ** PCI-ectomy. This driver now supports only the 720 chip (see the ** NCR_Q720 and zalon drivers for the bus probe logic). ** ******************************************************************************* */ /* ** Supported SCSI-II features: ** Synchronous negotiation ** Wide negotiation (depends on the NCR Chip) ** Enable disconnection ** Tagged command queuing ** Parity checking ** Etc... 
** ** Supported NCR/SYMBIOS chips: ** 53C720 (Wide, Fast SCSI-2, intfly problems) */ /* Name and version of the driver */ #define SCSI_NCR_DRIVER_NAME "ncr53c8xx-3.4.3g" #define SCSI_NCR_DEBUG_FLAGS (0) #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/types.h> #include <asm/dma.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "ncr53c8xx.h" #define NAME53C8XX "ncr53c8xx" /*========================================================== ** ** Debugging tags ** **========================================================== */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_POINTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_SCATTER (0x0800) #define DEBUG_IC (0x1000) /* ** Enable/Disable debug messages. ** Can be changed at runtime too. */ #ifdef SCSI_NCR_DEBUG_INFO_SUPPORT static int ncr_debug = SCSI_NCR_DEBUG_FLAGS; #define DEBUG_FLAGS ncr_debug #else #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS #endif /* * Locally used status flag */ #define SAM_STAT_ILLEGAL 0xff static inline struct list_head *ncr_list_pop(struct list_head *head) { if (!list_empty(head)) { struct list_head *elem = head->next; list_del(elem); return elem; } return NULL; } /*========================================================== ** ** Simple power of two buddy-like allocator. ** ** This simple code is not intended to be fast, but to ** provide power of 2 aligned memory allocations. ** Since the SCRIPTS processor only supplies 8 bit ** arithmetic, this allocator allows simple and fast ** address calculations from the SCRIPTS code. ** In addition, cache line alignment is guaranteed for ** power of 2 cache line size. ** Enhanced in linux-2.3.44 to provide a memory pool ** per pcidev to support dynamic dma mapping. (I would ** have preferred a real bus abstraction, btw). 
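**
**	For illustration, with the constants defined below: the smallest
**	chunk is 1 << MEMO_SHIFT = 16 bytes and every request is rounded
**	up to the next power of two, so asking for 24 bytes actually
**	consumes a 32 byte chunk.  When a chunk of size s at address a is
**	freed, its "buddy" is the chunk at address a ^ s; if that buddy is
**	also free the two are merged and the merge is retried with size
**	2*s, until a whole cluster becomes free again.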
** **========================================================== */ #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */ #if PAGE_SIZE >= 8192 #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */ #else #define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */ #endif #define MEMO_FREE_UNUSED /* Free unused pages immediately */ #define MEMO_WARN 1 #define MEMO_GFP_FLAGS GFP_ATOMIC #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER) #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT) #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1) typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */ typedef struct device *m_bush_t; /* Something that addresses DMAable */ typedef struct m_link { /* Link between free memory chunks */ struct m_link *next; } m_link_s; typedef struct m_vtob { /* Virtual to Bus address translation */ struct m_vtob *next; m_addr_t vaddr; m_addr_t baddr; } m_vtob_s; #define VTOB_HASH_SHIFT 5 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) #define VTOB_HASH_CODE(m) \ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK) typedef struct m_pool { /* Memory pool of a given kind */ m_bush_t bush; m_addr_t (*getp)(struct m_pool *); void (*freep)(struct m_pool *, m_addr_t); int nump; m_vtob_s *(vtob[VTOB_HASH_SIZE]); struct m_pool *next; struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1]; } m_pool_s; static void *___m_alloc(m_pool_s *mp, int size) { int i = 0; int s = (1 << MEMO_SHIFT); int j; m_addr_t a; m_link_s *h = mp->h; if (size > (PAGE_SIZE << MEMO_PAGE_ORDER)) return NULL; while (size > s) { s <<= 1; ++i; } j = i; while (!h[j].next) { if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) { h[j].next = (m_link_s *)mp->getp(mp); if (h[j].next) h[j].next->next = NULL; break; } ++j; s <<= 1; } a = (m_addr_t) h[j].next; if (a) { h[j].next = h[j].next->next; while (j > i) { j -= 1; s >>= 1; h[j].next = (m_link_s *) (a+s); h[j].next->next = NULL; } } #ifdef DEBUG printk("___m_alloc(%d) = %p\n", size, (void *) a); #endif return (void *) a; } static void ___m_free(m_pool_s *mp, void *ptr, int size) { int i = 0; int s = (1 << MEMO_SHIFT); m_link_s *q; m_addr_t a, b; m_link_s *h = mp->h; #ifdef DEBUG printk("___m_free(%p, %d)\n", ptr, size); #endif if (size > (PAGE_SIZE << MEMO_PAGE_ORDER)) return; while (size > s) { s <<= 1; ++i; } a = (m_addr_t) ptr; while (1) { #ifdef MEMO_FREE_UNUSED if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) { mp->freep(mp, a); break; } #endif b = a ^ s; q = &h[i]; while (q->next && q->next != (m_link_s *) b) { q = q->next; } if (!q->next) { ((m_link_s *) a)->next = h[i].next; h[i].next = (m_link_s *) a; break; } q->next = q->next->next; a = a & b; s <<= 1; ++i; } } static DEFINE_SPINLOCK(ncr53c8xx_lock); static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags) { void *p; p = ___m_alloc(mp, size); if (DEBUG_FLAGS & DEBUG_ALLOC) printk ("new %-10s[%4d] @%p.\n", name, size, p); if (p) memset(p, 0, size); else if (uflags & MEMO_WARN) printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size); return p; } #define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN) static void __m_free(m_pool_s *mp, void *ptr, int size, char *name) { if (DEBUG_FLAGS & DEBUG_ALLOC) printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr); ___m_free(mp, ptr, size); } /* * With pci bus iommu support, we use a default pool of unmapped memory * for memory we donnot need to DMA from/to and one pool per pcidev for * memory accessed by the PCI chip. `mp0' is the default not DMAable pool. 
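 *
 * Each cluster handed out by a per-device pool is also recorded in a small
 * hash table (VTOB_HASH_SIZE buckets, keyed on the cluster address), so that
 * __vtobus() can translate any CPU pointer falling inside a cluster into a
 * bus address by looking up the cluster and adding the offset within it.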
*/ static m_addr_t ___mp0_getp(m_pool_s *mp) { m_addr_t m = __get_free_pages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER); if (m) ++mp->nump; return m; } static void ___mp0_freep(m_pool_s *mp, m_addr_t m) { free_pages(m, MEMO_PAGE_ORDER); --mp->nump; } static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep}; /* * DMAable pools. */ /* * With pci bus iommu support, we maintain one pool per pcidev and a * hashed reverse table for virtual to bus physical address translations. */ static m_addr_t ___dma_getp(m_pool_s *mp) { m_addr_t vp; m_vtob_s *vbp; vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB"); if (vbp) { dma_addr_t daddr; vp = (m_addr_t) dma_alloc_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER, &daddr, GFP_ATOMIC); if (vp) { int hc = VTOB_HASH_CODE(vp); vbp->vaddr = vp; vbp->baddr = daddr; vbp->next = mp->vtob[hc]; mp->vtob[hc] = vbp; ++mp->nump; return vp; } } if (vbp) __m_free(&mp0, vbp, sizeof(*vbp), "VTOB"); return 0; } static void ___dma_freep(m_pool_s *mp, m_addr_t m) { m_vtob_s **vbpp, *vbp; int hc = VTOB_HASH_CODE(m); vbpp = &mp->vtob[hc]; while (*vbpp && (*vbpp)->vaddr != m) vbpp = &(*vbpp)->next; if (*vbpp) { vbp = *vbpp; *vbpp = (*vbpp)->next; dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER, (void *)vbp->vaddr, (dma_addr_t)vbp->baddr); __m_free(&mp0, vbp, sizeof(*vbp), "VTOB"); --mp->nump; } } static inline m_pool_s *___get_dma_pool(m_bush_t bush) { m_pool_s *mp; for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next); return mp; } static m_pool_s *___cre_dma_pool(m_bush_t bush) { m_pool_s *mp; mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL"); if (mp) { memset(mp, 0, sizeof(*mp)); mp->bush = bush; mp->getp = ___dma_getp; mp->freep = ___dma_freep; mp->next = mp0.next; mp0.next = mp; } return mp; } static void ___del_dma_pool(m_pool_s *p) { struct m_pool **pp = &mp0.next; while (*pp && *pp != p) pp = &(*pp)->next; if (*pp) { *pp = (*pp)->next; __m_free(&mp0, p, sizeof(*p), "MPOOL"); } } static void *__m_calloc_dma(m_bush_t bush, int size, char *name) { u_long flags; struct m_pool *mp; void *m = NULL; spin_lock_irqsave(&ncr53c8xx_lock, flags); mp = ___get_dma_pool(bush); if (!mp) mp = ___cre_dma_pool(bush); if (mp) m = __m_calloc(mp, size, name); if (mp && !mp->nump) ___del_dma_pool(mp); spin_unlock_irqrestore(&ncr53c8xx_lock, flags); return m; } static void __m_free_dma(m_bush_t bush, void *m, int size, char *name) { u_long flags; struct m_pool *mp; spin_lock_irqsave(&ncr53c8xx_lock, flags); mp = ___get_dma_pool(bush); if (mp) __m_free(mp, m, size, name); if (mp && !mp->nump) ___del_dma_pool(mp); spin_unlock_irqrestore(&ncr53c8xx_lock, flags); } static m_addr_t __vtobus(m_bush_t bush, void *m) { u_long flags; m_pool_s *mp; int hc = VTOB_HASH_CODE(m); m_vtob_s *vp = NULL; m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; spin_lock_irqsave(&ncr53c8xx_lock, flags); mp = ___get_dma_pool(bush); if (mp) { vp = mp->vtob[hc]; while (vp && (m_addr_t) vp->vaddr != a) vp = vp->next; } spin_unlock_irqrestore(&ncr53c8xx_lock, flags); return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; } #define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n) #define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n) #define m_calloc_dma(s, n) _m_calloc_dma(np, s, n) #define m_free_dma(p, s, n) _m_free_dma(np, p, s, n) #define _vtobus(np, p) __vtobus(np->dev, p) #define vtobus(p) _vtobus(np, p) /* * Deal with DMA mapping/unmapping. 
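 *
 * cmd_priv->data_mapped tracks what was done for a command: 0 means nothing
 * is mapped, 2 means the scatterlist was mapped with scsi_dma_map() and has
 * to be released with scsi_dma_unmap() when the command completes.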
*/ static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd) { struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); switch(cmd_priv->data_mapped) { case 2: scsi_dma_unmap(cmd); break; } cmd_priv->data_mapped = 0; } static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd) { struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); int use_sg; use_sg = scsi_dma_map(cmd); if (!use_sg) return 0; cmd_priv->data_mapped = 2; cmd_priv->data_mapping = use_sg; return use_sg; } #define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd) #define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd) /*========================================================== ** ** Driver setup. ** ** This structure is initialized from linux config ** options. It can be overridden at boot-up by the boot ** command line. ** **========================================================== */ static struct ncr_driver_setup driver_setup = SCSI_NCR_DRIVER_SETUP; #ifndef MODULE #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT static struct ncr_driver_setup driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP; #endif #endif /* !MODULE */ #define initverbose (driver_setup.verbose) #define bootverbose (np->verbose) /*=================================================================== ** ** Driver setup from the boot command line ** **=================================================================== */ #ifdef MODULE #define ARG_SEP ' ' #else #define ARG_SEP ',' #endif #define OPT_TAGS 1 #define OPT_MASTER_PARITY 2 #define OPT_SCSI_PARITY 3 #define OPT_DISCONNECTION 4 #define OPT_SPECIAL_FEATURES 5 #define OPT_UNUSED_1 6 #define OPT_FORCE_SYNC_NEGO 7 #define OPT_REVERSE_PROBE 8 #define OPT_DEFAULT_SYNC 9 #define OPT_VERBOSE 10 #define OPT_DEBUG 11 #define OPT_BURST_MAX 12 #define OPT_LED_PIN 13 #define OPT_MAX_WIDE 14 #define OPT_SETTLE_DELAY 15 #define OPT_DIFF_SUPPORT 16 #define OPT_IRQM 17 #define OPT_PCI_FIX_UP 18 #define OPT_BUS_CHECK 19 #define OPT_OPTIMIZE 20 #define OPT_RECOVERY 21 #define OPT_SAFE_SETUP 22 #define OPT_USE_NVRAM 23 #define OPT_EXCLUDE 24 #define OPT_HOST_ID 25 #ifdef SCSI_NCR_IARB_SUPPORT #define OPT_IARB 26 #endif #ifdef MODULE #define ARG_SEP ' ' #else #define ARG_SEP ',' #endif #ifndef MODULE static char setup_token[] __initdata = "tags:" "mpar:" "spar:" "disc:" "specf:" "ultra:" "fsn:" "revprob:" "sync:" "verb:" "debug:" "burst:" "led:" "wide:" "settle:" "diff:" "irqm:" "pcifix:" "buschk:" "optim:" "recovery:" "safe:" "nvram:" "excl:" "hostid:" #ifdef SCSI_NCR_IARB_SUPPORT "iarb:" #endif ; /* DONNOT REMOVE THIS ';' */ static int __init get_setup_token(char *p) { char *cur = setup_token; char *pc; int i = 0; while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { ++pc; ++i; if (!strncmp(p, cur, pc - cur)) return i; cur = pc; } return 0; } static int __init sym53c8xx__setup(char *str) { #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT char *cur = str; char *pc, *pv; int i, val, c; int xi = 0; while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { char *pe; val = 0; pv = pc; c = *++pv; if (c == 'n') val = 0; else if (c == 'y') val = 1; else val = (int) simple_strtoul(pv, &pe, 0); switch (get_setup_token(cur)) { case OPT_TAGS: driver_setup.default_tags = val; if (pe && *pe == '/') { i = 0; while (*pe && *pe != ARG_SEP && i < sizeof(driver_setup.tag_ctrl)-1) { driver_setup.tag_ctrl[i++] = *pe++; } driver_setup.tag_ctrl[i] = '\0'; } break; case OPT_MASTER_PARITY: driver_setup.master_parity = val; break; case OPT_SCSI_PARITY: driver_setup.scsi_parity = val; break; case 
OPT_DISCONNECTION: driver_setup.disconnection = val; break; case OPT_SPECIAL_FEATURES: driver_setup.special_features = val; break; case OPT_FORCE_SYNC_NEGO: driver_setup.force_sync_nego = val; break; case OPT_REVERSE_PROBE: driver_setup.reverse_probe = val; break; case OPT_DEFAULT_SYNC: driver_setup.default_sync = val; break; case OPT_VERBOSE: driver_setup.verbose = val; break; case OPT_DEBUG: driver_setup.debug = val; break; case OPT_BURST_MAX: driver_setup.burst_max = val; break; case OPT_LED_PIN: driver_setup.led_pin = val; break; case OPT_MAX_WIDE: driver_setup.max_wide = val? 1:0; break; case OPT_SETTLE_DELAY: driver_setup.settle_delay = val; break; case OPT_DIFF_SUPPORT: driver_setup.diff_support = val; break; case OPT_IRQM: driver_setup.irqm = val; break; case OPT_PCI_FIX_UP: driver_setup.pci_fix_up = val; break; case OPT_BUS_CHECK: driver_setup.bus_check = val; break; case OPT_OPTIMIZE: driver_setup.optimize = val; break; case OPT_RECOVERY: driver_setup.recovery = val; break; case OPT_USE_NVRAM: driver_setup.use_nvram = val; break; case OPT_SAFE_SETUP: memcpy(&driver_setup, &driver_safe_setup, sizeof(driver_setup)); break; case OPT_EXCLUDE: if (xi < SCSI_NCR_MAX_EXCLUDES) driver_setup.excludes[xi++] = val; break; case OPT_HOST_ID: driver_setup.host_id = val; break; #ifdef SCSI_NCR_IARB_SUPPORT case OPT_IARB: driver_setup.iarb = val; break; #endif default: printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur); break; } if ((cur = strchr(cur, ARG_SEP)) != NULL) ++cur; } #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */ return 1; } #endif /* !MODULE */ /*=================================================================== ** ** Get device queue depth from boot command line. ** **=================================================================== */ #define DEF_DEPTH (driver_setup.default_tags) #define ALL_TARGETS -2 #define NO_TARGET -1 #define ALL_LUNS -2 #define NO_LUN -1 static int device_queue_depth(int unit, int target, int lun) { int c, h, t, u, v; char *p = driver_setup.tag_ctrl; char *ep; h = -1; t = NO_TARGET; u = NO_LUN; while ((c = *p++) != 0) { v = simple_strtoul(p, &ep, 0); switch(c) { case '/': ++h; t = ALL_TARGETS; u = ALL_LUNS; break; case 't': if (t != target) t = (target == v) ? v : NO_TARGET; u = ALL_LUNS; break; case 'u': if (u != lun) u = (lun == v) ? v : NO_LUN; break; case 'q': if (h == unit && (t == ALL_TARGETS || t == target) && (u == ALL_LUNS || u == lun)) return v; break; case '-': t = ALL_TARGETS; u = ALL_LUNS; break; default: break; } p = ep; } return DEF_DEPTH; } /*========================================================== ** ** The CCB done queue uses an array of CCB virtual ** addresses. Empty entries are flagged using the bogus ** virtual address 0xffffffff. ** ** Since PCI ensures that only aligned DWORDs are accessed ** atomically, 64 bit little-endian architecture requires ** to test the high order DWORD of the entry to determine ** if it is empty or valid. ** ** BTW, I will make things differently as soon as I will ** have a better idea, but this is simple and should work. 
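**
**	Concretely (see the macros below): an empty slot holds the value
**	0xffffffff.  On 32 bit machines a slot is valid as soon as it
**	differs from that marker.  On 64 bit machines the test also
**	requires a non-zero high order DWORD, relying on the fact that a
**	valid CCB virtual address never has an all-zero upper half.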
** **========================================================== */ #define SCSI_NCR_CCB_DONE_SUPPORT #ifdef SCSI_NCR_CCB_DONE_SUPPORT #define MAX_DONE 24 #define CCB_DONE_EMPTY 0xffffffffUL /* All 32 bit architectures */ #if BITS_PER_LONG == 32 #define CCB_DONE_VALID(cp) (((u_long) cp) != CCB_DONE_EMPTY) /* All > 32 bit (64 bit) architectures regardless endian-ness */ #else #define CCB_DONE_VALID(cp) \ ((((u_long) cp) & 0xffffffff00000000ul) && \ (((u_long) cp) & 0xfffffffful) != CCB_DONE_EMPTY) #endif #endif /* SCSI_NCR_CCB_DONE_SUPPORT */ /*========================================================== ** ** Configuration and Debugging ** **========================================================== */ /* ** SCSI address of this device. ** The boot routines should have set it. ** If not, use this. */ #ifndef SCSI_NCR_MYADDR #define SCSI_NCR_MYADDR (7) #endif /* ** The maximum number of tags per logic unit. ** Used only for disk devices that support tags. */ #ifndef SCSI_NCR_MAX_TAGS #define SCSI_NCR_MAX_TAGS (8) #endif /* ** TAGS are actually limited to 64 tags/lun. ** We need to deal with power of 2, for alignment constraints. */ #if SCSI_NCR_MAX_TAGS > 64 #define MAX_TAGS (64) #else #define MAX_TAGS SCSI_NCR_MAX_TAGS #endif #define NO_TAG (255) /* ** Choose appropriate type for tag bitmap. */ #if MAX_TAGS > 32 typedef u64 tagmap_t; #else typedef u32 tagmap_t; #endif /* ** Number of targets supported by the driver. ** n permits target numbers 0..n-1. ** Default is 16, meaning targets #0..#15. ** #7 .. is myself. */ #ifdef SCSI_NCR_MAX_TARGET #define MAX_TARGET (SCSI_NCR_MAX_TARGET) #else #define MAX_TARGET (16) #endif /* ** Number of logic units supported by the driver. ** n enables logic unit numbers 0..n-1. ** The common SCSI devices require only ** one lun, so take 1 as the default. */ #ifdef SCSI_NCR_MAX_LUN #define MAX_LUN SCSI_NCR_MAX_LUN #else #define MAX_LUN (1) #endif /* ** Asynchronous pre-scaler (ns). Shall be 40 */ #ifndef SCSI_NCR_MIN_ASYNC #define SCSI_NCR_MIN_ASYNC (40) #endif /* ** The maximum number of jobs scheduled for starting. ** There should be one slot per target, and one slot ** for each tag of each target in use. ** The calculation below is actually quite silly ... */ #ifdef SCSI_NCR_CAN_QUEUE #define MAX_START (SCSI_NCR_CAN_QUEUE + 4) #else #define MAX_START (MAX_TARGET + 7 * MAX_TAGS) #endif /* ** We limit the max number of pending IO to 250. ** since we donnot want to allocate more than 1 ** PAGE for 'scripth'. */ #if MAX_START > 250 #undef MAX_START #define MAX_START 250 #endif /* ** The maximum number of segments a transfer is split into. ** We support up to 127 segments for both read and write. ** The data scripts are broken into 2 sub-scripts. ** 80 (MAX_SCATTERL) segments are moved from a sub-script ** in on-chip RAM. This makes data transfers shorter than ** 80k (assuming 1k fs) as fast as possible. */ #define MAX_SCATTER (SCSI_NCR_MAX_SCATTER) #if (MAX_SCATTER > 80) #define MAX_SCATTERL 80 #define MAX_SCATTERH (MAX_SCATTER - MAX_SCATTERL) #else #define MAX_SCATTERL (MAX_SCATTER-1) #define MAX_SCATTERH 1 #endif /* ** other */ #define NCR_SNOOP_TIMEOUT (1000000) /* ** Other definitions */ #define initverbose (driver_setup.verbose) #define bootverbose (np->verbose) /*========================================================== ** ** Command control block states. 
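**
**	The values below are grouped by mask bits so a single test tells
**	the driver what kind of status it is looking at: HS_DONEMASK (0x80)
**	marks commands that have completed in some way, HS_INVALMASK (0x40)
**	marks transient values only used while the SCRIPTS processor has
**	not yet identified the nexus, and HS_SKIPMASK (0x20) flags commands
**	that the SCRIPTS processor skipped.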
** **========================================================== */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_DONEMASK (0x80) #define HS_COMPLETE (4|HS_DONEMASK) #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ #define HS_RESET (6|HS_DONEMASK) /* SCSI reset */ #define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */ #define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */ #define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */ #define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */ /* ** Invalid host status values used by the SCRIPTS processor ** when the nexus is not fully identified. ** Shall never appear in a CCB. */ #define HS_INVALMASK (0x40) #define HS_SELECTING (0|HS_INVALMASK) #define HS_IN_RESELECT (1|HS_INVALMASK) #define HS_STARTING (2|HS_INVALMASK) /* ** Flags set by the SCRIPT processor for commands ** that have been skipped. */ #define HS_SKIPMASK (0x20) /*========================================================== ** ** Software Interrupt Codes ** **========================================================== */ #define SIR_BAD_STATUS (1) #define SIR_XXXXXXXXXX (2) #define SIR_NEGO_SYNC (3) #define SIR_NEGO_WIDE (4) #define SIR_NEGO_FAILED (5) #define SIR_NEGO_PROTO (6) #define SIR_REJECT_RECEIVED (7) #define SIR_REJECT_SENT (8) #define SIR_IGN_RESIDUE (9) #define SIR_MISSING_SAVE (10) #define SIR_RESEL_NO_MSG_IN (11) #define SIR_RESEL_NO_IDENTIFY (12) #define SIR_RESEL_BAD_LUN (13) #define SIR_RESEL_BAD_TARGET (14) #define SIR_RESEL_BAD_I_T_L (15) #define SIR_RESEL_BAD_I_T_L_Q (16) #define SIR_DONE_OVERFLOW (17) #define SIR_INTFLY (18) #define SIR_MAX (18) /*========================================================== ** ** Extended error codes. ** xerr_status field of struct ccb. ** **========================================================== */ #define XE_OK (0) #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (2) /* illegal phase (4/5) */ /*========================================================== ** ** Negotiation status. ** nego_status field of struct ccb. ** **========================================================== */ #define NS_NOCHANGE (0) #define NS_SYNC (1) #define NS_WIDE (2) #define NS_PPR (4) /*========================================================== ** ** Misc. ** **========================================================== */ #define CCB_MAGIC (0xf2691ad2) /*========================================================== ** ** Declaration of structs. ** **========================================================== */ static struct scsi_transport_template *ncr53c8xx_transport_template = NULL; struct tcb; struct lcb; struct ccb; struct ncb; struct script; struct link { ncrcmd l_cmd; ncrcmd l_paddr; }; struct usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETORDER 13 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UC_SETVERBOSE 17 #define UF_TRACE (0x01) #define UF_NODISC (0x02) #define UF_NOSCAN (0x04) /*======================================================================== ** ** Declaration of structs: target control block ** **======================================================================== */ struct tcb { /*---------------------------------------------------------------- ** During reselection the ncr jumps to this point with SFBR ** set to the encoded target number with bit 7 set. 
** if it's not this target, jump to the next. ** ** JUMP IF (SFBR != #target#), @(next tcb) **---------------------------------------------------------------- */ struct link jump_tcb; /*---------------------------------------------------------------- ** Load the actual values for the sxfer and the scntl3 ** register (sync/wide mode). ** ** SCR_COPY (1), @(sval field of this tcb), @(sxfer register) ** SCR_COPY (1), @(wval field of this tcb), @(scntl3 register) **---------------------------------------------------------------- */ ncrcmd getscr[6]; /*---------------------------------------------------------------- ** Get the IDENTIFY message and load the LUN to SFBR. ** ** CALL, <RESEL_LUN> **---------------------------------------------------------------- */ struct link call_lun; /*---------------------------------------------------------------- ** Now look for the right lun. ** ** For i = 0 to 3 ** SCR_JUMP ^ IFTRUE(MASK(i, 3)), @(first lcb mod. i) ** ** Recent chips will prefetch the 4 JUMPS using only 1 burst. ** It is kind of hashcoding. **---------------------------------------------------------------- */ struct link jump_lcb[4]; /* JUMPs for reselection */ struct lcb * lp[MAX_LUN]; /* The lcb's of this tcb */ /*---------------------------------------------------------------- ** Pointer to the ccb used for negotiation. ** Prevent from starting a negotiation for all queued commands ** when tagged command queuing is enabled. **---------------------------------------------------------------- */ struct ccb * nego_cp; /*---------------------------------------------------------------- ** statistical data **---------------------------------------------------------------- */ u_long transfers; u_long bytes; /*---------------------------------------------------------------- ** negotiation of wide and synch transfer and device quirks. **---------------------------------------------------------------- */ #ifdef SCSI_NCR_BIG_ENDIAN /*0*/ u16 period; /*2*/ u_char sval; /*3*/ u_char minsync; /*0*/ u_char wval; /*1*/ u_char widedone; /*2*/ u_char quirks; /*3*/ u_char maxoffs; #else /*0*/ u_char minsync; /*1*/ u_char sval; /*2*/ u16 period; /*0*/ u_char maxoffs; /*1*/ u_char quirks; /*2*/ u_char widedone; /*3*/ u_char wval; #endif /* User settable limits and options. */ u_char usrsync; u_char usrwide; u_char usrtags; u_char usrflag; struct scsi_target *starget; }; /*======================================================================== ** ** Declaration of structs: lun control block ** **======================================================================== */ struct lcb { /*---------------------------------------------------------------- ** During reselection the ncr jumps to this point ** with SFBR set to the "Identify" message. ** if it's not this lun, jump to the next. ** ** JUMP IF (SFBR != #lun#), @(next lcb of this target) ** ** It is this lun. Load TEMP with the nexus jumps table ** address and jump to RESEL_TAG (or RESEL_NOTAG). ** ** SCR_COPY (4), p_jump_ccb, TEMP, ** SCR_JUMP, <RESEL_TAG> **---------------------------------------------------------------- */ struct link jump_lcb; ncrcmd load_jump_ccb[3]; struct link jump_tag; ncrcmd p_jump_ccb; /* Jump table bus address */ /*---------------------------------------------------------------- ** Jump table used by the script processor to directly jump ** to the CCB corresponding to the reselected nexus. ** Address is allocated on 256 bytes boundary in order to ** allow 8 bit calculation of the tag jump entry for up to ** 64 possible tags. 
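**
**	For illustration: with at most 64 tags and one 32 bit entry per
**	tag the table is exactly 64 * 4 = 256 bytes, so the offset of a
**	tag's entry is simply (tag << 2) -- a value that still fits the
**	8 bit arithmetic available to the SCRIPTS processor, which is why
**	the 256 byte alignment matters.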
	**----------------------------------------------------------------
	*/
	u32		jump_ccb_0;	/* Default table if no tags	*/
	u32		*jump_ccb;	/* Virtual address		*/

	/*----------------------------------------------------------------
	**	CCB queue management.
	**----------------------------------------------------------------
	*/
	struct list_head free_ccbq;	/* Queue of available CCBs	*/
	struct list_head busy_ccbq;	/* Queue of busy CCBs		*/
	struct list_head wait_ccbq;	/* Queue of waiting for IO CCBs	*/
	struct list_head skip_ccbq;	/* Queue of skipped CCBs	*/
	u_char		actccbs;	/* Number of allocated CCBs	*/
	u_char		busyccbs;	/* CCBs busy for this lun	*/
	u_char		queuedccbs;	/* CCBs queued to the controller*/
	u_char		queuedepth;	/* Queue depth for this lun	*/
	u_char		scdev_depth;	/* SCSI device queue depth	*/
	u_char		maxnxs;		/* Max possible nexuses		*/

	/*----------------------------------------------------------------
	**	Control of tagged command queuing.
	**	Tags allocation is performed using a circular buffer.
	**	This avoids using a loop for tag allocation.
	**----------------------------------------------------------------
	*/
	u_char		ia_tag;		/* Allocation index		*/
	u_char		if_tag;		/* Freeing index		*/
	u_char		cb_tags[MAX_TAGS];	/* Circular tags buffer	*/
	u_char		usetags;	/* Command queuing is active	*/
	u_char		maxtags;	/* Max nr of tags asked by user	*/
	u_char		numtags;	/* Current number of tags	*/

	/*----------------------------------------------------------------
	**	QUEUE FULL and ORDERED tag control.
	**----------------------------------------------------------------
	*/
	u16		num_good;	/* Nr of GOOD since QUEUE FULL	*/
	tagmap_t	tags_umap;	/* Used tags bitmap		*/
	tagmap_t	tags_smap;	/* Tags in use at 'tag_stime'	*/
	u_long		tags_stime;	/* Last time we set smap=umap	*/
	struct ccb *	held_ccb;	/* CCB held for QUEUE FULL	*/
};

/*========================================================================
**
**	Declaration of structs:	the launch script.
**
**========================================================================
**
**	It is part of the CCB and is called by the scripts processor to
**	start or restart the data structure (nexus).
**	This 6 DWORDs mini script makes use of prefetching.
**
**------------------------------------------------------------------------
*/
struct launch {
	/*----------------------------------------------------------------
	**	SCR_COPY(4),	@(p_phys), @(dsa register)
	**	SCR_JUMP,	@(scheduler_point)
	**----------------------------------------------------------------
	*/
	ncrcmd		setup_dsa[3];	/* Copy 'phys' address to dsa	*/
	struct link	schedule;	/* Jump to scheduler point	*/
	ncrcmd		p_phys;		/* 'phys' header bus address	*/
};

/*========================================================================
**
**	Declaration of structs:	global HEADER.
**
**========================================================================
**
**	This substructure is copied from the ccb to a global address after
**	selection (or reselection) and copied back before disconnect.
**
**	These fields are accessible to the script processor.
**
**------------------------------------------------------------------------
*/
struct head {
	/*----------------------------------------------------------------
	**	Saved data pointer.
	**	Points to the position in the script responsible for the
	**	actual transfer of data.
** It's written after reception of a SAVE_DATA_POINTER message. ** The goalpointer points after the last transfer command. **---------------------------------------------------------------- */ u32 savep; u32 lastp; u32 goalp; /*---------------------------------------------------------------- ** Alternate data pointer. ** They are copied back to savep/lastp/goalp by the SCRIPTS ** when the direction is unknown and the device claims data out. **---------------------------------------------------------------- */ u32 wlastp; u32 wgoalp; /*---------------------------------------------------------------- ** The virtual address of the ccb containing this header. **---------------------------------------------------------------- */ struct ccb * cp; /*---------------------------------------------------------------- ** Status fields. **---------------------------------------------------------------- */ u_char scr_st[4]; /* script status */ u_char status[4]; /* host status. must be the */ /* last DWORD of the header. */ }; /* ** The status bytes are used by the host and the script processor. ** ** The byte corresponding to the host_status must be stored in the ** last DWORD of the CCB header since it is used for command ** completion (ncr_wakeup()). Doing so, we are sure that the header ** has been entirely copied back to the CCB when the host_status is ** seen complete by the CPU. ** ** The last four bytes (status[4]) are copied to the scratchb register ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, ** and copied back just after disconnecting. ** Inside the script the XX_REG are used. ** ** The first four bytes (scr_st[4]) are used inside the script by ** "COPY" commands. ** Because source and destination must have the same alignment ** in a DWORD, the fields HAVE to be at the chosen offsets. ** xerr_st 0 (0x34) scratcha ** sync_st 1 (0x05) sxfer ** wide_st 3 (0x03) scntl3 */ /* ** Last four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define SS_PRT nc_scr2 #define PS_REG scr3 /* ** Last four bytes (host) */ #ifdef SCSI_NCR_BIG_ENDIAN #define actualquirks phys.header.status[3] #define host_status phys.header.status[2] #define scsi_status phys.header.status[1] #define parity_status phys.header.status[0] #else #define actualquirks phys.header.status[0] #define host_status phys.header.status[1] #define scsi_status phys.header.status[2] #define parity_status phys.header.status[3] #endif /* ** First four bytes (script) */ #define xerr_st header.scr_st[0] #define sync_st header.scr_st[1] #define nego_st header.scr_st[2] #define wide_st header.scr_st[3] /* ** First four bytes (host) */ #define xerr_status phys.xerr_st #define nego_status phys.nego_st /*========================================================== ** ** Declaration of structs: Data structure block ** **========================================================== ** ** During execution of a ccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the ccb. ** This substructure contains the header with ** the script-processor-changeable data and ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct dsb { /* ** Header. 
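**
**	(This is the 'head' structure declared above; it is copied to and
**	from the global header in the ncb around selection, reselection
**	and disconnect, and it must stay the first member so that it
**	remains cache line aligned at the start of the CCB.)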
*/ struct head header; /* ** Table data for Script */ struct scr_tblsel select; struct scr_tblmove smsg ; struct scr_tblmove cmd ; struct scr_tblmove sense ; struct scr_tblmove data[MAX_SCATTER]; }; /*======================================================================== ** ** Declaration of structs: Command control block. ** **======================================================================== */ struct ccb { /*---------------------------------------------------------------- ** This is the data structure which is pointed by the DSA ** register when it is executed by the script processor. ** It must be the first entry because it contains the header ** as first entry that must be cache line aligned. **---------------------------------------------------------------- */ struct dsb phys; /*---------------------------------------------------------------- ** Mini-script used at CCB execution start-up. ** Load the DSA with the data structure address (phys) and ** jump to SELECT. Jump to CANCEL if CCB is to be canceled. **---------------------------------------------------------------- */ struct launch start; /*---------------------------------------------------------------- ** Mini-script used at CCB relection to restart the nexus. ** Load the DSA with the data structure address (phys) and ** jump to RESEL_DSA. Jump to ABORT if CCB is to be aborted. **---------------------------------------------------------------- */ struct launch restart; /*---------------------------------------------------------------- ** If a data transfer phase is terminated too early ** (after reception of a message (i.e. DISCONNECT)), ** we have to prepare a mini script to transfer ** the rest of the data. **---------------------------------------------------------------- */ ncrcmd patch[8]; /*---------------------------------------------------------------- ** The general SCSI driver provides a ** pointer to a control block. **---------------------------------------------------------------- */ struct scsi_cmnd *cmd; /* SCSI command */ u_char cdb_buf[16]; /* Copy of CDB */ u_char sense_buf[64]; int data_len; /* Total data length */ /*---------------------------------------------------------------- ** Message areas. ** We prepare a message to be sent after selection. ** We may use a second one if the command is rescheduled ** due to GETCC or QFULL. ** Contents are IDENTIFY and SIMPLE_TAG. ** While negotiating sync or wide transfer, ** a SDTR or WDTR message is appended. **---------------------------------------------------------------- */ u_char scsi_smsg [8]; u_char scsi_smsg2[8]; /*---------------------------------------------------------------- ** Other fields. **---------------------------------------------------------------- */ u_long p_ccb; /* BUS address of this CCB */ u_char sensecmd[6]; /* Sense command */ u_char tag; /* Tag for this transfer */ /* 255 means no tag */ u_char target; u_char lun; u_char queued; u_char auto_sense; struct ccb * link_ccb; /* Host adapter CCB chain */ struct list_head link_ccbq; /* Link to unit CCB queue */ u32 startp; /* Initial data pointer */ u_long magic; /* Free / busy CCB flag */ }; #define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl)) /*======================================================================== ** ** Declaration of structs: NCR device descriptor ** **======================================================================== */ struct ncb { /*---------------------------------------------------------------- ** The global header. 
** It is accessible to both the host and the script processor. ** Must be cache line size aligned (32 for x86) in order to ** allow cache line bursting when it is copied to/from CCB. **---------------------------------------------------------------- */ struct head header; /*---------------------------------------------------------------- ** CCBs management queues. **---------------------------------------------------------------- */ struct scsi_cmnd *waiting_list; /* Commands waiting for a CCB */ /* when lcb is not allocated. */ struct scsi_cmnd *done_list; /* Commands waiting for done() */ /* callback to be invoked. */ spinlock_t smp_lock; /* Lock for SMP threading */ /*---------------------------------------------------------------- ** Chip and controller identification. **---------------------------------------------------------------- */ int unit; /* Unit number */ char inst_name[16]; /* ncb instance name */ /*---------------------------------------------------------------- ** Initial value of some IO register bits. ** These values are assumed to have been set by BIOS, and may ** be used for probing adapter implementation differences. **---------------------------------------------------------------- */ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest0, sv_ctest3, sv_ctest4, sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4; /*---------------------------------------------------------------- ** Actual initial value of IO register bits used by the ** driver. They are loaded at initialisation according to ** features that are to be enabled. **---------------------------------------------------------------- */ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest0, rv_ctest3, rv_ctest4, rv_ctest5, rv_stest2; /*---------------------------------------------------------------- ** Targets management. ** During reselection the ncr jumps to jump_tcb. ** The SFBR register is loaded with the encoded target id. ** For i = 0 to 3 ** SCR_JUMP ^ IFTRUE(MASK(i, 3)), @(next tcb mod. i) ** ** Recent chips will prefetch the 4 JUMPS using only 1 burst. ** It is kind of hashcoding. **---------------------------------------------------------------- */ struct link jump_tcb[4]; /* JUMPs for reselection */ struct tcb target[MAX_TARGET]; /* Target data */ /*---------------------------------------------------------------- ** Virtual and physical bus addresses of the chip. **---------------------------------------------------------------- */ void __iomem *vaddr; /* Virtual and bus address of */ unsigned long paddr; /* chip's IO registers. */ unsigned long paddr2; /* On-chip RAM bus address. */ volatile /* Pointer to volatile for */ struct ncr_reg __iomem *reg; /* memory mapped IO. */ /*---------------------------------------------------------------- ** SCRIPTS virtual and physical bus addresses. ** 'script' is loaded in the on-chip RAM if present. ** 'scripth' stays in main memory. **---------------------------------------------------------------- */ struct script *script0; /* Copies of script and scripth */ struct scripth *scripth0; /* relocated for this ncb. */ struct scripth *scripth; /* Actual scripth virt. address */ u_long p_script; /* Actual script and scripth */ u_long p_scripth; /* bus addresses. */ /*---------------------------------------------------------------- ** General controller parameters and configuration. 
**---------------------------------------------------------------- */ struct device *dev; u_char revision_id; /* PCI device revision id */ u32 irq; /* IRQ level */ u32 features; /* Chip features map */ u_char myaddr; /* SCSI id of the adapter */ u_char maxburst; /* log base 2 of dwords burst */ u_char maxwide; /* Maximum transfer width */ u_char minsync; /* Minimum sync period factor */ u_char maxsync; /* Maximum sync period factor */ u_char maxoffs; /* Max scsi offset */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char clock_divn; /* Number of clock divisors */ u_long clock_khz; /* SCSI clock frequency in KHz */ /*---------------------------------------------------------------- ** Start queue management. ** It is filled up by the host processor and accessed by the ** SCRIPTS processor in order to start SCSI commands. **---------------------------------------------------------------- */ u16 squeueput; /* Next free slot of the queue */ u16 actccbs; /* Number of allocated CCBs */ u16 queuedccbs; /* Number of CCBs in start queue*/ u16 queuedepth; /* Start queue depth */ /*---------------------------------------------------------------- ** Timeout handler. **---------------------------------------------------------------- */ struct timer_list timer; /* Timer handler link header */ u_long lasttime; u_long settle_time; /* Resetting the SCSI BUS */ /*---------------------------------------------------------------- ** Debugging and profiling. **---------------------------------------------------------------- */ struct ncr_reg regdump; /* Register dump */ u_long regtime; /* Time it has been done */ /*---------------------------------------------------------------- ** Miscellaneous buffers accessed by the scripts-processor. ** They shall be DWORD aligned, because they may be read or ** written with a SCR_COPY script command. **---------------------------------------------------------------- */ u_char msgout[8]; /* Buffer for MESSAGE OUT */ u_char msgin [8]; /* Buffer for MESSAGE IN */ u32 lastmsg; /* Last SCSI message sent */ u_char scratch; /* Scratch for SCSI receive */ /*---------------------------------------------------------------- ** Miscellaneous configuration and status parameters. **---------------------------------------------------------------- */ u_char disc; /* Disconnection allowed */ u_char scsi_mode; /* Current SCSI BUS mode */ u_char order; /* Tag order to use */ u_char verbose; /* Verbosity for this controller*/ int ncr_cache; /* Used for cache test at init. */ u_long p_ncb; /* BUS address of this NCB */ /*---------------------------------------------------------------- ** Command completion handling. **---------------------------------------------------------------- */ #ifdef SCSI_NCR_CCB_DONE_SUPPORT struct ccb *(ccb_done[MAX_DONE]); int ccb_done_ic; #endif /*---------------------------------------------------------------- ** Fields that should be removed or changed. **---------------------------------------------------------------- */ struct ccb *ccb; /* Global CCB */ struct usrcmd user; /* Command from user */ volatile u_char release_stage; /* Synchronisation stage on release */ }; #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) /*========================================================== ** ** ** Script for NCR-Processor. ** ** Use ncr_script_fill() to create the variable parts. 
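**	(The variable parts are the scatter/gather MOVE lists and the
**	start/done queues, whose sizes depend on MAX_SCATTERL,
**	MAX_SCATTERH, MAX_START and MAX_DONE; see ncr_script_fill()
**	below.)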
** Use ncr_script_copy_and_bind() to make a copy and ** bind to physical addresses. ** ** **========================================================== ** ** We have to know the offsets of all labels before ** we reach them (for forward jumps). ** Therefore we declare a struct here. ** If you make changes inside the script, ** DONT FORGET TO CHANGE THE LENGTHS HERE! ** **---------------------------------------------------------- */ /* ** For HP Zalon/53c720 systems, the Zalon interface ** between CPU and 53c720 does prefetches, which causes ** problems with self modifying scripts. The problem ** is overcome by calling a dummy subroutine after each ** modification, to force a refetch of the script on ** return from the subroutine. */ #ifdef CONFIG_NCR53C8XX_PREFETCH #define PREFETCH_FLUSH_CNT 2 #define PREFETCH_FLUSH SCR_CALL, PADDRH (wait_dma), #else #define PREFETCH_FLUSH_CNT 0 #define PREFETCH_FLUSH #endif /* ** Script fragments which are loaded into the on-chip RAM ** of 825A, 875 and 895 chips. */ struct script { ncrcmd start [ 5]; ncrcmd startpos [ 1]; ncrcmd select [ 6]; ncrcmd select2 [ 9 + PREFETCH_FLUSH_CNT]; ncrcmd loadpos [ 4]; ncrcmd send_ident [ 9]; ncrcmd prepare [ 6]; ncrcmd prepare2 [ 7]; ncrcmd command [ 6]; ncrcmd dispatch [ 32]; ncrcmd clrack [ 4]; ncrcmd no_data [ 17]; ncrcmd status [ 8]; ncrcmd msg_in [ 2]; ncrcmd msg_in2 [ 16]; ncrcmd msg_bad [ 4]; ncrcmd setmsg [ 7]; ncrcmd cleanup [ 6]; ncrcmd complete [ 9]; ncrcmd cleanup_ok [ 8 + PREFETCH_FLUSH_CNT]; ncrcmd cleanup0 [ 1]; #ifndef SCSI_NCR_CCB_DONE_SUPPORT ncrcmd signal [ 12]; #else ncrcmd signal [ 9]; ncrcmd done_pos [ 1]; ncrcmd done_plug [ 2]; ncrcmd done_end [ 7]; #endif ncrcmd save_dp [ 7]; ncrcmd restore_dp [ 5]; ncrcmd disconnect [ 10]; ncrcmd msg_out [ 9]; ncrcmd msg_out_done [ 7]; ncrcmd idle [ 2]; ncrcmd reselect [ 8]; ncrcmd reselected [ 8]; ncrcmd resel_dsa [ 6 + PREFETCH_FLUSH_CNT]; ncrcmd loadpos1 [ 4]; ncrcmd resel_lun [ 6]; ncrcmd resel_tag [ 6]; ncrcmd jump_to_nexus [ 4 + PREFETCH_FLUSH_CNT]; ncrcmd nexus_indirect [ 4]; ncrcmd resel_notag [ 4]; ncrcmd data_in [MAX_SCATTERL * 4]; ncrcmd data_in2 [ 4]; ncrcmd data_out [MAX_SCATTERL * 4]; ncrcmd data_out2 [ 4]; }; /* ** Script fragments which stay in main memory for all chips. 
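**	(struct script above is kept small enough to fit in the few
**	kilobytes of on-chip SCRIPTS RAM, so the hot paths execute
**	without fetching from host memory; the fragments below are
**	executed less often and always run from main memory.)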
*/ struct scripth { ncrcmd tryloop [MAX_START*2]; ncrcmd tryloop2 [ 2]; #ifdef SCSI_NCR_CCB_DONE_SUPPORT ncrcmd done_queue [MAX_DONE*5]; ncrcmd done_queue2 [ 2]; #endif ncrcmd select_no_atn [ 8]; ncrcmd cancel [ 4]; ncrcmd skip [ 9 + PREFETCH_FLUSH_CNT]; ncrcmd skip2 [ 19]; ncrcmd par_err_data_in [ 6]; ncrcmd par_err_other [ 4]; ncrcmd msg_reject [ 8]; ncrcmd msg_ign_residue [ 24]; ncrcmd msg_extended [ 10]; ncrcmd msg_ext_2 [ 10]; ncrcmd msg_wdtr [ 14]; ncrcmd send_wdtr [ 7]; ncrcmd msg_ext_3 [ 10]; ncrcmd msg_sdtr [ 14]; ncrcmd send_sdtr [ 7]; ncrcmd nego_bad_phase [ 4]; ncrcmd msg_out_abort [ 10]; ncrcmd hdata_in [MAX_SCATTERH * 4]; ncrcmd hdata_in2 [ 2]; ncrcmd hdata_out [MAX_SCATTERH * 4]; ncrcmd hdata_out2 [ 2]; ncrcmd reset [ 4]; ncrcmd aborttag [ 4]; ncrcmd abort [ 2]; ncrcmd abort_resel [ 20]; ncrcmd resend_ident [ 4]; ncrcmd clratn_go_on [ 3]; ncrcmd nxtdsp_go_on [ 1]; ncrcmd sdata_in [ 8]; ncrcmd data_io [ 18]; ncrcmd bad_identify [ 12]; ncrcmd bad_i_t_l [ 4]; ncrcmd bad_i_t_l_q [ 4]; ncrcmd bad_target [ 8]; ncrcmd bad_status [ 8]; ncrcmd start_ram [ 4 + PREFETCH_FLUSH_CNT]; ncrcmd start_ram0 [ 4]; ncrcmd sto_restart [ 5]; ncrcmd wait_dma [ 2]; ncrcmd snooptest [ 9]; ncrcmd snoopend [ 2]; }; /*========================================================== ** ** ** Function headers. ** ** **========================================================== */ static void ncr_alloc_ccb (struct ncb *np, u_char tn, u_char ln); static void ncr_complete (struct ncb *np, struct ccb *cp); static void ncr_exception (struct ncb *np); static void ncr_free_ccb (struct ncb *np, struct ccb *cp); static void ncr_init_ccb (struct ncb *np, struct ccb *cp); static void ncr_init_tcb (struct ncb *np, u_char tn); static struct lcb * ncr_alloc_lcb (struct ncb *np, u_char tn, u_char ln); static struct lcb * ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev); static void ncr_getclock (struct ncb *np, int mult); static void ncr_selectclock (struct ncb *np, u_char scntl3); static struct ccb *ncr_get_ccb (struct ncb *np, struct scsi_cmnd *cmd); static void ncr_chip_reset (struct ncb *np, int delay); static void ncr_init (struct ncb *np, int reset, char * msg, u_long code); static int ncr_int_sbmc (struct ncb *np); static int ncr_int_par (struct ncb *np); static void ncr_int_ma (struct ncb *np); static void ncr_int_sir (struct ncb *np); static void ncr_int_sto (struct ncb *np); static void ncr_negotiate (struct ncb* np, struct tcb* tp); static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr); static void ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len); static void ncr_script_fill (struct script * scr, struct scripth * scripth); static int ncr_scatter (struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd); static void ncr_getsync (struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl3p); static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer); static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev); static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack); static int ncr_snooptest (struct ncb *np); static void ncr_timeout (struct ncb *np); static void ncr_wakeup (struct ncb *np, u_long code); static void ncr_wakeup_done (struct ncb *np); static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn); static void ncr_put_start_queue(struct ncb *np, struct ccb *cp); static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd); static void process_waiting_list(struct 
ncb *np, int sts); #define requeue_waiting_list(np) process_waiting_list((np), DID_OK) #define reset_waiting_list(np) process_waiting_list((np), DID_RESET) static inline char *ncr_name (struct ncb *np) { return np->inst_name; } /*========================================================== ** ** ** Scripts for NCR-Processor. ** ** Use ncr_script_bind for binding to physical addresses. ** ** **========================================================== ** ** NADDR generates a reference to a field of the controller data. ** PADDR generates a reference to another part of the script. ** RADDR generates a reference to a script processor register. ** FADDR generates a reference to a script processor register ** with offset. ** **---------------------------------------------------------- */ #define RELOC_SOFTC 0x40000000 #define RELOC_LABEL 0x50000000 #define RELOC_REGISTER 0x60000000 #define RELOC_LABELH 0x80000000 #define RELOC_MASK 0xf0000000 #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) #define RADDR(label) (RELOC_REGISTER | REG(label)) #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) static struct script script0 __initdata = { /*--------------------------< START >-----------------------*/ { /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** Clear SIGP. */ SCR_FROM_REG (ctest2), 0, /* ** Then jump to a certain point in tryloop. ** Due to the lack of indirect addressing the code ** is self modifying here. */ SCR_JUMP, }/*-------------------------< STARTPOS >--------------------*/,{ PADDRH(tryloop), }/*-------------------------< SELECT >----------------------*/,{ /* ** DSA contains the address of a scheduled ** data structure. ** ** SCRATCHA contains the address of the script, ** which starts the next entry. ** ** Set Initiator mode. ** ** (Target mode is left as an exercise for the reader) */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, HS_SELECTING), 0, /* ** And try to select this target. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR (reselect), }/*-------------------------< SELECT2 >----------------------*/,{ /* ** Now there are 4 possibilities: ** ** (1) The ncr loses arbitration. ** This is ok, because it will try again, ** when the bus becomes idle. ** (But beware of the timeout function!) ** ** (2) The ncr is reselected. ** Then the script processor takes the jump ** to the RESELECT label. ** ** (3) The ncr wins arbitration. ** Then it will execute SCRIPTS instruction until ** the next instruction that checks SCSI phase. ** Then will stop and wait for selection to be ** complete or selection time-out to occur. ** As a result the SCRIPTS instructions until ** LOADPOS + 2 should be executed in parallel with ** the SCSI core performing selection. */ /* ** The MESSAGE_REJECT problem seems to be due to a selection ** timing problem. ** Wait immediately for the selection to complete. ** (2.5x behaves so) */ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)), 0, /* ** Next time use the next slot. */ SCR_COPY (4), RADDR (temp), PADDR (startpos), /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. 
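**	(Concretely: the SCR_COPY_F below writes the 32-bit DSA value,
**	i.e. the bus address of the active CCB, into the source address
**	slot at label LOADPOS, so that the following
**	SCR_COPY (sizeof (struct head)) reads the CCB header and stores
**	it into the 'header' field of the ncb.)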
*/ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos), /* ** Flush script prefetch if required */ PREFETCH_FLUSH /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ... */ }/*-------------------------< LOADPOS >---------------------*/,{ 0, NADDR (header), /* ** Wait for the next phase or the selection ** to complete or time-out. */ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), PADDR (prepare), }/*-------------------------< SEND_IDENT >----------------------*/,{ /* ** Selection complete. ** Send the IDENTIFY and SIMPLE_TAG messages ** (and the EXTENDED_SDTR message) */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg), SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDRH (resend_ident), SCR_LOAD_REG (scratcha, 0x80), 0, SCR_COPY (1), RADDR (scratcha), NADDR (lastmsg), }/*-------------------------< PREPARE >----------------------*/,{ /* ** load the savep (saved pointer) into ** the TEMP register (actual pointer) */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< PREPARE2 >---------------------*/,{ /* ** Initialize the msgout buffer with a NOOP message. */ SCR_LOAD_REG (scratcha, NOP), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), /* ** Anticipate the COMMAND phase. ** This is the normal case for initial selection. */ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)), PADDR (dispatch), }/*-------------------------< COMMAND >--------------------*/,{ /* ** ... and send the command */ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, cmd), /* ** If status is still HS_NEGOTIATE, negotiation failed. ** We check this here, since we want to do that ** only once. */ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, }/*-----------------------< DISPATCH >----------------------*/,{ /* ** MSG_IN is the only phase that shall be ** entered at least once for each (re)selection. ** So we test it first. */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)), PADDR (msg_in), SCR_RETURN ^ IFTRUE (IF (SCR_DATA_OUT)), 0, /* ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 4. ** Possible data corruption during Memory Write and Invalidate. ** This work-around resets the addressing logic prior to the ** start of the first MOVE of a DATA IN phase. ** (See Documentation/scsi/ncr53c8xx.rst for more information) */ SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 20, SCR_COPY (4), RADDR (scratcha), RADDR (scratcha), SCR_RETURN, 0, SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), PADDR (status), SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), PADDR (command), SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), PADDR (msg_out), /* ** Discard one illegal phase byte, if required. */ SCR_LOAD_REG (scratcha, XE_BAD_PHASE), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_IN, NADDR (scratch), SCR_JUMP, PADDR (dispatch), }/*-------------------------< CLRACK >----------------------*/,{ /* ** Terminate possible pending message phase. */ SCR_CLR (SCR_ACK), 0, SCR_JUMP, PADDR (dispatch), }/*-------------------------< NO_DATA >--------------------*/,{ /* ** The target wants to tranfer too much data ** or in the wrong direction. ** Remember that in extended error. */ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), /* ** Discard one data byte, if required. 
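**	(The '8' arguments of the SCR_JUMPR instructions below are byte
**	offsets relative to the next instruction; they skip exactly one
**	two-word, i.e. 8 byte, MOVE when the phase does not match.)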
*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), /* ** .. and repeat as required. */ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< STATUS >--------------------*/,{ /* ** get the status */ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), /* ** save status to scsi_status. ** mark as complete. */ SCR_TO_REG (SS_REG), 0, SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (dispatch), }/*-------------------------< MSG_IN >--------------------*/,{ /* ** Get the first byte of the message ** and save it to SCRATCHA. ** ** The script processor doesn't negate the ** ACK signal after this transfer. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[0]), }/*-------------------------< MSG_IN2 >--------------------*/,{ /* ** Handle this message. */ SCR_JUMP ^ IFTRUE (DATA (COMMAND_COMPLETE)), PADDR (complete), SCR_JUMP ^ IFTRUE (DATA (DISCONNECT)), PADDR (disconnect), SCR_JUMP ^ IFTRUE (DATA (SAVE_POINTERS)), PADDR (save_dp), SCR_JUMP ^ IFTRUE (DATA (RESTORE_POINTERS)), PADDR (restore_dp), SCR_JUMP ^ IFTRUE (DATA (EXTENDED_MESSAGE)), PADDRH (msg_extended), SCR_JUMP ^ IFTRUE (DATA (NOP)), PADDR (clrack), SCR_JUMP ^ IFTRUE (DATA (MESSAGE_REJECT)), PADDRH (msg_reject), SCR_JUMP ^ IFTRUE (DATA (IGNORE_WIDE_RESIDUE)), PADDRH (msg_ign_residue), /* ** Rest of the messages left as ** an exercise ... ** ** Unimplemented messages: ** fall through to MSG_BAD. */ }/*-------------------------< MSG_BAD >------------------*/,{ /* ** unimplemented message - reject it. */ SCR_INT, SIR_REJECT_SENT, SCR_LOAD_REG (scratcha, MESSAGE_REJECT), 0, }/*-------------------------< SETMSG >----------------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, SCR_JUMP, PADDR (clrack), }/*-------------------------< CLEANUP >-------------------*/,{ /* ** dsa: Pointer to ccb ** or xxxxxxFF (no ccb) ** ** HS_REG: Host-Status (<>0!) */ SCR_FROM_REG (dsa), 0, SCR_JUMP ^ IFTRUE (DATA (0xff)), PADDR (start), /* ** dsa is valid. ** complete the cleanup. */ SCR_JUMP, PADDR (cleanup_ok), }/*-------------------------< COMPLETE >-----------------*/,{ /* ** Complete message. ** ** Copy TEMP register to LASTP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.lastp), /* ** When we terminate the cycle by clearing ACK, ** the target may disconnect immediately. ** ** We don't want to be told of an ** "unexpected disconnect", ** so we disable this feature. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, /* ** Terminate cycle ... */ SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** ... and wait for the disconnect. */ SCR_WAIT_DISC, 0, }/*-------------------------< CLEANUP_OK >----------------*/,{ /* ** Save host status to header. */ SCR_COPY (4), RADDR (scr0), NADDR (header.status), /* ** and copy back the header to the ccb. */ SCR_COPY_F (4), RADDR (dsa), PADDR (cleanup0), /* ** Flush script prefetch if required */ PREFETCH_FLUSH SCR_COPY (sizeof (struct head)), NADDR (header), }/*-------------------------< CLEANUP0 >--------------------*/,{ 0, }/*-------------------------< SIGNAL >----------------------*/,{ /* ** if job not completed ... */ SCR_FROM_REG (HS_REG), 0, /* ** ... start the next command. */ SCR_JUMP ^ IFTRUE (MASK (0, (HS_DONEMASK|HS_SKIPMASK))), PADDR(start), /* ** If command resulted in not GOOD status, ** call the C code if needed. 
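**	(SS_REG holds the SCSI status byte captured by the STATUS
**	fragment above; BAD_STATUS only interrupts the C code for
**	CHECK CONDITION, TASK SET FULL and COMMAND TERMINATED.)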
*/ SCR_FROM_REG (SS_REG), 0, SCR_CALL ^ IFFALSE (DATA (SAM_STAT_GOOD)), PADDRH (bad_status), #ifndef SCSI_NCR_CCB_DONE_SUPPORT /* ** ... signal completion to the host */ SCR_INT, SIR_INTFLY, /* ** Auf zu neuen Schandtaten! */ SCR_JUMP, PADDR(start), #else /* defined SCSI_NCR_CCB_DONE_SUPPORT */ /* ** ... signal completion to the host */ SCR_JUMP, }/*------------------------< DONE_POS >---------------------*/,{ PADDRH (done_queue), }/*------------------------< DONE_PLUG >--------------------*/,{ SCR_INT, SIR_DONE_OVERFLOW, }/*------------------------< DONE_END >---------------------*/,{ SCR_INT, SIR_INTFLY, SCR_COPY (4), RADDR (temp), PADDR (done_pos), SCR_JUMP, PADDR (start), #endif /* SCSI_NCR_CCB_DONE_SUPPORT */ }/*-------------------------< SAVE_DP >------------------*/,{ /* ** SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), SCR_CLR (SCR_ACK), 0, SCR_JUMP, PADDR (dispatch), }/*-------------------------< RESTORE_DP >---------------*/,{ /* ** RESTORE_DP message: ** Copy SAVEP in header to TEMP register. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_JUMP, PADDR (clrack), }/*-------------------------< DISCONNECT >---------------*/,{ /* ** DISCONNECTing ... ** ** disable the "unexpected disconnect" feature, ** and remove the ACK signal. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** Wait for the disconnect. */ SCR_WAIT_DISC, 0, /* ** Status is: DISCONNECTED. */ SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 0, SCR_JUMP, PADDR (cleanup_ok), }/*-------------------------< MSG_OUT >-------------------*/,{ /* ** The target requests a message. */ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), NADDR (msgout), NADDR (lastmsg), /* ** If it was no ABORT message ... */ SCR_JUMP ^ IFTRUE (DATA (ABORT_TASK_SET)), PADDRH (msg_out_abort), /* ** ... wait for the next phase ** if it's a message out, send it again, ... */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDR (msg_out), }/*-------------------------< MSG_OUT_DONE >--------------*/,{ /* ** ... else clear the message ... */ SCR_LOAD_REG (scratcha, NOP), 0, SCR_COPY (4), RADDR (scratcha), NADDR (msgout), /* ** ... and process the next phase */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< IDLE >------------------------*/,{ /* ** Nothing to do? ** Wait for reselect. ** This NOP will be patched with LED OFF ** SCR_REG_REG (gpreg, SCR_OR, 0x01) */ SCR_NO_OP, 0, }/*-------------------------< RESELECT >--------------------*/,{ /* ** make the DSA invalid. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, HS_IN_RESELECT), 0, /* ** Sleep waiting for a reselection. ** If SIGP is set, special treatment. ** ** Zu allem bereit .. */ SCR_WAIT_RESEL, PADDR(start), }/*-------------------------< RESELECTED >------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** ... zu nichts zu gebrauchen ? ** ** load the target id into the SFBR ** and jump to the control block. ** ** Look at the declarations of ** - struct ncb ** - struct tcb ** - struct lcb ** - struct ccb ** to understand what's going on. */ SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 0, SCR_TO_REG (sdid), 0, SCR_JUMP, NADDR (jump_tcb), }/*-------------------------< RESEL_DSA >-------------------*/,{ /* ** Ack the IDENTIFY or TAG previously received. */ SCR_CLR (SCR_ACK), 0, /* ** The ncr doesn't have an indirect load ** or store command. 
So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos1), /* ** Flush script prefetch if required */ PREFETCH_FLUSH /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ... */ }/*-------------------------< LOADPOS1 >-------------------*/,{ 0, NADDR (header), /* ** The DSA contains the data structure address. */ SCR_JUMP, PADDR (prepare), }/*-------------------------< RESEL_LUN >-------------------*/,{ /* ** come back to this point ** to get an IDENTIFY message ** Wait for a msg_in phase. */ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)), SIR_RESEL_NO_MSG_IN, /* ** message phase. ** Read the data directly from the BUS DATA lines. ** This helps to support very old SCSI devices that ** may reselect without sending an IDENTIFY. */ SCR_FROM_REG (sbdl), 0, /* ** It should be an Identify message. */ SCR_RETURN, 0, }/*-------------------------< RESEL_TAG >-------------------*/,{ /* ** Read IDENTIFY + SIMPLE + TAG using a single MOVE. ** Aggressive optimization, is'nt it? ** No need to test the SIMPLE TAG message, since the ** driver only supports conformant devices for tags. ;-) */ SCR_MOVE_ABS (3) ^ SCR_MSG_IN, NADDR (msgin), /* ** Read the TAG from the SIDL. ** Still an aggressive optimization. ;-) ** Compute the CCB indirect jump address which ** is (#TAG*2 & 0xfc) due to tag numbering using ** 1,3,5..MAXTAGS*2+1 actual values. */ SCR_REG_SFBR (sidl, SCR_SHL, 0), 0, SCR_SFBR_REG (temp, SCR_AND, 0xfc), 0, }/*-------------------------< JUMP_TO_NEXUS >-------------------*/,{ SCR_COPY_F (4), RADDR (temp), PADDR (nexus_indirect), /* ** Flush script prefetch if required */ PREFETCH_FLUSH SCR_COPY (4), }/*-------------------------< NEXUS_INDIRECT >-------------------*/,{ 0, RADDR (temp), SCR_RETURN, 0, }/*-------------------------< RESEL_NOTAG >-------------------*/,{ /* ** No tag expected. ** Read an throw away the IDENTIFY. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_JUMP, PADDR (jump_to_nexus), }/*-------------------------< DATA_IN >--------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTERL parameter, ** it is filled in at runtime. ** ** ##===========< i=0; i<MAX_SCATTERL >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_IN, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** **--------------------------------------------------------- */ 0 }/*-------------------------< DATA_IN2 >-------------------*/,{ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< DATA_OUT >--------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTERL parameter, ** it is filled in at runtime. ** ** ##===========< i=0; i<MAX_SCATTERL >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_OUT, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** **--------------------------------------------------------- */ 0 }/*-------------------------< DATA_OUT2 >-------------------*/,{ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*--------------------------------------------------------*/ }; static struct scripth scripth0 __initdata = { /*-------------------------< TRYLOOP >---------------------*/{ /* ** Start the next entry. 
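**	(Each entry is a two word SCR_CALL.  ncr_put_start_queue()
**	patches the address word of the next free entry, at odd index
**	1, 3, 5, ... wrapping back to 1, with CCB_PHYS (cp, start),
**	and points the following entry back at 'idle' to terminate
**	the queue.)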
** Called addresses point to the launch script in the CCB. ** They are patched by the main processor. ** ** Because the size depends on the ** #define MAX_START parameter, it is filled ** in at runtime. ** **----------------------------------------------------------- ** ** ##===========< I=0; i<MAX_START >=========== ** || SCR_CALL, ** || PADDR (idle), ** ##========================================== ** **----------------------------------------------------------- */ 0 }/*------------------------< TRYLOOP2 >---------------------*/,{ SCR_JUMP, PADDRH(tryloop), #ifdef SCSI_NCR_CCB_DONE_SUPPORT }/*------------------------< DONE_QUEUE >-------------------*/,{ /* ** Copy the CCB address to the next done entry. ** Because the size depends on the ** #define MAX_DONE parameter, it is filled ** in at runtime. ** **----------------------------------------------------------- ** ** ##===========< I=0; i<MAX_DONE >=========== ** || SCR_COPY (sizeof(struct ccb *), ** || NADDR (header.cp), ** || NADDR (ccb_done[i]), ** || SCR_CALL, ** || PADDR (done_end), ** ##========================================== ** **----------------------------------------------------------- */ 0 }/*------------------------< DONE_QUEUE2 >------------------*/,{ SCR_JUMP, PADDRH (done_queue), #endif /* SCSI_NCR_CCB_DONE_SUPPORT */ }/*------------------------< SELECT_NO_ATN >-----------------*/,{ /* ** Set Initiator mode. ** And try to select this target without ATN. */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, HS_SELECTING), 0, SCR_SEL_TBL ^ offsetof (struct dsb, select), PADDR (reselect), SCR_JUMP, PADDR (select2), }/*-------------------------< CANCEL >------------------------*/,{ SCR_LOAD_REG (scratcha, HS_ABORTED), 0, SCR_JUMPR, 8, }/*-------------------------< SKIP >------------------------*/,{ SCR_LOAD_REG (scratcha, 0), 0, /* ** This entry has been canceled. ** Next time use the next slot. */ SCR_COPY (4), RADDR (temp), PADDR (startpos), /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDRH (skip2), /* ** Flush script prefetch if required */ PREFETCH_FLUSH /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ... */ }/*-------------------------< SKIP2 >---------------------*/,{ 0, NADDR (header), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), /* ** Force host status. */ SCR_FROM_REG (scratcha), 0, SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), 16, SCR_REG_REG (HS_REG, SCR_OR, HS_SKIPMASK), 0, SCR_JUMPR, 8, SCR_TO_REG (HS_REG), 0, SCR_LOAD_REG (SS_REG, SAM_STAT_GOOD), 0, SCR_JUMP, PADDR (cleanup_ok), },/*-------------------------< PAR_ERR_DATA_IN >---------------*/{ /* ** Ignore all data in byte, until next phase */ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), PADDRH (par_err_other), SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), SCR_JUMPR, -24, },/*-------------------------< PAR_ERR_OTHER >------------------*/{ /* ** count it. */ SCR_REG_REG (PS_REG, SCR_ADD, 0x01), 0, /* ** jump to dispatcher. */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< MSG_REJECT >---------------*/,{ /* ** If a negotiation was in progress, ** negotiation failed. ** Otherwise, let the C code print ** some message. 
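**	(HS_REG still contains HS_NEGOTIATE while an SDTR/WDTR answer
**	is pending, so a reject received in that state is reported as
**	SIR_NEGO_FAILED, and as SIR_REJECT_RECEIVED otherwise.)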
*/ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), SIR_REJECT_RECEIVED, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get residue size. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Size is 0 .. ignore message. */ SCR_JUMP ^ IFTRUE (DATA (0)), PADDR (clrack), /* ** Size is not 1 .. have to interrupt. */ SCR_JUMPR ^ IFFALSE (DATA (1)), 40, /* ** Check for residue byte in swide register */ SCR_FROM_REG (scntl2), 0, SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), 16, /* ** There IS data in the swide register. ** Discard it. */ SCR_REG_REG (scntl2, SCR_OR, WSR), 0, SCR_JUMP, PADDR (clrack), /* ** Load again the size to the sfbr register. */ SCR_FROM_REG (scratcha), 0, SCR_INT, SIR_IGN_RESIDUE, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_EXTENDED >-------------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get length. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* */ SCR_JUMP ^ IFTRUE (DATA (3)), PADDRH (msg_ext_3), SCR_JUMP ^ IFFALSE (DATA (2)), PADDR (msg_bad), }/*-------------------------< MSG_EXT_2 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), SCR_JUMP ^ IFTRUE (DATA (EXTENDED_WDTR)), PADDRH (msg_wdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_WDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get data bus width */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[3]), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_WIDE, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), PADDRH (nego_bad_phase), }/*-------------------------< SEND_WDTR >----------------*/,{ /* ** Send the EXTENDED_WDTR */ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), NADDR (msgout), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_EXT_3 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), SCR_JUMP ^ IFTRUE (DATA (EXTENDED_SDTR)), PADDRH (msg_sdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_SDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get period and offset */ SCR_MOVE_ABS (2) ^ SCR_MSG_IN, NADDR (msgin[3]), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_SYNC, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)), PADDRH (nego_bad_phase), }/*-------------------------< SEND_SDTR >-------------*/,{ /* ** Send the EXTENDED_SDTR */ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), NADDR (msgout), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< NEGO_BAD_PHASE >------------*/,{ SCR_INT, SIR_NEGO_PROTO, SCR_JUMP, PADDR (dispatch), }/*-------------------------< MSG_OUT_ABORT >-------------*/,{ /* ** After ABORT message, ** ** expect an immediate disconnect, ... 
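**	(As in COMPLETE and DISCONNECT above, ANDing SCNTL2 with 0x7f
**	clears its bit 7 so that the expected disconnect is not flagged
**	as an "unexpected disconnect" condition.)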
*/ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, /* ** ... and set the status to "ABORTED" */ SCR_LOAD_REG (HS_REG, HS_ABORTED), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< HDATA_IN >-------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTERH parameter, ** it is filled in at runtime. ** ** ##==< i=MAX_SCATTERL; i<MAX_SCATTERL+MAX_SCATTERH >== ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_IN, ** || offsetof (struct dsb, data[ i]), ** ##=================================================== ** **--------------------------------------------------------- */ 0 }/*-------------------------< HDATA_IN2 >------------------*/,{ SCR_JUMP, PADDR (data_in), }/*-------------------------< HDATA_OUT >-------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTERH parameter, ** it is filled in at runtime. ** ** ##==< i=MAX_SCATTERL; i<MAX_SCATTERL+MAX_SCATTERH >== ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_OUT, ** || offsetof (struct dsb, data[ i]), ** ##=================================================== ** **--------------------------------------------------------- */ 0 }/*-------------------------< HDATA_OUT2 >------------------*/,{ SCR_JUMP, PADDR (data_out), }/*-------------------------< RESET >----------------------*/,{ /* ** Send a TARGET_RESET message if bad IDENTIFY ** received on reselection. */ SCR_LOAD_REG (scratcha, ABORT_TASK), 0, SCR_JUMP, PADDRH (abort_resel), }/*-------------------------< ABORTTAG >-------------------*/,{ /* ** Abort a wrong tag received on reselection. */ SCR_LOAD_REG (scratcha, ABORT_TASK), 0, SCR_JUMP, PADDRH (abort_resel), }/*-------------------------< ABORT >----------------------*/,{ /* ** Abort a reselection when no active CCB. */ SCR_LOAD_REG (scratcha, ABORT_TASK_SET), 0, }/*-------------------------< ABORT_RESEL >----------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, /* ** and send it. ** we expect an immediate disconnect */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), NADDR (msgout), NADDR (lastmsg), SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, SCR_JUMP, PADDR (start), }/*-------------------------< RESEND_IDENT >-------------------*/,{ /* ** The target stays in MSG OUT phase after having acked ** Identify [+ Tag [+ Extended message ]]. Targets shall ** behave this way on parity error. ** We must send it again all the messages. */ SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */ 0, /* 1rst ACK = 90 ns. Hope the NCR is'nt too fast */ SCR_JUMP, PADDR (send_ident), }/*-------------------------< CLRATN_GO_ON >-------------------*/,{ SCR_CLR (SCR_ATN), 0, SCR_JUMP, }/*-------------------------< NXTDSP_GO_ON >-------------------*/,{ 0, }/*-------------------------< SDATA_IN >-------------------*/,{ SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), PADDR (dispatch), SCR_MOVE_TBL ^ SCR_DATA_IN, offsetof (struct dsb, sense), SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< DATA_IO >--------------------*/,{ /* ** We jump here if the data direction was unknown at the ** time we had to queue the command to the scripts processor. 
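**	(This is the DMA_BIDIRECTIONAL case of ncr_queue_command(),
**	which speculates DATA IN but also prepares wlastp/wgoalp for
**	DATA OUT.)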
** Pointers had been set as follow in this situation: ** savep --> DATA_IO ** lastp --> start pointer when DATA_IN ** goalp --> goal pointer when DATA_IN ** wlastp --> start pointer when DATA_OUT ** wgoalp --> goal pointer when DATA_OUT ** This script sets savep/lastp/goalp according to the ** direction chosen by the target. */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_OUT)), 32, /* ** Direction is DATA IN. ** Warning: we jump here, even when phase is DATA OUT. */ SCR_COPY (4), NADDR (header.lastp), NADDR (header.savep), /* ** Jump to the SCRIPTS according to actual direction. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_RETURN, 0, /* ** Direction is DATA OUT. */ SCR_COPY (4), NADDR (header.wlastp), NADDR (header.lastp), SCR_COPY (4), NADDR (header.wgoalp), NADDR (header.goalp), SCR_JUMPR, -64, }/*-------------------------< BAD_IDENTIFY >---------------*/,{ /* ** If message phase but not an IDENTIFY, ** get some help from the C code. ** Old SCSI device may behave so. */ SCR_JUMPR ^ IFTRUE (MASK (0x80, 0x80)), 16, SCR_INT, SIR_RESEL_NO_IDENTIFY, SCR_JUMP, PADDRH (reset), /* ** Message is an IDENTIFY, but lun is unknown. ** Read the message, since we got it directly ** from the SCSI BUS data lines. ** Signal problem to C code for logging the event. ** Send an ABORT_TASK_SET to clear all pending tasks. */ SCR_INT, SIR_RESEL_BAD_LUN, SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_JUMP, PADDRH (abort), }/*-------------------------< BAD_I_T_L >------------------*/,{ /* ** We donnot have a task for that I_T_L. ** Signal problem to C code for logging the event. ** Send an ABORT_TASK_SET message. */ SCR_INT, SIR_RESEL_BAD_I_T_L, SCR_JUMP, PADDRH (abort), }/*-------------------------< BAD_I_T_L_Q >----------------*/,{ /* ** We donnot have a task that matches the tag. ** Signal problem to C code for logging the event. ** Send an ABORT_TASK message. */ SCR_INT, SIR_RESEL_BAD_I_T_L_Q, SCR_JUMP, PADDRH (aborttag), }/*-------------------------< BAD_TARGET >-----------------*/,{ /* ** We donnot know the target that reselected us. ** Grab the first message if any (IDENTIFY). ** Signal problem to C code for logging the event. ** TARGET_RESET message. */ SCR_INT, SIR_RESEL_BAD_TARGET, SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_JUMP, PADDRH (reset), }/*-------------------------< BAD_STATUS >-----------------*/,{ /* ** If command resulted in either TASK_SET FULL, ** CHECK CONDITION or COMMAND TERMINATED, ** call the C code. */ SCR_INT ^ IFTRUE (DATA (SAM_STAT_TASK_SET_FULL)), SIR_BAD_STATUS, SCR_INT ^ IFTRUE (DATA (SAM_STAT_CHECK_CONDITION)), SIR_BAD_STATUS, SCR_INT ^ IFTRUE (DATA (SAM_STAT_COMMAND_TERMINATED)), SIR_BAD_STATUS, SCR_RETURN, 0, }/*-------------------------< START_RAM >-------------------*/,{ /* ** Load the script into on-chip RAM, ** and jump to start point. */ SCR_COPY_F (4), RADDR (scratcha), PADDRH (start_ram0), /* ** Flush script prefetch if required */ PREFETCH_FLUSH SCR_COPY (sizeof (struct script)), }/*-------------------------< START_RAM0 >--------------------*/,{ 0, PADDR (start), SCR_JUMP, PADDR (start), }/*-------------------------< STO_RESTART >-------------------*/,{ /* ** ** Repair start queue (e.g. next time use the next slot) ** and jump to start point. 
*/ SCR_COPY (4), RADDR (temp), PADDR (startpos), SCR_JUMP, PADDR (start), }/*-------------------------< WAIT_DMA >-------------------*/,{ /* ** For HP Zalon/53c720 systems, the Zalon interface ** between CPU and 53c720 does prefetches, which causes ** problems with self modifying scripts. The problem ** is overcome by calling a dummy subroutine after each ** modification, to force a refetch of the script on ** return from the subroutine. */ SCR_RETURN, 0, }/*-------------------------< SNOOPTEST >-------------------*/,{ /* ** Read the variable. */ SCR_COPY (4), NADDR(ncr_cache), RADDR (scratcha), /* ** Write the variable. */ SCR_COPY (4), RADDR (temp), NADDR(ncr_cache), /* ** Read back the variable. */ SCR_COPY (4), NADDR(ncr_cache), RADDR (temp), }/*-------------------------< SNOOPEND >-------------------*/,{ /* ** And stop. */ SCR_INT, 99, }/*--------------------------------------------------------*/ }; /*========================================================== ** ** ** Fill in #define dependent parts of the script ** ** **========================================================== */ void __init ncr_script_fill (struct script * scr, struct scripth * scrh) { int i; ncrcmd *p; p = scrh->tryloop; for (i=0; i<MAX_START; i++) { *p++ =SCR_CALL; *p++ =PADDR (idle); } BUG_ON((u_long)p != (u_long)&scrh->tryloop + sizeof (scrh->tryloop)); #ifdef SCSI_NCR_CCB_DONE_SUPPORT p = scrh->done_queue; for (i = 0; i<MAX_DONE; i++) { *p++ =SCR_COPY (sizeof(struct ccb *)); *p++ =NADDR (header.cp); *p++ =NADDR (ccb_done[i]); *p++ =SCR_CALL; *p++ =PADDR (done_end); } BUG_ON((u_long)p != (u_long)&scrh->done_queue+sizeof(scrh->done_queue)); #endif /* SCSI_NCR_CCB_DONE_SUPPORT */ p = scrh->hdata_in; for (i=0; i<MAX_SCATTERH; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (dispatch); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[i]); } BUG_ON((u_long)p != (u_long)&scrh->hdata_in + sizeof (scrh->hdata_in)); p = scr->data_in; for (i=MAX_SCATTERH; i<MAX_SCATTERH+MAX_SCATTERL; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (dispatch); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[i]); } BUG_ON((u_long)p != (u_long)&scr->data_in + sizeof (scr->data_in)); p = scrh->hdata_out; for (i=0; i<MAX_SCATTERH; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (dispatch); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[i]); } BUG_ON((u_long)p != (u_long)&scrh->hdata_out + sizeof (scrh->hdata_out)); p = scr->data_out; for (i=MAX_SCATTERH; i<MAX_SCATTERH+MAX_SCATTERL; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (dispatch); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[i]); } BUG_ON((u_long) p != (u_long)&scr->data_out + sizeof (scr->data_out)); } /*========================================================== ** ** ** Copy and rebind a script. ** ** **========================================================== */ static void __init ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len) { ncrcmd opcode, new, old, tmp1, tmp2; ncrcmd *start, *end; int relocs; int opchanged = 0; start = src; end = src + len/4; while (src < end) { opcode = *src++; *dst++ = cpu_to_scr(opcode); /* ** If we forget to change the length ** in struct script, a field will be ** padded with 0. This is an illegal ** command. 
*/ if (opcode == 0) { printk (KERN_ERR "%s: ERROR0 IN SCRIPT at %d.\n", ncr_name(np), (int) (src-start-1)); mdelay(1000); } if (DEBUG_FLAGS & DEBUG_SCRIPT) printk (KERN_DEBUG "%p: <%x>\n", (src-1), (unsigned)opcode); /* ** We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xc: /* ** COPY has TWO arguments. */ relocs = 2; tmp1 = src[0]; #ifdef RELOC_KVAR if ((tmp1 & RELOC_MASK) == RELOC_KVAR) tmp1 = 0; #endif tmp2 = src[1]; #ifdef RELOC_KVAR if ((tmp2 & RELOC_MASK) == RELOC_KVAR) tmp2 = 0; #endif if ((tmp1 ^ tmp2) & 3) { printk (KERN_ERR"%s: ERROR1 IN SCRIPT at %d.\n", ncr_name(np), (int) (src-start-1)); mdelay(1000); } /* ** If PREFETCH feature not enabled, remove ** the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) { dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH); ++opchanged; } break; case 0x0: /* ** MOVE (absolute address) */ relocs = 1; break; case 0x8: /* ** JUMP / CALL ** don't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } if (relocs) { while (relocs--) { old = *src++; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + np->paddr; break; case RELOC_LABEL: new = (old & ~RELOC_MASK) + np->p_script; break; case RELOC_LABELH: new = (old & ~RELOC_MASK) + np->p_scripth; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + np->p_ncb; break; #ifdef RELOC_KVAR case RELOC_KVAR: if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) || ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST)) panic("ncr KVAR out of range"); new = vtophys(script_kvars[old & ~RELOC_MASK]); break; #endif case 0: /* Don't relocate a 0 address. */ if (old == 0) { new = old; break; } fallthrough; default: panic("ncr_script_copy_and_bind: weird relocation %x\n", old); break; } *dst++ = cpu_to_scr(new); } } else *dst++ = cpu_to_scr(*src++); } } /* ** Linux host data structure */ struct host_data { struct ncb *ncb; }; #define PRINT_ADDR(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg) static void ncr_print_msg(struct ccb *cp, char *label, u_char *msg) { PRINT_ADDR(cp->cmd, "%s: ", label); spi_print_msg(msg); printk("\n"); } /*========================================================== ** ** NCR chip clock divisor table. ** Divisors are multiplied by 10,000,000 in order to make ** calculations more simple. ** **========================================================== */ #define _5M 5000000 static u_long div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /*=============================================================== ** ** Prepare io register values used by ncr_init() according ** to selected and supported features. ** ** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128 ** transfers. 32,64,128 are only supported by 875 and 895 chips. ** We use log base 2 (burst length) as internal code, with ** value 0 meaning "burst disabled". ** **=============================================================== */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. Burst enable is ctest0 for c720 */ #define burst_code(dmode, ctest0) \ (ctest0) & 0x80 ? 0 : (((dmode) & 0xc0) >> 6) + 1 /* * Set initial io register bits from burst code. 
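 * A worked example: burst code 3 selects 1 << 3 = 8 dword bursts.
 * ncr_init_burst(np, 3) leaves CTEST0 bit 7 clear (bursts enabled),
 * puts (3 - 1) & 3 = 2 into DMODE bits 7:6 and (3 - 1) & 4 = 0 into
 * CTEST5 bit 2; burst_code() decodes that back to 3.  Code 0 sets
 * CTEST0 bit 7 instead, i.e. bursts disabled.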
*/ static inline void ncr_init_burst(struct ncb *np, u_char bc) { u_char *be = &np->rv_ctest0; *be &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { *be |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } static void __init ncr_prepare_setting(struct ncb *np) { u_char burst_max; u_long period; int i; /* ** Save assumed BIOS setting */ np->sv_scntl0 = INB(nc_scntl0) & 0x0a; np->sv_scntl3 = INB(nc_scntl3) & 0x07; np->sv_dmode = INB(nc_dmode) & 0xce; np->sv_dcntl = INB(nc_dcntl) & 0xa8; np->sv_ctest0 = INB(nc_ctest0) & 0x84; np->sv_ctest3 = INB(nc_ctest3) & 0x01; np->sv_ctest4 = INB(nc_ctest4) & 0x80; np->sv_ctest5 = INB(nc_ctest5) & 0x24; np->sv_gpcntl = INB(nc_gpcntl); np->sv_stest2 = INB(nc_stest2) & 0x20; np->sv_stest4 = INB(nc_stest4); /* ** Wide ? */ np->maxwide = (np->features & FE_WIDE)? 1 : 0; /* * Guess the frequency of the chip's clock. */ if (np->features & FE_ULTRA) np->clock_khz = 80000; else np->clock_khz = 40000; /* * Get the clock multiplier factor. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* * Measure SCSI clock frequency for chips * it may vary from assumed one. */ if (np->features & FE_VARCLK) ncr_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = (period + 40 - 1) / 40; /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & FE_ULTRA)) np->minsync = 25; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 
254 : period / 10; /* ** Prepare initial value of other IO registers */ #if defined SCSI_NCR_TRUST_BIOS_SETTING np->rv_scntl0 = np->sv_scntl0; np->rv_dmode = np->sv_dmode; np->rv_dcntl = np->sv_dcntl; np->rv_ctest0 = np->sv_ctest0; np->rv_ctest3 = np->sv_ctest3; np->rv_ctest4 = np->sv_ctest4; np->rv_ctest5 = np->sv_ctest5; burst_max = burst_code(np->sv_dmode, np->sv_ctest0); #else /* ** Select burst length (dwords) */ burst_max = driver_setup.burst_max; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest0); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* ** Select all supported special features */ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ if (np->features & FE_PFEN) np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ if (np->features & FE_MUX) np->rv_ctest4 |= MUX; /* Host bus multiplex mode */ if (np->features & FE_EA) np->rv_dcntl |= EA; /* Enable ACK */ if (np->features & FE_EHP) np->rv_ctest0 |= EHP; /* Even host parity */ /* ** Select some other */ if (driver_setup.master_parity) np->rv_ctest4 |= MPEE; /* Master parity checking */ if (driver_setup.scsi_parity) np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* ** Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR; } #endif /* SCSI_NCR_TRUST_BIOS_SETTING */ /* * Prepare initial io register bits for burst length */ ncr_init_burst(np, burst_max); /* ** Set SCSI BUS mode. ** ** - ULTRA2 chips (895/895A/896) report the current ** BUS mode through the STEST4 IO register. ** - For previous generation chips (825/825A/875), ** user has to tell us how to check against HVD, ** since a 100% safe algorithm is not possible. */ np->scsi_mode = SMODE_SE; if (np->features & FE_DIFF) { switch(driver_setup.diff_support) { case 4: /* Trust previous settings if present, then GPIO3 */ if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; break; } fallthrough; case 3: /* SYMBIOS controllers report HVD through GPIO3 */ if (INB(nc_gpreg) & 0x08) break; fallthrough; case 2: /* Set HVD unconditionally */ np->scsi_mode = SMODE_HVD; fallthrough; case 1: /* Trust previous settings for HVD */ if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; break; default:/* Don't care about HVD */ break; } } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; /* ** Set LED support from SCRIPTS. ** Ignore this feature for boards known to use a ** specific GPIO wiring and for the 895A or 896 ** that drive the LED directly. ** Also probe initial setting of GPIO0 as output. */ if ((driver_setup.led_pin) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* ** Set irq mode. */ switch(driver_setup.irqm & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* ** Configure targets according to driver setup. ** Allow to override sync, wide and NOSCAN from ** boot command line. 
*/ for (i = 0 ; i < MAX_TARGET ; i++) { struct tcb *tp = &np->target[i]; tp->usrsync = driver_setup.default_sync; tp->usrwide = driver_setup.max_wide; tp->usrtags = MAX_TAGS; tp->period = 0xffff; if (!driver_setup.disconnection) np->target[i].usrflag = UF_NODISC; } /* ** Announce all that stuff to user. */ printk(KERN_INFO "%s: ID %d, Fast-%d%s%s\n", ncr_name(np), np->myaddr, np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10), (np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity", (np->rv_stest2 & 0x20) ? ", Differential" : ""); if (bootverbose > 1) { printk (KERN_INFO "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printk (KERN_INFO "%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } if (bootverbose && np->paddr2) printk (KERN_INFO "%s: on-chip RAM at 0x%lx\n", ncr_name(np), np->paddr2); } /*========================================================== ** ** ** Done SCSI commands list management. ** ** We donnot enter the scsi_done() callback immediately ** after a command has been seen as completed but we ** insert it into a list which is flushed outside any kind ** of driver critical section. ** This allows to do minimal stuff under interrupt and ** inside critical sections and to also avoid locking up ** on recursive calls to driver entry points under SMP. ** In fact, the only kernel point which is entered by the ** driver with a driver lock set is kmalloc(GFP_ATOMIC) ** that shall not reenter the driver under any circumstances, ** AFAIK. ** **========================================================== */ static inline void ncr_queue_done_cmd(struct ncb *np, struct scsi_cmnd *cmd) { unmap_scsi_data(np, cmd); cmd->host_scribble = (char *) np->done_list; np->done_list = cmd; } static inline void ncr_flush_done_cmds(struct scsi_cmnd *lcmd) { struct scsi_cmnd *cmd; while (lcmd) { cmd = lcmd; lcmd = (struct scsi_cmnd *) cmd->host_scribble; scsi_done(cmd); } } /*========================================================== ** ** ** Prepare the next negotiation message if needed. ** ** Fill in the part of message buffer that contains the ** negotiation and the nego_status field of the CCB. ** Returns the size of the message in bytes. ** ** **========================================================== */ static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr) { struct tcb *tp = &np->target[cp->target]; int msglen = 0; int nego = 0; struct scsi_target *starget = tp->starget; /* negotiate wide transfers ? */ if (!tp->widedone) { if (spi_support_wide(starget)) { nego = NS_WIDE; } else tp->widedone=1; } /* negotiate synchronous transfers? */ if (!nego && !tp->period) { if (spi_support_sync(starget)) { nego = NS_SYNC; } else { tp->period =0xffff; dev_info(&starget->dev, "target did not report SYNC.\n"); } } switch (nego) { case NS_SYNC: msglen += spi_populate_sync_msg(msgptr + msglen, tp->maxoffs ? tp->minsync : 0, tp->maxoffs); break; case NS_WIDE: msglen += spi_populate_width_msg(msgptr + msglen, tp->usrwide); break; } cp->nego_status = nego; if (nego) { tp->nego_cp = cp; if (DEBUG_FLAGS & DEBUG_NEGO) { ncr_print_msg(cp, nego == NS_WIDE ? "wide msgout":"sync_msgout", msgptr); } } return msglen; } /*========================================================== ** ** ** Start execution of a SCSI command. 
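**	It builds the IDENTIFY/tag/negotiation messages and the CCB
**	data structure, then posts the CCB to the start queue.  The
**	return value is a DID_* code; DID_OK also covers commands that
**	were put on the waiting list because the controller is settling
**	or no CCB was free.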
** This is called from the generic SCSI driver. ** ** **========================================================== */ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct tcb *tp = &np->target[sdev->id]; struct lcb *lp = tp->lp[sdev->lun]; struct ccb *cp; int segments; u_char idmsg, *msgptr; u32 msglen; int direction; u32 lastp, goalp; /*--------------------------------------------- ** ** Some shortcuts ... ** **--------------------------------------------- */ if ((sdev->id == np->myaddr ) || (sdev->id >= MAX_TARGET) || (sdev->lun >= MAX_LUN )) { return(DID_BAD_TARGET); } /*--------------------------------------------- ** ** Complete the 1st TEST UNIT READY command ** with error condition if the device is ** flagged NOSCAN, in order to speed up ** the boot. ** **--------------------------------------------- */ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12) && (tp->usrflag & UF_NOSCAN)) { tp->usrflag &= ~UF_NOSCAN; return DID_BAD_TARGET; } if (DEBUG_FLAGS & DEBUG_TINY) { PRINT_ADDR(cmd, "CMD=%x ", cmd->cmnd[0]); } /*--------------------------------------------------- ** ** Assign a ccb / bind cmd. ** If resetting, shorten settle_time if necessary ** in order to avoid spurious timeouts. ** If resetting or no free ccb, ** insert cmd into the waiting list. ** **---------------------------------------------------- */ if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) { u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ; if (time_after(np->settle_time, tlimit)) np->settle_time = tlimit; } if (np->settle_time || !(cp=ncr_get_ccb (np, cmd))) { insert_into_waiting_list(np, cmd); return(DID_OK); } cp->cmd = cmd; /*---------------------------------------------------- ** ** Build the identify / tag / sdtr message ** **---------------------------------------------------- */ idmsg = IDENTIFY(0, sdev->lun); if (cp ->tag != NO_TAG || (cp != np->ccb && np->disc && !(tp->usrflag & UF_NODISC))) idmsg |= 0x40; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; if (cp->tag != NO_TAG) { char order = np->order; /* ** Force ordered tag if necessary to avoid timeouts ** and to preserve interactivity. */ if (lp && time_after(jiffies, lp->tags_stime)) { if (lp->tags_smap) { order = ORDERED_QUEUE_TAG; if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>2){ PRINT_ADDR(cmd, "ordered tag forced.\n"); } } lp->tags_stime = jiffies + 3*HZ; lp->tags_smap = lp->tags_umap; } if (order == 0) { /* ** Ordered write ops, unordered read ops. */ switch (cmd->cmnd[0]) { case 0x08: /* READ_SMALL (6) */ case 0x28: /* READ_BIG (10) */ case 0xa8: /* READ_HUGE (12) */ order = SIMPLE_QUEUE_TAG; break; default: order = ORDERED_QUEUE_TAG; } } msgptr[msglen++] = order; /* ** Actual tags are numbered 1,3,5,..2*MAXTAGS+1, ** since we may have to deal with devices that have ** problems with #TAG 0 or too great #TAG numbers. */ msgptr[msglen++] = (cp->tag << 1) + 1; } /*---------------------------------------------------- ** ** Build the data descriptors ** **---------------------------------------------------- */ direction = cmd->sc_data_direction; if (direction != DMA_NONE) { segments = ncr_scatter(np, cp, cp->cmd); if (segments < 0) { ncr_free_ccb(np, cp); return(DID_ERROR); } } else { cp->data_len = 0; segments = 0; } /*--------------------------------------------------- ** ** negotiation required? 
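**	Only attempted when wide or sync is still unsettled for this
**	target, no other CCB of the target is already negotiating
**	(tp->nego_cp) and a logical unit control block exists.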
** ** (nego_status is filled by ncr_prepare_nego()) ** **--------------------------------------------------- */ cp->nego_status = 0; if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) { msglen += ncr_prepare_nego (np, cp, msgptr + msglen); } /*---------------------------------------------------- ** ** Determine xfer direction. ** **---------------------------------------------------- */ if (!cp->data_len) direction = DMA_NONE; /* ** If data direction is BIDIRECTIONAL, speculate FROM_DEVICE ** but prepare alternate pointers for TO_DEVICE in case ** of our speculation will be just wrong. ** SCRIPTS will swap values if needed. */ switch(direction) { case DMA_BIDIRECTIONAL: case DMA_TO_DEVICE: goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8; if (segments <= MAX_SCATTERL) lastp = goalp - 8 - (segments * 16); else { lastp = NCB_SCRIPTH_PHYS (np, hdata_out2); lastp -= (segments - MAX_SCATTERL) * 16; } if (direction != DMA_BIDIRECTIONAL) break; cp->phys.header.wgoalp = cpu_to_scr(goalp); cp->phys.header.wlastp = cpu_to_scr(lastp); fallthrough; case DMA_FROM_DEVICE: goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8; if (segments <= MAX_SCATTERL) lastp = goalp - 8 - (segments * 16); else { lastp = NCB_SCRIPTH_PHYS (np, hdata_in2); lastp -= (segments - MAX_SCATTERL) * 16; } break; default: case DMA_NONE: lastp = goalp = NCB_SCRIPT_PHYS (np, no_data); break; } /* ** Set all pointers values needed by SCRIPTS. ** If direction is unknown, start at data_io. */ cp->phys.header.lastp = cpu_to_scr(lastp); cp->phys.header.goalp = cpu_to_scr(goalp); if (direction == DMA_BIDIRECTIONAL) cp->phys.header.savep = cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io)); else cp->phys.header.savep= cpu_to_scr(lastp); /* ** Save the initial data pointer in order to be able ** to redo the command. */ cp->startp = cp->phys.header.savep; /*---------------------------------------------------- ** ** fill in ccb ** **---------------------------------------------------- ** ** ** physical -> virtual backlink ** Generic SCSI command */ /* ** Startqueue */ cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_dsa)); /* ** select */ cp->phys.select.sel_id = sdev_id(sdev); cp->phys.select.sel_scntl3 = tp->wval; cp->phys.select.sel_sxfer = tp->sval; /* ** message */ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg)); cp->phys.smsg.size = cpu_to_scr(msglen); /* ** command */ memcpy(cp->cdb_buf, cmd->cmnd, min_t(int, cmd->cmd_len, sizeof(cp->cdb_buf))); cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, cdb_buf[0])); cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); /* ** status */ cp->actualquirks = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->scsi_status = SAM_STAT_ILLEGAL; cp->parity_status = 0; cp->xerr_status = XE_OK; /*---------------------------------------------------- ** ** Critical region: start this job. ** **---------------------------------------------------- */ /* activate this job. */ cp->magic = CCB_MAGIC; /* ** insert next CCBs into start queue. ** 2 max at a time is enough to flush the CCB wait queue. */ cp->auto_sense = 0; if (lp) ncr_start_next_ccb(np, lp, 2); else ncr_put_start_queue(np, cp); /* Command is successfully queued. */ return DID_OK; } /*========================================================== ** ** ** Insert a CCB into the start queue and wake up the ** SCRIPTS processor. 
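**
**	(Queue geometry, as implemented by ncr_put_start_queue() below:
**	CCBs occupy the odd slots of scripth->tryloop, two apart.  With
**	squeueput == 5, for instance, slot 7 is first plugged with the
**	'idle' script as the new end marker, a memory barrier is issued,
**	slot 5 is pointed at the CCB, squeueput becomes 7 and SIGP is
**	raised to wake up the SCRIPTS processor.)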
** ** **========================================================== */ static void ncr_start_next_ccb(struct ncb *np, struct lcb *lp, int maxn) { struct list_head *qp; struct ccb *cp; if (lp->held_ccb) return; while (maxn-- && lp->queuedccbs < lp->queuedepth) { qp = ncr_list_pop(&lp->wait_ccbq); if (!qp) break; ++lp->queuedccbs; cp = list_entry(qp, struct ccb, link_ccbq); list_add_tail(qp, &lp->busy_ccbq); lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag] = cpu_to_scr(CCB_PHYS (cp, restart)); ncr_put_start_queue(np, cp); } } static void ncr_put_start_queue(struct ncb *np, struct ccb *cp) { u16 qidx; /* ** insert into start queue. */ if (!np->squeueput) np->squeueput = 1; qidx = np->squeueput + 2; if (qidx >= MAX_START + MAX_START) qidx = 1; np->scripth->tryloop [qidx] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); MEMORY_BARRIER(); np->scripth->tryloop [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, start)); np->squeueput = qidx; ++np->queuedccbs; cp->queued = 1; if (DEBUG_FLAGS & DEBUG_QUEUE) printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput); /* ** Script processor may be waiting for reselect. ** Wake it up. */ MEMORY_BARRIER(); OUTB (nc_istat, SIGP); } static int ncr_reset_scsi_bus(struct ncb *np, int enab_int, int settle_delay) { u32 term; int retv = 0; np->settle_time = jiffies + settle_delay * HZ; if (bootverbose > 1) printk("%s: resetting, " "command processing suspended for %d seconds\n", ncr_name(np), settle_delay); ncr_chip_reset(np, 100); udelay(2000); /* The 895 needs time for the bus mode to settle */ if (enab_int) OUTW (nc_sien, RST); /* ** Enable Tolerant, reset IRQD if present and ** properly set IRQ mode, prior to resetting the bus. */ OUTB (nc_stest3, TE); OUTB (nc_scntl1, CRST); udelay(200); if (!driver_setup.bus_check) goto out; /* ** Check for no terminators or SCSI bus shorts to ground. ** Read SCSI data bus, data parity bits and control signals. ** We are expecting RESET to be TRUE and other signals to be ** FALSE. */ term = INB(nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!(np->features & FE_WIDE)) term &= 0x3ffff; if (term != (2<<7)) { printk("%s: suspicious SCSI data while resetting the BUS.\n", ncr_name(np)); printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", ncr_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (driver_setup.bus_check == 1) retv = 1; } out: OUTB (nc_scntl1, 0); return retv; } /* * Start reset process. * If reset in progress do nothing. * The interrupt handler will reinitialize the chip. * The timeout handler will wait for settle_time before * clearing it and so resuming command processing. */ static void ncr_start_reset(struct ncb *np) { if (!np->settle_time) { ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay); } } /*========================================================== ** ** ** Reset the SCSI BUS. ** This is called from the generic SCSI driver. ** ** **========================================================== */ static int ncr_reset_bus (struct ncb *np) { /* * Return immediately if reset is in progress. */ if (np->settle_time) { return FAILED; } /* * Start the reset process. * The script processor is then assumed to be stopped. * Commands will now be queued in the waiting list until a settle * delay of 2 seconds will be completed. 
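	 * (settle_time is cleared later by ncr_timeout(), which then
	 * re-enables disconnects and requeues the waiting list, so
	 * command processing resumes automatically.)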
*/ ncr_start_reset(np); /* * Wake-up all awaiting commands with DID_RESET. */ reset_waiting_list(np); /* * Wake-up all pending commands with HS_RESET -> DID_RESET. */ ncr_wakeup(np, HS_RESET); return SUCCESS; } static void ncr_detach(struct ncb *np) { struct ccb *cp; struct tcb *tp; struct lcb *lp; int target, lun; int i; char inst_name[16]; /* Local copy so we don't access np after freeing it! */ strscpy(inst_name, ncr_name(np), sizeof(inst_name)); printk("%s: releasing host resources\n", ncr_name(np)); /* ** Stop the ncr_timeout process ** Set release_stage to 1 and wait that ncr_timeout() set it to 2. */ #ifdef DEBUG_NCR53C8XX printk("%s: stopping the timer\n", ncr_name(np)); #endif np->release_stage = 1; for (i = 50 ; i && np->release_stage != 2 ; i--) mdelay(100); if (np->release_stage != 2) printk("%s: the timer seems to be already stopped\n", ncr_name(np)); else np->release_stage = 2; /* ** Disable chip interrupts */ #ifdef DEBUG_NCR53C8XX printk("%s: disabling chip interrupts\n", ncr_name(np)); #endif OUTW (nc_sien , 0); OUTB (nc_dien , 0); /* ** Reset NCR chip ** Restore bios setting for automatic clock detection. */ printk("%s: resetting chip\n", ncr_name(np)); ncr_chip_reset(np, 100); OUTB(nc_dmode, np->sv_dmode); OUTB(nc_dcntl, np->sv_dcntl); OUTB(nc_ctest0, np->sv_ctest0); OUTB(nc_ctest3, np->sv_ctest3); OUTB(nc_ctest4, np->sv_ctest4); OUTB(nc_ctest5, np->sv_ctest5); OUTB(nc_gpcntl, np->sv_gpcntl); OUTB(nc_stest2, np->sv_stest2); ncr_selectclock(np, np->sv_scntl3); /* ** Free allocated ccb(s) */ while ((cp=np->ccb->link_ccb) != NULL) { np->ccb->link_ccb = cp->link_ccb; if (cp->host_status) { printk("%s: shall free an active ccb (host_status=%d)\n", ncr_name(np), cp->host_status); } #ifdef DEBUG_NCR53C8XX printk("%s: freeing ccb (%lx)\n", ncr_name(np), (u_long) cp); #endif m_free_dma(cp, sizeof(*cp), "CCB"); } /* Free allocated tp(s) */ for (target = 0; target < MAX_TARGET ; target++) { tp=&np->target[target]; for (lun = 0 ; lun < MAX_LUN ; lun++) { lp = tp->lp[lun]; if (lp) { #ifdef DEBUG_NCR53C8XX printk("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp); #endif if (lp->jump_ccb != &lp->jump_ccb_0) m_free_dma(lp->jump_ccb,256,"JUMP_CCB"); m_free_dma(lp, sizeof(*lp), "LCB"); } } } if (np->scripth0) m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH"); if (np->script0) m_free_dma(np->script0, sizeof(struct script), "SCRIPT"); if (np->ccb) m_free_dma(np->ccb, sizeof(struct ccb), "CCB"); m_free_dma(np, sizeof(struct ncb), "NCB"); printk("%s: host resources successfully released\n", inst_name); } /*========================================================== ** ** ** Complete execution of a SCSI command. ** Signal completion to the generic SCSI driver. ** ** **========================================================== */ void ncr_complete (struct ncb *np, struct ccb *cp) { struct scsi_cmnd *cmd; struct tcb *tp; struct lcb *lp; /* ** Sanity check */ if (!cp || cp->magic != CCB_MAGIC || !cp->cmd) return; /* ** Print minimal debug information. */ if (DEBUG_FLAGS & DEBUG_TINY) printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp, cp->host_status,cp->scsi_status); /* ** Get command, target and lun pointers. */ cmd = cp->cmd; cp->cmd = NULL; tp = &np->target[cmd->device->id]; lp = tp->lp[cmd->device->lun]; /* ** We donnot queue more than 1 ccb per target ** with negotiation at any time. If this ccb was ** used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; /* ** If auto-sense performed, change scsi status. 
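	**	(cp->auto_sense holds the SAM status that triggered the
	**	request sense pass in ncr_sir_to_redo(), typically CHECK
	**	CONDITION; restoring it here lets the code below copy the
	**	gathered sense data back to the mid-layer.)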
*/ if (cp->auto_sense) { cp->scsi_status = cp->auto_sense; } /* ** If we were recovering from queue full or performing ** auto-sense, requeue skipped CCBs to the wait queue. */ if (lp && lp->held_ccb) { if (cp == lp->held_ccb) { list_splice_init(&lp->skip_ccbq, &lp->wait_ccbq); lp->held_ccb = NULL; } } /* ** Check for parity errors. */ if (cp->parity_status > 1) { PRINT_ADDR(cmd, "%d parity error(s).\n",cp->parity_status); } /* ** Check for extended errors. */ if (cp->xerr_status != XE_OK) { switch (cp->xerr_status) { case XE_EXTRA_DATA: PRINT_ADDR(cmd, "extraneous data discarded.\n"); break; case XE_BAD_PHASE: PRINT_ADDR(cmd, "invalid scsi phase (4/5).\n"); break; default: PRINT_ADDR(cmd, "extended error %d.\n", cp->xerr_status); break; } if (cp->host_status==HS_COMPLETE) cp->host_status = HS_FAIL; } /* ** Print out any error for debugging purpose. */ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) { if (cp->host_status != HS_COMPLETE || cp->scsi_status != SAM_STAT_GOOD) { PRINT_ADDR(cmd, "ERROR: cmd=%x host_status=%x " "scsi_status=%x\n", cmd->cmnd[0], cp->host_status, cp->scsi_status); } } /* ** Check the status. */ cmd->result = 0; if ( (cp->host_status == HS_COMPLETE) && (cp->scsi_status == SAM_STAT_GOOD || cp->scsi_status == SAM_STAT_CONDITION_MET)) { /* * All went well (GOOD status). * CONDITION MET status is returned on * `Pre-Fetch' or `Search data' success. */ set_status_byte(cmd, cp->scsi_status); /* ** @RESID@ ** Could dig out the correct value for resid, ** but it would be quite complicated. */ /* if (cp->phys.header.lastp != cp->phys.header.goalp) */ /* ** Allocate the lcb if not yet. */ if (!lp) ncr_alloc_lcb (np, cmd->device->id, cmd->device->lun); tp->bytes += cp->data_len; tp->transfers ++; /* ** If tags was reduced due to queue full, ** increase tags if 1000 good status received. */ if (lp && lp->usetags && lp->numtags < lp->maxtags) { ++lp->num_good; if (lp->num_good >= 1000) { lp->num_good = 0; ++lp->numtags; ncr_setup_tags (np, cmd->device); } } } else if ((cp->host_status == HS_COMPLETE) && (cp->scsi_status == SAM_STAT_CHECK_CONDITION)) { /* ** Check condition code */ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); /* ** Copy back sense data to caller's buffer. */ memcpy(cmd->sense_buffer, cp->sense_buf, min_t(size_t, SCSI_SENSE_BUFFERSIZE, sizeof(cp->sense_buf))); if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) { u_char *p = cmd->sense_buffer; int i; PRINT_ADDR(cmd, "sense data:"); for (i=0; i<14; i++) printk (" %x", *p++); printk (".\n"); } } else if ((cp->host_status == HS_COMPLETE) && (cp->scsi_status == SAM_STAT_RESERVATION_CONFLICT)) { /* ** Reservation Conflict condition code */ set_status_byte(cmd, SAM_STAT_RESERVATION_CONFLICT); } else if ((cp->host_status == HS_COMPLETE) && (cp->scsi_status == SAM_STAT_BUSY || cp->scsi_status == SAM_STAT_TASK_SET_FULL)) { /* ** Target is busy. 
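		**	(The raw SAM status is returned unchanged and the host
		**	byte stays DID_OK, leaving BUSY/TASK SET FULL handling
		**	to the SCSI mid-layer.)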
*/ set_status_byte(cmd, cp->scsi_status); } else if ((cp->host_status == HS_SEL_TIMEOUT) || (cp->host_status == HS_TIMEOUT)) { /* ** No response */ set_status_byte(cmd, cp->scsi_status); set_host_byte(cmd, DID_TIME_OUT); } else if (cp->host_status == HS_RESET) { /* ** SCSI bus reset */ set_status_byte(cmd, cp->scsi_status); set_host_byte(cmd, DID_RESET); } else if (cp->host_status == HS_ABORTED) { /* ** Transfer aborted */ set_status_byte(cmd, cp->scsi_status); set_host_byte(cmd, DID_ABORT); } else { /* ** Other protocol messes */ PRINT_ADDR(cmd, "COMMAND FAILED (%x %x) @%p.\n", cp->host_status, cp->scsi_status, cp); set_status_byte(cmd, cp->scsi_status); set_host_byte(cmd, DID_ERROR); } /* ** trace output */ if (tp->usrflag & UF_TRACE) { u_char * p; int i; PRINT_ADDR(cmd, " CMD:"); p = (u_char*) &cmd->cmnd[0]; for (i=0; i<cmd->cmd_len; i++) printk (" %x", *p++); if (cp->host_status==HS_COMPLETE) { switch (cp->scsi_status) { case SAM_STAT_GOOD: printk (" GOOD"); break; case SAM_STAT_CHECK_CONDITION: printk (" SENSE:"); p = (u_char*) &cmd->sense_buffer; for (i=0; i<14; i++) printk (" %x", *p++); break; default: printk (" STAT: %x\n", cp->scsi_status); break; } } else printk (" HOSTERROR: %x", cp->host_status); printk ("\n"); } /* ** Free this ccb */ ncr_free_ccb (np, cp); /* ** requeue awaiting scsi commands for this lun. */ if (lp && lp->queuedccbs < lp->queuedepth && !list_empty(&lp->wait_ccbq)) ncr_start_next_ccb(np, lp, 2); /* ** requeue awaiting scsi commands for this controller. */ if (np->waiting_list) requeue_waiting_list(np); /* ** signal completion to generic driver. */ ncr_queue_done_cmd(np, cmd); } /*========================================================== ** ** ** Signal all (or one) control block done. ** ** **========================================================== */ /* ** This CCB has been skipped by the NCR. ** Queue it in the corresponding unit queue. */ static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp) { struct tcb *tp = &np->target[cp->target]; struct lcb *lp = tp->lp[cp->lun]; if (lp && cp != np->ccb) { cp->host_status &= ~HS_SKIPMASK; cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); list_move_tail(&cp->link_ccbq, &lp->skip_ccbq); if (cp->queued) { --lp->queuedccbs; } } if (cp->queued) { --np->queuedccbs; cp->queued = 0; } } /* ** The NCR has completed CCBs. ** Look at the DONE QUEUE if enabled, otherwise scan all CCBs */ void ncr_wakeup_done (struct ncb *np) { struct ccb *cp; #ifdef SCSI_NCR_CCB_DONE_SUPPORT int i, j; i = np->ccb_done_ic; while (1) { j = i+1; if (j >= MAX_DONE) j = 0; cp = np->ccb_done[j]; if (!CCB_DONE_VALID(cp)) break; np->ccb_done[j] = (struct ccb *)CCB_DONE_EMPTY; np->scripth->done_queue[5*j + 4] = cpu_to_scr(NCB_SCRIPT_PHYS (np, done_plug)); MEMORY_BARRIER(); np->scripth->done_queue[5*i + 4] = cpu_to_scr(NCB_SCRIPT_PHYS (np, done_end)); if (cp->host_status & HS_DONEMASK) ncr_complete (np, cp); else if (cp->host_status & HS_SKIPMASK) ncr_ccb_skipped (np, cp); i = j; } np->ccb_done_ic = i; #else cp = np->ccb; while (cp) { if (cp->host_status & HS_DONEMASK) ncr_complete (np, cp); else if (cp->host_status & HS_SKIPMASK) ncr_ccb_skipped (np, cp); cp = cp->link_ccb; } #endif } /* ** Complete all active CCBs. */ void ncr_wakeup (struct ncb *np, u_long code) { struct ccb *cp = np->ccb; while (cp) { if (cp->host_status != HS_IDLE) { cp->host_status = code; ncr_complete (np, cp); } cp = cp->link_ccb; } } /* ** Reset ncr chip. */ /* Some initialisation must be done immediately following reset, for 53c720, * at least. 
EA (dcntl bit 5) isn't set here as it is set once only in * the _detect function. */ static void ncr_chip_reset(struct ncb *np, int delay) { OUTB (nc_istat, SRST); udelay(delay); OUTB (nc_istat, 0 ); if (np->features & FE_EHP) OUTB (nc_ctest0, EHP); if (np->features & FE_MUX) OUTB (nc_ctest4, MUX); } /*========================================================== ** ** ** Start NCR chip. ** ** **========================================================== */ void ncr_init (struct ncb *np, int reset, char * msg, u_long code) { int i; /* ** Reset chip if asked, otherwise just clear fifos. */ if (reset) { OUTB (nc_istat, SRST); udelay(100); } else { OUTB (nc_stest3, TE|CSF); OUTONB (nc_ctest3, CLF); } /* ** Message. */ if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg); /* ** Clear Start Queue */ np->queuedepth = MAX_START - 1; /* 1 entry needed as end marker */ for (i = 1; i < MAX_START + MAX_START; i += 2) np->scripth0->tryloop[i] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); /* ** Start at first entry. */ np->squeueput = 0; np->script0->startpos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np, tryloop)); #ifdef SCSI_NCR_CCB_DONE_SUPPORT /* ** Clear Done Queue */ for (i = 0; i < MAX_DONE; i++) { np->ccb_done[i] = (struct ccb *)CCB_DONE_EMPTY; np->scripth0->done_queue[5*i + 4] = cpu_to_scr(NCB_SCRIPT_PHYS (np, done_end)); } #endif /* ** Start at first entry. */ np->script0->done_pos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np,done_queue)); np->ccb_done_ic = MAX_DONE-1; np->scripth0->done_queue[5*(MAX_DONE-1) + 4] = cpu_to_scr(NCB_SCRIPT_PHYS (np, done_plug)); /* ** Wakeup all pending jobs. */ ncr_wakeup (np, code); /* ** Init chip. */ /* ** Remove reset; big delay because the 895 needs time for the ** bus mode to settle */ ncr_chip_reset(np, 2000); OUTB (nc_scntl0, np->rv_scntl0 | 0xc0); /* full arb., ena parity, par->ATN */ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */ OUTB (nc_istat , SIGP ); /* Signal Process */ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ OUTB (nc_ctest0, np->rv_ctest0); /* 720: CDIS and EHP */ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */ OUTB (nc_stest2, EXT|np->rv_stest2); /* Extended Sreq/Sack filtering */ OUTB (nc_stest3, TE); /* TolerANT enable */ OUTB (nc_stime0, 0x0c ); /* HTH disabled STO 0.25 sec */ /* ** Disable disconnects. */ np->disc = 0; /* ** Enable GPIO0 pin for writing if LED support. */ if (np->features & FE_LED0) { OUTOFFB (nc_gpcntl, 0x01); } /* ** enable ints */ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID); /* ** Fill in target structure. ** Reinitialize usrsync. ** Reinitialize usrwide. ** Prepare sync negotiation according to actual SCSI bus mode. */ for (i=0;i<MAX_TARGET;i++) { struct tcb *tp = &np->target[i]; tp->sval = 0; tp->wval = np->rv_scntl3; if (tp->usrsync != 255) { if (tp->usrsync <= np->maxsync) { if (tp->usrsync < np->minsync) { tp->usrsync = np->minsync; } } else tp->usrsync = 255; } if (tp->usrwide > np->maxwide) tp->usrwide = np->maxwide; } /* ** Start script processor. 
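	**	(When the chip has on-chip script RAM (np->paddr2 != 0), the
	**	scripts are downloaded by the start_ram fragment, which is
	**	assumed to pick up the host script address written to SCRATCHA
	**	below; otherwise execution starts directly from host memory.)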
*/ if (np->paddr2) { if (bootverbose) printk ("%s: Downloading SCSI SCRIPTS.\n", ncr_name(np)); OUTL (nc_scratcha, vtobus(np->script0)); OUTL_DSP (NCB_SCRIPTH_PHYS (np, start_ram)); } else OUTL_DSP (NCB_SCRIPT_PHYS (np, start)); } /*========================================================== ** ** Prepare the negotiation values for wide and ** synchronous transfers. ** **========================================================== */ static void ncr_negotiate (struct ncb* np, struct tcb* tp) { /* ** minsync unit is 4ns ! */ u_long minsync = tp->usrsync; /* ** SCSI bus mode limit */ if (np->scsi_mode && np->scsi_mode == SMODE_SE) { if (minsync < 12) minsync = 12; } /* ** our limit .. */ if (minsync < np->minsync) minsync = np->minsync; /* ** divider limit */ if (minsync > np->maxsync) minsync = 255; if (tp->maxoffs > np->maxoffs) tp->maxoffs = np->maxoffs; tp->minsync = minsync; tp->maxoffs = (minsync<255 ? tp->maxoffs : 0); /* ** period=0: has to negotiate sync transfer */ tp->period=0; /* ** widedone=0: has to negotiate wide transfer */ tp->widedone=0; } /*========================================================== ** ** Get clock factor and sync divisor for a given ** synchronous factor period. ** Returns the clock factor (in sxfer) and scntl3 ** synchronous divisor field. ** **========================================================== */ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl3p) { u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u_long fak; /* Sync factor in sxfer */ u_long per; /* Period in tenths of ns */ u_long kpc; /* (per * clk) */ /* ** Compute the synchronous period in tenths of nano-seconds */ if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; /* ** Look for the greatest clock divisor that allows an ** input speed faster than the period. */ kpc = per * clk; while (--div > 0) if (kpc >= (div_10M[div] << 2)) break; /* ** Calculate the lowest clock factor that allows an output ** speed not faster than the period. */ fak = (kpc - 1) / div_10M[div] + 1; if (fak < 4) fak = 4; /* Should never happen, too bad ... */ /* ** Compute and return sync parameters for the ncr */ *fakp = fak - 4; *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0); } /*========================================================== ** ** Set actual values, sync status and patch all ccbs of ** a target according to new sync/wide agreement. ** **========================================================== */ static void ncr_set_sync_wide_status (struct ncb *np, u_char target) { struct ccb *cp; struct tcb *tp = &np->target[target]; /* ** set actual value and sync_status */ OUTB (nc_sxfer, tp->sval); np->sync_st = tp->sval; OUTB (nc_scntl3, tp->wval); np->wide_st = tp->wval; /* ** patch ALL ccbs of this target. 
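	**	(The loop below walks the global CCB list and refreshes the
	**	sel_scntl3/sel_sxfer select parameters of every command bound
	**	to this target, so later (re)selections use the newly agreed
	**	wide/sync settings.)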
*/ for (cp = np->ccb; cp; cp = cp->link_ccb) { if (!cp->cmd) continue; if (scmd_id(cp->cmd) != target) continue; cp->phys.select.sel_scntl3 = tp->wval; cp->phys.select.sel_sxfer = tp->sval; } } /*========================================================== ** ** Switch sync mode for current job and it's target ** **========================================================== */ static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer) { struct scsi_cmnd *cmd = cp->cmd; struct tcb *tp; u_char target = INB (nc_sdid) & 0x0f; u_char idiv; BUG_ON(target != (scmd_id(cmd) & 0xf)); tp = &np->target[target]; if (!scntl3 || !(sxfer & 0x1f)) scntl3 = np->rv_scntl3; scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) | (np->rv_scntl3 & 0x07); /* ** Deduce the value of controller sync period from scntl3. ** period is in tenths of nano-seconds. */ idiv = ((scntl3 >> 4) & 0x7); if ((sxfer & 0x1f) && idiv) tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz; else tp->period = 0xffff; /* Stop there if sync parameters are unchanged */ if (tp->sval == sxfer && tp->wval == scntl3) return; tp->sval = sxfer; tp->wval = scntl3; if (sxfer & 0x01f) { /* Disable extended Sreq/Sack filtering */ if (tp->period <= 2000) OUTOFFB(nc_stest2, EXT); } spi_display_xfer_agreement(tp->starget); /* ** set actual value and sync_status ** patch ALL ccbs of this target. */ ncr_set_sync_wide_status(np, target); } /*========================================================== ** ** Switch wide mode for current job and it's target ** SCSI specs say: a SCSI device that accepts a WDTR ** message shall reset the synchronous agreement to ** asynchronous mode. ** **========================================================== */ static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack) { struct scsi_cmnd *cmd = cp->cmd; u16 target = INB (nc_sdid) & 0x0f; struct tcb *tp; u_char scntl3; u_char sxfer; BUG_ON(target != (scmd_id(cmd) & 0xf)); tp = &np->target[target]; tp->widedone = wide+1; scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0); sxfer = ack ? 0 : tp->sval; /* ** Stop there if sync/wide parameters are unchanged */ if (tp->sval == sxfer && tp->wval == scntl3) return; tp->sval = sxfer; tp->wval = scntl3; /* ** Bells and whistles ;-) */ if (bootverbose >= 2) { dev_info(&cmd->device->sdev_target->dev, "WIDE SCSI %sabled.\n", (scntl3 & EWS) ? "en" : "dis"); } /* ** set actual value and sync_status ** patch ALL ccbs of this target. */ ncr_set_sync_wide_status(np, target); } /*========================================================== ** ** Switch tagged mode for a target. ** **========================================================== */ static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev) { unsigned char tn = sdev->id, ln = sdev->lun; struct tcb *tp = &np->target[tn]; struct lcb *lp = tp->lp[ln]; u_char reqtags, maxdepth; /* ** Just in case ... */ if ((!tp) || (!lp) || !sdev) return; /* ** If SCSI device queue depth is not yet set, leave here. */ if (!lp->scdev_depth) return; /* ** Donnot allow more tags than the SCSI driver can queue ** for this device. ** Donnot allow more tags than we can handle. */ maxdepth = lp->scdev_depth; if (maxdepth > lp->maxnxs) maxdepth = lp->maxnxs; if (lp->maxtags > maxdepth) lp->maxtags = maxdepth; if (lp->numtags > maxdepth) lp->numtags = maxdepth; /* ** only devices conformant to ANSI Version >= 2 ** only devices capable of tagged commands ** only if enabled by user .. 
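	**
	**	(In the code below this boils down to: reqtags is the clamped
	**	numtags when sdev->tagged_supported is set and more than one
	**	tag is configured, and 1 otherwise.)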
*/ if (sdev->tagged_supported && lp->numtags > 1) { reqtags = lp->numtags; } else { reqtags = 1; } /* ** Update max number of tags */ lp->numtags = reqtags; if (lp->numtags > lp->maxtags) lp->maxtags = lp->numtags; /* ** If we want to switch tag mode, we must wait ** for no CCB to be active. */ if (reqtags > 1 && lp->usetags) { /* Stay in tagged mode */ if (lp->queuedepth == reqtags) /* Already announced */ return; lp->queuedepth = reqtags; } else if (reqtags <= 1 && !lp->usetags) { /* Stay in untagged mode */ lp->queuedepth = reqtags; return; } else { /* Want to switch tag mode */ if (lp->busyccbs) /* If not yet safe, return */ return; lp->queuedepth = reqtags; lp->usetags = reqtags > 1 ? 1 : 0; } /* ** Patch the lun mini-script, according to tag mode. */ lp->jump_tag.l_paddr = lp->usetags? cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag)) : cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag)); /* ** Announce change to user. */ if (bootverbose) { if (lp->usetags) { dev_info(&sdev->sdev_gendev, "tagged command queue depth set to %d\n", reqtags); } else { dev_info(&sdev->sdev_gendev, "tagged command queueing disabled\n"); } } } /*========================================================== ** ** ** ncr timeout handler. ** ** **========================================================== ** ** Misused to keep the driver running when ** interrupts are not configured correctly. ** **---------------------------------------------------------- */ static void ncr_timeout (struct ncb *np) { u_long thistime = jiffies; /* ** If release process in progress, let's go ** Set the release stage from 1 to 2 to synchronize ** with the release process. */ if (np->release_stage) { if (np->release_stage == 1) np->release_stage = 2; return; } np->timer.expires = jiffies + SCSI_NCR_TIMER_INTERVAL; add_timer(&np->timer); /* ** If we are resetting the ncr, wait for settle_time before ** clearing it. Then command processing will be resumed. */ if (np->settle_time) { if (np->settle_time <= thistime) { if (bootverbose > 1) printk("%s: command processing resumed\n", ncr_name(np)); np->settle_time = 0; np->disc = 1; requeue_waiting_list(np); } return; } /* ** Since the generic scsi driver only allows us 0.5 second ** to perform abort of a command, we must look at ccbs about ** every 0.25 second. */ if (np->lasttime + 4*HZ < thistime) { /* ** block ncr interrupts */ np->lasttime = thistime; } #ifdef SCSI_NCR_BROKEN_INTR if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Process pending interrupts. */ if (DEBUG_FLAGS & DEBUG_TINY) printk ("{"); ncr_exception (np); if (DEBUG_FLAGS & DEBUG_TINY) printk ("}"); } #endif /* SCSI_NCR_BROKEN_INTR */ } /*========================================================== ** ** log message for real hard errors ** ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." ** ** exception register: ** ds: dstat ** si: sist ** ** SCSI bus lines: ** so: control lines as driver by NCR. ** si: control lines as seen by NCR. ** sd: scsi data lines as seen by NCR. ** ** wide/fastmode: ** sxfer: (see the manual) ** scntl3: (see the manual) ** ** current script command: ** dsp: script address (relative to start of script). ** dbc: first word of script command. 
** ** First 16 register of the chip: ** r0..rf ** **========================================================== */ static void ncr_log_hard_error(struct ncb *np, u16 sist, u_char dstat) { u32 dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { script_ofs = dsp - np->p_script; script_size = sizeof(struct script); script_base = (u_char *) np->script0; script_name = "script"; } else if (np->p_scripth < dsp && dsp <= np->p_scripth + sizeof(struct scripth)) { script_ofs = dsp - np->p_scripth; script_size = sizeof(struct scripth); script_base = (u_char *) np->scripth0; script_name = "scripth"; } else { script_ofs = dsp; script_size = 0; script_base = NULL; script_name = "mem"; } printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", ncr_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { printk ("%s: script cmd = %08x\n", ncr_name(np), scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs))); } printk ("%s: regdump:", ncr_name(np)); for (i=0; i<16;i++) printk (" %02x", (unsigned)INB_OFF(i)); printk (".\n"); } /*============================================================ ** ** ncr chip exception handler. ** **============================================================ ** ** In normal cases, interrupt conditions occur one at a ** time. The ncr is able to stack in some extra registers ** other interrupts that will occur after the first one. ** But, several interrupts may occur at the same time. ** ** We probably should only try to deal with the normal ** case, but it seems that multiple interrupts occur in ** some cases that are not abnormal at all. ** ** The most frequent interrupt condition is Phase Mismatch. ** We should want to service this interrupt quickly. ** A SCSI parity error may be delivered at the same time. ** The SIR interrupt is not very frequent in this driver, ** since the INTFLY is likely used for command completion ** signaling. ** The Selection Timeout interrupt may be triggered with ** IID and/or UDC. ** The SBMC interrupt (SCSI Bus Mode Change) may probably ** occur at any time. ** ** This handler try to deal as cleverly as possible with all ** the above. ** **============================================================ */ void ncr_exception (struct ncb *np) { u_char istat, dstat; u16 sist; int i; /* ** interrupt on the fly ? ** Since the global header may be copied back to a CCB ** using a posted PCI memory write, the last operation on ** the istat register is a READ in order to flush posted ** PCI write commands. */ istat = INB (nc_istat); if (istat & INTF) { OUTB (nc_istat, (istat & SIGP) | INTF); istat = INB (nc_istat); if (DEBUG_FLAGS & DEBUG_TINY) printk ("F "); ncr_wakeup_done (np); } if (!(istat & (SIP|DIP))) return; if (istat & CABRT) OUTB (nc_istat, CABRT); /* ** Steinbach's Guideline for Systems Programming: ** Never test for an error condition you don't know how to handle. */ sist = (istat & SIP) ? INW (nc_sist) : 0; dstat = (istat & DIP) ? 
INB (nc_dstat) : 0; if (DEBUG_FLAGS & DEBUG_TINY) printk ("<%d|%x:%x|%x:%x>", (int)INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); /*======================================================== ** First, interrupts we want to service cleanly. ** ** Phase mismatch is the most frequent interrupt, and ** so we have to service it as quickly and as cleanly ** as possible. ** Programmed interrupts are rarely used in this driver, ** but we must handle them cleanly anyway. ** We try to deal with PAR and SBMC combined with ** some other interrupt(s). **========================================================= */ if (!(sist & (STO|GEN|HTH|SGE|UDC|RST)) && !(dstat & (MDPE|BF|ABRT|IID))) { if ((sist & SBMC) && ncr_int_sbmc (np)) return; if ((sist & PAR) && ncr_int_par (np)) return; if (sist & MA) { ncr_int_ma (np); return; } if (dstat & SIR) { ncr_int_sir (np); return; } /* ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 2. */ if (!(sist & (SBMC|PAR)) && !(dstat & SSI)) { printk( "%s: unknown interrupt(s) ignored, " "ISTAT=%x DSTAT=%x SIST=%x\n", ncr_name(np), istat, dstat, sist); return; } OUTONB_STD (); return; } /*======================================================== ** Now, interrupts that need some fixing up. ** Order and multiple interrupts is so less important. ** ** If SRST has been asserted, we just reset the chip. ** ** Selection is intirely handled by the chip. If the ** chip says STO, we trust it. Seems some other ** interrupts may occur at the same time (UDC, IID), so ** we ignore them. In any case we do enough fix-up ** in the service routine. ** We just exclude some fatal dma errors. **========================================================= */ if (sist & RST) { ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET); return; } if ((sist & STO) && !(dstat & (MDPE|BF|ABRT))) { /* ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 1. */ OUTONB (nc_ctest3, CLF); ncr_int_sto (np); return; } /*========================================================= ** Now, interrupts we are not able to recover cleanly. ** (At least for the moment). ** ** Do the register dump. ** Log message for real hard errors. ** Clear all fifos. ** For MDPE, BF, ABORT, IID, SGE and HTH we reset the ** BUS and the chip. ** We are more soft for UDC. **========================================================= */ if (time_after(jiffies, np->regtime)) { np->regtime = jiffies + 10*HZ; for (i = 0; i<sizeof(np->regdump); i++) ((char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; np->regdump.nc_sist = sist; } ncr_log_hard_error(np, sist, dstat); printk ("%s: have to clear fifos.\n", ncr_name (np)); OUTB (nc_stest3, TE|CSF); OUTONB (nc_ctest3, CLF); if ((sist & (SGE)) || (dstat & (MDPE|BF|ABRT|IID))) { ncr_start_reset(np); return; } if (sist & HTH) { printk ("%s: handshake timeout\n", ncr_name(np)); ncr_start_reset(np); return; } if (sist & UDC) { printk ("%s: unexpected disconnect\n", ncr_name(np)); OUTB (HS_PRT, HS_UNEXPECTED); OUTL_DSP (NCB_SCRIPT_PHYS (np, cleanup)); return; } /*========================================================= ** We just miss the cause of the interrupt. :( ** Print a message. The timeout will do the real work. 
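	**	(ncr_timeout() runs every SCSI_NCR_TIMER_INTERVAL: it clears
	**	settle_time once a pending reset has settled and, when built
	**	with SCSI_NCR_BROKEN_INTR, re-polls ISTAT and re-enters
	**	ncr_exception(), so a lost condition is eventually serviced.)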
**========================================================= */ printk ("%s: unknown interrupt\n", ncr_name(np)); } /*========================================================== ** ** ncr chip exception handler for selection timeout ** **========================================================== ** ** There seems to be a bug in the 53c810. ** Although a STO-Interrupt is pending, ** it continues executing script commands. ** But it will fail and interrupt (IID) on ** the next instruction where it's looking ** for a valid phase. ** **---------------------------------------------------------- */ void ncr_int_sto (struct ncb *np) { u_long dsa; struct ccb *cp; if (DEBUG_FLAGS & DEBUG_TINY) printk ("T"); /* ** look for ccb and set the status. */ dsa = INL (nc_dsa); cp = np->ccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_ccb; if (cp) { cp-> host_status = HS_SEL_TIMEOUT; ncr_complete (np, cp); } /* ** repair start queue and jump to start point. */ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sto_restart)); return; } /*========================================================== ** ** ncr chip exception handler for SCSI bus mode change ** **========================================================== ** ** spi2-r12 11.2.3 says a transceiver mode change must ** generate a reset event and a device that detects a reset ** event shall initiate a hard reset. It says also that a ** device that detects a mode change shall set data transfer ** mode to eight bit asynchronous, etc... ** So, just resetting should be enough. ** ** **---------------------------------------------------------- */ static int ncr_int_sbmc (struct ncb *np) { u_char scsi_mode = INB (nc_stest4) & SMODE; if (scsi_mode != np->scsi_mode) { printk("%s: SCSI bus mode change from %x to %x.\n", ncr_name(np), np->scsi_mode, scsi_mode); np->scsi_mode = scsi_mode; /* ** Suspend command processing for 1 second and ** reinitialize all except the chip. */ np->settle_time = jiffies + HZ; ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET); return 1; } return 0; } /*========================================================== ** ** ncr chip exception handler for SCSI parity error. ** **========================================================== ** ** **---------------------------------------------------------- */ static int ncr_int_par (struct ncb *np) { u_char hsts = INB (HS_PRT); u32 dbc = INL (nc_dbc); u_char sstat1 = INB (nc_sstat1); int phase = -1; int msg = -1; u32 jmp; printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SSTAT1=%x\n", ncr_name(np), hsts, dbc, sstat1); /* * Ignore the interrupt if the NCR is not connected * to the SCSI bus, since the right work should have * been done on unexpected disconnection handling. */ if (!(INB (nc_scntl1) & ISCON)) return 0; /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (hsts & HS_INVALMASK) goto reset_all; /* * If the SCSI parity error occurs in MSG IN phase, prepare a * MSG PARITY message. Otherwise, prepare a INITIATOR DETECTED * ERROR message and let the device decide to retry the command * or to terminate with check condition. If we were in MSG IN * phase waiting for the response of a negotiation, we will * get SIR_NEGO_FAILED at dispatch. 
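	 * (Phase decoding below: when the two top bits of DBC are clear,
	 * the interrupted SCRIPTS instruction was a block move and bits
	 * 26..24 give the SCSI phase of that move - 7 is MSG IN, 1 is
	 * DATA IN, which selects the par_err_data_in recovery script.)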
*/ if (!(dbc & 0xc0000000)) phase = (dbc >> 24) & 7; if (phase == 7) msg = MSG_PARITY_ERROR; else msg = INITIATOR_ERROR; /* * If the NCR stopped on a MOVE ^ DATA_IN, we jump to a * script that will ignore all data in bytes until phase * change, since we are not sure the chip will wait the phase * change prior to delivering the interrupt. */ if (phase == 1) jmp = NCB_SCRIPTH_PHYS (np, par_err_data_in); else jmp = NCB_SCRIPTH_PHYS (np, par_err_other); OUTONB (nc_ctest3, CLF ); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ np->msgout[0] = msg; OUTL_DSP (jmp); return 1; reset_all: ncr_start_reset(np); return 1; } /*========================================================== ** ** ** ncr chip exception handler for phase errors. ** ** **========================================================== ** ** We have to construct a new transfer descriptor, ** to transfer the rest of the current block. ** **---------------------------------------------------------- */ static void ncr_int_ma (struct ncb *np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 newtmp; u32 *vdsp; u32 oadr, olen; u32 *tblp; ncrcmd *newcmd; u_char cmd, sbcl; struct ccb *cp; dsp = INL (nc_dsp); dbc = INL (nc_dbc); sbcl = INB (nc_sbcl); cmd = dbc >> 24; rest = dbc & 0xffffff; /* ** Take into account dma fifo and various buffers and latches, ** only if the interrupted phase is an OUTPUT phase. */ if ((cmd & 1) == 0) { u_char ctest5, ss0, ss2; u16 delta; ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; if (ctest5 & DFS) delta=(((ctest5 << 8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; else delta=(INB (nc_dfifo) - rest) & 0x7f; /* ** The data in the dma fifo has not been transferred to ** the target -> add the amount to the rest ** and clear the data. ** Check the sstat2 register in case of wide transfer. */ rest += delta; ss0 = INB (nc_sstat0); if (ss0 & OLF) rest++; if (ss0 & ORF) rest++; if (INB(nc_scntl3) & EWS) { ss2 = INB (nc_sstat2); if (ss2 & OLF1) rest++; if (ss2 & ORF1) rest++; } if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printk ("P%x%x RL=%d D=%d SS0=%x ", cmd&7, sbcl&7, (unsigned) rest, (unsigned) delta, ss0); } else { if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printk ("P%x%x RL=%d ", cmd&7, sbcl&7, rest); } /* ** Clear fifos. */ OUTONB (nc_ctest3, CLF ); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* ** locate matching cp. ** if the interrupted phase is DATA IN or DATA OUT, ** trust the global header. */ dsa = INL (nc_dsa); if (!(cmd & 6)) { cp = np->header.cp; if (CCB_PHYS(cp, phys) != dsa) cp = NULL; } else { cp = np->ccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_ccb; } /* ** try to find the interrupted script command, ** and the address at which to continue. */ vdsp = NULL; nxtdsp = 0; if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { vdsp = (u32 *)((char*)np->script0 + (dsp-np->p_script-8)); nxtdsp = dsp; } else if (dsp > np->p_scripth && dsp <= np->p_scripth + sizeof(struct scripth)) { vdsp = (u32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8)); nxtdsp = dsp; } else if (cp) { if (dsp == CCB_PHYS (cp, patch[2])) { vdsp = &cp->patch[0]; nxtdsp = scr_to_cpu(vdsp[3]); } else if (dsp == CCB_PHYS (cp, patch[6])) { vdsp = &cp->patch[4]; nxtdsp = scr_to_cpu(vdsp[3]); } } /* ** log the information */ if (DEBUG_FLAGS & DEBUG_PHASE) { printk ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, np->header.cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); } /* ** cp=0 means that the DSA does not point to a valid control ** block. 
This should not happen since we donnot use multi-byte ** move while we are being reselected ot after command complete. ** We are not able to recover from such a phase error. */ if (!cp) { printk ("%s: SCSI phase error fixup: " "CCB already dequeued (0x%08lx)\n", ncr_name (np), (u_long) np->header.cp); goto reset_all; } /* ** get old startaddress and old length. */ oadr = scr_to_cpu(vdsp[1]); if (cmd & 0x10) { /* Table indirect */ tblp = (u32 *) ((char*) &cp->phys + oadr); olen = scr_to_cpu(tblp[0]); oadr = scr_to_cpu(tblp[1]); } else { tblp = (u32 *) 0; olen = scr_to_cpu(vdsp[0]) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", (unsigned) (scr_to_cpu(vdsp[0]) >> 24), tblp, (unsigned) olen, (unsigned) oadr); } /* ** check cmd against assumed interrupted script command. */ if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) { PRINT_ADDR(cp->cmd, "internal error: cmd=%02x != %02x=(vdsp[0] " ">> 24)\n", cmd, scr_to_cpu(vdsp[0]) >> 24); goto reset_all; } /* ** cp != np->header.cp means that the header of the CCB ** currently being processed has not yet been copied to ** the global header area. That may happen if the device did ** not accept all our messages after having been selected. */ if (cp != np->header.cp) { printk ("%s: SCSI phase error fixup: " "CCB address mismatch (0x%08lx != 0x%08lx)\n", ncr_name (np), (u_long) cp, (u_long) np->header.cp); } /* ** if old phase not dataphase, leave here. */ if (cmd & 0x06) { PRINT_ADDR(cp->cmd, "phase change %x-%x %d@%08x resid=%d.\n", cmd&7, sbcl&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); goto unexpected_phase; } /* ** choose the correct patch area. ** if savep points to one, choose the other. */ newcmd = cp->patch; newtmp = CCB_PHYS (cp, patch); if (newtmp == scr_to_cpu(cp->phys.header.savep)) { newcmd = &cp->patch[4]; newtmp = CCB_PHYS (cp, patch[4]); } /* ** fillin the commands */ newcmd[0] = cpu_to_scr(((cmd & 0x0f) << 24) | rest); newcmd[1] = cpu_to_scr(oadr + olen - rest); newcmd[2] = cpu_to_scr(SCR_JUMP); newcmd[3] = cpu_to_scr(nxtdsp); if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp->cmd, "newcmd[%d] %x %x %x %x.\n", (int) (newcmd - cp->patch), (unsigned)scr_to_cpu(newcmd[0]), (unsigned)scr_to_cpu(newcmd[1]), (unsigned)scr_to_cpu(newcmd[2]), (unsigned)scr_to_cpu(newcmd[3])); } /* ** fake the return address (to the patch). ** and restart script processor at dispatcher. */ OUTL (nc_temp, newtmp); OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch)); return; /* ** Unexpected phase changes that occurs when the current phase ** is not a DATA IN or DATA OUT phase are due to error conditions. ** Such event may only happen when the SCRIPTS is using a ** multibyte SCSI MOVE. ** ** Phase change Some possible cause ** ** COMMAND --> MSG IN SCSI parity error detected by target. ** COMMAND --> STATUS Bad command or refused by target. ** MSG OUT --> MSG IN Message rejected by target. ** MSG OUT --> COMMAND Bogus target that discards extended ** negotiation messages. ** ** The code below does not care of the new phase and so ** trusts the target. Why to annoy it ? ** If the interrupted phase is COMMAND phase, we restart at ** dispatcher. ** If a target does not get all the messages after selection, ** the code assumes blindly that the target discards extended ** messages and clears the negotiation status. ** If the target does not want all our response to negotiation, ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids ** bloat for such a should_not_happen situation). ** In all other situation, we reset the BUS. 
** Are these assumptions reasonable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; nxtdsp = 0; switch (cmd & 7) { case 2: /* COMMAND phase */ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch); break; #if 0 case 3: /* STATUS phase */ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch); break; #endif case 6: /* MSG OUT phase */ np->scripth->nxtdsp_go_on[0] = cpu_to_scr(dsp + 8); if (dsp == NCB_SCRIPT_PHYS (np, send_ident)) { cp->host_status = HS_BUSY; nxtdsp = NCB_SCRIPTH_PHYS (np, clratn_go_on); } else if (dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) || dsp == NCB_SCRIPTH_PHYS (np, send_sdtr)) { nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase); } break; #if 0 case 7: /* MSG IN phase */ nxtdsp = NCB_SCRIPT_PHYS (np, clrack); break; #endif } if (nxtdsp) { OUTL_DSP (nxtdsp); return; } reset_all: ncr_start_reset(np); } static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp) { struct scsi_cmnd *cmd = cp->cmd; struct tcb *tp = &np->target[cmd->device->id]; struct lcb *lp = tp->lp[cmd->device->lun]; struct list_head *qp; struct ccb * cp2; int disc_cnt = 0; int busy_cnt = 0; u32 startp; u_char s_status = INB (SS_PRT); /* ** Let the SCRIPTS processor skip all not yet started CCBs, ** and count disconnected CCBs. Since the busy queue is in ** the same order as the chip start queue, disconnected CCBs ** are before cp and busy ones after. */ if (lp) { qp = lp->busy_ccbq.prev; while (qp != &lp->busy_ccbq) { cp2 = list_entry(qp, struct ccb, link_ccbq); qp = qp->prev; ++busy_cnt; if (cp2 == cp) break; cp2->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, skip)); } lp->held_ccb = cp; /* Requeue when this one completes */ disc_cnt = lp->queuedccbs - busy_cnt; } switch(s_status) { default: /* Just for safety, should never happen */ case SAM_STAT_TASK_SET_FULL: /* ** Decrease number of tags to the number of ** disconnected commands. */ if (!lp) goto out; if (bootverbose >= 1) { PRINT_ADDR(cmd, "QUEUE FULL! %d busy, %d disconnected " "CCBs\n", busy_cnt, disc_cnt); } if (disc_cnt < lp->numtags) { lp->numtags = disc_cnt > 2 ? disc_cnt : 2; lp->num_good = 0; ncr_setup_tags (np, cmd->device); } /* ** Requeue the command to the start queue. ** If any disconnected commands, ** Clear SIGP. ** Jump to reselect. */ cp->phys.header.savep = cp->startp; cp->host_status = HS_BUSY; cp->scsi_status = SAM_STAT_ILLEGAL; ncr_put_start_queue(np, cp); if (disc_cnt) INB (nc_ctest2); /* Clear SIGP */ OUTL_DSP (NCB_SCRIPT_PHYS (np, reselect)); return; case SAM_STAT_COMMAND_TERMINATED: case SAM_STAT_CHECK_CONDITION: /* ** If we were requesting sense, give up. */ if (cp->auto_sense) goto out; /* ** Device returned CHECK CONDITION status. ** Prepare all needed data strutures for getting ** sense data. ** ** identify message */ cp->scsi_smsg2[0] = IDENTIFY(0, cmd->device->lun); cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2)); cp->phys.smsg.size = cpu_to_scr(1); /* ** sense command */ cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd)); cp->phys.cmd.size = cpu_to_scr(6); /* ** patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = (cmd->device->lun & 0x7) << 5; cp->sensecmd[4] = sizeof(cp->sense_buf); /* ** sense data */ memset(cp->sense_buf, 0, sizeof(cp->sense_buf)); cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0])); cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf)); /* ** requeue the command. 
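		**	(The CCB is re-armed rather than completed: its data
		**	pointers are aimed at the sdata_in fragment, host/scsi
		**	status go back to HS_BUSY/ILLEGAL, auto_sense records the
		**	original status, and the CCB is queued again through the
		**	select script so that only the sense transfer runs.)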
*/ startp = cpu_to_scr(NCB_SCRIPTH_PHYS (np, sdata_in)); cp->phys.header.savep = startp; cp->phys.header.goalp = startp + 24; cp->phys.header.lastp = startp; cp->phys.header.wgoalp = startp + 24; cp->phys.header.wlastp = startp; cp->host_status = HS_BUSY; cp->scsi_status = SAM_STAT_ILLEGAL; cp->auto_sense = s_status; cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); /* ** Select without ATN for quirky devices. */ if (cmd->device->select_no_atn) cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, select_no_atn)); ncr_put_start_queue(np, cp); OUTL_DSP (NCB_SCRIPT_PHYS (np, start)); return; } out: OUTONB_STD (); return; } /*========================================================== ** ** ** ncr chip exception handler for programmed interrupts. ** ** **========================================================== */ void ncr_int_sir (struct ncb *np) { u_char scntl3; u_char chg, ofs, per, fak, wide; u_char num = INB (nc_dsps); struct ccb *cp=NULL; u_long dsa = INL (nc_dsa); u_char target = INB (nc_sdid) & 0x0f; struct tcb *tp = &np->target[target]; struct scsi_target *starget = tp->starget; if (DEBUG_FLAGS & DEBUG_TINY) printk ("I#%d", num); switch (num) { case SIR_INTFLY: /* ** This is used for HP Zalon/53c720 where INTFLY ** operation is currently broken. */ ncr_wakeup_done(np); #ifdef SCSI_NCR_CCB_DONE_SUPPORT OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, done_end) + 8); #else OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, start)); #endif return; case SIR_RESEL_NO_MSG_IN: case SIR_RESEL_NO_IDENTIFY: /* ** If devices reselecting without sending an IDENTIFY ** message still exist, this should help. ** We just assume lun=0, 1 CCB, no tag. */ if (tp->lp[0]) { OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0])); return; } fallthrough; case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */ case SIR_RESEL_BAD_I_T_L: /* Will send an ABORT message */ printk ("%s:%d: SIR %d, " "incorrect nexus identification on reselection\n", ncr_name (np), target, num); goto out; case SIR_DONE_OVERFLOW: printk ("%s:%d: SIR %d, " "CCB done queue overflow\n", ncr_name (np), target, num); goto out; case SIR_BAD_STATUS: cp = np->header.cp; if (!cp || CCB_PHYS (cp, phys) != dsa) goto out; ncr_sir_to_redo(np, num, cp); return; default: /* ** lookup the ccb */ cp = np->ccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_ccb; BUG_ON(!cp); BUG_ON(cp != np->header.cp); if (!cp || cp != np->header.cp) goto out; } switch (num) { /*----------------------------------------------------------------------------- ** ** Was Sie schon immer ueber transfermode negotiation wissen wollten ... ** ("Everything you've always wanted to know about transfer mode ** negotiation") ** ** We try to negotiate sync and wide transfer only after ** a successful inquire command. We look at byte 7 of the ** inquire data to determine the capabilities of the target. ** ** When we try to negotiate, we append the negotiation message ** to the identify and (maybe) simple tag message. ** The host status field is set to HS_NEGOTIATE to mark this ** situation. ** ** If the target doesn't answer this message immediately ** (as required by the standard), the SIR_NEGO_FAIL interrupt ** will be raised eventually. ** The handler removes the HS_NEGOTIATE status, and sets the ** negotiated value to the default (async / nowide). ** ** If we receive a matching answer immediately, we check it ** for validity, and set the values. 
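**	("Set the values" means the per-target sval/wval pair plus the
**	spi_period/spi_offset/spi_width transport attributes, applied
**	through ncr_setsync() and ncr_setwide().)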
** ** If we receive a Reject message immediately, we assume the ** negotiation has failed, and fall back to standard values. ** ** If we receive a negotiation message while not in HS_NEGOTIATE ** state, it's a target initiated negotiation. We prepare a ** (hopefully) valid answer, set our parameters, and send back ** this answer to the target. ** ** If the target doesn't fetch the answer (no message out phase), ** we assume the negotiation has failed, and fall back to default ** settings. ** ** When we set the values, we adjust them in all ccbs belonging ** to this target, in the controller's register, and in the "phys" ** field of the controller's struct ncb. ** ** Possible cases: hs sir msg_in value send goto ** We try to negotiate: ** -> target doesn't msgin NEG FAIL noop defa. - dispatch ** -> target rejected our msg NEG FAIL reject defa. - dispatch ** -> target answered (ok) NEG SYNC sdtr set - clrack ** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad ** -> target answered (ok) NEG WIDE wdtr set - clrack ** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad ** -> any other msgin NEG FAIL noop defa. - dispatch ** ** Target tries to negotiate: ** -> incoming message --- SYNC sdtr set SDTR - ** -> incoming message --- WIDE wdtr set WDTR - ** We sent our answer: ** -> target doesn't msgout --- PROTO ? defa. - dispatch ** **----------------------------------------------------------------------------- */ case SIR_NEGO_FAILED: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't send an answer message, ** or target rejected our message. ** ** Remove negotiation request. ** **------------------------------------------------------- */ OUTB (HS_PRT, HS_BUSY); fallthrough; case SIR_NEGO_PROTO: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't fetch the answer message. ** **------------------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->cmd, "negotiation failed sir=%x " "status=%x.\n", num, cp->nego_status); } /* ** any error in negotiation: ** fall back to default mode. */ switch (cp->nego_status) { case NS_SYNC: spi_period(starget) = 0; spi_offset(starget) = 0; ncr_setsync (np, cp, 0, 0xe0); break; case NS_WIDE: spi_width(starget) = 0; ncr_setwide (np, cp, 0, 0); break; } np->msgin [0] = NOP; np->msgout[0] = NOP; cp->nego_status = 0; break; case SIR_NEGO_SYNC: if (DEBUG_FLAGS & DEBUG_NEGO) { ncr_print_msg(cp, "sync msgin", np->msgin); } chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; if (ofs==0) per=255; /* ** if target sends SDTR message, ** it CAN transfer synch. */ if (ofs && starget) spi_support_sync(starget) = 1; /* ** check values against driver limits. */ if (per < np->minsync) {chg = 1; per = np->minsync;} if (per < tp->minsync) {chg = 1; per = tp->minsync;} if (ofs > tp->maxoffs) {chg = 1; ofs = tp->maxoffs;} /* ** Check against controller limits. */ fak = 7; scntl3 = 0; if (ofs != 0) { ncr_getsync(np, per, &fak, &scntl3); if (fak > 7) { chg = 1; ofs = 0; } } if (ofs == 0) { fak = 7; per = 0; scntl3 = 0; tp->minsync = 0; } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->cmd, "sync: per=%d scntl3=0x%x ofs=%d " "fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_SYNC: /* This was an answer message */ if (chg) { /* Answer wasn't acceptable. 
*/ spi_period(starget) = 0; spi_offset(starget) = 0; ncr_setsync(np, cp, 0, 0xe0); OUTL_DSP(NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* Answer is ok. */ spi_period(starget) = per; spi_offset(starget) = ofs; ncr_setsync(np, cp, scntl3, (fak<<5)|ofs); OUTL_DSP(NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_WIDE: spi_width(starget) = 0; ncr_setwide(np, cp, 0, 0); break; } } /* ** It was a request. Set value and ** prepare an answer message */ spi_period(starget) = per; spi_offset(starget) = ofs; ncr_setsync(np, cp, scntl3, (fak<<5)|ofs); spi_populate_sync_msg(np->msgout, per, ofs); cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { ncr_print_msg(cp, "sync msgout", np->msgout); } if (!ofs) { OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad)); return; } np->msgin [0] = NOP; break; case SIR_NEGO_WIDE: /* ** Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { ncr_print_msg(cp, "wide msgin", np->msgin); } /* ** get requested values. */ chg = 0; wide = np->msgin[3]; /* ** if target sends WDTR message, ** it CAN transfer wide. */ if (wide && starget) spi_support_wide(starget) = 1; /* ** check values against driver limits. */ if (wide > tp->usrwide) {chg = 1; wide = tp->usrwide;} if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->cmd, "wide: wide=%d chg=%d.\n", wide, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_WIDE: /* ** This was an answer message */ if (chg) { /* Answer wasn't acceptable. */ spi_width(starget) = 0; ncr_setwide(np, cp, 0, 1); OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* Answer is ok. */ spi_width(starget) = wide; ncr_setwide(np, cp, wide, 1); OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_SYNC: spi_period(starget) = 0; spi_offset(starget) = 0; ncr_setsync(np, cp, 0, 0xe0); break; } } /* ** It was a request, set value and ** prepare an answer message */ spi_width(starget) = wide; ncr_setwide(np, cp, wide, 1); spi_populate_width_msg(np->msgout, wide); np->msgin [0] = NOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { ncr_print_msg(cp, "wide msgout", np->msgin); } break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_REJECT_RECEIVED: /*----------------------------------------------- ** ** We received a MESSAGE_REJECT. ** **----------------------------------------------- */ PRINT_ADDR(cp->cmd, "MESSAGE_REJECT received (%x:%x).\n", (unsigned)scr_to_cpu(np->lastmsg), np->msgout[0]); break; case SIR_REJECT_SENT: /*----------------------------------------------- ** ** We received an unknown message ** **----------------------------------------------- */ ncr_print_msg(cp, "MESSAGE_REJECT sent for", np->msgin); break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_IGN_RESIDUE: /*----------------------------------------------- ** ** We received an IGNORE RESIDUE message, ** which couldn't be handled by the script. ** **----------------------------------------------- */ PRINT_ADDR(cp->cmd, "IGNORE_WIDE_RESIDUE received, but not yet " "implemented.\n"); break; #if 0 case SIR_MISSING_SAVE: /*----------------------------------------------- ** ** We received an DISCONNECT message, ** but the datapointer wasn't saved before. 
** **----------------------------------------------- */ PRINT_ADDR(cp->cmd, "DISCONNECT received, but datapointer " "not saved: data=%x save=%x goal=%x.\n", (unsigned) INL (nc_temp), (unsigned) scr_to_cpu(np->header.savep), (unsigned) scr_to_cpu(np->header.goalp)); break; #endif } out: OUTONB_STD (); } /*========================================================== ** ** ** Acquire a control block ** ** **========================================================== */ static struct ccb *ncr_get_ccb(struct ncb *np, struct scsi_cmnd *cmd) { u_char tn = cmd->device->id; u_char ln = cmd->device->lun; struct tcb *tp = &np->target[tn]; struct lcb *lp = tp->lp[ln]; u_char tag = NO_TAG; struct ccb *cp = NULL; /* ** Lun structure available ? */ if (lp) { struct list_head *qp; /* ** Keep from using more tags than we can handle. */ if (lp->usetags && lp->busyccbs >= lp->maxnxs) return NULL; /* ** Allocate a new CCB if needed. */ if (list_empty(&lp->free_ccbq)) ncr_alloc_ccb(np, tn, ln); /* ** Look for free CCB */ qp = ncr_list_pop(&lp->free_ccbq); if (qp) { cp = list_entry(qp, struct ccb, link_ccbq); if (cp->magic) { PRINT_ADDR(cmd, "ccb free list corrupted " "(@%p)\n", cp); cp = NULL; } else { list_add_tail(qp, &lp->wait_ccbq); ++lp->busyccbs; } } /* ** If a CCB is available, ** Get a tag for this nexus if required. */ if (cp) { if (lp->usetags) tag = lp->cb_tags[lp->ia_tag]; } else if (lp->actccbs > 0) return NULL; } /* ** if nothing available, take the default. */ if (!cp) cp = np->ccb; /* ** Wait until available. */ #if 0 while (cp->magic) { if (flags & SCSI_NOSLEEP) break; if (tsleep ((caddr_t)cp, PRIBIO|PCATCH, "ncr", 0)) break; } #endif if (cp->magic) return NULL; cp->magic = 1; /* ** Move to next available tag if tag used. */ if (lp) { if (tag != NO_TAG) { ++lp->ia_tag; if (lp->ia_tag == MAX_TAGS) lp->ia_tag = 0; lp->tags_umap |= (((tagmap_t) 1) << tag); } } /* ** Remember all informations needed to free this CCB. */ cp->tag = tag; cp->target = tn; cp->lun = ln; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(cmd, "ccb @%p using tag %d.\n", cp, tag); } return cp; } /*========================================================== ** ** ** Release one control block ** ** **========================================================== */ static void ncr_free_ccb (struct ncb *np, struct ccb *cp) { struct tcb *tp = &np->target[cp->target]; struct lcb *lp = tp->lp[cp->lun]; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(cp->cmd, "ccb @%p freeing tag %d.\n", cp, cp->tag); } /* ** If lun control block available, ** decrement active commands and increment credit, ** free the tag if any and remove the JUMP for reselect. */ if (lp) { if (cp->tag != NO_TAG) { lp->cb_tags[lp->if_tag++] = cp->tag; if (lp->if_tag == MAX_TAGS) lp->if_tag = 0; lp->tags_umap &= ~(((tagmap_t) 1) << cp->tag); lp->tags_smap &= lp->tags_umap; lp->jump_ccb[cp->tag] = cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l_q)); } else { lp->jump_ccb[0] = cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l)); } } /* ** Make this CCB available. */ if (lp) { if (cp != np->ccb) list_move(&cp->link_ccbq, &lp->free_ccbq); --lp->busyccbs; if (cp->queued) { --lp->queuedccbs; } } cp -> host_status = HS_IDLE; cp -> magic = 0; if (cp->queued) { --np->queuedccbs; cp->queued = 0; } #if 0 if (cp == np->ccb) wakeup ((caddr_t) cp); #endif } #define ncr_reg_bus_addr(r) (np->paddr + offsetof (struct ncr_reg, r)) /*------------------------------------------------------------------------ ** Initialize the fixed part of a CCB structure. 
**------------------------------------------------------------------------ **------------------------------------------------------------------------ */ static void ncr_init_ccb(struct ncb *np, struct ccb *cp) { ncrcmd copy_4 = np->features & FE_PFEN ? SCR_COPY(4) : SCR_COPY_F(4); /* ** Remember virtual and bus address of this ccb. */ cp->p_ccb = vtobus(cp); cp->phys.header.cp = cp; /* ** This allows list_del to work for the default ccb. */ INIT_LIST_HEAD(&cp->link_ccbq); /* ** Initialyze the start and restart launch script. ** ** COPY(4) @(...p_phys), @(dsa) ** JUMP @(sched_point) */ cp->start.setup_dsa[0] = cpu_to_scr(copy_4); cp->start.setup_dsa[1] = cpu_to_scr(CCB_PHYS(cp, start.p_phys)); cp->start.setup_dsa[2] = cpu_to_scr(ncr_reg_bus_addr(nc_dsa)); cp->start.schedule.l_cmd = cpu_to_scr(SCR_JUMP); cp->start.p_phys = cpu_to_scr(CCB_PHYS(cp, phys)); memcpy(&cp->restart, &cp->start, sizeof(cp->restart)); cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort)); } /*------------------------------------------------------------------------ ** Allocate a CCB and initialize its fixed part. **------------------------------------------------------------------------ **------------------------------------------------------------------------ */ static void ncr_alloc_ccb(struct ncb *np, u_char tn, u_char ln) { struct tcb *tp = &np->target[tn]; struct lcb *lp = tp->lp[ln]; struct ccb *cp = NULL; /* ** Allocate memory for this CCB. */ cp = m_calloc_dma(sizeof(struct ccb), "CCB"); if (!cp) return; /* ** Count it and initialyze it. */ lp->actccbs++; np->actccbs++; memset(cp, 0, sizeof (*cp)); ncr_init_ccb(np, cp); /* ** Chain into wakeup list and free ccb queue and take it ** into account for tagged commands. */ cp->link_ccb = np->ccb->link_ccb; np->ccb->link_ccb = cp; list_add(&cp->link_ccbq, &lp->free_ccbq); } /*========================================================== ** ** ** Allocation of resources for Targets/Luns/Tags. ** ** **========================================================== */ /*------------------------------------------------------------------------ ** Target control block initialisation. **------------------------------------------------------------------------ ** This data structure is fully initialized after a SCSI command ** has been successfully completed for this target. ** It contains a SCRIPT that is called on target reselection. **------------------------------------------------------------------------ */ static void ncr_init_tcb (struct ncb *np, u_char tn) { struct tcb *tp = &np->target[tn]; ncrcmd copy_1 = np->features & FE_PFEN ? SCR_COPY(1) : SCR_COPY_F(1); int th = tn & 3; int i; /* ** Jump to next tcb if SFBR does not match this target. ** JUMP IF (SFBR != #target#), @(next tcb) */ tp->jump_tcb.l_cmd = cpu_to_scr((SCR_JUMP ^ IFFALSE (DATA (0x80 + tn)))); tp->jump_tcb.l_paddr = np->jump_tcb[th].l_paddr; /* ** Load the synchronous transfer register. ** COPY @(tp->sval), @(sxfer) */ tp->getscr[0] = cpu_to_scr(copy_1); tp->getscr[1] = cpu_to_scr(vtobus (&tp->sval)); #ifdef SCSI_NCR_BIG_ENDIAN tp->getscr[2] = cpu_to_scr(ncr_reg_bus_addr(nc_sxfer) ^ 3); #else tp->getscr[2] = cpu_to_scr(ncr_reg_bus_addr(nc_sxfer)); #endif /* ** Load the timing register. 
** COPY @(tp->wval), @(scntl3) */ tp->getscr[3] = cpu_to_scr(copy_1); tp->getscr[4] = cpu_to_scr(vtobus (&tp->wval)); #ifdef SCSI_NCR_BIG_ENDIAN tp->getscr[5] = cpu_to_scr(ncr_reg_bus_addr(nc_scntl3) ^ 3); #else tp->getscr[5] = cpu_to_scr(ncr_reg_bus_addr(nc_scntl3)); #endif /* ** Get the IDENTIFY message and the lun. ** CALL @script(resel_lun) */ tp->call_lun.l_cmd = cpu_to_scr(SCR_CALL); tp->call_lun.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_lun)); /* ** Look for the lun control block of this nexus. ** For i = 0 to 3 ** JUMP ^ IFTRUE (MASK (i, 3)), @(next_lcb) */ for (i = 0 ; i < 4 ; i++) { tp->jump_lcb[i].l_cmd = cpu_to_scr((SCR_JUMP ^ IFTRUE (MASK (i, 3)))); tp->jump_lcb[i].l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_identify)); } /* ** Link this target control block to the JUMP chain. */ np->jump_tcb[th].l_paddr = cpu_to_scr(vtobus (&tp->jump_tcb)); /* ** These assert's should be moved at driver initialisations. */ #ifdef SCSI_NCR_BIG_ENDIAN BUG_ON(((offsetof(struct ncr_reg, nc_sxfer) ^ offsetof(struct tcb , sval )) &3) != 3); BUG_ON(((offsetof(struct ncr_reg, nc_scntl3) ^ offsetof(struct tcb , wval )) &3) != 3); #else BUG_ON(((offsetof(struct ncr_reg, nc_sxfer) ^ offsetof(struct tcb , sval )) &3) != 0); BUG_ON(((offsetof(struct ncr_reg, nc_scntl3) ^ offsetof(struct tcb , wval )) &3) != 0); #endif } /*------------------------------------------------------------------------ ** Lun control block allocation and initialization. **------------------------------------------------------------------------ ** This data structure is allocated and initialized after a SCSI ** command has been successfully completed for this target/lun. **------------------------------------------------------------------------ */ static struct lcb *ncr_alloc_lcb (struct ncb *np, u_char tn, u_char ln) { struct tcb *tp = &np->target[tn]; struct lcb *lp = tp->lp[ln]; ncrcmd copy_4 = np->features & FE_PFEN ? SCR_COPY(4) : SCR_COPY_F(4); int lh = ln & 3; /* ** Already done, return. */ if (lp) return lp; /* ** Allocate the lcb. */ lp = m_calloc_dma(sizeof(struct lcb), "LCB"); if (!lp) goto fail; memset(lp, 0, sizeof(*lp)); tp->lp[ln] = lp; /* ** Initialize the target control block if not yet. */ if (!tp->jump_tcb.l_cmd) ncr_init_tcb(np, tn); /* ** Initialize the CCB queue headers. */ INIT_LIST_HEAD(&lp->free_ccbq); INIT_LIST_HEAD(&lp->busy_ccbq); INIT_LIST_HEAD(&lp->wait_ccbq); INIT_LIST_HEAD(&lp->skip_ccbq); /* ** Set max CCBs to 1 and use the default 1 entry ** jump table by default. */ lp->maxnxs = 1; lp->jump_ccb = &lp->jump_ccb_0; lp->p_jump_ccb = cpu_to_scr(vtobus(lp->jump_ccb)); /* ** Initilialyze the reselect script: ** ** Jump to next lcb if SFBR does not match this lun. ** Load TEMP with the CCB direct jump table bus address. ** Get the SIMPLE TAG message and the tag. ** ** JUMP IF (SFBR != #lun#), @(next lcb) ** COPY @(lp->p_jump_ccb), @(temp) ** JUMP @script(resel_notag) */ lp->jump_lcb.l_cmd = cpu_to_scr((SCR_JUMP ^ IFFALSE (MASK (0x80+ln, 0xff)))); lp->jump_lcb.l_paddr = tp->jump_lcb[lh].l_paddr; lp->load_jump_ccb[0] = cpu_to_scr(copy_4); lp->load_jump_ccb[1] = cpu_to_scr(vtobus (&lp->p_jump_ccb)); lp->load_jump_ccb[2] = cpu_to_scr(ncr_reg_bus_addr(nc_temp)); lp->jump_tag.l_cmd = cpu_to_scr(SCR_JUMP); lp->jump_tag.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_notag)); /* ** Link this lun control block to the JUMP chain. */ tp->jump_lcb[lh].l_paddr = cpu_to_scr(vtobus (&lp->jump_lcb)); /* ** Initialize command queuing control. 
*/ lp->busyccbs = 1; lp->queuedccbs = 1; lp->queuedepth = 1; fail: return lp; } /*------------------------------------------------------------------------ ** Lun control block setup on INQUIRY data received. **------------------------------------------------------------------------ ** We only support WIDE, SYNC for targets and CMDQ for logical units. ** This setup is done on each INQUIRY since we are expecting user ** will play with CHANGE DEFINITION commands. :-) **------------------------------------------------------------------------ */ static struct lcb *ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev) { unsigned char tn = sdev->id, ln = sdev->lun; struct tcb *tp = &np->target[tn]; struct lcb *lp = tp->lp[ln]; /* If no lcb, try to allocate it. */ if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln))) goto fail; /* ** If unit supports tagged commands, allocate the ** CCB JUMP table if not yet. */ if (sdev->tagged_supported && lp->jump_ccb == &lp->jump_ccb_0) { int i; lp->jump_ccb = m_calloc_dma(256, "JUMP_CCB"); if (!lp->jump_ccb) { lp->jump_ccb = &lp->jump_ccb_0; goto fail; } lp->p_jump_ccb = cpu_to_scr(vtobus(lp->jump_ccb)); for (i = 0 ; i < 64 ; i++) lp->jump_ccb[i] = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q)); for (i = 0 ; i < MAX_TAGS ; i++) lp->cb_tags[i] = i; lp->maxnxs = MAX_TAGS; lp->tags_stime = jiffies + 3*HZ; ncr_setup_tags (np, sdev); } fail: return lp; } /*========================================================== ** ** ** Build Scatter Gather Block ** ** **========================================================== ** ** The transfer area may be scattered among ** several non adjacent physical pages. ** ** We may use MAX_SCATTER blocks. ** **---------------------------------------------------------- */ /* ** We try to reduce the number of interrupts caused ** by unexpected phase changes due to disconnects. ** A typical harddisk may disconnect before ANY block. ** If we wanted to avoid unexpected phase changes at all ** we had to use a break point every 512 bytes. ** Of course the number of scatter/gather blocks is ** limited. ** Under Linux, the scatter/gatter blocks are provided by ** the generic driver. We just have to copy addresses and ** sizes to the data segment array. */ static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd) { int segment = 0; int use_sg = scsi_sg_count(cmd); cp->data_len = 0; use_sg = map_scsi_sg_data(np, cmd); if (use_sg > 0) { struct scatterlist *sg; struct scr_tblmove *data; if (use_sg > MAX_SCATTER) { unmap_scsi_data(np, cmd); return -1; } data = &cp->phys.data[MAX_SCATTER - use_sg]; scsi_for_each_sg(cmd, sg, use_sg, segment) { dma_addr_t baddr = sg_dma_address(sg); unsigned int len = sg_dma_len(sg); ncr_build_sge(np, &data[segment], baddr, len); cp->data_len += len; } } else segment = -2; return segment; } /*========================================================== ** ** ** Test the bus snoop logic :-( ** ** Has to be called with interrupts disabled. ** ** **========================================================== */ static int __init ncr_regtest (struct ncb* np) { register volatile u32 data; /* ** ncr registers may NOT be cached. ** write 0xffffffff to a read only register area, ** and try to read it back. 
*/ data = 0xffffffff; OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data); data = INL_OFF(offsetof(struct ncr_reg, nc_dstat)); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return (0x10); } return (0); } static int __init ncr_snooptest (struct ncb* np) { u32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc; int i, err=0; if (np->reg) { err |= ncr_regtest (np); if (err) return (err); } /* init */ pc = NCB_SCRIPTH_PHYS (np, snooptest); host_wr = 1; ncr_wr = 2; /* ** Set memory and register. */ np->ncr_cache = cpu_to_scr(host_wr); OUTL (nc_temp, ncr_wr); /* ** Start script (exchange values) */ OUTL_DSP (pc); /* ** Wait 'til done (with timeout) */ for (i=0; i<NCR_SNOOP_TIMEOUT; i++) if (INB(nc_istat) & (INTF|SIP|DIP)) break; /* ** Save termination position. */ pc = INL (nc_dsp); /* ** Read memory and register. */ host_rd = scr_to_cpu(np->ncr_cache); ncr_rd = INL (nc_scratcha); ncr_bk = INL (nc_temp); /* ** Reset ncr chip */ ncr_chip_reset(np, 100); /* ** check for timeout */ if (i>=NCR_SNOOP_TIMEOUT) { printk ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* ** Check termination position. */ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) { printk ("CACHE TEST FAILED: script execution failed.\n"); printk ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc, (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8); return (0x40); } /* ** Show results. */ if (host_wr != ncr_rd) { printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n", (int) host_wr, (int) ncr_rd); err |= 1; } if (host_rd != ncr_wr) { printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n", (int) ncr_wr, (int) host_rd); err |= 2; } if (ncr_bk != ncr_wr) { printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n", (int) ncr_wr, (int) ncr_bk); err |= 4; } return (err); } /*========================================================== ** ** Determine the ncr's clock frequency. ** This is essential for the negotiation ** of the synchronous transfer rate. ** **========================================================== ** ** Note: we have to return the correct value. ** THERE IS NO SAFE DEFAULT VALUE. ** ** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. ** 53C860 and 53C875 rev. 1 support fast20 transfers but ** do not have a clock doubler and so are provided with a ** 80 MHz clock. All other fast20 boards incorporate a doubler ** and so should be delivered with a 40 MHz clock. ** The future fast40 chips (895/895) use a 40 Mhz base clock ** and provide a clock quadrupler (160 Mhz). The code below ** tries to deal as cleverly as possible with all this stuff. 
** **---------------------------------------------------------- */ /* * Select NCR SCSI clock frequency */ static void ncr_selectclock(struct ncb *np, u_char scntl3) { if (np->multiplier < 2) { OUTB(nc_scntl3, scntl3); return; } if (bootverbose >= 2) printk ("%s: enabling clock multiplier\n", ncr_name(np)); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */ int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) udelay(20); if (!i) printk("%s: the chip cannot lock the frequency\n", ncr_name(np)); } else /* Wait 20 micro-seconds for doubler */ udelay(20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate NCR SCSI clock frequency (in KHz) */ static unsigned __init ncrgetfreq (struct ncb *np, int gen) { unsigned ms = 0; char count = 0; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). */ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */ OUTW (nc_sien , 0); /* mask all scsi interrupts */ (void) INW (nc_sist); /* clear pending scsi interrupt */ OUTB (nc_dien , 0); /* mask all dma interrupts */ (void) INW (nc_sist); /* another one, just to be sure :) */ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB (nc_stime1, 0); /* disable general purpose timer */ OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ while (!(INW(nc_sist) & GEN) && ms++ < 100000) { for (count = 0; count < 10; count ++) udelay(100); /* count ms */ } OUTB (nc_stime1, 0); /* disable general purpose timer */ /* * set prescaler to divide by whatever 0 means * 0 ought to choose divide by 2, but appears * to set divide by 3.5 mode in my 53c810 ... */ OUTB (nc_scntl3, 0); if (bootverbose >= 2) printk ("%s: Delay (GEN=%d): %u msec\n", ncr_name(np), gen, ms); /* * adjust for prescaler, and convert into KHz */ return ms ? ((1 << gen) * 4340) / ms : 0; } /* * Get/probe NCR SCSI clock frequency */ static void __init ncr_getclock (struct ncb *np, int mult) { unsigned char scntl3 = INB(nc_scntl3); unsigned char stest1 = INB(nc_stest1); unsigned f1; np->multiplier = 1; f1 = 40000; /* ** True with 875 or 895 with clock multiplier selected */ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { if (bootverbose >= 2) printk ("%s: clock multiplier found\n", ncr_name(np)); np->multiplier = mult; } /* ** If multiplier not found or scntl3 not 7,5,3, ** reset chip and get frequency from general purpose timer. ** Otherwise trust scntl3 BIOS setting. 
*/ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { unsigned f2; ncr_chip_reset(np, 5); (void) ncrgetfreq (np, 11); /* throw away first result */ f1 = ncrgetfreq (np, 11); f2 = ncrgetfreq (np, 11); if(bootverbose) printk ("%s: NCR clock is %uKHz, %uKHz\n", ncr_name(np), f1, f2); if (f1 > f2) f1 = f2; /* trust lower result */ if (f1 < 45000) f1 = 40000; else if (f1 < 55000) f1 = 50000; else f1 = 80000; if (f1 < 80000 && mult > 1) { if (bootverbose >= 2) printk ("%s: clock multiplier assumed\n", ncr_name(np)); np->multiplier = mult; } } else { if ((scntl3 & 7) == 3) f1 = 40000; else if ((scntl3 & 7) == 5) f1 = 80000; else f1 = 160000; f1 /= np->multiplier; } /* ** Compute controller synchronous parameters. */ f1 *= np->multiplier; np->clock_khz = f1; } /*===================== LINUX ENTRY POINTS SECTION ==========================*/ static int ncr53c8xx_slave_alloc(struct scsi_device *device) { struct Scsi_Host *host = device->host; struct ncb *np = ((struct host_data *) host->hostdata)->ncb; struct tcb *tp = &np->target[device->id]; tp->starget = device->sdev_target; return 0; } static int ncr53c8xx_slave_configure(struct scsi_device *device) { struct Scsi_Host *host = device->host; struct ncb *np = ((struct host_data *) host->hostdata)->ncb; struct tcb *tp = &np->target[device->id]; struct lcb *lp = tp->lp[device->lun]; int numtags, depth_to_use; ncr_setup_lcb(np, device); /* ** Select queue depth from driver setup. ** Donnot use more than configured by user. ** Use at least 2. ** Donnot use more than our maximum. */ numtags = device_queue_depth(np->unit, device->id, device->lun); if (numtags > tp->usrtags) numtags = tp->usrtags; if (!device->tagged_supported) numtags = 1; depth_to_use = numtags; if (depth_to_use < 2) depth_to_use = 2; if (depth_to_use > MAX_TAGS) depth_to_use = MAX_TAGS; scsi_change_queue_depth(device, depth_to_use); /* ** Since the queue depth is not tunable under Linux, ** we need to know this value in order not to ** announce stupid things to user. ** ** XXX(hch): As of Linux 2.6 it certainly _is_ tunable.. ** In fact we just tuned it, or did I miss ** something important? 
:) */ if (lp) { lp->numtags = lp->maxtags = numtags; lp->scdev_depth = depth_to_use; } ncr_setup_tags (np, device); #ifdef DEBUG_NCR53C8XX printk("ncr53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n", np->unit, device->id, device->lun, depth_to_use); #endif if (spi_support_sync(device->sdev_target) && !spi_initial_dv(device->sdev_target)) spi_dv_device(device); return 0; } static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd) { struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd); void (*done)(struct scsi_cmnd *) = scsi_done; struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; unsigned long flags; int sts; #ifdef DEBUG_NCR53C8XX printk("ncr53c8xx_queue_command\n"); #endif cmd->host_scribble = NULL; cmd_priv->data_mapped = 0; cmd_priv->data_mapping = 0; spin_lock_irqsave(&np->smp_lock, flags); if ((sts = ncr_queue_command(np, cmd)) != DID_OK) { set_host_byte(cmd, sts); #ifdef DEBUG_NCR53C8XX printk("ncr53c8xx : command not queued - result=%d\n", sts); #endif } #ifdef DEBUG_NCR53C8XX else printk("ncr53c8xx : command successfully queued\n"); #endif spin_unlock_irqrestore(&np->smp_lock, flags); if (sts != DID_OK) { unmap_scsi_data(np, cmd); done(cmd); sts = 0; } return sts; } static DEF_SCSI_QCMD(ncr53c8xx_queue_command) irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *shost = (struct Scsi_Host *)dev_id; struct host_data *host_data = (struct host_data *)shost->hostdata; struct ncb *np = host_data->ncb; struct scsi_cmnd *done_list; #ifdef DEBUG_NCR53C8XX printk("ncr53c8xx : interrupt received\n"); #endif if (DEBUG_FLAGS & DEBUG_TINY) printk ("["); spin_lock_irqsave(&np->smp_lock, flags); ncr_exception(np); done_list = np->done_list; np->done_list = NULL; spin_unlock_irqrestore(&np->smp_lock, flags); if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n"); if (done_list) ncr_flush_done_cmds(done_list); return IRQ_HANDLED; } static void ncr53c8xx_timeout(struct timer_list *t) { struct ncb *np = from_timer(np, t, timer); unsigned long flags; struct scsi_cmnd *done_list; spin_lock_irqsave(&np->smp_lock, flags); ncr_timeout(np); done_list = np->done_list; np->done_list = NULL; spin_unlock_irqrestore(&np->smp_lock, flags); if (done_list) ncr_flush_done_cmds(done_list); } static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd) { struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; int sts; unsigned long flags; struct scsi_cmnd *done_list; /* * If the mid-level driver told us reset is synchronous, it seems * that we must call the done() callback for the involved command, * even if this command was not queued to the low-level driver, * before returning SUCCESS. */ spin_lock_irqsave(&np->smp_lock, flags); sts = ncr_reset_bus(np); done_list = np->done_list; np->done_list = NULL; spin_unlock_irqrestore(&np->smp_lock, flags); ncr_flush_done_cmds(done_list); return sts; } /* ** Scsi command waiting list management. ** ** It may happen that we cannot insert a scsi command into the start queue, ** in the following circumstances. ** Too few preallocated ccb(s), ** maxtags < cmd_per_lun of the Linux host control block, ** etc... ** Such scsi commands are inserted into a waiting list. ** When a scsi command complete, we try to requeue the commands of the ** waiting list. 
*/ #define next_wcmd host_scribble static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd) { struct scsi_cmnd *wcmd; #ifdef DEBUG_WAITING_LIST printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd); #endif cmd->next_wcmd = NULL; if (!(wcmd = np->waiting_list)) np->waiting_list = cmd; else { while (wcmd->next_wcmd) wcmd = (struct scsi_cmnd *) wcmd->next_wcmd; wcmd->next_wcmd = (char *) cmd; } } static void process_waiting_list(struct ncb *np, int sts) { struct scsi_cmnd *waiting_list, *wcmd; waiting_list = np->waiting_list; np->waiting_list = NULL; #ifdef DEBUG_WAITING_LIST if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts); #endif while ((wcmd = waiting_list) != NULL) { waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd; wcmd->next_wcmd = NULL; if (sts == DID_OK) { #ifdef DEBUG_WAITING_LIST printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd); #endif sts = ncr_queue_command(np, wcmd); } if (sts != DID_OK) { #ifdef DEBUG_WAITING_LIST printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts); #endif set_host_byte(wcmd, sts); ncr_queue_done_cmd(np, wcmd); } } } #undef next_wcmd static ssize_t show_ncr53c8xx_revision(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct host_data *host_data = (struct host_data *)host->hostdata; return snprintf(buf, 20, "0x%x\n", host_data->ncb->revision_id); } static struct device_attribute ncr53c8xx_revision_attr = { .attr = { .name = "revision", .mode = S_IRUGO, }, .show = show_ncr53c8xx_revision, }; static struct attribute *ncr53c8xx_host_attrs[] = { &ncr53c8xx_revision_attr.attr, NULL }; ATTRIBUTE_GROUPS(ncr53c8xx_host); /*========================================================== ** ** Boot command line. ** **========================================================== */ #ifdef MODULE char *ncr53c8xx; /* command line passed by insmod */ module_param(ncr53c8xx, charp, 0); #endif #ifndef MODULE static int __init ncr53c8xx_setup(char *str) { return sym53c8xx__setup(str); } __setup("ncr53c8xx=", ncr53c8xx_setup); #endif /* * Host attach and initialisations. * * Allocate host data and ncb structure. * Request IO region and remap MMIO region. * Do chip initialization. * If all is OK, install interrupt handling and * start the timer daemon. 
*/ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt, int unit, struct ncr_device *device) { struct host_data *host_data; struct ncb *np = NULL; struct Scsi_Host *instance = NULL; u_long flags = 0; int i; WARN_ON_ONCE(tpnt->cmd_size < sizeof(struct ncr_cmd_priv)); if (!tpnt->name) tpnt->name = SCSI_NCR_DRIVER_NAME; if (!tpnt->shost_groups) tpnt->shost_groups = ncr53c8xx_host_groups; tpnt->queuecommand = ncr53c8xx_queue_command; tpnt->slave_configure = ncr53c8xx_slave_configure; tpnt->slave_alloc = ncr53c8xx_slave_alloc; tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset; tpnt->can_queue = SCSI_NCR_CAN_QUEUE; tpnt->this_id = 7; tpnt->sg_tablesize = SCSI_NCR_SG_TABLESIZE; tpnt->cmd_per_lun = SCSI_NCR_CMD_PER_LUN; if (device->differential) driver_setup.diff_support = device->differential; printk(KERN_INFO "ncr53c720-%d: rev 0x%x irq %d\n", unit, device->chip.revision_id, device->slot.irq); instance = scsi_host_alloc(tpnt, sizeof(*host_data)); if (!instance) goto attach_error; host_data = (struct host_data *) instance->hostdata; np = __m_calloc_dma(device->dev, sizeof(struct ncb), "NCB"); if (!np) goto attach_error; spin_lock_init(&np->smp_lock); np->dev = device->dev; np->p_ncb = vtobus(np); host_data->ncb = np; np->ccb = m_calloc_dma(sizeof(struct ccb), "CCB"); if (!np->ccb) goto attach_error; /* Store input information in the host data structure. */ np->unit = unit; np->verbose = driver_setup.verbose; sprintf(np->inst_name, "ncr53c720-%d", np->unit); np->revision_id = device->chip.revision_id; np->features = device->chip.features; np->clock_divn = device->chip.nr_divisor; np->maxoffs = device->chip.offset_max; np->maxburst = device->chip.burst_max; np->myaddr = device->host_id; /* Allocate SCRIPTS areas. */ np->script0 = m_calloc_dma(sizeof(struct script), "SCRIPT"); if (!np->script0) goto attach_error; np->scripth0 = m_calloc_dma(sizeof(struct scripth), "SCRIPTH"); if (!np->scripth0) goto attach_error; timer_setup(&np->timer, ncr53c8xx_timeout, 0); /* Try to map the controller chip to virtual and physical memory. */ np->paddr = device->slot.base; np->paddr2 = (np->features & FE_RAM) ? device->slot.base_2 : 0; if (device->slot.base_v) np->vaddr = device->slot.base_v; else np->vaddr = ioremap(device->slot.base_c, 128); if (!np->vaddr) { printk(KERN_ERR "%s: can't map memory mapped IO region\n",ncr_name(np)); goto attach_error; } else { if (bootverbose > 1) printk(KERN_INFO "%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr); } /* Make the controller's registers available. Now the INB INW INL * OUTB OUTW OUTL macros can be used safely. */ np->reg = (struct ncr_reg __iomem *)np->vaddr; /* Do chip dependent initialization. */ ncr_prepare_setting(np); if (np->paddr2 && sizeof(struct script) > 4096) { np->paddr2 = 0; printk(KERN_WARNING "%s: script too large, NOT using on chip RAM.\n", ncr_name(np)); } instance->max_channel = 0; instance->this_id = np->myaddr; instance->max_id = np->maxwide ? 
16 : 8; instance->max_lun = SCSI_NCR_MAX_LUN; instance->base = (unsigned long) np->reg; instance->irq = device->slot.irq; instance->unique_id = device->slot.base; instance->dma_channel = 0; instance->cmd_per_lun = MAX_TAGS; instance->can_queue = (MAX_START-4); /* This can happen if you forget to call ncr53c8xx_init from * your module_init */ BUG_ON(!ncr53c8xx_transport_template); instance->transportt = ncr53c8xx_transport_template; /* Patch script to physical addresses */ ncr_script_fill(&script0, &scripth0); np->scripth = np->scripth0; np->p_scripth = vtobus(np->scripth); np->p_script = (np->paddr2) ? np->paddr2 : vtobus(np->script0); ncr_script_copy_and_bind(np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script)); ncr_script_copy_and_bind(np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth)); np->ccb->p_ccb = vtobus (np->ccb); /* Patch the script for LED support. */ if (np->features & FE_LED0) { np->script0->idle[0] = cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01)); np->script0->reselected[0] = cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe)); np->script0->start[0] = cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe)); } /* * Look for the target control block of this nexus. * For i = 0 to 3 * JUMP ^ IFTRUE (MASK (i, 3)), @(next_lcb) */ for (i = 0 ; i < 4 ; i++) { np->jump_tcb[i].l_cmd = cpu_to_scr((SCR_JUMP ^ IFTRUE (MASK (i, 3)))); np->jump_tcb[i].l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_target)); } ncr_chip_reset(np, 100); /* Now check the cache handling of the chipset. */ if (ncr_snooptest(np)) { printk(KERN_ERR "CACHE INCORRECTLY CONFIGURED.\n"); goto attach_error; } /* Install the interrupt handler. */ np->irq = device->slot.irq; /* Initialize the fixed part of the default ccb. */ ncr_init_ccb(np, np->ccb); /* * After SCSI devices have been opened, we cannot reset the bus * safely, so we do it here. Interrupt handler does the real work. * Process the reset exception if interrupts are not enabled yet. * Then enable disconnects. */ spin_lock_irqsave(&np->smp_lock, flags); if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) { printk(KERN_ERR "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np)); spin_unlock_irqrestore(&np->smp_lock, flags); goto attach_error; } ncr_exception(np); np->disc = 1; /* * The middle-level SCSI driver does not wait for devices to settle. * Wait synchronously if more than 2 seconds. 
*/ if (driver_setup.settle_delay > 2) { printk(KERN_INFO "%s: waiting %d seconds for scsi devices to settle...\n", ncr_name(np), driver_setup.settle_delay); mdelay(1000 * driver_setup.settle_delay); } /* start the timeout daemon */ np->lasttime=0; ncr_timeout (np); /* use SIMPLE TAG messages by default */ #ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG np->order = SIMPLE_QUEUE_TAG; #endif spin_unlock_irqrestore(&np->smp_lock, flags); return instance; attach_error: if (!instance) return NULL; printk(KERN_INFO "%s: detaching...\n", ncr_name(np)); if (!np) goto unregister; if (np->scripth0) m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH"); if (np->script0) m_free_dma(np->script0, sizeof(struct script), "SCRIPT"); if (np->ccb) m_free_dma(np->ccb, sizeof(struct ccb), "CCB"); m_free_dma(np, sizeof(struct ncb), "NCB"); host_data->ncb = NULL; unregister: scsi_host_put(instance); return NULL; } void ncr53c8xx_release(struct Scsi_Host *host) { struct host_data *host_data = shost_priv(host); #ifdef DEBUG_NCR53C8XX printk("ncr53c8xx: release\n"); #endif if (host_data->ncb) ncr_detach(host_data->ncb); scsi_host_put(host); } static void ncr53c8xx_set_period(struct scsi_target *starget, int period) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; struct tcb *tp = &np->target[starget->id]; if (period > np->maxsync) period = np->maxsync; else if (period < np->minsync) period = np->minsync; tp->usrsync = period; ncr_negotiate(np, tp); } static void ncr53c8xx_set_offset(struct scsi_target *starget, int offset) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; struct tcb *tp = &np->target[starget->id]; if (offset > np->maxoffs) offset = np->maxoffs; else if (offset < 0) offset = 0; tp->maxoffs = offset; ncr_negotiate(np, tp); } static void ncr53c8xx_set_width(struct scsi_target *starget, int width) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; struct tcb *tp = &np->target[starget->id]; if (width > np->maxwide) width = np->maxwide; else if (width < 0) width = 0; tp->usrwide = width; ncr_negotiate(np, tp); } static void ncr53c8xx_get_signalling(struct Scsi_Host *shost) { struct ncb *np = ((struct host_data *)shost->hostdata)->ncb; enum spi_signal_type type; switch (np->scsi_mode) { case SMODE_SE: type = SPI_SIGNAL_SE; break; case SMODE_HVD: type = SPI_SIGNAL_HVD; break; default: type = SPI_SIGNAL_UNKNOWN; break; } spi_signalling(shost) = type; } static struct spi_function_template ncr53c8xx_transport_functions = { .set_period = ncr53c8xx_set_period, .show_period = 1, .set_offset = ncr53c8xx_set_offset, .show_offset = 1, .set_width = ncr53c8xx_set_width, .show_width = 1, .get_signalling = ncr53c8xx_get_signalling, }; int __init ncr53c8xx_init(void) { ncr53c8xx_transport_template = spi_attach_transport(&ncr53c8xx_transport_functions); if (!ncr53c8xx_transport_template) return -ENODEV; return 0; } void ncr53c8xx_exit(void) { spi_release_transport(ncr53c8xx_transport_template); }
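/*
 * Editor's note -- illustrative sketch only, NOT part of the original
 * driver above.  The SIR_NEGO_SYNC handler earlier in this file clamps
 * the period/offset pair received in an SDTR message against driver and
 * target limits before programming the chip.  Rendered as a standalone
 * helper (the struct, field names and function name here are
 * hypothetical, chosen only to distil that clamping logic):
 */
struct nego_limits {
	unsigned char minsync;	/* fastest period the controller accepts */
	unsigned char maxoffs;	/* deepest offset the controller accepts */
};

/* Returns 1 when the requested values had to be adjusted. */
int clamp_sdtr(const struct nego_limits *lim,
	       unsigned char *per, unsigned char *ofs)
{
	int chg = 0;

	if (*ofs == 0)			/* offset 0 means asynchronous */
		*per = 255;
	if (*per < lim->minsync) {	/* requested period too fast: slow it down */
		*per = lim->minsync;
		chg = 1;
	}
	if (*ofs > lim->maxoffs) {	/* requested offset too deep: cap it */
		*ofs = lim->maxoffs;
		chg = 1;
	}
	return chg;
}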
linux-master
drivers/scsi/ncr53c8xx.c
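The clock-probe code in the file above (ncrgetfreq/ncr_getclock) measures how long the chip's general-purpose timer takes to expire and converts that delay into a SCSI clock frequency with the driver's own expression ((1 << gen) * 4340) / ms; ncr_getclock then snaps the result to the nearest nominal value (40, 50 or 80 MHz). A minimal standalone sketch of just that conversion follows; the helper name and the sample measurement are illustrative, not taken from the driver.

#include <stdio.h>

/*
 * Convert a measured general-purpose-timer delay (in ms) into a SCSI
 * clock frequency in kHz, using the same expression as ncrgetfreq().
 * 'gen' selects the nominal timer delay of (1 << gen) * 125 us.
 * Returns 0 if no delay was measured.
 */
unsigned int delay_to_khz(int gen, unsigned int ms)
{
	return ms ? ((1u << gen) * 4340) / ms : 0;
}

int main(void)
{
	/*
	 * With gen = 11, a measured delay of about 222 ms works out to
	 * roughly a 40 MHz clock: this prints "40037 kHz", which the
	 * caller would then round to the nominal 40000 kHz.
	 */
	printf("%u kHz\n", delay_to_khz(11, 222));
	return 0;
}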
// SPDX-License-Identifier: GPL-2.0-or-later /* * ipr.c -- driver for IBM Power Linux RAID adapters * * Written By: Brian King <[email protected]>, IBM Corporation * * Copyright (C) 2003, 2004 IBM Corporation */ /* * Notes: * * This driver is used to control the following SCSI adapters: * * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B * * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter * PCI-X Dual Channel Ultra 320 SCSI Adapter * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card * Embedded SCSI adapter on p615 and p655 systems * * Supported Hardware Features: * - Ultra 320 SCSI controller * - PCI-X host interface * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine * - Non-Volatile Write Cache * - Supports attachment of non-RAID disks, tape, and optical devices * - RAID Levels 0, 5, 10 * - Hot spare * - Background Parity Checking * - Background Data Scrubbing * - Ability to increase the capacity of an existing RAID 5 disk array * by adding disks * * Driver Features: * - Tagged command queuing * - Adapter microcode download * - PCI hot plug * - SCSI device hot plug * */ #include <linux/fs.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/hdreg.h> #include <linux/reboot.h> #include <linux/stringify.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/processor.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_cmnd.h> #include "ipr.h" /* * Global Data */ static LIST_HEAD(ipr_ioa_head); static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; static unsigned int ipr_max_speed = 1; static int ipr_testmode = 0; static unsigned int ipr_fastfail = 0; static unsigned int ipr_transop_timeout = 0; static unsigned int ipr_debug = 0; static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; static unsigned int ipr_dual_ioa_raid = 1; static unsigned int ipr_number_of_msix = 16; static unsigned int ipr_fast_reboot; static DEFINE_SPINLOCK(ipr_driver_lock); /* This table describes the differences between DMA controller chips */ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ .mailbox = 0x0042C, .max_cmds = 100, .cache_line_size = 0x20, .clear_isr = 1, .iopoll_weight = 0, { .set_interrupt_mask_reg = 0x0022C, .clr_interrupt_mask_reg = 0x00230, .clr_interrupt_mask_reg32 = 0x00230, .sense_interrupt_mask_reg = 0x0022C, .sense_interrupt_mask_reg32 = 0x0022C, .clr_interrupt_reg = 0x00228, .clr_interrupt_reg32 = 0x00228, .sense_interrupt_reg = 0x00224, .sense_interrupt_reg32 = 0x00224, .ioarrin_reg = 0x00404, .sense_uproc_interrupt_reg = 0x00214, .sense_uproc_interrupt_reg32 = 0x00214, .set_uproc_interrupt_reg = 0x00214, .set_uproc_interrupt_reg32 = 0x00214, .clr_uproc_interrupt_reg = 0x00218, .clr_uproc_interrupt_reg32 = 0x00218 } }, { /* Snipe and Scamp */ .mailbox = 0x0052C, .max_cmds = 100, .cache_line_size = 0x20, .clear_isr = 1, .iopoll_weight = 0, { .set_interrupt_mask_reg = 0x00288, .clr_interrupt_mask_reg = 0x0028C, .clr_interrupt_mask_reg32 = 0x0028C, .sense_interrupt_mask_reg = 0x00288, .sense_interrupt_mask_reg32 = 0x00288, 
.clr_interrupt_reg = 0x00284, .clr_interrupt_reg32 = 0x00284, .sense_interrupt_reg = 0x00280, .sense_interrupt_reg32 = 0x00280, .ioarrin_reg = 0x00504, .sense_uproc_interrupt_reg = 0x00290, .sense_uproc_interrupt_reg32 = 0x00290, .set_uproc_interrupt_reg = 0x00290, .set_uproc_interrupt_reg32 = 0x00290, .clr_uproc_interrupt_reg = 0x00294, .clr_uproc_interrupt_reg32 = 0x00294 } }, { /* CRoC */ .mailbox = 0x00044, .max_cmds = 1000, .cache_line_size = 0x20, .clear_isr = 0, .iopoll_weight = 64, { .set_interrupt_mask_reg = 0x00010, .clr_interrupt_mask_reg = 0x00018, .clr_interrupt_mask_reg32 = 0x0001C, .sense_interrupt_mask_reg = 0x00010, .sense_interrupt_mask_reg32 = 0x00014, .clr_interrupt_reg = 0x00008, .clr_interrupt_reg32 = 0x0000C, .sense_interrupt_reg = 0x00000, .sense_interrupt_reg32 = 0x00004, .ioarrin_reg = 0x00070, .sense_uproc_interrupt_reg = 0x00020, .sense_uproc_interrupt_reg32 = 0x00024, .set_uproc_interrupt_reg = 0x00020, .set_uproc_interrupt_reg32 = 0x00024, .clr_uproc_interrupt_reg = 0x00028, .clr_uproc_interrupt_reg32 = 0x0002C, .init_feedback_reg = 0x0005C, .dump_addr_reg = 0x00064, .dump_data_reg = 0x00068, .endian_swap_reg = 0x00084 } }, }; static const struct ipr_chip_t ipr_chip[] = { { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } }; static int ipr_max_bus_speeds[] = { IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE }; MODULE_AUTHOR("Brian King <[email protected]>"); MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver"); module_param_named(max_speed, ipr_max_speed, uint, 0); MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320"); module_param_named(log_level, ipr_log_level, uint, 0); MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); module_param_named(testmode, ipr_testmode, int, 0); MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations"); module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); module_param_named(transop_timeout, ipr_transop_timeout, int, 0); MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. 
(default: 1)"); module_param_named(max_devs, ipr_max_devs, int, 0); MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); module_param_named(number_of_msix, ipr_number_of_msix, int, 0); MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)"); module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IPR_DRIVER_VERSION); /* A constant array of IOASCs/URCs/Error Messages */ static const struct ipr_error_table_t ipr_error_table[] = { {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, "8155: An unknown error was received"}, {0x00330000, 0, 0, "Soft underlength error"}, {0x005A0000, 0, 0, "Command to be cancelled not found"}, {0x00808000, 0, 0, "Qualified success"}, {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, "FFFE: Soft device bus error recovered by the IOA"}, {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, "4101: Soft device bus fabric error"}, {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, "FFFC: Logical block guard error recovered by the device"}, {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFC: Logical block reference tag error recovered by the device"}, {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, "4171: Recovered scatter list tag / sequence number error"}, {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, "4171: Recovered logical block sequence number error on IOA to Host transfer"}, {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFD: Recovered logical block reference tag error detected by the IOA"}, {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, "FFFD: Logical block guard error recovered by the IOA"}, {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Device sector reassign successful"}, {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by device rewrite procedures"}, {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, "7001: IOA sector reassignment successful"}, {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Soft media error. 
Sector reassignment recommended"}, {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by IOA rewrite procedures"}, {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft PCI bus error recovered by the IOA"}, {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the IOA"}, {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the device"}, {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft IOA error recovered by the IOA"}, {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFA: Undefined device response recovered by the IOA"}, {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device bus error, message or command phase"}, {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFE: Task Management Function failed"}, {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Failure prediction threshold exceeded"}, {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, "8009: Impending cache battery pack failure"}, {0x02040100, 0, 0, "Logical Unit in process of becoming ready"}, {0x02040200, 0, 0, "Initializing command required"}, {0x02040400, 0, 0, "34FF: Disk device format in progress"}, {0x02040C00, 0, 0, "Logical unit not accessible, target port in unavailable state"}, {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, "9070: IOA requested reset"}, {0x023F0000, 0, 0, "Synchronization required"}, {0x02408500, 0, 0, "IOA microcode download required"}, {0x02408600, 0, 0, "Device bus connection is prohibited by host"}, {0x024E0000, 0, 0, "No ready, IOA shutdown"}, {0x025A0000, 0, 0, "Not ready, IOA has been shutdown"}, {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL, "3020: Storage subsystem configuration error"}, {0x03110B00, 0, 0, "FFF5: Medium error, data unreadable, recommend reassign"}, {0x03110C00, 0, 0, "7000: Medium error, data unreadable, do not reassign"}, {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF3: Disk media format bad"}, {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL, "3002: Addressed device failed to respond to selection"}, {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL, "3100: Device bus error"}, {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL, "3109: IOA timed out a device command"}, {0x04088000, 0, 0, "3120: SCSI bus is not operational"}, {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, "4100: Hard device bus fabric error"}, {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, "310C: Logical block guard error detected by the device"}, {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, "310C: Logical block reference tag error detected by the device"}, {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, "4170: Scatter list tag / sequence number error"}, {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Logical block CRC error on IOA to Host transfer"}, {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, "4170: Logical block sequence number error on IOA to Host transfer"}, {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, "310D: Logical block reference tag error detected by the IOA"}, {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, "310D: Logical block guard error detected by the IOA"}, {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, "9000: IOA reserved area data check"}, {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, "9001: IOA reserved area invalid data pattern"}, {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, "9002: IOA reserved area LRC error"}, {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, "Hardware Error, IOA metadata access error"}, {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, "102E: Out of alternate sectors for disk storage"}, {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer underlength error"}, {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer overlength error"}, {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, "3400: 
Logical unit failure"}, {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Device microcode is corrupt"}, {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: PCI bus error"}, {0x04430000, 1, 0, "Unsupported device bus message received"}, {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Disk device problem"}, {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Permanent IOA failure"}, {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, "3010: Disk device returned wrong response to IOA"}, {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, "8151: IOA microcode error"}, {0x04448500, 0, 0, "Device bus status error"}, {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, "8157: IOA error requiring IOA reset to recover"}, {0x04448700, 0, 0, "ATA device status error"}, {0x04490000, 0, 0, "Message reject received from the device"}, {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, "8008: A permanent cache battery pack failure occurred"}, {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, "9090: Disk unit has been modified after the last known status"}, {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, "9081: IOA detected device error"}, {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, "9082: IOA detected device error"}, {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: Device bus error, message or command phase"}, {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: SAS Command / Task Management Function failed"}, {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, "9091: Incorrect hardware configuration change has been detected"}, {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, "9073: Invalid multi-adapter configuration"}, {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, "4010: Incorrect connection between cascaded expanders"}, {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, "4020: Connections exceed IOA design limits"}, {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, "4030: Incorrect multipath connection"}, {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, "4110: Unsupported enclosure function"}, {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, "4120: SAS cable VPD cannot be read"}, {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Command to logical unit failed"}, {0x05240000, 1, 0, "Illegal request, invalid request type or request packet"}, {0x05250000, 0, 0, "Illegal request, invalid resource handle"}, {0x05258000, 0, 0, "Illegal request, commands not allowed to this device"}, {0x05258100, 0, 0, "Illegal request, command not allowed to a secondary adapter"}, {0x05258200, 0, 0, "Illegal request, command not allowed to a non-optimized resource"}, {0x05260000, 0, 0, "Illegal request, invalid field in parameter list"}, {0x05260100, 0, 0, "Illegal request, parameter not supported"}, {0x05260200, 0, 0, "Illegal request, parameter value invalid"}, {0x052C0000, 0, 0, "Illegal request, command sequence error"}, {0x052C8000, 1, 0, "Illegal request, dual adapter support not enabled"}, {0x052C8100, 1, 0, "Illegal request, another cable connector was physically disabled"}, {0x054E8000, 1, 0, "Illegal request, inconsistent group id/group count"}, {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, "9031: Array protection temporarily suspended, protection resuming"}, {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, "9040: Array protection temporarily suspended, protection resuming"}, {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL, "4080: IOA exceeded maximum operating temperature"}, {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, "4085: Service required"}, {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL, "4086: SAS Adapter Hardware Configuration Error"}, {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, "3140: Device bus not ready to ready transition"}, {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset"}, {0x06290500, 0, 0, "FFFE: SCSI bus 
transition to single ended"}, {0x06290600, 0, 0, "FFFE: SCSI bus transition to LVD"}, {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset by another initiator"}, {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, "3029: A device replacement has occurred"}, {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL, "4102: Device bus fabric performance degradation"}, {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, "9051: IOA cache data exists for a missing or failed device"}, {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL, "9025: Disk unit is not supported at its physical location"}, {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL, "3020: IOA detected a SCSI bus configuration error"}, {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL, "3150: SCSI bus configuration error"}, {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL, "9074: Asymmetric advanced function disk configuration"}, {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL, "4040: Incomplete multipath connection between IOA and enclosure"}, {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL, "4041: Incomplete multipath connection between enclosure and device"}, {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL, "9075: Incomplete multipath connection between IOA and remote IOA"}, {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL, "9076: Configuration error, missing remote IOA"}, {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, "4050: Enclosure does not support a required multipath function"}, {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL, "4121: Configuration error, required cable is missing"}, {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL, "4122: Cable is not plugged into the correct location on remote IOA"}, {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL, "4123: Configuration error, invalid cable vital product data"}, {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL, "4124: Configuration error, both cable ends are plugged into the same IOA"}, {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, "4070: Logically bad block written on device"}, {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, "9041: Array protection temporarily suspended"}, {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, "9042: Corrupt array parity detected on specified device"}, {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL, "9030: Array no longer protected due to missing or failed disk unit"}, {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL, "9071: Link operational transition"}, {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL, "9072: Link not operational transition"}, {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, "9032: Array exposed but still protected"}, {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL, "70DD: Device forced failed by disrupt device command"}, {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, "4061: Multipath redundancy level got better"}, {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, "4060: Multipath redundancy level got worse"}, {0x06808100, 0, IPR_DEBUG_LOG_LEVEL, "9083: Device raw mode enabled"}, {0x06808200, 0, IPR_DEBUG_LOG_LEVEL, "9084: Device raw mode disabled"}, {0x07270000, 0, 0, "Failure due to other device"}, {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, "9008: IOA does not support functions expected by devices"}, {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL, "9010: Cache data associated with attached devices cannot be found"}, {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL, "9011: Cache data belongs to devices other than those attached"}, {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL, "9020: Array missing 2 or more devices with only 1 device present"}, {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL, "9021: Array missing 2 or more devices with 2 or more devices present"}, {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL, "9022: Exposed 
array is missing a required device"}, {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL, "9023: Array member(s) not at required physical locations"}, {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL, "9024: Array not functional due to present hardware configuration"}, {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL, "9026: Array not functional due to present hardware configuration"}, {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL, "9027: Array is missing a device and parity is out of sync"}, {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL, "9028: Maximum number of arrays already exist"}, {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL, "9050: Required cache data cannot be located for a disk unit"}, {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL, "9052: Cache data exists for a device that has been modified"}, {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL, "9054: IOA resources not available due to previous problems"}, {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL, "9092: Disk unit requires initialization before use"}, {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL, "9029: Incorrect hardware configuration change has been detected"}, {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL, "9060: One or more disk pairs are missing from an array"}, {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL, "9061: One or more disks are missing from an array"}, {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL, "9062: One or more disks are missing from an array"}, {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, "9063: Maximum number of functional arrays has been exceeded"}, {0x07279A00, 0, 0, "Data protect, other volume set problem"}, {0x0B260000, 0, 0, "Aborted command, invalid descriptor"}, {0x0B3F9000, 0, 0, "Target operating conditions have changed, dual adapter takeover"}, {0x0B530200, 0, 0, "Aborted command, medium removal prevented"}, {0x0B5A0000, 0, 0, "Command terminated by host"}, {0x0B5B8000, 0, 0, "Aborted command, command terminated by host"} }; static const struct ipr_ses_table_entry ipr_ses_table[] = { { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 }, { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 }, { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */ { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */ { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */ { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */ { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 }, { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 }, { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } }; /* * Function Prototypes */ static int ipr_reset_alert(struct ipr_cmnd *); static void ipr_process_ccn(struct ipr_cmnd *); static void ipr_process_error(struct ipr_cmnd *); static void ipr_reset_ioa_job(struct ipr_cmnd *); static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, enum ipr_shutdown_type); #ifdef CONFIG_SCSI_IPR_TRACE /** * ipr_trc_hook - Add a trace entry to the driver trace * @ipr_cmd: ipr command struct * @type: trace type * @add_data: additional data * * Return value: * none **/ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, u8 type, u32 add_data) { struct ipr_trace_entry *trace_entry; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; unsigned int trace_index; trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; trace_entry = &ioa_cfg->trace[trace_index]; trace_entry->time = jiffies; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->type = type; trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; 
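	/*
	 * Illustrative note: trace_index is advanced with atomic_add_return()
	 * and masked with IPR_TRACE_INDEX_MASK above, so the trace array acts
	 * as a wrap-around ring shared by all HRR queues and the oldest entry
	 * is simply overwritten. A sketch of the indexing, assuming a
	 * hypothetical 256-entry trace array (mask 0xff):
	 *
	 *	idx = atomic_add_return(1, &ioa_cfg->trace_index) & 0xff;
	 *	entry = &ioa_cfg->trace[idx];
	 *
	 * The wmb() below orders the stores made here ahead of anything
	 * written after the trace hook returns.
	 */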
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; trace_entry->u.add_data = add_data; wmb(); } #else #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0) #endif /** * ipr_lock_and_done - Acquire lock and complete command * @ipr_cmd: ipr command struct * * Return value: * none **/ static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd) { unsigned long lock_flags; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ipr_cmd->done(ipr_cmd); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } /** * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse * @ipr_cmd: ipr command struct * * Return value: * none **/ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; dma_addr_t dma_addr = ipr_cmd->dma_addr; int hrrq_id; hrrq_id = ioarcb->cmd_pkt.hrrq_id; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); ioarcb->cmd_pkt.hrrq_id = hrrq_id; ioarcb->data_transfer_length = 0; ioarcb->read_data_transfer_length = 0; ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; if (ipr_cmd->ioa_cfg->sis64) { ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); } else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; } ioasa->hdr.ioasc = 0; ioasa->hdr.residual_data_len = 0; ipr_cmd->scsi_cmd = NULL; ipr_cmd->sense_buffer[0] = 0; ipr_cmd->dma_use_sg = 0; } /** * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block * @ipr_cmd: ipr command struct * @fast_done: fast done function call-back * * Return value: * none **/ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, void (*fast_done) (struct ipr_cmnd *)) { ipr_reinit_ipr_cmnd(ipr_cmd); ipr_cmd->u.scratch = 0; ipr_cmd->sibling = NULL; ipr_cmd->eh_comp = NULL; ipr_cmd->fast_done = fast_done; timer_setup(&ipr_cmd->timer, NULL, 0); } /** * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block * @hrrq: hrr queue * * Return value: * pointer to ipr command struct **/ static struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq) { struct ipr_cmnd *ipr_cmd = NULL; if (likely(!list_empty(&hrrq->hrrq_free_q))) { ipr_cmd = list_entry(hrrq->hrrq_free_q.next, struct ipr_cmnd, queue); list_del(&ipr_cmd->queue); } return ipr_cmd; } /** * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it * @ioa_cfg: ioa config struct * * Return value: * pointer to ipr command struct **/ static struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); return ipr_cmd; } /** * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts * @ioa_cfg: ioa config struct * @clr_ints: interrupts to clear * * This function masks all interrupts on the adapter, then clears the * interrupts specified in the mask * * Return value: * none **/ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, u32 clr_ints) { int i; /* Stop new interrupts */ for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].allow_interrupts = 0; spin_unlock(&ioa_cfg->hrrq[i]._lock); } /* Set interrupt mask to stop all new interrupts */ if (ioa_cfg->sis64) writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); else writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); /* Clear any 
pending interrupts */ if (ioa_cfg->sis64) writel(~0, ioa_cfg->regs.clr_interrupt_reg); writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); readl(ioa_cfg->regs.sense_interrupt_reg); } /** * ipr_save_pcix_cmd_reg - Save PCI-X command register * @ioa_cfg: ioa config struct * * Return value: * 0 on success / -EIO on failure **/ static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) { int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); if (pcix_cmd_reg == 0) return 0; if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); return -EIO; } ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; return 0; } /** * ipr_set_pcix_cmd_reg - Setup PCI-X command register * @ioa_cfg: ioa config struct * * Return value: * 0 on success / -EIO on failure **/ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) { int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); if (pcix_cmd_reg) { if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); return -EIO; } } return 0; } /** * __ipr_scsi_eh_done - mid-layer done function for aborted ops * @ipr_cmd: ipr command struct * * This function is invoked by the interrupt handler for * ops generated by the SCSI mid-layer which are being aborted. * * Return value: * none **/ static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; scsi_cmd->result |= (DID_ERROR << 16); scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_done(scsi_cmd); if (ipr_cmd->eh_comp) complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** * ipr_scsi_eh_done - mid-layer done function for aborted ops * @ipr_cmd: ipr command struct * * This function is invoked by the interrupt handler for * ops generated by the SCSI mid-layer which are being aborted. * * Return value: * none **/ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) { unsigned long hrrq_flags; struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; spin_lock_irqsave(&hrrq->_lock, hrrq_flags); __ipr_scsi_eh_done(ipr_cmd); spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); } /** * ipr_fail_all_ops - Fails all outstanding ops. * @ioa_cfg: ioa config struct * * This function fails all outstanding ops. * * Return value: * none **/ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_cmnd *ipr_cmd, *temp; struct ipr_hrr_queue *hrrq; ENTER; for_each_hrrq(hrrq, ioa_cfg) { spin_lock(&hrrq->_lock); list_for_each_entry_safe(ipr_cmd, temp, &hrrq->hrrq_pending_q, queue) { list_del(&ipr_cmd->queue); ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID); if (ipr_cmd->scsi_cmd) ipr_cmd->done = __ipr_scsi_eh_done; ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); del_timer(&ipr_cmd->timer); ipr_cmd->done(ipr_cmd); } spin_unlock(&hrrq->_lock); } LEAVE; } /** * ipr_send_command - Send driver initiated requests. * @ipr_cmd: ipr command struct * * This function sends a command to the adapter using the correct write call. * In the case of sis64, calculate the ioarcb size required. Then or in the * appropriate bits. 
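 *
 * Illustrative example (assuming the 16-byte struct ipr_ioadl64_desc): with
 * eight or fewer scatter/gather elements, 8 * 16 = 128 is not greater than
 * 128 bytes, so only the 256 byte ioarcb hint (low-order bit 0x1) is set in
 * the address written to ioarrin; with nine or more elements the check in
 * the body exceeds 128 bytes and bit 0x4 is also set to request the 512
 * byte ioarcb.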
* * Return value: * none **/ static void ipr_send_command(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; dma_addr_t send_dma_addr = ipr_cmd->dma_addr; if (ioa_cfg->sis64) { /* The default size is 256 bytes */ send_dma_addr |= 0x1; /* If the number of ioadls * size of ioadl > 128 bytes, then use a 512 byte ioarcb */ if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) send_dma_addr |= 0x4; writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); } else writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); } /** * ipr_do_req - Send driver initiated requests. * @ipr_cmd: ipr command struct * @done: done function * @timeout_func: timeout function * @timeout: timeout value * * This function sends the specified command to the adapter with the * timeout given. The done function is invoked on command completion. * * Return value: * none **/ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, void (*done) (struct ipr_cmnd *), void (*timeout_func) (struct timer_list *), u32 timeout) { list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); ipr_cmd->done = done; ipr_cmd->timer.expires = jiffies + timeout; ipr_cmd->timer.function = timeout_func; add_timer(&ipr_cmd->timer); ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); ipr_send_command(ipr_cmd); } /** * ipr_internal_cmd_done - Op done function for an internally generated op. * @ipr_cmd: ipr command struct * * This function is the op done function for an internally generated, * blocking op. It simply wakes the sleeping thread. * * Return value: * none **/ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) { if (ipr_cmd->sibling) ipr_cmd->sibling = NULL; else complete(&ipr_cmd->completion); } /** * ipr_init_ioadl - initialize the ioadl for the correct SIS type * @ipr_cmd: ipr command struct * @dma_addr: dma address * @len: transfer length * @flags: ioadl flag value * * This function initializes an ioadl in the case where there is only a single * descriptor. * * Return value: * nothing **/ static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, u32 len, int flags) { struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; ipr_cmd->dma_use_sg = 1; if (ipr_cmd->ioa_cfg->sis64) { ioadl64->flags = cpu_to_be32(flags); ioadl64->data_len = cpu_to_be32(len); ioadl64->address = cpu_to_be64(dma_addr); ipr_cmd->ioarcb.ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc)); ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); } else { ioadl->flags_and_data_len = cpu_to_be32(flags | len); ioadl->address = cpu_to_be32(dma_addr); if (flags == IPR_IOADL_FLAGS_READ_LAST) { ipr_cmd->ioarcb.read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); } else { ipr_cmd->ioarcb.ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); } } } /** * ipr_send_blocking_cmd - Send command and sleep on its completion. 
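 *
 * As the body shows, the caller must hold host_lock; the lock is dropped
 * around wait_for_completion() and re-acquired before returning, so this
 * helper can only be used from process context. A minimal usage sketch
 * (the timeout value is illustrative):
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	... set up ipr_cmd->ioarcb ...
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, 30 * HZ);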
* @ipr_cmd: ipr command struct * @timeout_func: function to invoke if command times out * @timeout: timeout * * Return value: * none **/ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, void (*timeout_func) (struct timer_list *), u32 timeout) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; init_completion(&ipr_cmd->completion); ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout); spin_unlock_irq(ioa_cfg->host->host_lock); wait_for_completion(&ipr_cmd->completion); spin_lock_irq(ioa_cfg->host->host_lock); } static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) { unsigned int hrrq; if (ioa_cfg->hrrq_num == 1) hrrq = 0; else { hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; } return hrrq; } /** * ipr_send_hcam - Send an HCAM to the adapter. * @ioa_cfg: ioa config struct * @type: HCAM type * @hostrcb: hostrcb struct * * This function will send a Host Controlled Async command to the adapter. * If HCAMs are currently not allowed to be issued to the adapter, it will * place the hostrcb on the free queue. * * Return value: * none **/ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, struct ipr_hostrcb *hostrcb) { struct ipr_cmnd *ipr_cmd; struct ipr_ioarcb *ioarcb; if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); ipr_cmd->u.hostrcb = hostrcb; ioarcb = &ipr_cmd->ioarcb; ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; ioarcb->cmd_pkt.cdb[1] = type; ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) ipr_cmd->done = ipr_process_ccn; else ipr_cmd->done = ipr_process_error; ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); ipr_send_command(ipr_cmd); } else { list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); } } /** * ipr_init_res_entry - Initialize a resource entry struct. 
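 *
 * On sis64 adapters the bus/target values are synthesized here: IOAFP,
 * array and volume-set resources are placed on virtual buses, target
 * numbers are allocated from the target_ids/array_ids/vset_ids bitmaps,
 * and generic SCSI resources that share a dev_id reuse the same target.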
* @res: resource entry struct * @cfgtew: config table entry wrapper struct * * Return value: * none **/ static void ipr_init_res_entry(struct ipr_resource_entry *res, struct ipr_config_table_entry_wrapper *cfgtew) { int found = 0; struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; struct ipr_resource_entry *gscsi_res = NULL; res->needs_sync_complete = 0; res->in_erp = 0; res->add_to_ml = 0; res->del_from_ml = 0; res->resetting_device = 0; res->reset_occurred = 0; res->sdev = NULL; if (ioa_cfg->sis64) { res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); res->qmodel = IPR_QUEUEING_MODEL64(res); res->type = cfgtew->u.cfgte64->res_type; memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, sizeof(res->res_path)); res->bus = 0; memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, sizeof(res->dev_lun.scsi_lun)); res->lun = scsilun_to_int(&res->dev_lun); if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { found = 1; res->target = gscsi_res->target; break; } } if (!found) { res->target = find_first_zero_bit(ioa_cfg->target_ids, ioa_cfg->max_devs_supported); set_bit(res->target, ioa_cfg->target_ids); } } else if (res->type == IPR_RES_TYPE_IOAFP) { res->bus = IPR_IOAFP_VIRTUAL_BUS; res->target = 0; } else if (res->type == IPR_RES_TYPE_ARRAY) { res->bus = IPR_ARRAY_VIRTUAL_BUS; res->target = find_first_zero_bit(ioa_cfg->array_ids, ioa_cfg->max_devs_supported); set_bit(res->target, ioa_cfg->array_ids); } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { res->bus = IPR_VSET_VIRTUAL_BUS; res->target = find_first_zero_bit(ioa_cfg->vset_ids, ioa_cfg->max_devs_supported); set_bit(res->target, ioa_cfg->vset_ids); } else { res->target = find_first_zero_bit(ioa_cfg->target_ids, ioa_cfg->max_devs_supported); set_bit(res->target, ioa_cfg->target_ids); } } else { res->qmodel = IPR_QUEUEING_MODEL(res); res->flags = cfgtew->u.cfgte->flags; if (res->flags & IPR_IS_IOA_RESOURCE) res->type = IPR_RES_TYPE_IOAFP; else res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; res->bus = cfgtew->u.cfgte->res_addr.bus; res->target = cfgtew->u.cfgte->res_addr.target; res->lun = cfgtew->u.cfgte->res_addr.lun; res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); } } /** * ipr_is_same_device - Determine if two devices are the same. * @res: resource entry struct * @cfgtew: config table entry wrapper struct * * Return value: * 1 if the devices are the same / 0 otherwise **/ static int ipr_is_same_device(struct ipr_resource_entry *res, struct ipr_config_table_entry_wrapper *cfgtew) { if (res->ioa_cfg->sis64) { if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, sizeof(cfgtew->u.cfgte64->dev_id)) && !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, sizeof(cfgtew->u.cfgte64->lun))) { return 1; } } else { if (res->bus == cfgtew->u.cfgte->res_addr.bus && res->target == cfgtew->u.cfgte->res_addr.target && res->lun == cfgtew->u.cfgte->res_addr.lun) return 1; } return 0; } /** * __ipr_format_res_path - Format the resource path for printing. 
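 *
 * Illustrative example: a res_path of { 0x00, 0x02, 0xff, ... } is rendered
 * as "00-02"; formatting stops at the first 0xff byte or after
 * IPR_RES_PATH_BYTES entries, whichever comes first.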
* @res_path: resource path * @buffer: buffer * @len: length of buffer provided * * Return value: * pointer to buffer **/ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len) { int i; char *p = buffer; *p = '\0'; p += scnprintf(p, buffer + len - p, "%02X", res_path[0]); for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++) p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]); return buffer; } /** * ipr_format_res_path - Format the resource path for printing. * @ioa_cfg: ioa config struct * @res_path: resource path * @buffer: buffer * @len: length of buffer provided * * Return value: * pointer to buffer **/ static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg, u8 *res_path, char *buffer, int len) { char *p = buffer; *p = '\0'; p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); __ipr_format_res_path(res_path, p, len - (p - buffer)); return buffer; } /** * ipr_update_res_entry - Update the resource entry. * @res: resource entry struct * @cfgtew: config table entry wrapper struct * * Return value: * none **/ static void ipr_update_res_entry(struct ipr_resource_entry *res, struct ipr_config_table_entry_wrapper *cfgtew) { char buffer[IPR_MAX_RES_PATH_LENGTH]; int new_path = 0; if (res->ioa_cfg->sis64) { res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); res->type = cfgtew->u.cfgte64->res_type; memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, sizeof(struct ipr_std_inq_data)); res->qmodel = IPR_QUEUEING_MODEL64(res); res->res_handle = cfgtew->u.cfgte64->res_handle; res->dev_id = cfgtew->u.cfgte64->dev_id; memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, sizeof(res->dev_lun.scsi_lun)); if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, sizeof(res->res_path))) { memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, sizeof(res->res_path)); new_path = 1; } if (res->sdev && new_path) sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", ipr_format_res_path(res->ioa_cfg, res->res_path, buffer, sizeof(buffer))); } else { res->flags = cfgtew->u.cfgte->flags; if (res->flags & IPR_IS_IOA_RESOURCE) res->type = IPR_RES_TYPE_IOAFP; else res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, sizeof(struct ipr_std_inq_data)); res->qmodel = IPR_QUEUEING_MODEL(res); res->res_handle = cfgtew->u.cfgte->res_handle; } } /** * ipr_clear_res_target - Clear the bit in the bit map representing the target * for the resource. 
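 *
 * This is a no-op on non-sis64 adapters. For generic SCSI resources the
 * target bit is only cleared once no other resource with the same dev_id
 * remains on the used resource queue.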
* @res: resource entry struct * * Return value: * none **/ static void ipr_clear_res_target(struct ipr_resource_entry *res) { struct ipr_resource_entry *gscsi_res = NULL; struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; if (!ioa_cfg->sis64) return; if (res->bus == IPR_ARRAY_VIRTUAL_BUS) clear_bit(res->target, ioa_cfg->array_ids); else if (res->bus == IPR_VSET_VIRTUAL_BUS) clear_bit(res->target, ioa_cfg->vset_ids); else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) return; clear_bit(res->target, ioa_cfg->target_ids); } else if (res->bus == 0) clear_bit(res->target, ioa_cfg->target_ids); } /** * ipr_handle_config_change - Handle a config change from the adapter * @ioa_cfg: ioa config struct * @hostrcb: hostrcb * * Return value: * none **/ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_resource_entry *res = NULL; struct ipr_config_table_entry_wrapper cfgtew; __be32 cc_res_handle; u32 is_ndn = 1; if (ioa_cfg->sis64) { cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; cc_res_handle = cfgtew.u.cfgte64->res_handle; } else { cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; cc_res_handle = cfgtew.u.cfgte->res_handle; } list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->res_handle == cc_res_handle) { is_ndn = 0; break; } } if (is_ndn) { if (list_empty(&ioa_cfg->free_res_q)) { ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); return; } res = list_entry(ioa_cfg->free_res_q.next, struct ipr_resource_entry, queue); list_del(&res->queue); ipr_init_res_entry(res, &cfgtew); list_add_tail(&res->queue, &ioa_cfg->used_res_q); } ipr_update_res_entry(res, &cfgtew); if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { if (res->sdev) { res->del_from_ml = 1; res->res_handle = IPR_INVALID_RES_HANDLE; schedule_work(&ioa_cfg->work_q); } else { ipr_clear_res_target(res); list_move_tail(&res->queue, &ioa_cfg->free_res_q); } } else if (!res->sdev || res->del_from_ml) { res->add_to_ml = 1; schedule_work(&ioa_cfg->work_q); } ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); } /** * ipr_process_ccn - Op done function for a CCN. * @ipr_cmd: ipr command struct * * This function is the op done function for a configuration * change notification host controlled async from the adapter. * * Return value: * none **/ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); list_del_init(&hostrcb->queue); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (ioasc) { if (ioasc != IPR_IOASC_IOA_WAS_RESET && ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) dev_err(&ioa_cfg->pdev->dev, "Host RCB failed with IOASC: 0x%08X\n", ioasc); ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); } else { ipr_handle_config_change(ioa_cfg, hostrcb); } } /** * strip_whitespace - Strip and pad trailing whitespace. * @i: size of buffer * @buf: string to modify * * This function will strip all trailing whitespace and * NUL terminate the string. * **/ static void strip_whitespace(int i, char *buf) { if (i < 1) return; i--; while (i && buf[i] == ' ') i--; buf[i+1] = '\0'; } /** * ipr_log_vpd_compact - Log the passed extended VPD compactly. 
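 *
 * Illustrative output line (the VPD values are hypothetical), after
 * trailing blanks have been stripped from each field by strip_whitespace():
 *
 *	"Remote IOA VPID/SN: IBM 57B3001SISIOA 0263A0123"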
* @prefix: string to print at start of printk * @hostrcb: hostrcb pointer * @vpd: vendor/product id/sn struct * * Return value: * none **/ static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, struct ipr_vpd *vpd) { char vendor_id[IPR_VENDOR_ID_LEN + 1]; char product_id[IPR_PROD_ID_LEN + 1]; char sn[IPR_SERIAL_NUM_LEN + 1]; memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id); memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN); strip_whitespace(IPR_PROD_ID_LEN, product_id); memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN); strip_whitespace(IPR_SERIAL_NUM_LEN, sn); ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix, vendor_id, product_id, sn); } /** * ipr_log_vpd - Log the passed VPD to the error log. * @vpd: vendor/product id/sn struct * * Return value: * none **/ static void ipr_log_vpd(struct ipr_vpd *vpd) { char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN]; memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, IPR_PROD_ID_LEN); buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0'; ipr_err("Vendor/Product ID: %s\n", buffer); memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); buffer[IPR_SERIAL_NUM_LEN] = '\0'; ipr_err(" Serial Number: %s\n", buffer); } /** * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly. * @prefix: string to print at start of printk * @hostrcb: hostrcb pointer * @vpd: vendor/product id/sn/wwn struct * * Return value: * none **/ static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, struct ipr_ext_vpd *vpd) { ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix, be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); } /** * ipr_log_ext_vpd - Log the passed extended VPD to the error log. * @vpd: vendor/product id/sn/wwn struct * * Return value: * none **/ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd) { ipr_log_vpd(&vpd->vpd); ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); } /** * ipr_log_enhanced_cache_error - Log a cache error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_12_error *error; if (ioa_cfg->sis64) error = &hostrcb->hcam.u.error64.u.type_12_error; else error = &hostrcb->hcam.u.error.u.type_12_error; ipr_err("-----Current Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); ipr_log_ext_vpd(&error->ioa_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_ext_vpd(&error->cfc_vpd); ipr_err("-----Expected Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); ipr_err("Additional IOA Data: %08X %08X %08X\n", be32_to_cpu(error->ioa_data[0]), be32_to_cpu(error->ioa_data[1]), be32_to_cpu(error->ioa_data[2])); } /** * ipr_log_cache_error - Log a cache error. 
* @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_02_error *error = &hostrcb->hcam.u.error.u.type_02_error; ipr_err("-----Current Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); ipr_log_vpd(&error->ioa_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_vpd(&error->cfc_vpd); ipr_err("-----Expected Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); ipr_err("Additional IOA Data: %08X %08X %08X\n", be32_to_cpu(error->ioa_data[0]), be32_to_cpu(error->ioa_data[1]), be32_to_cpu(error->ioa_data[2])); } /** * ipr_log_enhanced_config_error - Log a configuration error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { int errors_logged, i; struct ipr_hostrcb_device_data_entry_enhanced *dev_entry; struct ipr_hostrcb_type_13_error *error; error = &hostrcb->hcam.u.error.u.type_13_error; errors_logged = be32_to_cpu(error->errors_logged); ipr_err("Device Errors Detected/Logged: %d/%d\n", be32_to_cpu(error->errors_detected), errors_logged); dev_entry = error->dev; for (i = 0; i < errors_logged; i++, dev_entry++) { ipr_err_separator; ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); ipr_log_ext_vpd(&dev_entry->vpd); ipr_err("-----New Device Information-----\n"); ipr_log_ext_vpd(&dev_entry->new_vpd); ipr_err("Cache Directory Card Information:\n"); ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); } } /** * ipr_log_sis64_config_error - Log a device error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { int errors_logged, i; struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; struct ipr_hostrcb_type_23_error *error; char buffer[IPR_MAX_RES_PATH_LENGTH]; error = &hostrcb->hcam.u.error64.u.type_23_error; errors_logged = be32_to_cpu(error->errors_logged); ipr_err("Device Errors Detected/Logged: %d/%d\n", be32_to_cpu(error->errors_detected), errors_logged); dev_entry = error->dev; for (i = 0; i < errors_logged; i++, dev_entry++) { ipr_err_separator; ipr_err("Device %d : %s", i + 1, __ipr_format_res_path(dev_entry->res_path, buffer, sizeof(buffer))); ipr_log_ext_vpd(&dev_entry->vpd); ipr_err("-----New Device Information-----\n"); ipr_log_ext_vpd(&dev_entry->new_vpd); ipr_err("Cache Directory Card Information:\n"); ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); } } /** * ipr_log_config_error - Log a configuration error. 
* @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { int errors_logged, i; struct ipr_hostrcb_device_data_entry *dev_entry; struct ipr_hostrcb_type_03_error *error; error = &hostrcb->hcam.u.error.u.type_03_error; errors_logged = be32_to_cpu(error->errors_logged); ipr_err("Device Errors Detected/Logged: %d/%d\n", be32_to_cpu(error->errors_detected), errors_logged); dev_entry = error->dev; for (i = 0; i < errors_logged; i++, dev_entry++) { ipr_err_separator; ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); ipr_log_vpd(&dev_entry->vpd); ipr_err("-----New Device Information-----\n"); ipr_log_vpd(&dev_entry->new_vpd); ipr_err("Cache Directory Card Information:\n"); ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); ipr_err("Adapter Card Information:\n"); ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", be32_to_cpu(dev_entry->ioa_data[0]), be32_to_cpu(dev_entry->ioa_data[1]), be32_to_cpu(dev_entry->ioa_data[2]), be32_to_cpu(dev_entry->ioa_data[3]), be32_to_cpu(dev_entry->ioa_data[4])); } } /** * ipr_log_enhanced_array_error - Log an array configuration error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { int i, num_entries; struct ipr_hostrcb_type_14_error *error; struct ipr_hostrcb_array_data_entry_enhanced *array_entry; const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; error = &hostrcb->hcam.u.error.u.type_14_error; ipr_err_separator; ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", error->protection_level, ioa_cfg->host->host_no, error->last_func_vset_res_addr.bus, error->last_func_vset_res_addr.target, error->last_func_vset_res_addr.lun); ipr_err_separator; array_entry = error->array_member; num_entries = min_t(u32, be32_to_cpu(error->num_entries), ARRAY_SIZE(error->array_member)); for (i = 0; i < num_entries; i++, array_entry++) { if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) continue; if (be32_to_cpu(error->exposed_mode_adn) == i) ipr_err("Exposed Array Member %d:\n", i); else ipr_err("Array Member %d:\n", i); ipr_log_ext_vpd(&array_entry->vpd); ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, "Expected Location"); ipr_err_separator; } } /** * ipr_log_array_error - Log an array configuration error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { int i; struct ipr_hostrcb_type_04_error *error; struct ipr_hostrcb_array_data_entry *array_entry; const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; error = &hostrcb->hcam.u.error.u.type_04_error; ipr_err_separator; ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", error->protection_level, ioa_cfg->host->host_no, error->last_func_vset_res_addr.bus, error->last_func_vset_res_addr.target, error->last_func_vset_res_addr.lun); ipr_err_separator; array_entry = error->array_member; for (i = 0; i < 18; i++) { if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) continue; if (be32_to_cpu(error->exposed_mode_adn) == i) ipr_err("Exposed Array Member %d:\n", i); else ipr_err("Array Member %d:\n", i); ipr_log_vpd(&array_entry->vpd); ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, "Expected Location"); ipr_err_separator; if (i == 9) array_entry = error->array_member2; else array_entry++; } } /** * ipr_log_hex_data - Log additional hex IOA error data. * @ioa_cfg: ioa config struct * @data: IOA error data * @len: data length * * Return value: * none **/ static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) { int i; if (len == 0) return; if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); for (i = 0; i < len / 4; i += 4) { ipr_err("%08X: %08X %08X %08X %08X\n", i*4, be32_to_cpu(data[i]), be32_to_cpu(data[i+1]), be32_to_cpu(data[i+2]), be32_to_cpu(data[i+3])); } } /** * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_17_error *error; if (ioa_cfg->sis64) error = &hostrcb->hcam.u.error64.u.type_17_error; else error = &hostrcb->hcam.u.error.u.type_17_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; strim(error->failure_reason); ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, be32_to_cpu(hostrcb->hcam.u.error.prc)); ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); ipr_log_hex_data(ioa_cfg, error->data, be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb_error, u) + offsetof(struct ipr_hostrcb_type_17_error, data))); } /** * ipr_log_dual_ioa_error - Log a dual adapter error. 
* @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_07_error *error; error = &hostrcb->hcam.u.error.u.type_07_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; strim(error->failure_reason); ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, be32_to_cpu(hostrcb->hcam.u.error.prc)); ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); ipr_log_hex_data(ioa_cfg, error->data, be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb_error, u) + offsetof(struct ipr_hostrcb_type_07_error, data))); } static const struct { u8 active; char *desc; } path_active_desc[] = { { IPR_PATH_NO_INFO, "Path" }, { IPR_PATH_ACTIVE, "Active path" }, { IPR_PATH_NOT_ACTIVE, "Inactive path" } }; static const struct { u8 state; char *desc; } path_state_desc[] = { { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, { IPR_PATH_HEALTHY, "is healthy" }, { IPR_PATH_DEGRADED, "is degraded" }, { IPR_PATH_FAILED, "is failed" } }; /** * ipr_log_fabric_path - Log a fabric path error * @hostrcb: hostrcb struct * @fabric: fabric descriptor * * Return value: * none **/ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, struct ipr_hostrcb_fabric_desc *fabric) { int i, j; u8 path_state = fabric->path_state; u8 active = path_state & IPR_PATH_ACTIVE_MASK; u8 state = path_state & IPR_PATH_STATE_MASK; for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { if (path_active_desc[i].active != active) continue; for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { if (path_state_desc[j].state != state) continue; if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", path_active_desc[i].desc, path_state_desc[j].desc, fabric->ioa_port); } else if (fabric->cascaded_expander == 0xff) { ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", path_active_desc[i].desc, path_state_desc[j].desc, fabric->ioa_port, fabric->phy); } else if (fabric->phy == 0xff) { ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", path_active_desc[i].desc, path_state_desc[j].desc, fabric->ioa_port, fabric->cascaded_expander); } else { ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", path_active_desc[i].desc, path_state_desc[j].desc, fabric->ioa_port, fabric->cascaded_expander, fabric->phy); } return; } } ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, fabric->ioa_port, fabric->cascaded_expander, fabric->phy); } /** * ipr_log64_fabric_path - Log a fabric path error * @hostrcb: hostrcb struct * @fabric: fabric descriptor * * Return value: * none **/ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, struct ipr_hostrcb64_fabric_desc *fabric) { int i, j; u8 path_state = fabric->path_state; u8 active = path_state & IPR_PATH_ACTIVE_MASK; u8 state = path_state & IPR_PATH_STATE_MASK; char buffer[IPR_MAX_RES_PATH_LENGTH]; for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { if (path_active_desc[i].active != active) continue; for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { if (path_state_desc[j].state != state) continue; ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", path_active_desc[i].desc, path_state_desc[j].desc, ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, buffer, sizeof(buffer))); return; } } ipr_err("Path state=%02X Resource Path=%s\n", path_state, ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, buffer, 
sizeof(buffer))); } static const struct { u8 type; char *desc; } path_type_desc[] = { { IPR_PATH_CFG_IOA_PORT, "IOA port" }, { IPR_PATH_CFG_EXP_PORT, "Expander port" }, { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } }; static const struct { u8 status; char *desc; } path_status_desc[] = { { IPR_PATH_CFG_NO_PROB, "Functional" }, { IPR_PATH_CFG_DEGRADED, "Degraded" }, { IPR_PATH_CFG_FAILED, "Failed" }, { IPR_PATH_CFG_SUSPECT, "Suspect" }, { IPR_PATH_NOT_DETECTED, "Missing" }, { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } }; static const char *link_rate[] = { "unknown", "disabled", "phy reset problem", "spinup hold", "port selector", "unknown", "unknown", "unknown", "1.5Gbps", "3.0Gbps", "unknown", "unknown", "unknown", "unknown", "unknown", "unknown" }; /** * ipr_log_path_elem - Log a fabric path element. * @hostrcb: hostrcb struct * @cfg: fabric path element struct * * Return value: * none **/ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, struct ipr_hostrcb_config_element *cfg) { int i, j; u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; if (type == IPR_PATH_CFG_NOT_EXIST) return; for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { if (path_type_desc[i].type != type) continue; for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { if (path_status_desc[j].status != status) continue; if (type == IPR_PATH_CFG_IOA_PORT) { ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", path_status_desc[j].desc, path_type_desc[i].desc, cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } else { if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", path_status_desc[j].desc, path_type_desc[i].desc, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } else if (cfg->cascaded_expander == 0xff) { ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " "WWN=%08X%08X\n", path_status_desc[j].desc, path_type_desc[i].desc, cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } else if (cfg->phy == 0xff) { ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " "WWN=%08X%08X\n", path_status_desc[j].desc, path_type_desc[i].desc, cfg->cascaded_expander, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } else { ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " "WWN=%08X%08X\n", path_status_desc[j].desc, path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } } return; } } ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } /** * ipr_log64_path_elem - Log a fabric path element. 
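 *
 * Only elements carrying the IPR_DESCRIPTOR_SIS64 descriptor id are logged;
 * elements of type IPR_PATH_CFG_NOT_EXIST, or with any other descriptor id,
 * are silently skipped (see the checks at the top of the body).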
* @hostrcb: hostrcb struct * @cfg: fabric path element struct * * Return value: * none **/ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, struct ipr_hostrcb64_config_element *cfg) { int i, j; u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; char buffer[IPR_MAX_RES_PATH_LENGTH]; if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) return; for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { if (path_type_desc[i].type != type) continue; for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { if (path_status_desc[j].status != status) continue; ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", path_status_desc[j].desc, path_type_desc[i].desc, ipr_format_res_path(hostrcb->ioa_cfg, cfg->res_path, buffer, sizeof(buffer)), link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); return; } } ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " "WWN=%08X%08X\n", cfg->type_status, ipr_format_res_path(hostrcb->ioa_cfg, cfg->res_path, buffer, sizeof(buffer)), link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } /** * ipr_log_fabric_error - Log a fabric error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_20_error *error; struct ipr_hostrcb_fabric_desc *fabric; struct ipr_hostrcb_config_element *cfg; int i, add_len; error = &hostrcb->hcam.u.error.u.type_20_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); add_len = be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb_error, u) + offsetof(struct ipr_hostrcb_type_20_error, desc)); for (i = 0, fabric = error->desc; i < error->num_entries; i++) { ipr_log_fabric_path(hostrcb, fabric); for_each_fabric_cfg(fabric, cfg) ipr_log_path_elem(hostrcb, cfg); add_len -= be16_to_cpu(fabric->length); fabric = (struct ipr_hostrcb_fabric_desc *) ((unsigned long)fabric + be16_to_cpu(fabric->length)); } ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); } /** * ipr_log_sis64_array_error - Log a sis64 array error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { int i, num_entries; struct ipr_hostrcb_type_24_error *error; struct ipr_hostrcb64_array_data_entry *array_entry; char buffer[IPR_MAX_RES_PATH_LENGTH]; const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; error = &hostrcb->hcam.u.error64.u.type_24_error; ipr_err_separator; ipr_err("RAID %s Array Configuration: %s\n", error->protection_level, ipr_format_res_path(ioa_cfg, error->last_res_path, buffer, sizeof(buffer))); ipr_err_separator; array_entry = error->array_member; num_entries = min_t(u32, error->num_entries, ARRAY_SIZE(error->array_member)); for (i = 0; i < num_entries; i++, array_entry++) { if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) continue; if (error->exposed_mode_adn == i) ipr_err("Exposed Array Member %d:\n", i); else ipr_err("Array Member %d:\n", i); ipr_err("Array Member %d:\n", i); ipr_log_ext_vpd(&array_entry->vpd); ipr_err("Current Location: %s\n", ipr_format_res_path(ioa_cfg, array_entry->res_path, buffer, sizeof(buffer))); ipr_err("Expected Location: %s\n", ipr_format_res_path(ioa_cfg, array_entry->expected_res_path, buffer, sizeof(buffer))); ipr_err_separator; } } /** * ipr_log_sis64_fabric_error - Log a sis64 fabric error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_30_error *error; struct ipr_hostrcb64_fabric_desc *fabric; struct ipr_hostrcb64_config_element *cfg; int i, add_len; error = &hostrcb->hcam.u.error64.u.type_30_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); add_len = be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb64_error, u) + offsetof(struct ipr_hostrcb_type_30_error, desc)); for (i = 0, fabric = error->desc; i < error->num_entries; i++) { ipr_log64_fabric_path(hostrcb, fabric); for_each_fabric_cfg(fabric, cfg) ipr_log64_path_elem(hostrcb, cfg); add_len -= be16_to_cpu(fabric->length); fabric = (struct ipr_hostrcb64_fabric_desc *) ((unsigned long)fabric + be16_to_cpu(fabric->length)); } ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); } /** * ipr_log_sis64_service_required_error - Log a sis64 service required error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_41_error *error; error = &hostrcb->hcam.u.error64.u.type_41_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; ipr_err("Primary Failure Reason: %s\n", error->failure_reason); ipr_log_hex_data(ioa_cfg, error->data, be32_to_cpu(hostrcb->hcam.length) - (offsetof(struct ipr_hostrcb_error, u) + offsetof(struct ipr_hostrcb_type_41_error, data))); } /** * ipr_log_generic_error - Log an adapter error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, be32_to_cpu(hostrcb->hcam.length)); } /** * ipr_log_sis64_device_error - Log a cache error. 
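 *
 * In practice this logs failing-device information for a type 21 (sis64
 * device) error: WWN, resource path, the primary and secondary problem
 * descriptions, SCSI sense data, the CDB and any additional IOA data.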
* @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * Return value: * none **/ static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { struct ipr_hostrcb_type_21_error *error; char buffer[IPR_MAX_RES_PATH_LENGTH]; error = &hostrcb->hcam.u.error64.u.type_21_error; ipr_err("-----Failing Device Information-----\n"); ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n", be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); ipr_err("Device Resource Path: %s\n", __ipr_format_res_path(error->res_path, buffer, sizeof(buffer))); error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); ipr_err("SCSI Sense Data:\n"); ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); ipr_err("SCSI Command Descriptor Block:\n"); ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); ipr_err("Additional IOA Data:\n"); ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); } /** * ipr_get_error - Find the specified IOASC in the ipr_error_table. * @ioasc: IOASC * * This function will return the index into the ipr_error_table * for the specified IOASC. If the IOASC is not in the table, * 0 will be returned, which points to the entry used for unknown errors. * * Return value: * index into the ipr_error_table **/ static u32 ipr_get_error(u32 ioasc) { int i; for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK)) return i; return 0; } /** * ipr_handle_log_data - Log an adapter error. * @ioa_cfg: ioa config struct * @hostrcb: hostrcb struct * * This function logs an adapter error to the system. 
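 *
 * The IOASC is looked up with ipr_get_error(); if the matching table
 * entry's log_hcam level is zero the error is not logged at all, and if the
 * adapter's log_level is below that level only the one-line summary is
 * printed. Otherwise the hostrcb overlay id selects one of the
 * type-specific loggers above (cache, config, array, dual IOA, fabric and
 * their sis64 variants), with ipr_log_generic_error() as the fallback.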
* * Return value: * none **/ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { u32 ioasc; int error_index; struct ipr_hostrcb_type_21_error *error; if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) return; if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); if (ioa_cfg->sis64) ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); else ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { /* Tell the midlayer we had a bus reset so it will handle the UA properly */ scsi_report_bus_reset(ioa_cfg->host, hostrcb->hcam.u.error.fd_res_addr.bus); } error_index = ipr_get_error(ioasc); if (!ipr_error_table[error_index].log_hcam) return; if (ioasc == IPR_IOASC_HW_CMD_FAILED && hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { error = &hostrcb->hcam.u.error64.u.type_21_error; if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) return; } ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); /* Set indication we have logged an error */ ioa_cfg->errors_logged++; if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) return; if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); switch (hostrcb->hcam.overlay_id) { case IPR_HOST_RCB_OVERLAY_ID_2: ipr_log_cache_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_3: ipr_log_config_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_4: case IPR_HOST_RCB_OVERLAY_ID_6: ipr_log_array_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_7: ipr_log_dual_ioa_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_12: ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_13: ipr_log_enhanced_config_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_14: case IPR_HOST_RCB_OVERLAY_ID_16: ipr_log_enhanced_array_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_17: ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_20: ipr_log_fabric_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_21: ipr_log_sis64_device_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_23: ipr_log_sis64_config_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_24: case IPR_HOST_RCB_OVERLAY_ID_26: ipr_log_sis64_array_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_30: ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_41: ipr_log_sis64_service_required_error(ioa_cfg, hostrcb); break; case IPR_HOST_RCB_OVERLAY_ID_1: case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: default: ipr_log_generic_error(ioa_cfg, hostrcb); break; } } static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa) { struct ipr_hostrcb *hostrcb; hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, struct ipr_hostrcb, queue); if (unlikely(!hostrcb)) { dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, struct ipr_hostrcb, queue); } list_del_init(&hostrcb->queue); return hostrcb; } /** * ipr_process_error - Op done function for an adapter error log. 
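 *
 * On successful completion, if the failing IOASC in the hostrcb is
 * IPR_IOASC_NR_IOA_RESET_REQUIRED, an abbreviated IOA reset is also
 * initiated. The hostrcb is then queued on hostrcb_report_q for the worker
 * thread and a replacement HCAM is sent from the free list.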
* @ipr_cmd: ipr command struct * * This function is the op done function for an error log host * controlled async from the adapter. It will log the error and * send the HCAM back to the adapter. * * Return value: * none **/ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 fd_ioasc; if (ioa_cfg->sis64) fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); else fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); list_del_init(&hostrcb->queue); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (!ioasc) { ipr_handle_log_data(ioa_cfg, hostrcb); if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { dev_err(&ioa_cfg->pdev->dev, "Host RCB failed with IOASC: 0x%08X\n", ioasc); } list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); schedule_work(&ioa_cfg->work_q); hostrcb = ipr_get_free_hostrcb(ioa_cfg); ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); } /** * ipr_timeout - An internally generated op has timed out. * @t: Timer context used to fetch ipr command struct * * This function blocks host requests and initiates an * adapter reset. * * Return value: * none **/ static void ipr_timeout(struct timer_list *t) { struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); unsigned long lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ioa_cfg->errors_logged++; dev_err(&ioa_cfg->pdev->dev, "Adapter being reset due to command timeout.\n"); if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) ioa_cfg->sdt_state = GET_DUMP; if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; } /** * ipr_oper_timeout - Adapter timed out transitioning to operational * @t: Timer context used to fetch ipr command struct * * This function blocks host requests and initiates an * adapter reset. 
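 *
 * When the ipr_fastfail module parameter is set, reset_retries is first
 * bumped by IPR_NUM_RESET_RELOAD_RETRIES, which effectively uses up the
 * retry budget so a wedged adapter is not reset indefinitely.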
* * Return value: * none **/ static void ipr_oper_timeout(struct timer_list *t) { struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); unsigned long lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ioa_cfg->errors_logged++; dev_err(&ioa_cfg->pdev->dev, "Adapter timed out transitioning to operational.\n"); if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) ioa_cfg->sdt_state = GET_DUMP; if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { if (ipr_fastfail) ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; } /** * ipr_find_ses_entry - Find matching SES in SES table * @res: resource entry struct of SES * * Return value: * pointer to SES table entry / NULL on failure **/ static const struct ipr_ses_table_entry * ipr_find_ses_entry(struct ipr_resource_entry *res) { int i, j, matches; struct ipr_std_inq_vpids *vpids; const struct ipr_ses_table_entry *ste = ipr_ses_table; for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { if (ste->compare_product_id_byte[j] == 'X') { vpids = &res->std_inq_data.vpids; if (vpids->product_id[j] == ste->product_id[j]) matches++; else break; } else matches++; } if (matches == IPR_PROD_ID_LEN) return ste; } return NULL; } /** * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus * @ioa_cfg: ioa config struct * @bus: SCSI bus * @bus_width: bus width * * Return value: * SCSI bus speed in units of 100KHz, 1600 is 160 MHz * For a 2-byte wide SCSI bus, the maximum transfer speed is * twice the maximum transfer rate (e.g. for a wide enabled bus, * max 160MHz = max 320MB/sec). **/ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) { struct ipr_resource_entry *res; const struct ipr_ses_table_entry *ste; u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); /* Loop through each config table entry in the config table buffer */ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) continue; if (bus != res->bus) continue; if (!(ste = ipr_find_ses_entry(res))) continue; max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); } return max_xfer_rate; } /** * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA * @ioa_cfg: ioa config struct * @max_delay: max delay in micro-seconds to wait * * Waits for an IODEBUG ACK from the IOA, doing busy looping. 
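 *
 * The poll interval starts at 1 microsecond and doubles on every pass
 * (delay += delay), switching from udelay() to mdelay() once a single step
 * would exceed MAX_UDELAY_MS milliseconds, until either the
 * IPR_PCII_IO_DEBUG_ACKNOWLEDGE bit is seen or max_delay is reached.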
* * Return value: * 0 on success / other on failure **/ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) { volatile u32 pcii_reg; int delay = 1; /* Read interrupt reg until IOA signals IO Debug Acknowledge */ while (delay < max_delay) { pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) return 0; /* udelay cannot be used if delay is more than a few milliseconds */ if ((delay / 1000) > MAX_UDELAY_MS) mdelay(delay / 1000); else udelay(delay); delay += delay; } return -EIO; } /** * ipr_get_sis64_dump_data_section - Dump IOA memory * @ioa_cfg: ioa config struct * @start_addr: adapter address to dump * @dest: destination kernel buffer * @length_in_words: length to dump in 4 byte words * * Return value: * 0 on success **/ static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, u32 start_addr, __be32 *dest, u32 length_in_words) { int i; for (i = 0; i < length_in_words; i++) { writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); dest++; } return 0; } /** * ipr_get_ldump_data_section - Dump IOA memory * @ioa_cfg: ioa config struct * @start_addr: adapter address to dump * @dest: destination kernel buffer * @length_in_words: length to dump in 4 byte words * * Return value: * 0 on success / -EIO on failure **/ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, u32 start_addr, __be32 *dest, u32 length_in_words) { volatile u32 temp_pcii_reg; int i, delay = 0; if (ioa_cfg->sis64) return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, dest, length_in_words); /* Write IOA interrupt reg starting LDUMP state */ writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), ioa_cfg->regs.set_uproc_interrupt_reg32); /* Wait for IO debug acknowledge */ if (ipr_wait_iodbg_ack(ioa_cfg, IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { dev_err(&ioa_cfg->pdev->dev, "IOA dump long data transfer timeout\n"); return -EIO; } /* Signal LDUMP interlocked - clear IO debug ack */ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_reg); /* Write Mailbox with starting address */ writel(start_addr, ioa_cfg->ioa_mailbox); /* Signal address valid - clear IOA Reset alert */ writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.clr_uproc_interrupt_reg32); for (i = 0; i < length_in_words; i++) { /* Wait for IO debug acknowledge */ if (ipr_wait_iodbg_ack(ioa_cfg, IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { dev_err(&ioa_cfg->pdev->dev, "IOA dump short data transfer timeout\n"); return -EIO; } /* Read data from mailbox and increment destination pointer */ *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); dest++; /* For all but the last word of data, signal data received */ if (i < (length_in_words - 1)) { /* Signal dump data received - Clear IO debug Ack */ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_reg); } } /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); writel(IPR_UPROCI_IO_DEBUG_ALERT, ioa_cfg->regs.clr_uproc_interrupt_reg32); /* Signal dump data received - Clear IO debug Ack */ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_reg); /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { temp_pcii_reg = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) return 0; udelay(10); delay += 10; } return 0; } #ifdef CONFIG_SCSI_IPR_DUMP /** * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer * @ioa_cfg: ioa config struct * @pci_address: adapter address * @length: length of data to copy * * Copy data from PCI adapter to kernel buffer. * Note: length MUST be a 4 byte multiple * Return value: * 0 on success / other on failure **/ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, unsigned long pci_address, u32 length) { int bytes_copied = 0; int cur_len, rc, rem_len, rem_page_len, max_dump_size; __be32 *page; unsigned long lock_flags = 0; struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; if (ioa_cfg->sis64) max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; else max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; while (bytes_copied < length && (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { if (ioa_dump->page_offset >= PAGE_SIZE || ioa_dump->page_offset == 0) { page = (__be32 *)__get_free_page(GFP_ATOMIC); if (!page) { ipr_trace; return bytes_copied; } ioa_dump->page_offset = 0; ioa_dump->ioa_data[ioa_dump->next_page_index] = page; ioa_dump->next_page_index++; } else page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; rem_len = length - bytes_copied; rem_page_len = PAGE_SIZE - ioa_dump->page_offset; cur_len = min(rem_len, rem_page_len); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->sdt_state == ABORT_DUMP) { rc = -EIO; } else { rc = ipr_get_ldump_data_section(ioa_cfg, pci_address + bytes_copied, &page[ioa_dump->page_offset / 4], (cur_len / sizeof(u32))); } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); if (!rc) { ioa_dump->page_offset += cur_len; bytes_copied += cur_len; } else { ipr_trace; break; } schedule(); } return bytes_copied; } /** * ipr_init_dump_entry_hdr - Initialize a dump entry header. * @hdr: dump entry header struct * * Return value: * nothing **/ static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) { hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; hdr->num_elems = 1; hdr->offset = sizeof(*hdr); hdr->status = IPR_DUMP_STATUS_SUCCESS; } /** * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
* @ioa_cfg: ioa config struct * @driver_dump: driver dump struct * * Return value: * nothing **/ static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, struct ipr_driver_dump *driver_dump) { struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); driver_dump->ioa_type_entry.hdr.len = sizeof(struct ipr_dump_ioa_type_entry) - sizeof(struct ipr_dump_entry_header); driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; driver_dump->ioa_type_entry.type = ioa_cfg->type; driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | ucode_vpd->minor_release[1]; driver_dump->hdr.num_entries++; } /** * ipr_dump_version_data - Fill in the driver version in the dump. * @ioa_cfg: ioa config struct * @driver_dump: driver dump struct * * Return value: * nothing **/ static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, struct ipr_driver_dump *driver_dump) { ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); driver_dump->version_entry.hdr.len = sizeof(struct ipr_dump_version_entry) - sizeof(struct ipr_dump_entry_header); driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); driver_dump->hdr.num_entries++; } /** * ipr_dump_trace_data - Fill in the IOA trace in the dump. * @ioa_cfg: ioa config struct * @driver_dump: driver dump struct * * Return value: * nothing **/ static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, struct ipr_driver_dump *driver_dump) { ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); driver_dump->trace_entry.hdr.len = sizeof(struct ipr_dump_trace_entry) - sizeof(struct ipr_dump_entry_header); driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); driver_dump->hdr.num_entries++; } /** * ipr_dump_location_data - Fill in the IOA location in the dump. * @ioa_cfg: ioa config struct * @driver_dump: driver dump struct * * Return value: * nothing **/ static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, struct ipr_driver_dump *driver_dump) { ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); driver_dump->location_entry.hdr.len = sizeof(struct ipr_dump_location_entry) - sizeof(struct ipr_dump_entry_header); driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); driver_dump->hdr.num_entries++; } /** * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
* @ioa_cfg: ioa config struct * @dump: dump struct * * Return value: * nothing **/ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) { unsigned long start_addr, sdt_word; unsigned long lock_flags = 0; struct ipr_driver_dump *driver_dump = &dump->driver_dump; struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; u32 num_entries, max_num_entries, start_off, end_off; u32 max_dump_size, bytes_to_copy, bytes_copied, rc; struct ipr_sdt *sdt; int valid = 1; int i; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->sdt_state != READ_DUMP) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } if (ioa_cfg->sis64) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); ssleep(IPR_DUMP_DELAY_SECONDS); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); } start_addr = readl(ioa_cfg->ioa_mailbox); if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { dev_err(&ioa_cfg->pdev->dev, "Invalid dump table format: %lx\n", start_addr); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; /* Initialize the overall dump header */ driver_dump->hdr.len = sizeof(struct ipr_driver_dump); driver_dump->hdr.num_entries = 1; driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; driver_dump->hdr.os = IPR_DUMP_OS_LINUX; driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; ipr_dump_version_data(ioa_cfg, driver_dump); ipr_dump_location_data(ioa_cfg, driver_dump); ipr_dump_ioa_type_data(ioa_cfg, driver_dump); ipr_dump_trace_data(ioa_cfg, driver_dump); /* Update dump_header */ driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); /* IOA Dump entry */ ipr_init_dump_entry_hdr(&ioa_dump->hdr); ioa_dump->hdr.len = 0; ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; /* First entries in sdt are actually a list of dump addresses and lengths to gather the real dump data. sdt represents the pointer to the ioa generated dump table. Dump data will be extracted based on entries in this table */ sdt = &ioa_dump->sdt; if (ioa_cfg->sis64) { max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; } else { max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; } bytes_to_copy = offsetof(struct ipr_sdt, entry) + (max_num_entries * sizeof(struct ipr_sdt_entry)); rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, bytes_to_copy / sizeof(__be32)); /* Smart Dump table is ready to use and the first entry is valid */ if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { dev_err(&ioa_cfg->pdev->dev, "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", rc, be32_to_cpu(sdt->hdr.state)); driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; ioa_cfg->sdt_state = DUMP_OBTAINED; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } num_entries = be32_to_cpu(sdt->hdr.num_entries_used); if (num_entries > max_num_entries) num_entries = max_num_entries; /* Update dump length to the actual data to be copied */ dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); if (ioa_cfg->sis64) dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); else dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); for (i = 0; i < num_entries; i++) { if (ioa_dump->hdr.len > max_dump_size) { driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; break; } if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { sdt_word = be32_to_cpu(sdt->entry[i].start_token); if (ioa_cfg->sis64) bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); else { start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; end_off = be32_to_cpu(sdt->entry[i].end_token); if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) bytes_to_copy = end_off - start_off; else valid = 0; } if (valid) { if (bytes_to_copy > max_dump_size) { sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; continue; } /* Copy data from adapter to driver buffers */ bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, bytes_to_copy); ioa_dump->hdr.len += bytes_copied; if (bytes_copied != bytes_to_copy) { driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; break; } } } } dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); /* Update dump_header */ driver_dump->hdr.len += ioa_dump->hdr.len; wmb(); ioa_cfg->sdt_state = DUMP_OBTAINED; LEAVE; } #else #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) #endif /** * ipr_release_dump - Free adapter dump memory * @kref: kref struct * * Return value: * nothing **/ static void ipr_release_dump(struct kref *kref) { struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; unsigned long lock_flags = 0; int i; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ioa_cfg->dump = NULL; ioa_cfg->sdt_state = INACTIVE; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); for (i = 0; i < dump->ioa_dump.next_page_index; i++) free_page((unsigned long) dump->ioa_dump.ioa_data[i]); vfree(dump->ioa_dump.ioa_data); kfree(dump); LEAVE; } static void ipr_add_remove_thread(struct work_struct *work) { unsigned long lock_flags; struct ipr_resource_entry *res; struct scsi_device *sdev; struct ipr_ioa_cfg *ioa_cfg = container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); u8 bus, target, lun; int did_work; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); restart: do { did_work = 0; if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->del_from_ml && res->sdev) { did_work = 1; sdev = res->sdev; if (!scsi_device_get(sdev)) { if (!res->add_to_ml) list_move_tail(&res->queue, &ioa_cfg->free_res_q); else res->del_from_ml = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_remove_device(sdev); scsi_device_put(sdev); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); } break; } } } while (did_work); list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->add_to_ml) { bus = res->bus; target = res->target; lun = res->lun; res->add_to_ml = 0; 
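			/* scsi_add_device() may sleep, so the host lock is dropped
			 * around the call and the scan restarts from the top of
			 * the resource list.
			 */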
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_add_device(ioa_cfg->host, bus, target, lun); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); goto restart; } } ioa_cfg->scan_done = 1; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); LEAVE; } /** * ipr_worker_thread - Worker thread * @work: ioa config struct * * Called at task level from a work thread. This function takes care * of adding and removing device from the mid-layer as configuration * changes are detected by the adapter. * * Return value: * nothing **/ static void ipr_worker_thread(struct work_struct *work) { unsigned long lock_flags; struct ipr_dump *dump; struct ipr_ioa_cfg *ioa_cfg = container_of(work, struct ipr_ioa_cfg, work_q); ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->sdt_state == READ_DUMP) { dump = ioa_cfg->dump; if (!dump) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } kref_get(&dump->kref); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); ipr_get_ioa_dump(ioa_cfg, dump); kref_put(&dump->kref, ipr_release_dump); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } if (ioa_cfg->scsi_unblock) { ioa_cfg->scsi_unblock = 0; ioa_cfg->scsi_blocked = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_unblock_requests(ioa_cfg->host); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->scsi_blocked) scsi_block_requests(ioa_cfg->host); } if (!ioa_cfg->scan_enabled) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } schedule_work(&ioa_cfg->scsi_add_work_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; } #ifdef CONFIG_SCSI_IPR_TRACE /** * ipr_read_trace - Dump the adapter trace * @filp: open sysfs file * @kobj: kobject struct * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; ssize_t ret; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, IPR_TRACE_SIZE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return ret; } static struct bin_attribute ipr_trace_attr = { .attr = { .name = "trace", .mode = S_IRUGO, }, .size = 0, .read = ipr_read_trace, }; #endif /** * ipr_show_fw_version - Show the firmware version * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_fw_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; unsigned long lock_flags = 0; int len; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", ucode_vpd->major_release, 
ucode_vpd->card_type, ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct device_attribute ipr_fw_version_attr = { .attr = { .name = "fw_version", .mode = S_IRUGO, }, .show = ipr_show_fw_version, }; /** * ipr_show_log_level - Show the adapter's error logging level * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_log_level(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int len; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } /** * ipr_store_log_level - Change the adapter's error logging level * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_store_log_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return strlen(buf); } static struct device_attribute ipr_log_level_attr = { .attr = { .name = "log_level", .mode = S_IRUGO | S_IWUSR, }, .show = ipr_show_log_level, .store = ipr_store_log_level }; /** * ipr_store_diagnostics - IOA Diagnostics interface * @dev: device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * This function will reset the adapter and wait a reasonable * amount of time for any errors that the adapter might log. 
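 * Illustrative usage, assuming the usual scsi_host sysfs layout (the host
 * number N is hypothetical):
 *   echo 1 > /sys/class/scsi_host/hostN/run_diagnostics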
* * Return value: * count on success / other on failure **/ static ssize_t ipr_store_diagnostics(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int rc = count; if (!capable(CAP_SYS_ADMIN)) return -EACCES; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); } ioa_cfg->errors_logged = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); if (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); /* Wait for a second for any errors to be logged */ msleep(1000); } else { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return -EIO; } spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) rc = -EIO; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return rc; } static struct device_attribute ipr_diagnostics_attr = { .attr = { .name = "run_diagnostics", .mode = S_IWUSR, }, .store = ipr_store_diagnostics }; /** * ipr_show_adapter_state - Show the adapter's state * @dev: device struct * @attr: device attribute (unused) * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_adapter_state(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int len; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) len = snprintf(buf, PAGE_SIZE, "offline\n"); else len = snprintf(buf, PAGE_SIZE, "online\n"); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } /** * ipr_store_adapter_state - Change adapter state * @dev: device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * This function will change the adapter's state. 
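 * Writing "online" while the adapter is marked dead clears the per-queue
 * ioa_is_dead flags and initiates an adapter reset. Illustrative usage
 * (host number N is hypothetical):
 *   echo online > /sys/class/scsi_host/hostN/online_state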
* * Return value: * count on success / other on failure **/ static ssize_t ipr_store_adapter_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags; int result = count, i; if (!capable(CAP_SYS_ADMIN)) return -EACCES; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !strncmp(buf, "online", 6)) { for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].ioa_is_dead = 0; spin_unlock(&ioa_cfg->hrrq[i]._lock); } wmb(); ioa_cfg->reset_retries = 0; ioa_cfg->in_ioa_bringdown = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); return result; } static struct device_attribute ipr_ioa_state_attr = { .attr = { .name = "online_state", .mode = S_IRUGO | S_IWUSR, }, .show = ipr_show_adapter_state, .store = ipr_store_adapter_state }; /** * ipr_store_reset_adapter - Reset the adapter * @dev: device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * This function will reset the adapter. * * Return value: * count on success / other on failure **/ static ssize_t ipr_store_reset_adapter(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags; int result = count; if (!capable(CAP_SYS_ADMIN)) return -EACCES; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (!ioa_cfg->in_reset_reload) ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); return result; } static struct device_attribute ipr_ioa_reset_attr = { .attr = { .name = "reset_host", .mode = S_IWUSR, }, .store = ipr_store_reset_adapter }; static int ipr_iopoll(struct irq_poll *iop, int budget); /** * ipr_show_iopoll_weight - Show ipr polling mode * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_iopoll_weight(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int len; spin_lock_irqsave(shost->host_lock, lock_flags); len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); spin_unlock_irqrestore(shost->host_lock, lock_flags); return len; } /** * ipr_store_iopoll_weight - Change the adapter's polling mode * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_store_iopoll_weight(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long user_iopoll_weight; unsigned long lock_flags = 0; int i; if (!ioa_cfg->sis64) { dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); return -EINVAL; } if (kstrtoul(buf, 10, &user_iopoll_weight)) return -EINVAL; if (user_iopoll_weight > 256) { 
dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); return -EINVAL; } if (user_iopoll_weight == ioa_cfg->iopoll_weight) { dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n"); return strlen(buf); } if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); } spin_lock_irqsave(shost->host_lock, lock_flags); ioa_cfg->iopoll_weight = user_iopoll_weight; if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { irq_poll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); } } spin_unlock_irqrestore(shost->host_lock, lock_flags); return strlen(buf); } static struct device_attribute ipr_iopoll_weight_attr = { .attr = { .name = "iopoll_weight", .mode = S_IRUGO | S_IWUSR, }, .show = ipr_show_iopoll_weight, .store = ipr_store_iopoll_weight }; /** * ipr_alloc_ucode_buffer - Allocates a microcode download buffer * @buf_len: buffer length * * Allocates a DMA'able buffer in chunks and assembles a scatter/gather * list to use for microcode download * * Return value: * pointer to sglist / NULL on failure **/ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) { int sg_size, order; struct ipr_sglist *sglist; /* Get the minimum size per scatter/gather element */ sg_size = buf_len / (IPR_MAX_SGLIST - 1); /* Get the actual size per element */ order = get_order(sg_size); /* Allocate a scatter/gather list for the DMA */ sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL); if (sglist == NULL) { ipr_trace; return NULL; } sglist->order = order; sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, &sglist->num_sg); if (!sglist->scatterlist) { kfree(sglist); return NULL; } return sglist; } /** * ipr_free_ucode_buffer - Frees a microcode download buffer * @sglist: scatter/gather list pointer * * Free a DMA'able ucode download buffer previously allocated with * ipr_alloc_ucode_buffer * * Return value: * nothing **/ static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) { sgl_free_order(sglist->scatterlist, sglist->order); kfree(sglist); } /** * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer * @sglist: scatter/gather list pointer * @buffer: buffer pointer * @len: buffer length * * Copy a microcode image from a user buffer into a buffer allocated by * ipr_alloc_ucode_buffer * * Return value: * 0 on success / other on failure **/ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, u8 *buffer, u32 len) { int bsize_elem, i, result = 0; struct scatterlist *sg; /* Determine the actual number of bytes per element */ bsize_elem = PAGE_SIZE * (1 << sglist->order); sg = sglist->scatterlist; for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) { struct page *page = sg_page(sg); memcpy_to_page(page, 0, buffer, bsize_elem); sg->length = bsize_elem; if (result != 0) { ipr_trace; return result; } } if (len % bsize_elem) { struct page *page = sg_page(sg); memcpy_to_page(page, 0, buffer, len % bsize_elem); sg->length = len % bsize_elem; } sglist->buffer_len = len; return result; } /** * ipr_build_ucode_ioadl64 - Build a microcode download IOADL * @ipr_cmd: ipr command struct * @sglist: scatter/gather list * * Builds a microcode download IOA data list (IOADL). 
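 * Each IOADL64 descriptor covers one mapped scatter/gather element (write
 * flag, length and 64-bit DMA address); the last descriptor is tagged with
 * IPR_IOADL_FLAGS_LAST.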
* **/ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, struct ipr_sglist *sglist) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; struct scatterlist *scatterlist = sglist->scatterlist; struct scatterlist *sg; int i; ipr_cmd->dma_use_sg = sglist->num_dma_sg; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); } ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); } /** * ipr_build_ucode_ioadl - Build a microcode download IOADL * @ipr_cmd: ipr command struct * @sglist: scatter/gather list * * Builds a microcode download IOA data list (IOADL). * **/ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, struct ipr_sglist *sglist) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; struct scatterlist *scatterlist = sglist->scatterlist; struct scatterlist *sg; int i; ipr_cmd->dma_use_sg = sglist->num_dma_sg; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { ioadl[i].flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg)); ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); } ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); } /** * ipr_update_ioa_ucode - Update IOA's microcode * @ioa_cfg: ioa config struct * @sglist: scatter/gather list * * Initiate an adapter reset to update the IOA's microcode * * Return value: * 0 on success / -EIO on failure **/ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, struct ipr_sglist *sglist) { unsigned long lock_flags; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); } if (ioa_cfg->ucode_sglist) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); dev_err(&ioa_cfg->pdev->dev, "Microcode download already in progress\n"); return -EIO; } sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, sglist->num_sg, DMA_TO_DEVICE); if (!sglist->num_dma_sg) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); dev_err(&ioa_cfg->pdev->dev, "Failed to map microcode download buffer!\n"); return -EIO; } ioa_cfg->ucode_sglist = sglist; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ioa_cfg->ucode_sglist = NULL; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } /** * ipr_store_update_fw - Update the firmware on the adapter * @dev: device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * This function will update the firmware on the adapter. 
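 * Illustrative usage (the image name is resolved by request_firmware(), so it
 * must be visible to the firmware loader; host number N is hypothetical):
 *   echo <ucode image file> > /sys/class/scsi_host/hostN/update_fw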
* * Return value: * count on success / other on failure **/ static ssize_t ipr_store_update_fw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_ucode_image_header *image_hdr; const struct firmware *fw_entry; struct ipr_sglist *sglist; char fname[100]; char *src; char *endline; int result, dnld_size; if (!capable(CAP_SYS_ADMIN)) return -EACCES; snprintf(fname, sizeof(fname), "%s", buf); endline = strchr(fname, '\n'); if (endline) *endline = '\0'; if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); return -EIO; } image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); sglist = ipr_alloc_ucode_buffer(dnld_size); if (!sglist) { dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); release_firmware(fw_entry); return -ENOMEM; } result = ipr_copy_ucode_buffer(sglist, src, dnld_size); if (result) { dev_err(&ioa_cfg->pdev->dev, "Microcode buffer copy to DMA buffer failed\n"); goto out; } ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n"); result = ipr_update_ioa_ucode(ioa_cfg, sglist); if (!result) result = count; out: ipr_free_ucode_buffer(sglist); release_firmware(fw_entry); return result; } static struct device_attribute ipr_update_fw_attr = { .attr = { .name = "update_fw", .mode = S_IWUSR, }, .store = ipr_store_update_fw }; /** * ipr_show_fw_type - Show the adapter's firmware type. * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_fw_type(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; unsigned long lock_flags = 0; int len; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct device_attribute ipr_ioa_fw_type_attr = { .attr = { .name = "fw_type", .mode = S_IRUGO, }, .show = ipr_show_fw_type }; static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(cdev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_hostrcb *hostrcb; unsigned long lock_flags = 0; int ret; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, struct ipr_hostrcb, queue); if (!hostrcb) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, sizeof(hostrcb->hcam)); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return ret; } static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(cdev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_hostrcb *hostrcb; 
unsigned long lock_flags = 0; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, struct ipr_hostrcb, queue); if (!hostrcb) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return count; } /* Reclaim hostrcb before exit */ list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return count; } static struct bin_attribute ipr_ioa_async_err_log = { .attr = { .name = "async_err_log", .mode = S_IRUGO | S_IWUSR, }, .size = 0, .read = ipr_read_async_err_log, .write = ipr_next_async_err_log }; static struct attribute *ipr_ioa_attrs[] = { &ipr_fw_version_attr.attr, &ipr_log_level_attr.attr, &ipr_diagnostics_attr.attr, &ipr_ioa_state_attr.attr, &ipr_ioa_reset_attr.attr, &ipr_update_fw_attr.attr, &ipr_ioa_fw_type_attr.attr, &ipr_iopoll_weight_attr.attr, NULL, }; ATTRIBUTE_GROUPS(ipr_ioa); #ifdef CONFIG_SCSI_IPR_DUMP /** * ipr_read_dump - Dump the adapter * @filp: open sysfs file * @kobj: kobject struct * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(cdev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; struct ipr_dump *dump; unsigned long lock_flags = 0; char *src; int len, sdt_end; size_t rc = count; if (!capable(CAP_SYS_ADMIN)) return -EACCES; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); dump = ioa_cfg->dump; if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } kref_get(&dump->kref); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); if (off > dump->driver_dump.hdr.len) { kref_put(&dump->kref, ipr_release_dump); return 0; } if (off + count > dump->driver_dump.hdr.len) { count = dump->driver_dump.hdr.len - off; rc = count; } if (count && off < sizeof(dump->driver_dump)) { if (off + count > sizeof(dump->driver_dump)) len = sizeof(dump->driver_dump) - off; else len = count; src = (u8 *)&dump->driver_dump + off; memcpy(buf, src, len); buf += len; off += len; count -= len; } off -= sizeof(dump->driver_dump); if (ioa_cfg->sis64) sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * sizeof(struct ipr_sdt_entry)); else sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); if (count && off < sdt_end) { if (off + count > sdt_end) len = sdt_end - off; else len = count; src = (u8 *)&dump->ioa_dump + off; memcpy(buf, src, len); buf += len; off += len; count -= len; } off -= sdt_end; while (count) { if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) len = PAGE_ALIGN(off) - off; else len = count; src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; src += off & ~PAGE_MASK; memcpy(buf, src, len); buf += len; off += len; count -= len; } kref_put(&dump->kref, ipr_release_dump); return rc; } /** * ipr_alloc_dump - Prepare for adapter dump * @ioa_cfg: ioa config struct * * Return value: * 0 on success / other on failure **/ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_dump *dump; __be32 **ioa_data; unsigned long lock_flags = 0; dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); if (!dump) 
{ ipr_err("Dump memory allocation failed\n"); return -ENOMEM; } if (ioa_cfg->sis64) ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES, sizeof(__be32 *))); else ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES, sizeof(__be32 *))); if (!ioa_data) { ipr_err("Dump memory allocation failed\n"); kfree(dump); return -ENOMEM; } dump->ioa_dump.ioa_data = ioa_data; kref_init(&dump->kref); dump->ioa_cfg = ioa_cfg; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (INACTIVE != ioa_cfg->sdt_state) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); vfree(dump->ioa_dump.ioa_data); kfree(dump); return 0; } ioa_cfg->dump = dump; ioa_cfg->sdt_state = WAIT_FOR_DUMP; if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { ioa_cfg->dump_taken = 1; schedule_work(&ioa_cfg->work_q); } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } /** * ipr_free_dump - Free adapter dump memory * @ioa_cfg: ioa config struct * * Return value: * 0 on success / other on failure **/ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_dump *dump; unsigned long lock_flags = 0; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); dump = ioa_cfg->dump; if (!dump) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } ioa_cfg->dump = NULL; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); kref_put(&dump->kref, ipr_release_dump); LEAVE; return 0; } /** * ipr_write_dump - Setup dump state of adapter * @filp: open sysfs file * @kobj: kobject struct * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *cdev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(cdev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (buf[0] == '1') rc = ipr_alloc_dump(ioa_cfg); else if (buf[0] == '0') rc = ipr_free_dump(ioa_cfg); else return -EINVAL; if (rc) return rc; else return count; } static struct bin_attribute ipr_dump_attr = { .attr = { .name = "dump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = ipr_read_dump, .write = ipr_write_dump }; #else static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; #endif /** * ipr_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set * * Return value: * actual depth set **/ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) { scsi_change_queue_depth(sdev, qdepth); return sdev->queue_depth; } /** * ipr_show_adapter_handle - Show the adapter's resource handle for this device * @dev: device struct * @attr: device attribute structure * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; ssize_t len = -ENXIO; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res) len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct 
device_attribute ipr_adapter_handle_attr = { .attr = { .name = "adapter_handle", .mode = S_IRUSR, }, .show = ipr_show_adapter_handle }; /** * ipr_show_resource_path - Show the resource path or the resource address for * this device. * @dev: device struct * @attr: device attribute structure * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; ssize_t len = -ENXIO; char buffer[IPR_MAX_RES_PATH_LENGTH]; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res && ioa_cfg->sis64) len = snprintf(buf, PAGE_SIZE, "%s\n", __ipr_format_res_path(res->res_path, buffer, sizeof(buffer))); else if (res) len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, res->bus, res->target, res->lun); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct device_attribute ipr_resource_path_attr = { .attr = { .name = "resource_path", .mode = S_IRUGO, }, .show = ipr_show_resource_path }; /** * ipr_show_device_id - Show the device_id for this device. * @dev: device struct * @attr: device attribute structure * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; ssize_t len = -ENXIO; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res && ioa_cfg->sis64) len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); else if (res) len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct device_attribute ipr_device_id_attr = { .attr = { .name = "device_id", .mode = S_IRUGO, }, .show = ipr_show_device_id }; /** * ipr_show_resource_type - Show the resource type for this device. 
* @dev: device struct * @attr: device attribute structure * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; ssize_t len = -ENXIO; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res) len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct device_attribute ipr_resource_type_attr = { .attr = { .name = "resource_type", .mode = S_IRUGO, }, .show = ipr_show_resource_type }; /** * ipr_show_raw_mode - Show the adapter's raw mode * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_show_raw_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; ssize_t len; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res) len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); else len = -ENXIO; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } /** * ipr_store_raw_mode - Change the adapter's raw mode * @dev: class device struct * @attr: device attribute (unused) * @buf: buffer * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ipr_store_raw_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; ssize_t len; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res) { if (ipr_is_af_dasd_device(res)) { res->raw_mode = simple_strtoul(buf, NULL, 10); len = strlen(buf); if (res->sdev) sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", res->raw_mode ? "enabled" : "disabled"); } else len = -EINVAL; } else len = -ENXIO; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } static struct device_attribute ipr_raw_mode_attr = { .attr = { .name = "raw_mode", .mode = S_IRUGO | S_IWUSR, }, .show = ipr_show_raw_mode, .store = ipr_store_raw_mode }; static struct attribute *ipr_dev_attrs[] = { &ipr_adapter_handle_attr.attr, &ipr_resource_path_attr.attr, &ipr_device_id_attr.attr, &ipr_resource_type_attr.attr, &ipr_raw_mode_attr.attr, NULL, }; ATTRIBUTE_GROUPS(ipr_dev); /** * ipr_biosparam - Return the HSC mapping * @sdev: scsi device struct * @block_device: block device pointer * @capacity: capacity of the device * @parm: Array containing returned HSC values. * * This function generates the HSC parms that fdisk uses. * We want to make sure we return something that places partitions * on 4k boundaries for best performance with the IOA. 
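 * With the fixed geometry used below (128 heads, 32 sectors per track), one
 * cylinder is 128 * 32 = 4096 sectors, i.e. 2 MB with 512-byte sectors, so
 * cylinder-aligned partitions start on 4 KB boundaries.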
* * Return value: * 0 on success **/ static int ipr_biosparam(struct scsi_device *sdev, struct block_device *block_device, sector_t capacity, int *parm) { int heads, sectors; sector_t cylinders; heads = 128; sectors = 32; cylinders = capacity; sector_div(cylinders, (128 * 32)); /* return result */ parm[0] = heads; parm[1] = sectors; parm[2] = cylinders; return 0; } /** * ipr_find_starget - Find target based on bus/target. * @starget: scsi target struct * * Return value: * resource entry pointer if found / NULL if not found **/ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; struct ipr_resource_entry *res; list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if ((res->bus == starget->channel) && (res->target == starget->id)) { return res; } } return NULL; } /** * ipr_target_destroy - Destroy a SCSI target * @starget: scsi target struct * **/ static void ipr_target_destroy(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; if (ioa_cfg->sis64) { if (!ipr_find_starget(starget)) { if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) clear_bit(starget->id, ioa_cfg->array_ids); else if (starget->channel == IPR_VSET_VIRTUAL_BUS) clear_bit(starget->id, ioa_cfg->vset_ids); else if (starget->channel == 0) clear_bit(starget->id, ioa_cfg->target_ids); } } } /** * ipr_find_sdev - Find device based on bus/target/lun. * @sdev: scsi device struct * * Return value: * resource entry pointer if found / NULL if not found **/ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_resource_entry *res; list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if ((res->bus == sdev->channel) && (res->target == sdev->id) && (res->lun == sdev->lun)) return res; } return NULL; } /** * ipr_slave_destroy - Unconfigure a SCSI device * @sdev: scsi device struct * * Return value: * nothing **/ static void ipr_slave_destroy(struct scsi_device *sdev) { struct ipr_resource_entry *res; struct ipr_ioa_cfg *ioa_cfg; unsigned long lock_flags = 0; ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *) sdev->hostdata; if (res) { sdev->hostdata = NULL; res->sdev = NULL; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } /** * ipr_slave_configure - Configure a SCSI device * @sdev: scsi device struct * * This function configures the specified scsi device. 
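 * AF DASD devices are reported as TYPE_RAID, AF DASD and IOA resources are
 * hidden from upper-layer drivers via no_uld_attach, and volume sets get the
 * SPC-3 scsi_level plus a larger request timeout (IPR_VSET_RW_TIMEOUT) and a
 * capped transfer size (IPR_VSET_MAX_SECTORS).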
* * Return value: * 0 on success **/ static int ipr_slave_configure(struct scsi_device *sdev) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; char buffer[IPR_MAX_RES_PATH_LENGTH]; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = sdev->hostdata; if (res) { if (ipr_is_af_dasd_device(res)) sdev->type = TYPE_RAID; if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { sdev->scsi_level = 4; sdev->no_uld_attach = 1; } if (ipr_is_vset_device(res)) { sdev->scsi_level = SCSI_SPC_3; sdev->no_report_opcodes = 1; blk_queue_rq_timeout(sdev->request_queue, IPR_VSET_RW_TIMEOUT); blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->sis64) sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", ipr_format_res_path(ioa_cfg, res->res_path, buffer, sizeof(buffer))); return 0; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return 0; } /** * ipr_slave_alloc - Prepare for commands to a device. * @sdev: scsi device struct * * This function saves a pointer to the resource entry * in the scsi device struct if the device exists. We * can then use this pointer in ipr_queuecommand when * handling new commands. * * Return value: * 0 on success / -ENXIO if device does not exist **/ static int ipr_slave_alloc(struct scsi_device *sdev) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags; int rc = -ENXIO; sdev->hostdata = NULL; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = ipr_find_sdev(sdev); if (res) { res->sdev = sdev; res->add_to_ml = 0; res->in_erp = 0; sdev->hostdata = res; if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; rc = 0; if (ipr_is_gata(res)) { sdev_printk(KERN_ERR, sdev, "SATA devices are no longer " "supported by this driver. 
Skipping device.\n"); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return -ENXIO; } } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return rc; } /** * ipr_match_lun - Match function for specified LUN * @ipr_cmd: ipr command struct * @device: device to match (sdev) * * Returns: * 1 if command matches sdev / 0 if command does not match sdev **/ static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) { if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) return 1; return 0; } /** * ipr_cmnd_is_free - Check if a command is free or not * @ipr_cmd: ipr command struct * * Returns: * true / false **/ static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd) { struct ipr_cmnd *loop_cmd; list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { if (loop_cmd == ipr_cmd) return true; } return false; } /** * ipr_wait_for_ops - Wait for matching commands to complete * @ioa_cfg: ioa config struct * @device: device to match (sdev) * @match: match function to use * * Returns: * SUCCESS / FAILED **/ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, int (*match)(struct ipr_cmnd *, void *)) { struct ipr_cmnd *ipr_cmd; int wait, i; unsigned long flags; struct ipr_hrr_queue *hrrq; signed long timeout = IPR_ABORT_TASK_TIMEOUT; DECLARE_COMPLETION_ONSTACK(comp); ENTER; do { wait = 0; for_each_hrrq(hrrq, ioa_cfg) { spin_lock_irqsave(hrrq->lock, flags); for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; if (!ipr_cmnd_is_free(ipr_cmd)) { if (match(ipr_cmd, device)) { ipr_cmd->eh_comp = &comp; wait++; } } } spin_unlock_irqrestore(hrrq->lock, flags); } if (wait) { timeout = wait_for_completion_timeout(&comp, timeout); if (!timeout) { wait = 0; for_each_hrrq(hrrq, ioa_cfg) { spin_lock_irqsave(hrrq->lock, flags); for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; if (!ipr_cmnd_is_free(ipr_cmd)) { if (match(ipr_cmd, device)) { ipr_cmd->eh_comp = NULL; wait++; } } } spin_unlock_irqrestore(hrrq->lock, flags); } if (wait) dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); LEAVE; return wait ? FAILED : SUCCESS; } } } while (wait); LEAVE; return SUCCESS; } static int ipr_eh_host_reset(struct scsi_cmnd *cmd) { struct ipr_ioa_cfg *ioa_cfg; unsigned long lock_flags = 0; int rc = SUCCESS; ENTER; ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); dev_err(&ioa_cfg->pdev->dev, "Adapter being reset as a result of error recovery.\n"); if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) ioa_cfg->sdt_state = GET_DUMP; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); /* If we got hit with a host reset while we were already resetting the adapter for some reason, and the reset failed. */ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_trace; rc = FAILED; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; return rc; } /** * ipr_device_reset - Reset the device * @ioa_cfg: ioa config struct * @res: resource entry struct * * This function issues a device reset to the affected device. * If the device is a SCSI device, a LUN reset will be sent * to the device first. If that does not work, a target reset * will be sent. 
* * Return value: * 0 on success / non-zero on failure **/ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, struct ipr_resource_entry *res) { struct ipr_cmnd *ipr_cmd; struct ipr_ioarcb *ioarcb; struct ipr_cmd_pkt *cmd_pkt; u32 ioasc; ENTER; ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioarcb = &ipr_cmd->ioarcb; cmd_pkt = &ioarcb->cmd_pkt; if (ipr_cmd->ioa_cfg->sis64) ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); ioarcb->res_handle = res->res_handle; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_RESET_DEVICE; ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); LEAVE; return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; } /** * __ipr_eh_dev_reset - Reset the device * @scsi_cmd: scsi command struct * * This function issues a device reset to the affected device. * A LUN reset will be sent to the device first. If that does * not work, a target reset will be sent. * * Return value: * SUCCESS / FAILED **/ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) { struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; int rc = 0; ENTER; ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; res = scsi_cmd->device->hostdata; /* * If we are currently going through reset/reload, return failed. This will force the * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the * reset to complete */ if (ioa_cfg->in_reset_reload) return FAILED; if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) return FAILED; res->resetting_device = 1; scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); rc = ipr_device_reset(ioa_cfg, res); res->resetting_device = 0; res->reset_occurred = 1; LEAVE; return rc ? FAILED : SUCCESS; } static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) { int rc; struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; res = cmd->device->hostdata; if (!res) return FAILED; spin_lock_irq(cmd->device->host->host_lock); rc = __ipr_eh_dev_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); if (rc == SUCCESS) rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); return rc; } /** * ipr_bus_reset_done - Op done function for bus reset. * @ipr_cmd: ipr command struct * * This function is the op done function for a bus reset * * Return value: * none **/ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_resource_entry *res; ENTER; if (!ioa_cfg->sis64) list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->res_handle == ipr_cmd->ioarcb.res_handle) { scsi_report_bus_reset(ioa_cfg->host, res->bus); break; } } /* * If abort has not completed, indicate the reset has, else call the * abort's done function to wake the sleeping eh thread */ if (ipr_cmd->sibling->sibling) ipr_cmd->sibling->sibling = NULL; else ipr_cmd->sibling->done(ipr_cmd->sibling); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); LEAVE; } /** * ipr_abort_timeout - An abort task has timed out * @t: Timer context used to fetch ipr command struct * * This function handles when an abort task times out. If this * happens we issue a bus reset since we have resources tied * up that must be freed before returning to the midlayer. 
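 * The bus reset is built as an IPR_RESET_DEVICE command with the
 * IPR_BUS_RESET type select and completes through ipr_bus_reset_done().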
* * Return value: * none **/ static void ipr_abort_timeout(struct timer_list *t) { struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); struct ipr_cmnd *reset_cmd; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_cmd_pkt *cmd_pkt; unsigned long lock_flags = 0; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ipr_cmd->sibling = reset_cmd; reset_cmd->sibling = ipr_cmd; reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_RESET_DEVICE; cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; } /** * ipr_cancel_op - Cancel specified op * @scsi_cmd: scsi command struct * * This function cancels specified op. * * Return value: * SUCCESS / FAILED **/ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) { struct ipr_cmnd *ipr_cmd; struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; struct ipr_cmd_pkt *cmd_pkt; u32 ioasc; int i, op_found = 0; struct ipr_hrr_queue *hrrq; ENTER; ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; res = scsi_cmd->device->hostdata; /* If we are currently going through reset/reload, return failed. * This will force the mid-layer to call ipr_eh_host_reset, * which will then go to sleep and wait for the reset to complete */ if (ioa_cfg->in_reset_reload || ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) return FAILED; if (!res) return FAILED; /* * If we are aborting a timed out op, chances are that the timeout was caused * by a still not detected EEH error. In such cases, reading a register will * trigger the EEH recovery infrastructure. */ readl(ioa_cfg->regs.sense_interrupt_reg); if (!ipr_is_gscsi(res)) return FAILED; for_each_hrrq(hrrq, ioa_cfg) { spin_lock(&hrrq->_lock); for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { op_found = 1; break; } } } spin_unlock(&hrrq->_lock); } if (!op_found) return SUCCESS; ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ipr_cmd->ioarcb.res_handle = res->res_handle; cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; ipr_cmd->u.sdev = scsi_cmd->device; scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); /* * If the abort task timed out and we sent a bus reset, we will get * one the following responses to the abort */ if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { ioasc = 0; ipr_trace; } list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; LEAVE; return IPR_IOASC_SENSE_KEY(ioasc) ? 
		FAILED : SUCCESS;
}

/**
 * ipr_scan_finished - Report whether scan is done
 * @shost:		scsi host struct
 * @elapsed_time:	elapsed time
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	int rc = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
		rc = 1;
	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
		rc = 1;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_eh_abort - Abort the specified command
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ENTER;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE,
				       ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 
0x%08X\n", int_reg); if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) ioa_cfg->sdt_state = GET_DUMP; ipr_mask_and_clear_interrupts(ioa_cfg, ~0); ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); } return rc; } /** * ipr_isr_eh - Interrupt service routine error handler * @ioa_cfg: ioa config struct * @msg: message to log * @number: various meanings depending on the caller/message * * Return value: * none **/ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) { ioa_cfg->errors_logged++; dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) ioa_cfg->sdt_state = GET_DUMP; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); } static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, struct list_head *doneq) { u32 ioasc; u16 cmd_index; struct ipr_cmnd *ipr_cmd; struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; int num_hrrq = 0; /* If interrupts are disabled, ignore the interrupt */ if (!hrr_queue->allow_interrupts) return 0; while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == hrr_queue->toggle_bit) { cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; if (unlikely(cmd_index > hrr_queue->max_cmd_id || cmd_index < hrr_queue->min_cmd_id)) { ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA: ", cmd_index); break; } ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); list_move_tail(&ipr_cmd->queue, doneq); if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { hrr_queue->hrrq_curr++; } else { hrr_queue->hrrq_curr = hrr_queue->hrrq_start; hrr_queue->toggle_bit ^= 1u; } num_hrrq++; if (budget > 0 && num_hrrq >= budget) break; } return num_hrrq; } static int ipr_iopoll(struct irq_poll *iop, int budget) { struct ipr_hrr_queue *hrrq; struct ipr_cmnd *ipr_cmd, *temp; unsigned long hrrq_flags; int completed_ops; LIST_HEAD(doneq); hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); spin_lock_irqsave(hrrq->lock, hrrq_flags); completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); if (completed_ops < budget) irq_poll_complete(iop); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { list_del(&ipr_cmd->queue); del_timer(&ipr_cmd->timer); ipr_cmd->fast_done(ipr_cmd); } return completed_ops; } /** * ipr_isr - Interrupt service routine * @irq: irq number * @devp: pointer to ioa config struct * * Return value: * IRQ_NONE / IRQ_HANDLED **/ static irqreturn_t ipr_isr(int irq, void *devp) { struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; unsigned long hrrq_flags = 0; u32 int_reg = 0; int num_hrrq = 0; int irq_none = 0; struct ipr_cmnd *ipr_cmd, *temp; irqreturn_t rc = IRQ_NONE; LIST_HEAD(doneq); spin_lock_irqsave(hrrq->lock, hrrq_flags); /* If interrupts are disabled, ignore the interrupt */ if (!hrrq->allow_interrupts) { spin_unlock_irqrestore(hrrq->lock, hrrq_flags); return IRQ_NONE; } while (1) { if (ipr_process_hrrq(hrrq, -1, &doneq)) { rc = IRQ_HANDLED; if (!ioa_cfg->clear_isr) break; /* Clear the PCI interrupt */ num_hrrq = 0; do { writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); } while (int_reg & IPR_PCII_HRRQ_UPDATED && num_hrrq++ < IPR_MAX_HRRQ_RETRIES); } else if (rc == IRQ_NONE && irq_none == 0) { int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); irq_none++; } else if (num_hrrq == 
IPR_MAX_HRRQ_RETRIES && int_reg & IPR_PCII_HRRQ_UPDATED) { ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq); rc = IRQ_HANDLED; break; } else break; } if (unlikely(rc == IRQ_NONE)) rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { list_del(&ipr_cmd->queue); del_timer(&ipr_cmd->timer); ipr_cmd->fast_done(ipr_cmd); } return rc; } /** * ipr_isr_mhrrq - Interrupt service routine * @irq: irq number * @devp: pointer to ioa config struct * * Return value: * IRQ_NONE / IRQ_HANDLED **/ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) { struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; unsigned long hrrq_flags = 0; struct ipr_cmnd *ipr_cmd, *temp; irqreturn_t rc = IRQ_NONE; LIST_HEAD(doneq); spin_lock_irqsave(hrrq->lock, hrrq_flags); /* If interrupts are disabled, ignore the interrupt */ if (!hrrq->allow_interrupts) { spin_unlock_irqrestore(hrrq->lock, hrrq_flags); return IRQ_NONE; } if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit) { irq_poll_sched(&hrrq->iopoll); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); return IRQ_HANDLED; } } else { if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit) if (ipr_process_hrrq(hrrq, -1, &doneq)) rc = IRQ_HANDLED; } spin_unlock_irqrestore(hrrq->lock, hrrq_flags); list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { list_del(&ipr_cmd->queue); del_timer(&ipr_cmd->timer); ipr_cmd->fast_done(ipr_cmd); } return rc; } /** * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer * @ioa_cfg: ioa config struct * @ipr_cmd: ipr command struct * * Return value: * 0 on success / -1 on failure **/ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, struct ipr_cmnd *ipr_cmd) { int i, nseg; struct scatterlist *sg; u32 length; u32 ioadl_flags = 0; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; length = scsi_bufflen(scsi_cmd); if (!length) return 0; nseg = scsi_dma_map(scsi_cmd); if (nseg < 0) { if (printk_ratelimit()) dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); return -1; } ipr_cmd->dma_use_sg = nseg; ioarcb->data_transfer_length = cpu_to_be32(length); ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_WRITE; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) ioadl_flags = IPR_IOADL_FLAGS_READ; scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { ioadl64[i].flags = cpu_to_be32(ioadl_flags); ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); } ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); return 0; } /** * ipr_build_ioadl - Build a scatter/gather list and map the buffer * @ioa_cfg: ioa config struct * @ipr_cmd: ipr command struct * * Return value: * 0 on success / -1 on failure **/ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, struct ipr_cmnd *ipr_cmd) { int i, nseg; struct scatterlist *sg; u32 length; u32 ioadl_flags = 0; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; length = scsi_bufflen(scsi_cmd); 
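	/* A zero-length command needs no scatter/gather list */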
if (!length) return 0; nseg = scsi_dma_map(scsi_cmd); if (nseg < 0) { dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); return -1; } ipr_cmd->dma_use_sg = nseg; if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_WRITE; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; ioarcb->data_transfer_length = cpu_to_be32(length); ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_READ; ioarcb->read_data_transfer_length = cpu_to_be32(length); ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); } if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { ioadl = ioarcb->u.add_data.u.ioadl; ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + offsetof(struct ipr_ioarcb, u.add_data)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; } scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { ioadl[i].flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); } ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); return 0; } /** * __ipr_erp_done - Process completion of ERP for a device * @ipr_cmd: ipr command struct * * This function copies the sense buffer into the scsi_cmd * struct and pushes the scsi_done function. * * Return value: * nothing **/ static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd) { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { scsi_cmd->result |= (DID_ERROR << 16); scmd_printk(KERN_ERR, scsi_cmd, "Request Sense failed with IOASC: 0x%08X\n", ioasc); } else { memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); } if (res) { if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; res->in_erp = 0; } scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_done(scsi_cmd); if (ipr_cmd->eh_comp) complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** * ipr_erp_done - Process completion of ERP for a device * @ipr_cmd: ipr command struct * * This function copies the sense buffer into the scsi_cmd * struct and pushes the scsi_done function. 
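 * This wrapper only takes the HRRQ lock around __ipr_erp_done().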
* * Return value: * nothing **/ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) { struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; unsigned long hrrq_flags; spin_lock_irqsave(&hrrq->_lock, hrrq_flags); __ipr_erp_done(ipr_cmd); spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); } /** * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP * @ipr_cmd: ipr command struct * * Return value: * none **/ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); ioarcb->data_transfer_length = 0; ioarcb->read_data_transfer_length = 0; ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; ioasa->hdr.ioasc = 0; ioasa->hdr.residual_data_len = 0; if (ipr_cmd->ioa_cfg->sis64) ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; } } /** * __ipr_erp_request_sense - Send request sense to a device * @ipr_cmd: ipr command struct * * This function sends a request sense to a device as a result * of a check condition. * * Return value: * nothing **/ static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) { struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { __ipr_erp_done(ipr_cmd); return; } ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; cmd_pkt->cdb[0] = REQUEST_SENSE; cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, IPR_REQUEST_SENSE_TIMEOUT * 2); } /** * ipr_erp_request_sense - Send request sense to a device * @ipr_cmd: ipr command struct * * This function sends a request sense to a device as a result * of a check condition. * * Return value: * nothing **/ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) { struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; unsigned long hrrq_flags; spin_lock_irqsave(&hrrq->_lock, hrrq_flags); __ipr_erp_request_sense(ipr_cmd); spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); } /** * ipr_erp_cancel_all - Send cancel all to a device * @ipr_cmd: ipr command struct * * This function sends a cancel all to a device to clear the * queue. If we are running TCQ on the device, QERR is set to 1, * which means all outstanding ops have been dropped on the floor. * Cancel all will return them to us. 
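 * Devices that are not using simple tags skip the cancel and go straight
 * to request sense.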
* * Return value: * nothing **/ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; struct ipr_cmd_pkt *cmd_pkt; res->in_erp = 1; ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); if (!scsi_cmd->device->simple_tags) { __ipr_erp_request_sense(ipr_cmd); return; } cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, IPR_CANCEL_ALL_TIMEOUT); } /** * ipr_dump_ioasa - Dump contents of IOASA * @ioa_cfg: ioa config struct * @ipr_cmd: ipr command struct * @res: resource entry struct * * This function is invoked by the interrupt handler when ops * fail. It will log the IOASA if appropriate. Only called * for GPDD ops. * * Return value: * none **/ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) { int i; u16 data_len; u32 ioasc, fd_ioasc; struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; __be32 *ioasa_data = (__be32 *)ioasa; int error_index; ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; if (0 == ioasc) return; if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) return; if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) error_index = ipr_get_error(fd_ioasc); else error_index = ipr_get_error(ioasc); if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { /* Don't log an error if the IOA already logged one */ if (ioasa->hdr.ilid != 0) return; if (!ipr_is_gscsi(res)) return; if (ipr_error_table[error_index].log_ioasa == 0) return; } ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) data_len = sizeof(struct ipr_ioasa64); else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) data_len = sizeof(struct ipr_ioasa); ipr_err("IOASA Dump:\n"); for (i = 0; i < data_len / 4; i += 4) { ipr_err("%08X: %08X %08X %08X %08X\n", i*4, be32_to_cpu(ioasa_data[i]), be32_to_cpu(ioasa_data[i+1]), be32_to_cpu(ioasa_data[i+2]), be32_to_cpu(ioasa_data[i+3])); } } /** * ipr_gen_sense - Generate SCSI sense data from an IOASA * @ipr_cmd: ipr command struct * * Return value: * none **/ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) { u32 failing_lba; u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); if (ioasc >= IPR_FIRST_DRIVER_IOASC) return; ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; if (ipr_is_vset_device(res) && ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && ioasa->u.vset.failing_lba_hi != 0) { sense_buf[0] = 0x72; sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); sense_buf[7] = 12; sense_buf[8] = 0; sense_buf[9] = 0x0A; sense_buf[10] = 0x80; failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); sense_buf[12] = (failing_lba & 0xff000000) >> 24; sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; sense_buf[15] = failing_lba & 0x000000ff; failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); sense_buf[16] = (failing_lba & 0xff000000) >> 24; sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; 
sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; sense_buf[19] = failing_lba & 0x000000ff; } else { sense_buf[0] = 0x70; sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); /* Illegal request */ if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { sense_buf[7] = 10; /* additional length */ /* IOARCB was in error */ if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) sense_buf[15] = 0xC0; else /* Parameter data was invalid */ sense_buf[15] = 0x80; sense_buf[16] = ((IPR_FIELD_POINTER_MASK & be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; sense_buf[17] = (IPR_FIELD_POINTER_MASK & be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; } else { if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { if (ipr_is_vset_device(res)) failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); else failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); sense_buf[0] |= 0x80; /* Or in the Valid bit */ sense_buf[3] = (failing_lba & 0xff000000) >> 24; sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; sense_buf[6] = failing_lba & 0x000000ff; } sense_buf[7] = 6; /* additional length */ } } } /** * ipr_get_autosense - Copy autosense data to sense buffer * @ipr_cmd: ipr command struct * * This function copies the autosense buffer to the buffer * in the scsi_cmd, if there is autosense available. * * Return value: * 1 if autosense was available / 0 if not **/ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) { struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) return 0; if (ipr_cmd->ioa_cfg->sis64) memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), SCSI_SENSE_BUFFERSIZE)); else memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), SCSI_SENSE_BUFFERSIZE)); return 1; } /** * ipr_erp_start - Process an error response for a SCSI op * @ioa_cfg: ioa config struct * @ipr_cmd: ipr command struct * * This function determines whether or not to initiate ERP * on the affected device. 
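 * For non-GSCSI devices (other than device bus status failures) the IOASC
 * is first translated into SCSI sense data via ipr_gen_sense().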
* * Return value: * nothing **/ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, struct ipr_cmnd *ipr_cmd) { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; if (!res) { __ipr_scsi_eh_done(ipr_cmd); return; } if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) ipr_gen_sense(ipr_cmd); ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); switch (masked_ioasc) { case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: if (ipr_is_naca_model(res)) scsi_cmd->result |= (DID_ABORT << 16); else scsi_cmd->result |= (DID_IMM_RETRY << 16); break; case IPR_IOASC_IR_RESOURCE_HANDLE: case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: scsi_cmd->result |= (DID_NO_CONNECT << 16); break; case IPR_IOASC_HW_SEL_TIMEOUT: scsi_cmd->result |= (DID_NO_CONNECT << 16); if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; break; case IPR_IOASC_SYNC_REQUIRED: if (!res->in_erp) res->needs_sync_complete = 1; scsi_cmd->result |= (DID_IMM_RETRY << 16); break; case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ case IPR_IOASA_IR_DUAL_IOA_DISABLED: /* * exception: do not set DID_PASSTHROUGH on CHECK CONDITION * so SCSI mid-layer and upper layers handle it accordingly. */ if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) scsi_cmd->result |= (DID_PASSTHROUGH << 16); break; case IPR_IOASC_BUS_WAS_RESET: case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: /* * Report the bus reset and ask for a retry. The device * will give CC/UA the next command. */ if (!res->resetting_device) scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); scsi_cmd->result |= (DID_ERROR << 16); if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; break; case IPR_IOASC_HW_DEV_BUS_STATUS: scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { if (!ipr_get_autosense(ipr_cmd)) { if (!ipr_is_naca_model(res)) { ipr_erp_cancel_all(ipr_cmd); return; } } } if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; break; case IPR_IOASC_NR_INIT_CMD_REQUIRED: break; case IPR_IOASC_IR_NON_OPTIMIZED: if (res->raw_mode) { res->raw_mode = 0; scsi_cmd->result |= (DID_IMM_RETRY << 16); } else scsi_cmd->result |= (DID_ERROR << 16); break; default: if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) scsi_cmd->result |= (DID_ERROR << 16); if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) res->needs_sync_complete = 1; break; } scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_done(scsi_cmd); if (ipr_cmd->eh_comp) complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** * ipr_scsi_done - mid-layer done function * @ipr_cmd: ipr command struct * * This function is invoked by the interrupt handler for * ops generated by the SCSI mid-layer * * Return value: * none **/ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); unsigned long lock_flags; scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { scsi_dma_unmap(scsi_cmd); spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); scsi_done(scsi_cmd); if (ipr_cmd->eh_comp) complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } else { spin_lock_irqsave(ioa_cfg->host->host_lock, 
lock_flags); spin_lock(&ipr_cmd->hrrq->_lock); ipr_erp_start(ioa_cfg, ipr_cmd); spin_unlock(&ipr_cmd->hrrq->_lock); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } } /** * ipr_queuecommand - Queue a mid-layer request * @shost: scsi host struct * @scsi_cmd: scsi command struct * * This function queues a request generated by the mid-layer. * * Return value: * 0 on success * SCSI_MLQUEUE_DEVICE_BUSY if device is busy * SCSI_MLQUEUE_HOST_BUSY if host is busy **/ static int ipr_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scsi_cmd) { struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; struct ipr_ioarcb *ioarcb; struct ipr_cmnd *ipr_cmd; unsigned long hrrq_flags; int rc; struct ipr_hrr_queue *hrrq; int hrrq_id; ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; scsi_cmd->result = (DID_OK << 16); res = scsi_cmd->device->hostdata; hrrq_id = ipr_get_hrrq_index(ioa_cfg); hrrq = &ioa_cfg->hrrq[hrrq_id]; spin_lock_irqsave(hrrq->lock, hrrq_flags); /* * We are currently blocking all devices due to a host reset * We have told the host to stop giving us new requests, but * ERP ops don't count. FIXME */ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { spin_unlock_irqrestore(hrrq->lock, hrrq_flags); return SCSI_MLQUEUE_HOST_BUSY; } /* * FIXME - Create scsi_set_host_offline interface * and the ioa_is_dead check can be removed */ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { spin_unlock_irqrestore(hrrq->lock, hrrq_flags); goto err_nodev; } ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); if (ipr_cmd == NULL) { spin_unlock_irqrestore(hrrq->lock, hrrq_flags); return SCSI_MLQUEUE_HOST_BUSY; } spin_unlock_irqrestore(hrrq->lock, hrrq_flags); ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); ioarcb = &ipr_cmd->ioarcb; memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); ipr_cmd->scsi_cmd = scsi_cmd; ipr_cmd->done = ipr_scsi_eh_done; if (ipr_is_gscsi(res)) { if (scsi_cmd->underflow == 0) ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; if (res->reset_occurred) { res->reset_occurred = 0; ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; } } if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; if (scsi_cmd->flags & SCMD_TAGGED) ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; else ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; } if (scsi_cmd->cmnd[0] >= 0xC0 && (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; } if (res->raw_mode && ipr_is_af_dasd_device(res)) { ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; if (scsi_cmd->underflow == 0) ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; } if (ioa_cfg->sis64) rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); else rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); spin_lock_irqsave(hrrq->lock, hrrq_flags); if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); if (!rc) scsi_dma_unmap(scsi_cmd); return SCSI_MLQUEUE_HOST_BUSY; } if (unlikely(hrrq->ioa_is_dead)) { list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); scsi_dma_unmap(scsi_cmd); goto err_nodev; } ioarcb->res_handle = res->res_handle; if (res->needs_sync_complete) { ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; res->needs_sync_complete = 0; } list_add_tail(&ipr_cmd->queue, 
		      &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static const struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.scan_finished = ipr_scan_finished,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.shost_groups = ipr_ioa_groups,
	.sdev_groups = ipr_dev_groups,
	.proc_name = IPR_NAME,
};

#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PVR_NORTHSTAR,
	PVR_PULSAR,
	PVR_POWER4,
	PVR_ICESTAR,
	PVR_SSTAR,
	PVR_POWER4p,
	PVR_630,
	PVR_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif

/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	ENTER;
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ipr_trace;
		ioa_cfg->scsi_unblock = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].ioa_is_dead = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
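 * Command processing is re-enabled on every HRRQ and the HCAMs are
 * re-issued before the SCSI mid-layer is unblocked.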
* It schedules any necessary mid-layer add/removes and * wakes any reset sleepers. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_resource_entry *res; int j; ENTER; ioa_cfg->in_reset_reload = 0; for (j = 0; j < ioa_cfg->hrrq_num; j++) { spin_lock(&ioa_cfg->hrrq[j]._lock); ioa_cfg->hrrq[j].allow_cmds = 1; spin_unlock(&ioa_cfg->hrrq[j]._lock); } wmb(); ioa_cfg->reset_cmd = NULL; ioa_cfg->doorbell |= IPR_RUNTIME_RESET; list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->add_to_ml || res->del_from_ml) { ipr_trace; break; } } schedule_work(&ioa_cfg->work_q); for (j = 0; j < IPR_NUM_HCAMS; j++) { list_del_init(&ioa_cfg->hostrcb[j]->queue); if (j < IPR_NUM_LOG_HCAMS) ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, ioa_cfg->hostrcb[j]); else ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, ioa_cfg->hostrcb[j]); } scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); ioa_cfg->reset_retries = 0; list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); wake_up_all(&ioa_cfg->reset_wait_q); ioa_cfg->scsi_unblock = 1; schedule_work(&ioa_cfg->work_q); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer * @supported_dev: supported device struct * @vpids: vendor product id struct * * Return value: * none **/ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, struct ipr_std_inq_vpids *vpids) { memset(supported_dev, 0, sizeof(struct ipr_supported_device)); memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); supported_dev->num_records = 1; supported_dev->data_length = cpu_to_be16(sizeof(struct ipr_supported_device)); supported_dev->reserved = 0; } /** * ipr_set_supported_devs - Send Set Supported Devices for a device * @ipr_cmd: ipr command struct * * This function sends a Set Supported Devices to the adapter * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_resource_entry *res = ipr_cmd->u.res; ipr_cmd->job_step = ipr_ioa_reset_done; list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { if (!ipr_is_scsi_disk(res)) continue; ipr_cmd->u.res = res; ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; ipr_init_ioadl(ipr_cmd, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, supp_dev), sizeof(struct ipr_supported_device), IPR_IOADL_FLAGS_WRITE_LAST); ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_SET_SUP_DEVICE_TIMEOUT); if (!ioa_cfg->sis64) ipr_cmd->job_step = ipr_set_supported_devs; LEAVE; return IPR_RC_JOB_RETURN; } LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_get_mode_page - Locate specified mode page * @mode_pages: mode page buffer * @page_code: page code to find * @len: minimum required length for mode page * * Return value: * pointer to 
mode page / NULL on failure **/ static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, u32 page_code, u32 len) { struct ipr_mode_page_hdr *mode_hdr; u32 page_length; u32 length; if (!mode_pages || (mode_pages->hdr.length == 0)) return NULL; length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; mode_hdr = (struct ipr_mode_page_hdr *) (mode_pages->data + mode_pages->hdr.block_desc_len); while (length) { if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) return mode_hdr; break; } else { page_length = (sizeof(struct ipr_mode_page_hdr) + mode_hdr->page_length); length -= page_length; mode_hdr = (struct ipr_mode_page_hdr *) ((unsigned long)mode_hdr + page_length); } } return NULL; } /** * ipr_check_term_power - Check for term power errors * @ioa_cfg: ioa config struct * @mode_pages: IOAFP mode pages buffer * * Check the IOAFP's mode page 28 for term power errors * * Return value: * nothing **/ static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, struct ipr_mode_pages *mode_pages) { int i; int entry_length; struct ipr_dev_bus_entry *bus; struct ipr_mode_page28 *mode_page; mode_page = ipr_get_mode_page(mode_pages, 0x28, sizeof(struct ipr_mode_page28)); entry_length = mode_page->entry_length; bus = mode_page->bus; for (i = 0; i < mode_page->num_entries; i++) { if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { dev_err(&ioa_cfg->pdev->dev, "Term power is absent on scsi bus %d\n", bus->res_addr.bus); } bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length); } } /** * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table * @ioa_cfg: ioa config struct * * Looks through the config table checking for SES devices. If * the SES device is in the SES table indicating a maximum SCSI * bus speed, the speed is limited for the bus. 
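 * The limit is applied by lowering bus_attr[i].max_xfer_rate for that bus.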
* * Return value: * none **/ static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) { u32 max_xfer_rate; int i; for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, ioa_cfg->bus_attr[i].bus_width); if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; } } /** * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 * @ioa_cfg: ioa config struct * @mode_pages: mode page 28 buffer * * Updates mode page 28 based on driver configuration * * Return value: * none **/ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, struct ipr_mode_pages *mode_pages) { int i, entry_length; struct ipr_dev_bus_entry *bus; struct ipr_bus_attributes *bus_attr; struct ipr_mode_page28 *mode_page; mode_page = ipr_get_mode_page(mode_pages, 0x28, sizeof(struct ipr_mode_page28)); entry_length = mode_page->entry_length; /* Loop for each device bus entry */ for (i = 0, bus = mode_page->bus; i < mode_page->num_entries; i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { dev_err(&ioa_cfg->pdev->dev, "Invalid resource address reported: 0x%08X\n", IPR_GET_PHYS_LOC(bus->res_addr)); continue; } bus_attr = &ioa_cfg->bus_attr[i]; bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; bus->bus_width = bus_attr->bus_width; bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; if (bus_attr->qas_enabled) bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; else bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; } } /** * ipr_build_mode_select - Build a mode select command * @ipr_cmd: ipr command struct * @res_handle: resource handle to send command to * @parm: Byte 2 of Mode Sense command * @dma_addr: DMA buffer address * @xfer_len: data transfer length * * Return value: * none **/ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, __be32 res_handle, u8 parm, dma_addr_t dma_addr, u8 xfer_len) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ioarcb->res_handle = res_handle; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; ioarcb->cmd_pkt.cdb[1] = parm; ioarcb->cmd_pkt.cdb[4] = xfer_len; ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); } /** * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA * @ipr_cmd: ipr command struct * * This function sets up the SCSI bus attributes and sends * a Mode Select for Page 28 to activate them. 
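 * On completion the reset job advances to ipr_set_supported_devs().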
* * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; int length; ENTER; ipr_scsi_bus_speed_limit(ioa_cfg); ipr_check_term_power(ioa_cfg, mode_pages); ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); length = mode_pages->hdr.length + 1; mode_pages->hdr.length = 0; ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), length); ipr_cmd->job_step = ipr_set_supported_devs; ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, struct ipr_resource_entry, queue); ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_build_mode_sense - Builds a mode sense command * @ipr_cmd: ipr command struct * @res_handle: resource entry struct * @parm: Byte 2 of mode sense command * @dma_addr: DMA address of mode sense buffer * @xfer_len: Size of DMA buffer * * Return value: * none **/ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, __be32 res_handle, u8 parm, dma_addr_t dma_addr, u8 xfer_len) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ioarcb->res_handle = res_handle; ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; ioarcb->cmd_pkt.cdb[2] = parm; ioarcb->cmd_pkt.cdb[4] = xfer_len; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); } /** * ipr_reset_cmd_failed - Handle failure of IOA reset command * @ipr_cmd: ipr command struct * * This function handles the failure of an IOA bringup command. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); dev_err(&ioa_cfg->pdev->dev, "0x%02X failed with IOASC: 0x%08X\n", ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); return IPR_RC_JOB_RETURN; } /** * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense * @ipr_cmd: ipr command struct * * This function handles the failure of a Mode Sense to the IOAFP. * Some adapters do not handle all mode pages. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { ipr_cmd->job_step = ipr_set_supported_devs; ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, struct ipr_resource_entry, queue); return IPR_RC_JOB_CONTINUE; } return ipr_reset_cmd_failed(ipr_cmd); } /** * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA * @ipr_cmd: ipr command struct * * This function send a Page 28 mode sense to the IOA to * retrieve SCSI bus attributes. 
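 * Adapters that reject the page are handled by ipr_reset_mode_sense_failed().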
* * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x28, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), sizeof(struct ipr_mode_pages)); ipr_cmd->job_step = ipr_ioafp_mode_select_page28; ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA * @ipr_cmd: ipr command struct * * This function enables dual IOA RAID support if possible. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; struct ipr_mode_page24 *mode_page; int length; ENTER; mode_page = ipr_get_mode_page(mode_pages, 0x24, sizeof(struct ipr_mode_page24)); if (mode_page) mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; length = mode_pages->hdr.length + 1; mode_pages->hdr.length = 0; ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), length); ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense * @ipr_cmd: ipr command struct * * This function handles the failure of a Mode Sense to the IOAFP. * Some adapters do not handle all mode pages. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) { u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; return IPR_RC_JOB_CONTINUE; } return ipr_reset_cmd_failed(ipr_cmd); } /** * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA * @ipr_cmd: ipr command struct * * This function send a mode sense to the IOA to retrieve * the IOA Advanced Function Control mode page. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x24, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), sizeof(struct ipr_mode_pages)); ipr_cmd->job_step = ipr_ioafp_mode_select_page24; ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_init_res_table - Initialize the resource table * @ipr_cmd: ipr command struct * * This function looks through the existing resource table, comparing * it with the config table. This function will take care of old/new * devices and schedule adding/removing them from the mid-layer * as appropriate. 
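 * Entries that disappeared from the config table are flagged del_from_ml and
 * newly reported devices are flagged add_to_ml for the worker thread.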
* * Return value: * IPR_RC_JOB_CONTINUE **/ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_resource_entry *res, *temp; struct ipr_config_table_entry_wrapper cfgtew; int entries, found, flag, i; LIST_HEAD(old_res); ENTER; if (ioa_cfg->sis64) flag = ioa_cfg->u.cfg_table64->hdr64.flags; else flag = ioa_cfg->u.cfg_table->hdr.flags; if (flag & IPR_UCODE_DOWNLOAD_REQ) dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) list_move_tail(&res->queue, &old_res); if (ioa_cfg->sis64) entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); else entries = ioa_cfg->u.cfg_table->hdr.num_entries; for (i = 0; i < entries; i++) { if (ioa_cfg->sis64) cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; else cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; found = 0; list_for_each_entry_safe(res, temp, &old_res, queue) { if (ipr_is_same_device(res, &cfgtew)) { list_move_tail(&res->queue, &ioa_cfg->used_res_q); found = 1; break; } } if (!found) { if (list_empty(&ioa_cfg->free_res_q)) { dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); break; } found = 1; res = list_entry(ioa_cfg->free_res_q.next, struct ipr_resource_entry, queue); list_move_tail(&res->queue, &ioa_cfg->used_res_q); ipr_init_res_entry(res, &cfgtew); res->add_to_ml = 1; } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) res->sdev->allow_restart = 1; if (found) ipr_update_res_entry(res, &cfgtew); } list_for_each_entry_safe(res, temp, &old_res, queue) { if (res->sdev) { res->del_from_ml = 1; res->res_handle = IPR_INVALID_RES_HANDLE; list_move_tail(&res->queue, &ioa_cfg->used_res_q); } } list_for_each_entry_safe(res, temp, &old_res, queue) { ipr_clear_res_target(res); list_move_tail(&res->queue, &ioa_cfg->free_res_q); } if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; else ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter. * @ipr_cmd: ipr command struct * * This function sends a Query IOA Configuration command * to the adapter to retrieve the IOA configuration table. 
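 * The table is DMAed into the cfg_table buffer and parsed by
 * ipr_init_res_table() when the command completes.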
* * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; ENTER; if (cap->cap & IPR_CAP_DUAL_IOA_RAID) ioa_cfg->dual_raid = 1; dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", ucode_vpd->major_release, ucode_vpd->card_type, ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, IPR_IOADL_FLAGS_READ_LAST); ipr_cmd->job_step = ipr_init_res_table; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd) { u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) return IPR_RC_JOB_CONTINUE; return ipr_reset_cmd_failed(ipr_cmd); } static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd, __be32 res_handle, u8 sa_code) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ioarcb->res_handle = res_handle; ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; ioarcb->cmd_pkt.cdb[1] = sa_code; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; } /** * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service * action * @ipr_cmd: ipr command struct * * Return value: * none **/ static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; ENTER; ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { ipr_build_ioa_service_action(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), IPR_IOA_SA_CHANGE_CACHE_PARAMS); ioarcb->cmd_pkt.cdb[2] = 0x40; ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_SET_SUP_DEVICE_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_ioafp_inquiry - Send an Inquiry to the adapter. * @ipr_cmd: ipr command struct * @flags: flags to send * @page: page to inquire * @dma_addr: DMA address * @xfer_len: transfer data length * * This utility function sends an inquiry to the adapter. * * Return value: * none **/ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, dma_addr_t dma_addr, u8 xfer_len) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ENTER; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.cdb[0] = INQUIRY; ioarcb->cmd_pkt.cdb[1] = flags; ioarcb->cmd_pkt.cdb[2] = page; ioarcb->cmd_pkt.cdb[4] = xfer_len; ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; } /** * ipr_inquiry_page_supported - Is the given inquiry page supported * @page0: inquiry page 0 buffer * @page: page code. 
* * This function determines if the specified inquiry page is supported. * * Return value: * 1 if page is supported / 0 if not **/ static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) { int i; for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) if (page0->page[i] == page) return 1; return 0; } /** * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter. * @ipr_cmd: ipr command struct * * This function sends a Page 0xC4 inquiry to the adapter * to retrieve software VPD information. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; ENTER; ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; memset(pageC4, 0, sizeof(*pageC4)); if (ipr_inquiry_page_supported(page0, 0xC4)) { ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4, (ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, pageC4_data)), sizeof(struct ipr_inquiry_pageC4)); return IPR_RC_JOB_RETURN; } LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. * @ipr_cmd: ipr command struct * * This function sends a Page 0xD0 inquiry to the adapter * to retrieve adapter capabilities. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; ENTER; ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; memset(cap, 0, sizeof(*cap)); if (ipr_inquiry_page_supported(page0, 0xD0)) { ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), sizeof(struct ipr_inquiry_cap)); return IPR_RC_JOB_RETURN; } LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. * @ipr_cmd: ipr command struct * * This function sends a Page 3 inquiry to the adapter * to retrieve software VPD information. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; ipr_cmd->job_step = ipr_ioafp_cap_inquiry; ipr_ioafp_inquiry(ipr_cmd, 1, 3, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), sizeof(struct ipr_inquiry_page3)); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. * @ipr_cmd: ipr command struct * * This function sends a Page 0 inquiry to the adapter * to retrieve supported inquiry pages. 
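 * As a side effect, the routine first copies the four-character product ID
 * out of the standard inquiry data and parses it as a hexadecimal adapter
 * type; for example (illustrative value only), a product ID string of
 * "5702" would leave ioa_cfg->type == 0x5702. If the type is not
 * recognized and ipr_testmode is not set, reset_retries is bumped and the
 * adapter reset is re-initiated instead of continuing with the inquiry
 * sequence.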
* * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; char type[5]; ENTER; /* Grab the type out of the VPD and store it away */ memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); type[4] = '\0'; ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); if (ipr_invalid_adapter(ioa_cfg)) { dev_err(&ioa_cfg->pdev->dev, "Adapter not supported in this hardware configuration.\n"); if (!ipr_testmode) { ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); list_add_tail(&ipr_cmd->queue, &ioa_cfg->hrrq->hrrq_free_q); return IPR_RC_JOB_RETURN; } } ipr_cmd->job_step = ipr_ioafp_page3_inquiry; ipr_ioafp_inquiry(ipr_cmd, 1, 0, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), sizeof(struct ipr_inquiry_page0)); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. * @ipr_cmd: ipr command struct * * This function sends a standard inquiry to the adapter. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; ipr_cmd->job_step = ipr_ioafp_page0_inquiry; ipr_ioafp_inquiry(ipr_cmd, 0, 0, ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), sizeof(struct ipr_ioa_vpd)); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. * @ipr_cmd: ipr command struct * * This function send an Identify Host Request Response Queue * command to establish the HRRQ with the adapter. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_hrr_queue *hrrq; ENTER; ipr_cmd->job_step = ipr_ioafp_std_inquiry; if (ioa_cfg->identify_hrrq_index == 0) dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; if (ioa_cfg->sis64) ioarcb->cmd_pkt.cdb[1] = 0x1; if (ioa_cfg->nvectors == 1) ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; else ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; ioarcb->cmd_pkt.cdb[2] = ((u64) hrrq->host_rrq_dma >> 24) & 0xff; ioarcb->cmd_pkt.cdb[3] = ((u64) hrrq->host_rrq_dma >> 16) & 0xff; ioarcb->cmd_pkt.cdb[4] = ((u64) hrrq->host_rrq_dma >> 8) & 0xff; ioarcb->cmd_pkt.cdb[5] = ((u64) hrrq->host_rrq_dma) & 0xff; ioarcb->cmd_pkt.cdb[7] = ((sizeof(u32) * hrrq->size) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = (sizeof(u32) * hrrq->size) & 0xff; if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) ioarcb->cmd_pkt.cdb[9] = ioa_cfg->identify_hrrq_index; if (ioa_cfg->sis64) { ioarcb->cmd_pkt.cdb[10] = ((u64) hrrq->host_rrq_dma >> 56) & 0xff; ioarcb->cmd_pkt.cdb[11] = ((u64) hrrq->host_rrq_dma >> 48) & 0xff; ioarcb->cmd_pkt.cdb[12] = ((u64) hrrq->host_rrq_dma >> 40) & 0xff; ioarcb->cmd_pkt.cdb[13] = ((u64) hrrq->host_rrq_dma >> 32) & 0xff; } if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) ioarcb->cmd_pkt.cdb[14] = ioa_cfg->identify_hrrq_index; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) ipr_cmd->job_step = 
ipr_ioafp_identify_hrrq; LEAVE; return IPR_RC_JOB_RETURN; } LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_reset_timer_done - Adapter reset timer function * @t: Timer context used to fetch ipr command struct * * Description: This function is used in adapter reset processing * for timing events. If the reset_cmd pointer in the IOA * config struct is not this adapter's we are doing nested * resets and fail_all_ops will take care of freeing the * command block. * * Return value: * none **/ static void ipr_reset_timer_done(struct timer_list *t) { struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; unsigned long lock_flags = 0; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->reset_cmd == ipr_cmd) { list_del(&ipr_cmd->queue); ipr_cmd->done(ipr_cmd); } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); } /** * ipr_reset_start_timer - Start a timer for adapter reset job * @ipr_cmd: ipr command struct * @timeout: timeout value * * Description: This function is used in adapter reset processing * for timing events. If the reset_cmd pointer in the IOA * config struct is not this adapter's we are doing nested * resets and fail_all_ops will take care of freeing the * command block. * * Return value: * none **/ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, unsigned long timeout) { ENTER; list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); ipr_cmd->done = ipr_reset_ioa_job; ipr_cmd->timer.expires = jiffies + timeout; ipr_cmd->timer.function = ipr_reset_timer_done; add_timer(&ipr_cmd->timer); } /** * ipr_init_ioa_mem - Initialize ioa_cfg control block * @ioa_cfg: ioa cfg struct * * Return value: * nothing **/ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_hrr_queue *hrrq; for_each_hrrq(hrrq, ioa_cfg) { spin_lock(&hrrq->_lock); memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); /* Initialize Host RRQ pointers */ hrrq->hrrq_start = hrrq->host_rrq; hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; hrrq->hrrq_curr = hrrq->hrrq_start; hrrq->toggle_bit = 1; spin_unlock(&hrrq->_lock); } wmb(); ioa_cfg->identify_hrrq_index = 0; if (ioa_cfg->hrrq_num == 1) atomic_set(&ioa_cfg->hrrq_index, 0); else atomic_set(&ioa_cfg->hrrq_index, 1); /* Zero out config table */ memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); } /** * ipr_reset_next_stage - Process IPL stage change based on feedback register. 
* @ipr_cmd: ipr command struct * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) { unsigned long stage, stage_time; u32 feedback; volatile u32 int_reg; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; u64 maskval = 0; feedback = readl(ioa_cfg->regs.init_feedback_reg); stage = feedback & IPR_IPL_INIT_STAGE_MASK; stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); /* sanity check the stage_time value */ if (stage_time == 0) stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); stage_time = ioa_cfg->transop_timeout; ipr_cmd->job_step = ipr_ioafp_identify_hrrq; } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { ipr_cmd->job_step = ipr_ioafp_identify_hrrq; maskval = IPR_PCII_IPL_STAGE_CHANGE; maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); return IPR_RC_JOB_CONTINUE; } } ipr_cmd->timer.expires = jiffies + stage_time * HZ; ipr_cmd->timer.function = ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); return IPR_RC_JOB_RETURN; } /** * ipr_reset_enable_ioa - Enable the IOA following a reset. * @ipr_cmd: ipr command struct * * This function reinitializes some control blocks and * enables destructive diagnostics on the adapter. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; volatile u32 int_reg; volatile u64 maskval; int i; ENTER; ipr_cmd->job_step = ipr_ioafp_identify_hrrq; ipr_init_ioa_mem(ioa_cfg); for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].allow_interrupts = 1; spin_unlock(&ioa_cfg->hrrq[i]._lock); } if (ioa_cfg->sis64) { /* Set the adapter to the correct endian mode. 
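		 * The readl() of endian_swap_reg that follows the write most
		 * likely just flushes the posted MMIO write before the reset
		 * sequence continues; the value read back is not checked.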
*/ writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); int_reg = readl(ioa_cfg->regs.endian_swap_reg); } int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), ioa_cfg->regs.clr_interrupt_mask_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); return IPR_RC_JOB_CONTINUE; } /* Enable destructive diagnostics on IOA */ writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); if (ioa_cfg->sis64) { maskval = IPR_PCII_IPL_STAGE_CHANGE; maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); } else writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); if (ioa_cfg->sis64) { ipr_cmd->job_step = ipr_reset_next_stage; return IPR_RC_JOB_CONTINUE; } ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); ipr_cmd->timer.function = ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_reset_wait_for_dump - Wait for a dump to timeout. * @ipr_cmd: ipr command struct * * This function is invoked when an adapter dump has run out * of processing time. * * Return value: * IPR_RC_JOB_CONTINUE **/ static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; if (ioa_cfg->sdt_state == GET_DUMP) ioa_cfg->sdt_state = WAIT_FOR_DUMP; else if (ioa_cfg->sdt_state == READ_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; ioa_cfg->dump_timeout = 1; ipr_cmd->job_step = ipr_reset_alert; return IPR_RC_JOB_CONTINUE; } /** * ipr_unit_check_no_data - Log a unit check/no data error log * @ioa_cfg: ioa config struct * * Logs an error indicating the adapter unit checked, but for some * reason, we were unable to fetch the unit check buffer. * * Return value: * nothing **/ static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) { ioa_cfg->errors_logged++; dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); } /** * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA * @ioa_cfg: ioa config struct * * Fetches the unit check buffer from the adapter by clocking the data * through the mailbox register. 
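 * The flow below is: read the SDT address from the mailbox register, pull
 * in the SDT header plus its first entry, derive the unit check buffer
 * length from that entry (for format 2 SDTs the length is
 * end_token - start_token masked with IPR_FMT2_MBX_ADDR_MASK), and then
 * copy the buffer into a free hostrcb so it can be run through
 * ipr_handle_log_data().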
* * Return value: * nothing **/ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) { unsigned long mailbox; struct ipr_hostrcb *hostrcb; struct ipr_uc_sdt sdt; int rc, length; u32 ioasc; mailbox = readl(ioa_cfg->ioa_mailbox); if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { ipr_unit_check_no_data(ioa_cfg); return; } memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { ipr_unit_check_no_data(ioa_cfg); return; } /* Find length of the first sdt entry (UC buffer) */ if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) length = be32_to_cpu(sdt.entry[0].end_token); else length = (be32_to_cpu(sdt.entry[0].end_token) - be32_to_cpu(sdt.entry[0].start_token)) & IPR_FMT2_MBX_ADDR_MASK; hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, struct ipr_hostrcb, queue); list_del_init(&hostrcb->queue); memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); rc = ipr_get_ldump_data_section(ioa_cfg, be32_to_cpu(sdt.entry[0].start_token), (__be32 *)&hostrcb->hcam, min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); if (!rc) { ipr_handle_log_data(ioa_cfg, hostrcb); ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && ioa_cfg->sdt_state == GET_DUMP) ioa_cfg->sdt_state = WAIT_FOR_DUMP; } else ipr_unit_check_no_data(ioa_cfg); list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); } /** * ipr_reset_get_unit_check_job - Call to get the unit check buffer. * @ipr_cmd: ipr command struct * * Description: This function will call to get the unit check buffer. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; ioa_cfg->ioa_unit_checked = 0; ipr_get_unit_check_buffer(ioa_cfg); ipr_cmd->job_step = ipr_reset_alert; ipr_reset_start_timer(ipr_cmd, 0); LEAVE; return IPR_RC_JOB_RETURN; } static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; if (ioa_cfg->sdt_state != GET_DUMP) return IPR_RC_JOB_RETURN; if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || (readl(ioa_cfg->regs.sense_interrupt_reg) & IPR_PCII_MAILBOX_STABLE)) { if (!ipr_cmd->u.time_left) dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for Mailbox register.\n"); ioa_cfg->sdt_state = READ_DUMP; ioa_cfg->dump_timeout = 0; if (ioa_cfg->sis64) ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); else ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); ipr_cmd->job_step = ipr_reset_wait_for_dump; schedule_work(&ioa_cfg->work_q); } else { ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); } LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_reset_restore_cfg_space - Restore PCI config space. * @ipr_cmd: ipr command struct * * Description: This function restores the saved PCI config space of * the adapter, fails all outstanding ops back to the callers, and * fetches the dump/unit check if applicable to this reset. 
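 * Depending on adapter state, the next job step is chosen here: bringdown
 * completion when the IOA is being taken down, the mailbox/dump wait path
 * when a dump has been requested, and ipr_reset_enable_ioa for a normal
 * reset. If the adapter unit checked, the unit check buffer is fetched and
 * the reset is re-alerted instead.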
* * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; ioa_cfg->pdev->state_saved = true; pci_restore_state(ioa_cfg->pdev); if (ipr_set_pcix_cmd_reg(ioa_cfg)) { ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); return IPR_RC_JOB_CONTINUE; } ipr_fail_all_ops(ioa_cfg); if (ioa_cfg->sis64) { /* Set the adapter to the correct endian mode. */ writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); readl(ioa_cfg->regs.endian_swap_reg); } if (ioa_cfg->ioa_unit_checked) { if (ioa_cfg->sis64) { ipr_cmd->job_step = ipr_reset_get_unit_check_job; ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); return IPR_RC_JOB_RETURN; } else { ioa_cfg->ioa_unit_checked = 0; ipr_get_unit_check_buffer(ioa_cfg); ipr_cmd->job_step = ipr_reset_alert; ipr_reset_start_timer(ipr_cmd, 0); return IPR_RC_JOB_RETURN; } } if (ioa_cfg->in_ioa_bringdown) { ipr_cmd->job_step = ipr_ioa_bringdown_done; } else if (ioa_cfg->sdt_state == GET_DUMP) { ipr_cmd->job_step = ipr_dump_mailbox_wait; ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; } else { ipr_cmd->job_step = ipr_reset_enable_ioa; } LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_reset_bist_done - BIST has completed on the adapter. * @ipr_cmd: ipr command struct * * Description: Unblock config space and resume the reset process. * * Return value: * IPR_RC_JOB_CONTINUE **/ static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; if (ioa_cfg->cfg_locked) pci_cfg_access_unlock(ioa_cfg->pdev); ioa_cfg->cfg_locked = 0; ipr_cmd->job_step = ipr_reset_restore_cfg_space; LEAVE; return IPR_RC_JOB_CONTINUE; } /** * ipr_reset_start_bist - Run BIST on the adapter. * @ipr_cmd: ipr command struct * * Description: This function runs BIST on the adapter, then delays 2 seconds. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; int rc = PCIBIOS_SUCCESSFUL; ENTER; if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) writel(IPR_UPROCI_SIS64_START_BIST, ioa_cfg->regs.set_uproc_interrupt_reg32); else rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); if (rc == PCIBIOS_SUCCESSFUL) { ipr_cmd->job_step = ipr_reset_bist_done; ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); rc = IPR_RC_JOB_RETURN; } else { if (ioa_cfg->cfg_locked) pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); ioa_cfg->cfg_locked = 0; ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); rc = IPR_RC_JOB_CONTINUE; } LEAVE; return rc; } /** * ipr_reset_slot_reset_done - Clear PCI reset to the adapter * @ipr_cmd: ipr command struct * * Description: This clears PCI reset to the adapter and delays two seconds. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) { ENTER; ipr_cmd->job_step = ipr_reset_bist_done; ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_reset_reset_work - Pulse a PCIe fundamental reset * @work: work struct * * Description: This pulses warm reset to a slot. 
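 * The pulse is simply: assert pcie_warm_reset, sleep for
 * IPR_PCI_RESET_TIMEOUT (converted from jiffies to milliseconds), then
 * deassert it. The reset job is only re-entered afterwards if this command
 * is still the adapter's active reset_cmd, so a stale worker cannot
 * restart the state machine.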
* **/ static void ipr_reset_reset_work(struct work_struct *work) { struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct pci_dev *pdev = ioa_cfg->pdev; unsigned long lock_flags = 0; ENTER; pci_set_pcie_reset_state(pdev, pcie_warm_reset); msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); pci_set_pcie_reset_state(pdev, pcie_deassert_reset); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->reset_cmd == ipr_cmd) ipr_reset_ioa_job(ipr_cmd); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; } /** * ipr_reset_slot_reset - Reset the PCI slot of the adapter. * @ipr_cmd: ipr command struct * * Description: This asserts PCI reset to the adapter. * * Return value: * IPR_RC_JOB_RETURN **/ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ENTER; INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); ipr_cmd->job_step = ipr_reset_slot_reset_done; LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_reset_block_config_access_wait - Wait for permission to block config access * @ipr_cmd: ipr command struct * * Description: This attempts to block config access to the IOA. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; int rc = IPR_RC_JOB_CONTINUE; if (pci_cfg_access_trylock(ioa_cfg->pdev)) { ioa_cfg->cfg_locked = 1; ipr_cmd->job_step = ioa_cfg->reset; } else { if (ipr_cmd->u.time_left) { rc = IPR_RC_JOB_RETURN; ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); } else { ipr_cmd->job_step = ioa_cfg->reset; dev_err(&ioa_cfg->pdev->dev, "Timed out waiting to lock config access. Resetting anyway.\n"); } } return rc; } /** * ipr_reset_block_config_access - Block config access to the IOA * @ipr_cmd: ipr command struct * * Description: This attempts to block config access to the IOA * * Return value: * IPR_RC_JOB_CONTINUE **/ static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) { ipr_cmd->ioa_cfg->cfg_locked = 0; ipr_cmd->job_step = ipr_reset_block_config_access_wait; ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; return IPR_RC_JOB_CONTINUE; } /** * ipr_reset_allowed - Query whether or not IOA can be reset * @ioa_cfg: ioa config struct * * Return value: * 0 if reset not allowed / non-zero if reset is allowed **/ static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) { volatile u32 temp_reg; temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); } /** * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. * @ipr_cmd: ipr command struct * * Description: This function waits for adapter permission to run BIST, * then runs BIST. If the adapter does not give permission after a * reasonable time, we will reset the adapter anyway. The impact of * resetting the adapter without warning the adapter is the risk of * losing the persistent error log on the adapter. If the adapter is * reset while it is writing to the flash on the adapter, the flash * segment will have bad ECC and be zeroed. 
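 * The wait is implemented as a polling loop: as long as the IOA still
 * reports a critical operation in progress and u.time_left has not
 * expired, the job re-arms its timer in IPR_CHECK_FOR_RESET_TIMEOUT steps
 * before falling through to ipr_reset_block_config_access.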
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd: ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd: ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd: ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 * proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;
	int count = 0;

	ENTER;
	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			count++;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);

		if (count)
			break;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
 * @ipr_cmd: ipr command struct
 *
 * Description: Cancel any outstanding HCAMs to the IOA.
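 * Each cancel is built as an IPR_CANCEL_REQUEST IOA command carrying the
 * 64-bit IOARCB address of the HCAM being cancelled: bits 31..0 go in CDB
 * bytes 2..5 and bits 63..32 in CDB bytes 10..13 (most significant byte
 * first within each group), with IPR_CANCEL_64BIT_IOARCB set in byte 1.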
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ENTER;
	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd: ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
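 * The download itself is a SCSI WRITE BUFFER with mode
 * IPR_WR_BUF_DOWNLOAD_AND_SAVE; the image length is placed in CDB bytes
 * 6..8 and the image is described by an IOADL built from the microcode
 * scatter/gather list (64-bit or 32-bit form depending on sis64).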
* * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; ENTER; ipr_cmd->job_step = ipr_reset_alert; if (!sglist) return IPR_RC_JOB_CONTINUE; ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; if (ioa_cfg->sis64) ipr_build_ucode_ioadl64(ipr_cmd, sglist); else ipr_build_ucode_ioadl(ipr_cmd, sglist); ipr_cmd->job_step = ipr_reset_ucode_download_done; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_WRITE_BUFFER_TIMEOUT); LEAVE; return IPR_RC_JOB_RETURN; } /** * ipr_reset_shutdown_ioa - Shutdown the adapter * @ipr_cmd: ipr command struct * * Description: This function issues an adapter shutdown of the * specified type to the specified adapter as part of the * adapter reset job. * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN **/ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; unsigned long timeout; int rc = IPR_RC_JOB_CONTINUE; ENTER; if (shutdown_type == IPR_SHUTDOWN_QUIESCE) ipr_cmd->job_step = ipr_reset_cancel_hcam; else if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; if (shutdown_type == IPR_SHUTDOWN_NORMAL) timeout = IPR_SHUTDOWN_TIMEOUT; else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) timeout = IPR_INTERNAL_TIMEOUT; else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; else timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); rc = IPR_RC_JOB_RETURN; ipr_cmd->job_step = ipr_reset_ucode_download; } else ipr_cmd->job_step = ipr_reset_alert; LEAVE; return rc; } /** * ipr_reset_ioa_job - Adapter reset job * @ipr_cmd: ipr command struct * * Description: This function is the job router for the adapter reset job. * * Return value: * none **/ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) { u32 rc, ioasc; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; do { ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioa_cfg->reset_cmd != ipr_cmd) { /* * We are doing nested adapter resets and this is * not the current reset job. */ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); return; } if (IPR_IOASC_SENSE_KEY(ioasc)) { rc = ipr_cmd->job_step_failed(ipr_cmd); if (rc == IPR_RC_JOB_RETURN) return; } ipr_reinit_ipr_cmnd(ipr_cmd); ipr_cmd->job_step_failed = ipr_reset_cmd_failed; rc = ipr_cmd->job_step(ipr_cmd); } while (rc == IPR_RC_JOB_CONTINUE); } /** * _ipr_initiate_ioa_reset - Initiate an adapter reset * @ioa_cfg: ioa config struct * @job_step: first job step of reset job * @shutdown_type: shutdown type * * Description: This function will initiate the reset of the given adapter * starting at the selected job step. 
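 * Before the first job step runs, allow_cmds is cleared on every HRRQ and
 * (unless the adapter is being removed) the SCSI host is blocked, so no
 * new commands race with the reset.
 *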
* If the caller needs to wait on the completion of the reset, * the caller must sleep on the reset_wait_q. * * Return value: * none **/ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, int (*job_step) (struct ipr_cmnd *), enum ipr_shutdown_type shutdown_type) { struct ipr_cmnd *ipr_cmd; int i; ioa_cfg->in_reset_reload = 1; for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].allow_cmds = 0; spin_unlock(&ioa_cfg->hrrq[i]._lock); } wmb(); if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { ioa_cfg->scsi_unblock = 0; ioa_cfg->scsi_blocked = 1; scsi_block_requests(ioa_cfg->host); } ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioa_cfg->reset_cmd = ipr_cmd; ipr_cmd->job_step = job_step; ipr_cmd->u.shutdown_type = shutdown_type; ipr_reset_ioa_job(ipr_cmd); } /** * ipr_initiate_ioa_reset - Initiate an adapter reset * @ioa_cfg: ioa config struct * @shutdown_type: shutdown type * * Description: This function will initiate the reset of the given adapter. * If the caller needs to wait on the completion of the reset, * the caller must sleep on the reset_wait_q. * * Return value: * none **/ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, enum ipr_shutdown_type shutdown_type) { int i; if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) return; if (ioa_cfg->in_reset_reload) { if (ioa_cfg->sdt_state == GET_DUMP) ioa_cfg->sdt_state = WAIT_FOR_DUMP; else if (ioa_cfg->sdt_state == READ_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; } if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { dev_err(&ioa_cfg->pdev->dev, "IOA taken offline - error recovery failed\n"); ioa_cfg->reset_retries = 0; for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].ioa_is_dead = 1; spin_unlock(&ioa_cfg->hrrq[i]._lock); } wmb(); if (ioa_cfg->in_ioa_bringdown) { ioa_cfg->reset_cmd = NULL; ioa_cfg->in_reset_reload = 0; ipr_fail_all_ops(ioa_cfg); wake_up_all(&ioa_cfg->reset_wait_q); if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { ioa_cfg->scsi_unblock = 1; schedule_work(&ioa_cfg->work_q); } return; } else { ioa_cfg->in_ioa_bringdown = 1; shutdown_type = IPR_SHUTDOWN_NONE; } } _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, shutdown_type); } /** * ipr_reset_freeze - Hold off all I/O activity * @ipr_cmd: ipr command struct * * Description: If the PCI slot is frozen, hold off all I/O * activity; then, as soon as the slot is available again, * initiate an adapter reset. */ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; int i; /* Disallow new interrupts, avoid loop */ for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].allow_interrupts = 0; spin_unlock(&ioa_cfg->hrrq[i]._lock); } wmb(); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); ipr_cmd->done = ipr_reset_ioa_job; return IPR_RC_JOB_RETURN; } /** * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled * @pdev: PCI device struct * * Description: This routine is called to tell us that the MMIO * access to the IOA has been restored */ static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) { unsigned long flags = 0; struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); spin_lock_irqsave(ioa_cfg->host->host_lock, flags); if (!ioa_cfg->probe_done) pci_save_state(pdev); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); return PCI_ERS_RESULT_NEED_RESET; } /** * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 
 * @pdev: PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev: PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
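 * Concretely, this just marks probe_done (which un-gates the PCI error
 * handlers above) and kicks off the first adapter reset job under
 * host_lock: a full reset via ipr_initiate_ioa_reset() when
 * needs_hard_reset was flagged during probe, otherwise a reset job that
 * starts directly at ipr_reset_enable_ioa.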
* Return value: * none **/ static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) { unsigned long host_lock_flags = 0; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); ioa_cfg->probe_done = 1; if (ioa_cfg->needs_hard_reset) { ioa_cfg->needs_hard_reset = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); } else _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); LEAVE; } /** * ipr_free_cmd_blks - Frees command blocks allocated for an adapter * @ioa_cfg: ioa config struct * * Return value: * none **/ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) { int i; if (ioa_cfg->ipr_cmnd_list) { for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { if (ioa_cfg->ipr_cmnd_list[i]) dma_pool_free(ioa_cfg->ipr_cmd_pool, ioa_cfg->ipr_cmnd_list[i], ioa_cfg->ipr_cmnd_list_dma[i]); ioa_cfg->ipr_cmnd_list[i] = NULL; } } dma_pool_destroy(ioa_cfg->ipr_cmd_pool); kfree(ioa_cfg->ipr_cmnd_list); kfree(ioa_cfg->ipr_cmnd_list_dma); ioa_cfg->ipr_cmnd_list = NULL; ioa_cfg->ipr_cmnd_list_dma = NULL; ioa_cfg->ipr_cmd_pool = NULL; } /** * ipr_free_mem - Frees memory allocated for an adapter * @ioa_cfg: ioa cfg struct * * Return value: * nothing **/ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) { int i; kfree(ioa_cfg->res_entries); dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); ipr_free_cmd_blks(ioa_cfg); for (i = 0; i < ioa_cfg->hrrq_num; i++) dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, ioa_cfg->hrrq[i].host_rrq_dma); dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); for (i = 0; i < IPR_MAX_HCAMS; i++) { dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_hostrcb), ioa_cfg->hostrcb[i], ioa_cfg->hostrcb_dma[i]); } ipr_free_dump(ioa_cfg); kfree(ioa_cfg->trace); } /** * ipr_free_irqs - Free all allocated IRQs for the adapter. * @ioa_cfg: ipr cfg struct * * This function frees all allocated IRQs for the * specified adapter. * * Return value: * none **/ static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) { struct pci_dev *pdev = ioa_cfg->pdev; int i; for (i = 0; i < ioa_cfg->nvectors; i++) free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); pci_free_irq_vectors(pdev); } /** * ipr_free_all_resources - Free all allocated resources for an adapter. * @ioa_cfg: ioa config struct * * This function frees all allocated resources for the * specified adapter. 
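 * The teardown order roughly mirrors the error unwind in ipr_probe_ioa():
 * IRQs and the reset workqueue first, then the MMIO mapping and PCI
 * regions, then driver memory, and finally the Scsi_Host reference and
 * the PCI device itself.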
* * Return value: * none **/ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) { struct pci_dev *pdev = ioa_cfg->pdev; ENTER; ipr_free_irqs(ioa_cfg); if (ioa_cfg->reset_work_q) destroy_workqueue(ioa_cfg->reset_work_q); iounmap(ioa_cfg->hdw_dma_regs); pci_release_regions(pdev); ipr_free_mem(ioa_cfg); scsi_host_put(ioa_cfg->host); pci_disable_device(pdev); LEAVE; } /** * ipr_alloc_cmd_blks - Allocate command blocks for an adapter * @ioa_cfg: ioa config struct * * Return value: * 0 on success / -ENOMEM on allocation failure **/ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_cmnd *ipr_cmd; struct ipr_ioarcb *ioarcb; dma_addr_t dma_addr; int i, entries_each_hrrq, hrrq_id = 0; ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, sizeof(struct ipr_cmnd), 512, 0); if (!ioa_cfg->ipr_cmd_pool) return -ENOMEM; ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { ipr_free_cmd_blks(ioa_cfg); return -ENOMEM; } for (i = 0; i < ioa_cfg->hrrq_num; i++) { if (ioa_cfg->hrrq_num > 1) { if (i == 0) { entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS; ioa_cfg->hrrq[i].min_cmd_id = 0; ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); } else { entries_each_hrrq = IPR_NUM_BASE_CMD_BLKS/ (ioa_cfg->hrrq_num - 1); ioa_cfg->hrrq[i].min_cmd_id = IPR_NUM_INTERNAL_CMD_BLKS + (i - 1) * entries_each_hrrq; ioa_cfg->hrrq[i].max_cmd_id = (IPR_NUM_INTERNAL_CMD_BLKS + i * entries_each_hrrq - 1); } } else { entries_each_hrrq = IPR_NUM_CMD_BLKS; ioa_cfg->hrrq[i].min_cmd_id = 0; ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); } ioa_cfg->hrrq[i].size = entries_each_hrrq; } BUG_ON(ioa_cfg->hrrq_num == 0); i = IPR_NUM_CMD_BLKS - ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; if (i > 0) { ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; } for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); if (!ipr_cmd) { ipr_free_cmd_blks(ioa_cfg); return -ENOMEM; } ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; ioarcb = &ipr_cmd->ioarcb; ipr_cmd->dma_addr = dma_addr; if (ioa_cfg->sis64) ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); else ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); ioarcb->host_response_handle = cpu_to_be32(i << 2); if (ioa_cfg->sis64) { ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); } else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; ioarcb->ioasa_host_pci_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); } ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); ipr_cmd->cmd_index = i; ipr_cmd->ioa_cfg = ioa_cfg; ipr_cmd->sense_buffer_dma = dma_addr + offsetof(struct ipr_cmnd, sense_buffer); ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) hrrq_id++; } return 0; } /** * ipr_alloc_mem - Allocate memory for an adapter * @ioa_cfg: ioa config struct * * Return value: * 0 on success / non-zero for 
error **/ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) { struct pci_dev *pdev = ioa_cfg->pdev; int i, rc = -ENOMEM; ENTER; ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, sizeof(struct ipr_resource_entry), GFP_KERNEL); if (!ioa_cfg->res_entries) goto out; for (i = 0; i < ioa_cfg->max_devs_supported; i++) { list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; } ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), &ioa_cfg->vpd_cbs_dma, GFP_KERNEL); if (!ioa_cfg->vpd_cbs) goto out_free_res_entries; if (ipr_alloc_cmd_blks(ioa_cfg)) goto out_free_vpd_cbs; for (i = 0; i < ioa_cfg->hrrq_num; i++) { ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, &ioa_cfg->hrrq[i].host_rrq_dma, GFP_KERNEL); if (!ioa_cfg->hrrq[i].host_rrq) { while (--i >= 0) dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, ioa_cfg->hrrq[i].host_rrq_dma); goto out_ipr_free_cmd_blocks; } ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; } ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, ioa_cfg->cfg_table_size, &ioa_cfg->cfg_table_dma, GFP_KERNEL); if (!ioa_cfg->u.cfg_table) goto out_free_host_rrq; for (i = 0; i < IPR_MAX_HCAMS; i++) { ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), &ioa_cfg->hostrcb_dma[i], GFP_KERNEL); if (!ioa_cfg->hostrcb[i]) goto out_free_hostrcb_dma; ioa_cfg->hostrcb[i]->hostrcb_dma = ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); } ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, sizeof(struct ipr_trace_entry), GFP_KERNEL); if (!ioa_cfg->trace) goto out_free_hostrcb_dma; rc = 0; out: LEAVE; return rc; out_free_hostrcb_dma: while (i-- > 0) { dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), ioa_cfg->hostrcb[i], ioa_cfg->hostrcb_dma[i]); } dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); out_free_host_rrq: for (i = 0; i < ioa_cfg->hrrq_num; i++) { dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, ioa_cfg->hrrq[i].host_rrq_dma); } out_ipr_free_cmd_blocks: ipr_free_cmd_blks(ioa_cfg); out_free_vpd_cbs: dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); out_free_res_entries: kfree(ioa_cfg->res_entries); goto out; } /** * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values * @ioa_cfg: ioa config struct * * Return value: * none **/ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) { int i; for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { ioa_cfg->bus_attr[i].bus = i; ioa_cfg->bus_attr[i].qas_enabled = 0; ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; else ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; } } /** * ipr_init_regs - Initialize IOA registers * @ioa_cfg: ioa config struct * * Return value: * none **/ static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) { const struct ipr_interrupt_offsets *p; struct ipr_interrupts *t; void __iomem *base; p = &ioa_cfg->chip_cfg->regs; t = &ioa_cfg->regs; base = ioa_cfg->hdw_dma_regs; t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 
t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; t->clr_interrupt_reg = base + p->clr_interrupt_reg; t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; t->sense_interrupt_reg = base + p->sense_interrupt_reg; t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; t->ioarrin_reg = base + p->ioarrin_reg; t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; if (ioa_cfg->sis64) { t->init_feedback_reg = base + p->init_feedback_reg; t->dump_addr_reg = base + p->dump_addr_reg; t->dump_data_reg = base + p->dump_data_reg; t->endian_swap_reg = base + p->endian_swap_reg; } } /** * ipr_init_ioa_cfg - Initialize IOA config struct * @ioa_cfg: ioa config struct * @host: scsi host struct * @pdev: PCI dev struct * * Return value: * none **/ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, struct Scsi_Host *host, struct pci_dev *pdev) { int i; ioa_cfg->host = host; ioa_cfg->pdev = pdev; ioa_cfg->log_level = ipr_log_level; ioa_cfg->doorbell = IPR_DOORBELL; sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); INIT_LIST_HEAD(&ioa_cfg->free_res_q); INIT_LIST_HEAD(&ioa_cfg->used_res_q); INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); init_waitqueue_head(&ioa_cfg->reset_wait_q); init_waitqueue_head(&ioa_cfg->msi_wait_q); init_waitqueue_head(&ioa_cfg->eeh_wait_q); ioa_cfg->sdt_state = INACTIVE; ipr_initialize_bus_attr(ioa_cfg); ioa_cfg->max_devs_supported = ipr_max_devs; if (ioa_cfg->sis64) { host->max_channel = IPR_MAX_SIS64_BUSES; host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_SIS64_DEVS) ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) + ((sizeof(struct ipr_config_table_entry64) * ioa_cfg->max_devs_supported))); } else { host->max_channel = IPR_VSET_BUS; host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) + ((sizeof(struct ipr_config_table_entry) * ioa_cfg->max_devs_supported))); } host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; host->can_queue = ioa_cfg->max_cmds; pci_set_drvdata(pdev, ioa_cfg); for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); spin_lock_init(&ioa_cfg->hrrq[i]._lock); if (i == 0) ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; else ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; } } /** * 
ipr_get_chip_info - Find adapter chip information * @dev_id: PCI device id struct * * Return value: * ptr to chip information on success / NULL on failure **/ static const struct ipr_chip_t * ipr_get_chip_info(const struct pci_device_id *dev_id) { int i; for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) if (ipr_chip[i].vendor == dev_id->vendor && ipr_chip[i].device == dev_id->device) return &ipr_chip[i]; return NULL; } /** * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete * during probe time * @ioa_cfg: ioa config struct * * Return value: * None **/ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) { struct pci_dev *pdev = ioa_cfg->pdev; if (pci_channel_offline(pdev)) { wait_event_timeout(ioa_cfg->eeh_wait_q, !pci_channel_offline(pdev), IPR_PCI_ERROR_RECOVERY_TIMEOUT); pci_restore_state(pdev); } } static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) { int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, "host%d-%d", ioa_cfg->host->host_no, vec_idx); ioa_cfg->vectors_info[vec_idx]. desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; } } static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) { int i, rc; for (i = 1; i < ioa_cfg->nvectors; i++) { rc = request_irq(pci_irq_vector(pdev, i), ipr_isr_mhrrq, 0, ioa_cfg->vectors_info[i].desc, &ioa_cfg->hrrq[i]); if (rc) { while (--i > 0) free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); return rc; } } return 0; } /** * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). * @devp: PCI device struct * @irq: IRQ number * * Description: Simply set the msi_received flag to 1 indicating that * Message Signaled Interrupts are supported. * * Return value: * 0 on success / non-zero on failure **/ static irqreturn_t ipr_test_intr(int irq, void *devp) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; unsigned long lock_flags = 0; dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ioa_cfg->msi_received = 1; wake_up(&ioa_cfg->msi_wait_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return IRQ_HANDLED; } /** * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. * @ioa_cfg: ioa config struct * @pdev: PCI device struct * * Description: This routine sets up and initiates a test interrupt to determine * if the interrupt is received via the ipr_test_intr() service routine. * If the tests fails, the driver will fall back to LSI. 
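 * The test requests vector 0, pokes IPR_PCII_IO_DEBUG_ACKNOWLEDGE through
 * the sense interrupt register to make the adapter raise an interrupt,
 * and then waits up to one second (HZ jiffies) for ipr_test_intr() to set
 * msi_received. On failure it returns -EOPNOTSUPP so the caller can drop
 * back to a single legacy vector.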
* * Return value: * 0 on success / non-zero on failure **/ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) { int rc; unsigned long lock_flags = 0; int irq = pci_irq_vector(pdev, 0); ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); init_waitqueue_head(&ioa_cfg->msi_wait_q); ioa_cfg->msi_received = 0; ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); readl(ioa_cfg->regs.sense_interrupt_mask_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); if (rc) { dev_err(&pdev->dev, "Can not assign irq %d\n", irq); return rc; } else if (ipr_debug) dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); readl(ioa_cfg->regs.sense_interrupt_reg); wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); if (!ioa_cfg->msi_received) { /* MSI test failed */ dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); rc = -EOPNOTSUPP; } else if (ipr_debug) dev_info(&pdev->dev, "MSI test succeeded.\n"); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); free_irq(irq, ioa_cfg); LEAVE; return rc; } /* ipr_probe_ioa - Allocates memory and does first stage of initialization * @pdev: PCI device struct * @dev_id: PCI device id struct * * Return value: * 0 on success / non-zero on failure **/ static int ipr_probe_ioa(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct ipr_ioa_cfg *ioa_cfg; struct Scsi_Host *host; unsigned long ipr_regs_pci; void __iomem *ipr_regs; int rc = PCIBIOS_SUCCESSFUL; volatile u32 mask, uproc, interrupts; unsigned long lock_flags, driver_lock_flags; unsigned int irq_flag; ENTER; dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); if (!host) { dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); rc = -ENOMEM; goto out; } ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); if (!ioa_cfg->ipr_chip) { dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", dev_id->vendor, dev_id->device); goto out_scsi_host_put; } /* set SIS 32 or SIS 64 */ ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 
1 : 0; ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; if (ipr_transop_timeout) ioa_cfg->transop_timeout = ipr_transop_timeout; else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; else ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; ioa_cfg->revid = pdev->revision; ipr_init_ioa_cfg(ioa_cfg, host, pdev); ipr_regs_pci = pci_resource_start(pdev, 0); rc = pci_request_regions(pdev, IPR_NAME); if (rc < 0) { dev_err(&pdev->dev, "Couldn't register memory range of registers\n"); goto out_scsi_host_put; } rc = pci_enable_device(pdev); if (rc || pci_channel_offline(pdev)) { if (pci_channel_offline(pdev)) { ipr_wait_for_pci_err_recovery(ioa_cfg); rc = pci_enable_device(pdev); } if (rc) { dev_err(&pdev->dev, "Cannot enable adapter\n"); ipr_wait_for_pci_err_recovery(ioa_cfg); goto out_release_regions; } } ipr_regs = pci_ioremap_bar(pdev, 0); if (!ipr_regs) { dev_err(&pdev->dev, "Couldn't map memory range of registers\n"); rc = -ENOMEM; goto out_disable; } ioa_cfg->hdw_dma_regs = ipr_regs; ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; ipr_init_regs(ioa_cfg); if (ioa_cfg->sis64) { rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc < 0) { dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); } } else rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc < 0) { dev_err(&pdev->dev, "Failed to set DMA mask\n"); goto cleanup_nomem; } rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, ioa_cfg->chip_cfg->cache_line_size); if (rc != PCIBIOS_SUCCESSFUL) { dev_err(&pdev->dev, "Write of cache line size failed\n"); ipr_wait_for_pci_err_recovery(ioa_cfg); rc = -EIO; goto cleanup_nomem; } /* Issue MMIO read to ensure card is not in EEH */ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); ipr_wait_for_pci_err_recovery(ioa_cfg); if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { dev_err(&pdev->dev, "The max number of MSIX is %d\n", IPR_MAX_MSIX_VECTORS); ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; } irq_flag = PCI_IRQ_LEGACY; if (ioa_cfg->ipr_chip->has_msi) irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX; rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag); if (rc < 0) { ipr_wait_for_pci_err_recovery(ioa_cfg); goto cleanup_nomem; } ioa_cfg->nvectors = rc; if (!pdev->msi_enabled && !pdev->msix_enabled) ioa_cfg->clear_isr = 1; pci_set_master(pdev); if (pci_channel_offline(pdev)) { ipr_wait_for_pci_err_recovery(ioa_cfg); pci_set_master(pdev); if (pci_channel_offline(pdev)) { rc = -EIO; goto out_msi_disable; } } if (pdev->msi_enabled || pdev->msix_enabled) { rc = ipr_test_msi(ioa_cfg, pdev); switch (rc) { case 0: dev_info(&pdev->dev, "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, pdev->msix_enabled ? 
"-X" : ""); break; case -EOPNOTSUPP: ipr_wait_for_pci_err_recovery(ioa_cfg); pci_free_irq_vectors(pdev); ioa_cfg->nvectors = 1; ioa_cfg->clear_isr = 1; break; default: goto out_msi_disable; } } ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, (unsigned int)num_online_cpus(), (unsigned int)IPR_MAX_HRRQ_NUM); if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) goto out_msi_disable; if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) goto out_msi_disable; rc = ipr_alloc_mem(ioa_cfg); if (rc < 0) { dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n"); goto out_msi_disable; } /* Save away PCI config space for use following IOA reset */ rc = pci_save_state(pdev); if (rc != PCIBIOS_SUCCESSFUL) { dev_err(&pdev->dev, "Failed to save PCI config space\n"); rc = -EIO; goto cleanup_nolog; } /* * If HRRQ updated interrupt is not masked, or reset alert is set, * the card is in an unknown state and needs a hard reset */ mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) ioa_cfg->needs_hard_reset = 1; if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices) ioa_cfg->needs_hard_reset = 1; if (interrupts & IPR_PCII_IOA_UNIT_CHECKED) ioa_cfg->ioa_unit_checked = 1; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); if (pdev->msi_enabled || pdev->msix_enabled) { name_msi_vectors(ioa_cfg); rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0, ioa_cfg->vectors_info[0].desc, &ioa_cfg->hrrq[0]); if (!rc) rc = ipr_request_other_msi_irqs(ioa_cfg, pdev); } else { rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, &ioa_cfg->hrrq[0]); } if (rc) { dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", pdev->irq, rc); goto cleanup_nolog; } if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { ioa_cfg->needs_warm_reset = 1; ioa_cfg->reset = ipr_reset_slot_reset; ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", WQ_MEM_RECLAIM, host->host_no); if (!ioa_cfg->reset_work_q) { dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); rc = -ENOMEM; goto out_free_irq; } } else ioa_cfg->reset = ipr_reset_start_bist; spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); LEAVE; out: return rc; out_free_irq: ipr_free_irqs(ioa_cfg); cleanup_nolog: ipr_free_mem(ioa_cfg); out_msi_disable: ipr_wait_for_pci_err_recovery(ioa_cfg); pci_free_irq_vectors(pdev); cleanup_nomem: iounmap(ipr_regs); out_disable: pci_disable_device(pdev); out_release_regions: pci_release_regions(pdev); out_scsi_host_put: scsi_host_put(host); goto out; } /** * ipr_initiate_ioa_bringdown - Bring down an adapter * @ioa_cfg: ioa config struct * @shutdown_type: shutdown type * * Description: This function will initiate bringing down the adapter. * This consists of issuing an IOA shutdown to the adapter * to flush the cache, and running BIST. * If the caller needs to wait on the completion of the reset, * the caller must sleep on the reset_wait_q. 
* * Return value: * none **/ static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, enum ipr_shutdown_type shutdown_type) { ENTER; if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; ioa_cfg->reset_retries = 0; ioa_cfg->in_ioa_bringdown = 1; ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); LEAVE; } /** * __ipr_remove - Remove a single adapter * @pdev: pci device struct * * Adapter hot plug remove entry point. * * Return value: * none **/ static void __ipr_remove(struct pci_dev *pdev) { unsigned long host_lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); int i; unsigned long driver_lock_flags; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); } for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); ioa_cfg->hrrq[i].removing_ioa = 1; spin_unlock(&ioa_cfg->hrrq[i]._lock); } wmb(); ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); flush_work(&ioa_cfg->work_q); if (ioa_cfg->reset_work_q) flush_workqueue(ioa_cfg->reset_work_q); INIT_LIST_HEAD(&ioa_cfg->used_res_q); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); list_del(&ioa_cfg->queue); spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); if (ioa_cfg->sdt_state == ABORT_DUMP) ioa_cfg->sdt_state = WAIT_FOR_DUMP; spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); ipr_free_all_resources(ioa_cfg); LEAVE; } /** * ipr_remove - IOA hot plug remove entry point * @pdev: pci device struct * * Adapter hot plug remove entry point. 
* * Return value: * none **/ static void ipr_remove(struct pci_dev *pdev) { struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); ENTER; ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, &ipr_trace_attr); ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, &ipr_dump_attr); sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, &ipr_ioa_async_err_log); scsi_remove_host(ioa_cfg->host); __ipr_remove(pdev); LEAVE; } /** * ipr_probe - Adapter hot plug add entry point * @pdev: pci device struct * @dev_id: pci device ID * * Return value: * 0 on success / non-zero on failure **/ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct ipr_ioa_cfg *ioa_cfg; unsigned long flags; int rc, i; rc = ipr_probe_ioa(pdev, dev_id); if (rc) return rc; ioa_cfg = pci_get_drvdata(pdev); ipr_probe_ioa_part2(ioa_cfg); rc = scsi_add_host(ioa_cfg->host, &pdev->dev); if (rc) { __ipr_remove(pdev); return rc; } rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, &ipr_trace_attr); if (rc) { scsi_remove_host(ioa_cfg->host); __ipr_remove(pdev); return rc; } rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, &ipr_ioa_async_err_log); if (rc) { ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, &ipr_dump_attr); ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, &ipr_trace_attr); scsi_remove_host(ioa_cfg->host); __ipr_remove(pdev); return rc; } rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, &ipr_dump_attr); if (rc) { sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, &ipr_ioa_async_err_log); ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, &ipr_trace_attr); scsi_remove_host(ioa_cfg->host); __ipr_remove(pdev); return rc; } spin_lock_irqsave(ioa_cfg->host->host_lock, flags); ioa_cfg->scan_enabled = 1; schedule_work(&ioa_cfg->work_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { irq_poll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); } } scsi_scan_host(ioa_cfg->host); return 0; } /** * ipr_shutdown - Shutdown handler. * @pdev: pci device struct * * This function is invoked upon system shutdown/reboot. It will issue * an adapter shutdown to the adapter to flush the write cache. 
* * Return value: * none **/ static void ipr_shutdown(struct pci_dev *pdev) { struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); unsigned long lock_flags = 0; enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL; int i; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { ioa_cfg->iopoll_weight = 0; for (i = 1; i < ioa_cfg->hrrq_num; i++) irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); } while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); } if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) shutdown_type = IPR_SHUTDOWN_QUIESCE; ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { ipr_free_irqs(ioa_cfg); pci_disable_device(ioa_cfg->pdev); } } static struct pci_device_id ipr_pci_table[] = { { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT}, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 
IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); static const struct pci_error_handlers ipr_err_handler = { .error_detected = ipr_pci_error_detected, .mmio_enabled = ipr_pci_mmio_enabled, .slot_reset = ipr_pci_slot_reset, }; static struct pci_driver ipr_driver = { .name = IPR_NAME, .id_table = ipr_pci_table, .probe = ipr_probe, .remove = ipr_remove, .shutdown = ipr_shutdown, .err_handler = &ipr_err_handler, }; /** * ipr_halt_done - Shutdown prepare completion * @ipr_cmd: ipr command struct * * Return 
value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc) {
		unregister_reboot_notifier(&ipr_notifier);
		return rc;
	}

	return 0;
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);
linux-master
drivers/scsi/ipr.c
/* ppa.c -- low level driver for the IOMEGA PPA3 * parallel port SCSI host adapter. * * (The PPA3 is the embedded controller in the ZIP drive.) * * (c) 1995,1996 Grant R. Guenther, [email protected], * under the terms of the GNU General Public License. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parport.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> static void ppa_reset_pulse(unsigned int base); typedef struct { struct pardevice *dev; /* Parport device entry */ int base; /* Actual port address */ int mode; /* Transfer mode */ struct scsi_cmnd *cur_cmd; /* Current queued command */ struct delayed_work ppa_tq; /* Polling interrupt stuff */ unsigned long jstart; /* Jiffies at start */ unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ unsigned int failed:1; /* Failure flag */ unsigned wanted:1; /* Parport sharing busy flag */ unsigned int dev_no; /* Device number */ wait_queue_head_t *waiting; struct Scsi_Host *host; struct list_head list; } ppa_struct; #include "ppa.h" static unsigned int mode = PPA_AUTODETECT; module_param(mode, uint, 0644); MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, " "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit"); static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd) { return scsi_cmd_priv(cmd); } static inline ppa_struct *ppa_dev(struct Scsi_Host *host) { return *(ppa_struct **)&host->hostdata; } static DEFINE_SPINLOCK(arbitration_lock); static void got_it(ppa_struct *dev) { dev->base = dev->dev->port->base; if (dev->cur_cmd) ppa_scsi_pointer(dev->cur_cmd)->phase = 1; else wake_up(dev->waiting); } static void ppa_wakeup(void *ref) { ppa_struct *dev = (ppa_struct *) ref; unsigned long flags; spin_lock_irqsave(&arbitration_lock, flags); if (dev->wanted) { parport_claim(dev->dev); got_it(dev); dev->wanted = 0; } spin_unlock_irqrestore(&arbitration_lock, flags); return; } static int ppa_pb_claim(ppa_struct *dev) { unsigned long flags; int res = 1; spin_lock_irqsave(&arbitration_lock, flags); if (parport_claim(dev->dev) == 0) { got_it(dev); res = 0; } dev->wanted = res; spin_unlock_irqrestore(&arbitration_lock, flags); return res; } static void ppa_pb_dismiss(ppa_struct *dev) { unsigned long flags; int wanted; spin_lock_irqsave(&arbitration_lock, flags); wanted = dev->wanted; dev->wanted = 0; spin_unlock_irqrestore(&arbitration_lock, flags); if (!wanted) parport_release(dev->dev); } static inline void ppa_pb_release(ppa_struct *dev) { parport_release(dev->dev); } /* * Start of Chipset kludges */ /* This is to give the ppa driver a way to modify the timings (and other * parameters) by writing to the /proc/scsi/ppa/0 file. * Very simple method really... (To simple, no error checking :( ) * Reason: Kernel hackers HATE having to unload and reload modules for * testing... 
* Also gives a method to use a script to obtain optimum timings (TODO) */ static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length) { ppa_struct *dev = ppa_dev(host); unsigned long x; if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) { x = simple_strtoul(buffer + 5, NULL, 0); dev->mode = x; return length; } if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) { x = simple_strtoul(buffer + 10, NULL, 0); dev->recon_tmo = x; printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x); return length; } printk(KERN_WARNING "ppa /proc: invalid variable\n"); return -EINVAL; } static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host) { ppa_struct *dev = ppa_dev(host); seq_printf(m, "Version : %s\n", PPA_VERSION); seq_printf(m, "Parport : %s\n", dev->dev->port->name); seq_printf(m, "Mode : %s\n", PPA_MODE_STRING[dev->mode]); #if PPA_DEBUG > 0 seq_printf(m, "recon_tmo : %lu\n", dev->recon_tmo); #endif return 0; } static int device_check(ppa_struct *dev, bool autodetect); #if PPA_DEBUG > 0 #define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\ y, __func__, __LINE__); ppa_fail_func(x,y); static inline void ppa_fail_func(ppa_struct *dev, int error_code) #else static inline void ppa_fail(ppa_struct *dev, int error_code) #endif { /* If we fail a device then we trash status / message bytes */ if (dev->cur_cmd) { dev->cur_cmd->result = error_code << 16; dev->failed = 1; } } /* * Wait for the high bit to be set. * * In principle, this could be tied to an interrupt, but the adapter * doesn't appear to be designed to support interrupts. We spin on * the 0x80 ready bit. */ static unsigned char ppa_wait(ppa_struct *dev) { int k; unsigned short ppb = dev->base; unsigned char r; k = PPA_SPIN_TMO; /* Wait for bit 6 and 7 - PJC */ for (r = r_str(ppb); ((r & 0xc0) != 0xc0) && (k); k--) { udelay(1); r = r_str(ppb); } /* * return some status information. * Semantics: 0xc0 = ZIP wants more data * 0xd0 = ZIP wants to send more data * 0xe0 = ZIP is expecting SCSI command data * 0xf0 = end of transfer, ZIP is sending status */ if (k) return (r & 0xf0); /* Counter expired - Time out occurred */ ppa_fail(dev, DID_TIME_OUT); printk(KERN_WARNING "ppa timeout in ppa_wait\n"); return 0; /* command timed out */ } /* * Clear EPP Timeout Bit */ static inline void epp_reset(unsigned short ppb) { int i; i = r_str(ppb); w_str(ppb, i); w_str(ppb, i & 0xfe); } /* * Wait for empty ECP fifo (if we are in ECP fifo mode only) */ static inline void ecp_sync(ppa_struct *dev) { int i, ppb_hi = dev->dev->port->base_hi; if (ppb_hi == 0) return; if ((r_ecr(ppb_hi) & 0xe0) == 0x60) { /* mode 011 == ECP fifo mode */ for (i = 0; i < 100; i++) { if (r_ecr(ppb_hi) & 0x01) return; udelay(5); } printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n"); } } static int ppa_byte_out(unsigned short base, const char *buffer, int len) { int i; for (i = len; i; i--) { w_dtr(base, *buffer++); w_ctr(base, 0xe); w_ctr(base, 0xc); } return 1; /* All went well - we hope! */ } static int ppa_byte_in(unsigned short base, char *buffer, int len) { int i; for (i = len; i; i--) { *buffer++ = r_dtr(base); w_ctr(base, 0x27); w_ctr(base, 0x25); } return 1; /* All went well - we hope! */ } static int ppa_nibble_in(unsigned short base, char *buffer, int len) { for (; len; len--) { unsigned char h; w_ctr(base, 0x4); h = r_str(base) & 0xf0; w_ctr(base, 0x6); *buffer++ = h | ((r_str(base) & 0xf0) >> 4); } return 1; /* All went well - we hope! 
*/ } static int ppa_out(ppa_struct *dev, char *buffer, int len) { int r; unsigned short ppb = dev->base; r = ppa_wait(dev); if ((r & 0x50) != 0x40) { ppa_fail(dev, DID_ERROR); return 0; } switch (dev->mode) { case PPA_NIBBLE: case PPA_PS2: /* 8 bit output, with a loop */ r = ppa_byte_out(ppb, buffer, len); break; case PPA_EPP_32: case PPA_EPP_16: case PPA_EPP_8: epp_reset(ppb); w_ctr(ppb, 0x4); if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03)) outsl(ppb + 4, buffer, len >> 2); else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01)) outsw(ppb + 4, buffer, len >> 1); else outsb(ppb + 4, buffer, len); w_ctr(ppb, 0xc); r = !(r_str(ppb) & 0x01); w_ctr(ppb, 0xc); ecp_sync(dev); break; default: printk(KERN_ERR "PPA: bug in ppa_out()\n"); r = 0; } return r; } static int ppa_in(ppa_struct *dev, char *buffer, int len) { int r; unsigned short ppb = dev->base; r = ppa_wait(dev); if ((r & 0x50) != 0x50) { ppa_fail(dev, DID_ERROR); return 0; } switch (dev->mode) { case PPA_NIBBLE: /* 4 bit input, with a loop */ r = ppa_nibble_in(ppb, buffer, len); w_ctr(ppb, 0xc); break; case PPA_PS2: /* 8 bit input, with a loop */ w_ctr(ppb, 0x25); r = ppa_byte_in(ppb, buffer, len); w_ctr(ppb, 0x4); w_ctr(ppb, 0xc); break; case PPA_EPP_32: case PPA_EPP_16: case PPA_EPP_8: epp_reset(ppb); w_ctr(ppb, 0x24); if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03)) insl(ppb + 4, buffer, len >> 2); else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01)) insw(ppb + 4, buffer, len >> 1); else insb(ppb + 4, buffer, len); w_ctr(ppb, 0x2c); r = !(r_str(ppb) & 0x01); w_ctr(ppb, 0x2c); ecp_sync(dev); break; default: printk(KERN_ERR "PPA: bug in ppa_ins()\n"); r = 0; break; } return r; } /* end of ppa_io.h */ static inline void ppa_d_pulse(unsigned short ppb, unsigned char b) { w_dtr(ppb, b); w_ctr(ppb, 0xc); w_ctr(ppb, 0xe); w_ctr(ppb, 0xc); w_ctr(ppb, 0x4); w_ctr(ppb, 0xc); } static void ppa_disconnect(ppa_struct *dev) { unsigned short ppb = dev->base; ppa_d_pulse(ppb, 0); ppa_d_pulse(ppb, 0x3c); ppa_d_pulse(ppb, 0x20); ppa_d_pulse(ppb, 0xf); } static inline void ppa_c_pulse(unsigned short ppb, unsigned char b) { w_dtr(ppb, b); w_ctr(ppb, 0x4); w_ctr(ppb, 0x6); w_ctr(ppb, 0x4); w_ctr(ppb, 0xc); } static inline void ppa_connect(ppa_struct *dev, int flag) { unsigned short ppb = dev->base; ppa_c_pulse(ppb, 0); ppa_c_pulse(ppb, 0x3c); ppa_c_pulse(ppb, 0x20); if ((flag == CONNECT_EPP_MAYBE) && IN_EPP_MODE(dev->mode)) ppa_c_pulse(ppb, 0xcf); else ppa_c_pulse(ppb, 0x8f); } static int ppa_select(ppa_struct *dev, int target) { int k; unsigned short ppb = dev->base; /* * Bit 6 (0x40) is the device selected bit. * First we must wait till the current device goes off line... */ k = PPA_SELECT_TMO; do { k--; udelay(1); } while ((r_str(ppb) & 0x40) && (k)); if (!k) return 0; w_dtr(ppb, (1 << target)); w_ctr(ppb, 0xe); w_ctr(ppb, 0xc); w_dtr(ppb, 0x80); /* This is NOT the initator */ w_ctr(ppb, 0x8); k = PPA_SELECT_TMO; do { k--; udelay(1); } while (!(r_str(ppb) & 0x40) && (k)); if (!k) return 0; return 1; } /* * This is based on a trace of what the Iomega DOS 'guest' driver does. * I've tried several different kinds of parallel ports with guest and * coded this to react in the same ways that it does. * * The return value from this function is just a hint about where the * handshaking failed. 
* */ static int ppa_init(ppa_struct *dev) { int retv; unsigned short ppb = dev->base; bool autodetect = dev->mode == PPA_AUTODETECT; if (autodetect) { int modes = dev->dev->port->modes; int ppb_hi = dev->dev->port->base_hi; /* Mode detection works up the chain of speed * This avoids a nasty if-then-else-if-... tree */ dev->mode = PPA_NIBBLE; if (modes & PARPORT_MODE_TRISTATE) dev->mode = PPA_PS2; if (modes & PARPORT_MODE_ECP) { w_ecr(ppb_hi, 0x20); dev->mode = PPA_PS2; } if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP)) w_ecr(ppb_hi, 0x80); } ppa_disconnect(dev); ppa_connect(dev, CONNECT_NORMAL); retv = 2; /* Failed */ w_ctr(ppb, 0xe); if ((r_str(ppb) & 0x08) == 0x08) retv--; w_ctr(ppb, 0xc); if ((r_str(ppb) & 0x08) == 0x00) retv--; if (!retv) ppa_reset_pulse(ppb); udelay(1000); /* Allow devices to settle down */ ppa_disconnect(dev); udelay(1000); /* Another delay to allow devices to settle */ if (retv) return -EIO; return device_check(dev, autodetect); } static inline int ppa_send_command(struct scsi_cmnd *cmd) { ppa_struct *dev = ppa_dev(cmd->device->host); int k; w_ctr(dev->base, 0x0c); for (k = 0; k < cmd->cmd_len; k++) if (!ppa_out(dev, &cmd->cmnd[k], 1)) return 0; return 1; } /* * The bulk flag enables some optimisations in the data transfer loops, * it should be true for any command that transfers data in integral * numbers of sectors. * * The driver appears to remain stable if we speed up the parallel port * i/o in this function, but not elsewhere. */ static int ppa_completion(struct scsi_cmnd *const cmd) { /* Return codes: * -1 Error * 0 Told to schedule * 1 Finished data transfer */ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd); ppa_struct *dev = ppa_dev(cmd->device->host); unsigned short ppb = dev->base; unsigned long start_jiffies = jiffies; unsigned char r, v; int fast, bulk, status; v = cmd->cmnd[0]; bulk = ((v == READ_6) || (v == READ_10) || (v == WRITE_6) || (v == WRITE_10)); /* * We only get here if the drive is ready to comunicate, * hence no need for a full ppa_wait. */ r = (r_str(ppb) & 0xf0); while (r != (unsigned char) 0xf0) { /* * If we have been running for more than a full timer tick * then take a rest. */ if (time_after(jiffies, start_jiffies + 1)) return 0; if (scsi_pointer->this_residual <= 0) { ppa_fail(dev, DID_ERROR); return -1; /* ERROR_RETURN */ } /* On some hardware we have SCSI disconnected (6th bit low) * for about 100usecs. It is too expensive to wait a * tick on every loop so we busy wait for no more than * 500usecs to give the drive a chance first. We do not * change things for "normal" hardware since generally * the 6th bit is always high. * This makes the CPU load higher on some hardware * but otherwise we can not get more than 50K/secs * on this problem hardware. */ if ((r & 0xc0) != 0xc0) { /* Wait for reconnection should be no more than * jiffy/2 = 5ms = 5000 loops */ unsigned long k = dev->recon_tmo; for (; k && ((r = (r_str(ppb) & 0xf0)) & 0xc0) != 0xc0; k--) udelay(1); if (!k) return 0; } /* determine if we should use burst I/O */ fast = bulk && scsi_pointer->this_residual >= PPA_BURST_SIZE ? 
PPA_BURST_SIZE : 1; if (r == (unsigned char) 0xc0) status = ppa_out(dev, scsi_pointer->ptr, fast); else status = ppa_in(dev, scsi_pointer->ptr, fast); scsi_pointer->ptr += fast; scsi_pointer->this_residual -= fast; if (!status) { ppa_fail(dev, DID_BUS_BUSY); return -1; /* ERROR_RETURN */ } if (scsi_pointer->buffer && !scsi_pointer->this_residual) { /* if scatter/gather, advance to the next segment */ if (scsi_pointer->buffers_residual--) { scsi_pointer->buffer = sg_next(scsi_pointer->buffer); scsi_pointer->this_residual = scsi_pointer->buffer->length; scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); } } /* Now check to see if the drive is ready to comunicate */ r = (r_str(ppb) & 0xf0); /* If not, drop back down to the scheduler and wait a timer tick */ if (!(r & 0x80)) return 0; } return 1; /* FINISH_RETURN */ } /* * Since the PPA itself doesn't generate interrupts, we use * the scheduler's task queue to generate a stream of call-backs and * complete the request when the drive is ready. */ static void ppa_interrupt(struct work_struct *work) { ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); struct scsi_cmnd *cmd = dev->cur_cmd; if (!cmd) { printk(KERN_ERR "PPA: bug in ppa_interrupt\n"); return; } if (ppa_engine(dev, cmd)) { schedule_delayed_work(&dev->ppa_tq, 1); return; } /* Command must of completed hence it is safe to let go... */ #if PPA_DEBUG > 0 switch ((cmd->result >> 16) & 0xff) { case DID_OK: break; case DID_NO_CONNECT: printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", scmd_id(cmd)); break; case DID_BUS_BUSY: printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n"); break; case DID_TIME_OUT: printk(KERN_DEBUG "ppa: unknown timeout\n"); break; case DID_ABORT: printk(KERN_DEBUG "ppa: told to abort\n"); break; case DID_PARITY: printk(KERN_DEBUG "ppa: parity error (???)\n"); break; case DID_ERROR: printk(KERN_DEBUG "ppa: internal driver error\n"); break; case DID_RESET: printk(KERN_DEBUG "ppa: told to reset device\n"); break; case DID_BAD_INTR: printk(KERN_WARNING "ppa: bad interrupt (???)\n"); break; default: printk(KERN_WARNING "ppa: bad return code (%02x)\n", (cmd->result >> 16) & 0xff); } #endif if (ppa_scsi_pointer(cmd)->phase > 1) ppa_disconnect(dev); ppa_pb_dismiss(dev); dev->cur_cmd = NULL; scsi_done(cmd); } static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) { struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd); unsigned short ppb = dev->base; unsigned char l = 0, h = 0; int retv; /* First check for any errors that may of occurred * Here we check for internal errors */ if (dev->failed) return 0; switch (scsi_pointer->phase) { case 0: /* Phase 0 - Waiting for parport */ if (time_after(jiffies, dev->jstart + HZ)) { /* * We waited more than a second * for parport to call us */ ppa_fail(dev, DID_BUS_BUSY); return 0; } return 1; /* wait until ppa_wakeup claims parport */ case 1: /* Phase 1 - Connected */ { /* Perform a sanity check for cable unplugged */ int retv = 2; /* Failed */ ppa_connect(dev, CONNECT_EPP_MAYBE); w_ctr(ppb, 0xe); if ((r_str(ppb) & 0x08) == 0x08) retv--; w_ctr(ppb, 0xc); if ((r_str(ppb) & 0x08) == 0x00) retv--; if (retv) { if (time_after(jiffies, dev->jstart + (1 * HZ))) { printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n"); ppa_fail(dev, DID_BUS_BUSY); return 0; } else { ppa_disconnect(dev); return 1; /* Try again in a jiffy */ } } scsi_pointer->phase++; } fallthrough; case 2: /* Phase 2 - We are now talking to the scsi bus */ if (!ppa_select(dev, scmd_id(cmd))) { ppa_fail(dev, DID_NO_CONNECT); return 0; } 
scsi_pointer->phase++; fallthrough; case 3: /* Phase 3 - Ready to accept a command */ w_ctr(ppb, 0x0c); if (!(r_str(ppb) & 0x80)) return 1; if (!ppa_send_command(cmd)) return 0; scsi_pointer->phase++; fallthrough; case 4: /* Phase 4 - Setup scatter/gather buffers */ if (scsi_bufflen(cmd)) { scsi_pointer->buffer = scsi_sglist(cmd); scsi_pointer->this_residual = scsi_pointer->buffer->length; scsi_pointer->ptr = sg_virt(scsi_pointer->buffer); } else { scsi_pointer->buffer = NULL; scsi_pointer->this_residual = 0; scsi_pointer->ptr = NULL; } scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1; scsi_pointer->phase++; fallthrough; case 5: /* Phase 5 - Data transfer stage */ w_ctr(ppb, 0x0c); if (!(r_str(ppb) & 0x80)) return 1; retv = ppa_completion(cmd); if (retv == -1) return 0; if (retv == 0) return 1; scsi_pointer->phase++; fallthrough; case 6: /* Phase 6 - Read status/message */ cmd->result = DID_OK << 16; /* Check for data overrun */ if (ppa_wait(dev) != (unsigned char) 0xf0) { ppa_fail(dev, DID_ERROR); return 0; } if (ppa_in(dev, &l, 1)) { /* read status byte */ /* Check for optional message byte */ if (ppa_wait(dev) == (unsigned char) 0xf0) ppa_in(dev, &h, 1); cmd->result = (DID_OK << 16) + (h << 8) + (l & STATUS_MASK); } return 0; /* Finished */ default: printk(KERN_ERR "ppa: Invalid scsi phase\n"); } return 0; } static int ppa_queuecommand_lck(struct scsi_cmnd *cmd) { ppa_struct *dev = ppa_dev(cmd->device->host); if (dev->cur_cmd) { printk(KERN_ERR "PPA: bug in ppa_queuecommand\n"); return 0; } dev->failed = 0; dev->jstart = jiffies; dev->cur_cmd = cmd; cmd->result = DID_ERROR << 16; /* default return code */ ppa_scsi_pointer(cmd)->phase = 0; /* bus free */ schedule_delayed_work(&dev->ppa_tq, 0); ppa_pb_claim(dev); return 0; } static DEF_SCSI_QCMD(ppa_queuecommand) /* * Apparently the disk->capacity attribute is off by 1 sector * for all disk drives. We add the one here, but it should really * be done in sd.c. Even if it gets fixed there, this will still * work. */ static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int ip[]) { ip[0] = 0x40; ip[1] = 0x20; ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); if (ip[2] > 1024) { ip[0] = 0xff; ip[1] = 0x3f; ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); if (ip[2] > 1023) ip[2] = 1023; } return 0; } static int ppa_abort(struct scsi_cmnd *cmd) { ppa_struct *dev = ppa_dev(cmd->device->host); /* * There is no method for aborting commands since Iomega * have tied the SCSI_MESSAGE line high in the interface */ switch (ppa_scsi_pointer(cmd)->phase) { case 0: /* Do not have access to parport */ case 1: /* Have not connected to interface */ dev->cur_cmd = NULL; /* Forget the problem */ return SUCCESS; default: /* SCSI command sent, can not abort */ return FAILED; } } static void ppa_reset_pulse(unsigned int base) { w_dtr(base, 0x40); w_ctr(base, 0x8); udelay(30); w_ctr(base, 0xc); } static int ppa_reset(struct scsi_cmnd *cmd) { ppa_struct *dev = ppa_dev(cmd->device->host); if (ppa_scsi_pointer(cmd)->phase) ppa_disconnect(dev); dev->cur_cmd = NULL; /* Forget the problem */ ppa_connect(dev, CONNECT_NORMAL); ppa_reset_pulse(dev->base); mdelay(1); /* device settle delay */ ppa_disconnect(dev); mdelay(1); /* device settle delay */ return SUCCESS; } static int device_check(ppa_struct *dev, bool autodetect) { /* This routine looks for a device and then attempts to use EPP to send a command. If all goes as planned then EPP is available. 
*/ static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; int loop, old_mode, status, k, ppb = dev->base; unsigned char l; old_mode = dev->mode; for (loop = 0; loop < 8; loop++) { /* Attempt to use EPP for Test Unit Ready */ if (autodetect && (ppb & 0x0007) == 0x0000) dev->mode = PPA_EPP_8; second_pass: ppa_connect(dev, CONNECT_EPP_MAYBE); /* Select SCSI device */ if (!ppa_select(dev, loop)) { ppa_disconnect(dev); continue; } printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n", loop, PPA_MODE_STRING[dev->mode]); /* Send SCSI command */ status = 1; w_ctr(ppb, 0x0c); for (l = 0; (l < 6) && (status); l++) status = ppa_out(dev, cmd, 1); if (!status) { ppa_disconnect(dev); ppa_connect(dev, CONNECT_EPP_MAYBE); w_dtr(ppb, 0x40); w_ctr(ppb, 0x08); udelay(30); w_ctr(ppb, 0x0c); udelay(1000); ppa_disconnect(dev); udelay(1000); if (dev->mode != old_mode) { dev->mode = old_mode; goto second_pass; } return -EIO; } w_ctr(ppb, 0x0c); k = 1000000; /* 1 Second */ do { l = r_str(ppb); k--; udelay(1); } while (!(l & 0x80) && (k)); l &= 0xf0; if (l != 0xf0) { ppa_disconnect(dev); ppa_connect(dev, CONNECT_EPP_MAYBE); ppa_reset_pulse(ppb); udelay(1000); ppa_disconnect(dev); udelay(1000); if (dev->mode != old_mode) { dev->mode = old_mode; goto second_pass; } return -EIO; } ppa_disconnect(dev); printk(KERN_INFO "ppa: Communication established with ID %i using %s\n", loop, PPA_MODE_STRING[dev->mode]); ppa_connect(dev, CONNECT_EPP_MAYBE); ppa_reset_pulse(ppb); udelay(1000); ppa_disconnect(dev); udelay(1000); return 0; } return -ENODEV; } static int ppa_adjust_queue(struct scsi_device *device) { blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); return 0; } static const struct scsi_host_template ppa_template = { .module = THIS_MODULE, .proc_name = "ppa", .show_info = ppa_show_info, .write_info = ppa_write_info, .name = "Iomega VPI0 (ppa) interface", .queuecommand = ppa_queuecommand, .eh_abort_handler = ppa_abort, .eh_host_reset_handler = ppa_reset, .bios_param = ppa_biosparam, .this_id = -1, .sg_tablesize = SG_ALL, .can_queue = 1, .slave_alloc = ppa_adjust_queue, .cmd_size = sizeof(struct scsi_pointer), }; /*************************************************************************** * Parallel port probing routines * ***************************************************************************/ static LIST_HEAD(ppa_hosts); /* * Finds the first available device number that can be alloted to the * new ppa device and returns the address of the previous node so that * we can add to the tail and have a list in the ascending order. */ static inline ppa_struct *find_parent(void) { ppa_struct *dev, *par = NULL; unsigned int cnt = 0; if (list_empty(&ppa_hosts)) return NULL; list_for_each_entry(dev, &ppa_hosts, list) { if (dev->dev_no != cnt) return par; cnt++; par = dev; } return par; } static int __ppa_attach(struct parport *pb) { struct Scsi_Host *host; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting); DEFINE_WAIT(wait); ppa_struct *dev, *temp; int ports; int err = -ENOMEM; struct pardev_cb ppa_cb; dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL); if (!dev) return -ENOMEM; dev->base = -1; dev->mode = mode < PPA_UNKNOWN ? 
mode : PPA_AUTODETECT; dev->recon_tmo = PPA_RECON_TMO; init_waitqueue_head(&waiting); temp = find_parent(); if (temp) dev->dev_no = temp->dev_no + 1; memset(&ppa_cb, 0, sizeof(ppa_cb)); ppa_cb.private = dev; ppa_cb.wakeup = ppa_wakeup; dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no); if (!dev->dev) goto out; /* Claim the bus so it remembers what we do to the control * registers. [ CTR and ECP ] */ err = -EBUSY; dev->waiting = &waiting; prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE); if (ppa_pb_claim(dev)) schedule_timeout(3 * HZ); if (dev->wanted) { printk(KERN_ERR "ppa%d: failed to claim parport because " "a pardevice is owning the port for too long " "time!\n", pb->number); ppa_pb_dismiss(dev); dev->waiting = NULL; finish_wait(&waiting, &wait); goto out1; } dev->waiting = NULL; finish_wait(&waiting, &wait); dev->base = dev->dev->port->base; w_ctr(dev->base, 0x0c); /* Done configuration */ err = ppa_init(dev); ppa_pb_release(dev); if (err) goto out1; /* now the glue ... */ if (dev->mode == PPA_NIBBLE || dev->mode == PPA_PS2) ports = 3; else ports = 8; INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt); err = -ENOMEM; host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); if (!host) goto out1; host->io_port = pb->base; host->n_io_port = ports; host->dma_channel = -1; host->unique_id = pb->number; *(ppa_struct **)&host->hostdata = dev; dev->host = host; list_add_tail(&dev->list, &ppa_hosts); err = scsi_add_host(host, NULL); if (err) goto out2; scsi_scan_host(host); return 0; out2: list_del_init(&dev->list); scsi_host_put(host); out1: parport_unregister_device(dev->dev); out: kfree(dev); return err; } static void ppa_attach(struct parport *pb) { __ppa_attach(pb); } static void ppa_detach(struct parport *pb) { ppa_struct *dev; list_for_each_entry(dev, &ppa_hosts, list) { if (dev->dev->port == pb) { list_del_init(&dev->list); scsi_remove_host(dev->host); scsi_host_put(dev->host); parport_unregister_device(dev->dev); kfree(dev); break; } } } static struct parport_driver ppa_driver = { .name = "ppa", .match_port = ppa_attach, .detach = ppa_detach, .devmodel = true, }; module_parport_driver(ppa_driver); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/ppa.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * iSCSI over TCP/IP Data-Path lib * * Copyright (C) 2004 Dmitry Yusupov * Copyright (C) 2004 Alex Aizman * Copyright (C) 2005 - 2006 Mike Christie * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * maintained by [email protected] * * Credits: * Christoph Hellwig * FUJITA Tomonori * Arne Redlich * Zhenyu Wang */ #include <crypto/hash.h> #include <linux/types.h> #include <linux/list.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> #include <linux/module.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/scsi_transport_iscsi.h> #include <trace/events/iscsi.h> #include "iscsi_tcp.h" MODULE_AUTHOR("Mike Christie <[email protected]>, " "Dmitry Yusupov <[email protected]>, " "Alex Aizman <[email protected]>"); MODULE_DESCRIPTION("iSCSI/TCP data-path"); MODULE_LICENSE("GPL"); static int iscsi_dbg_libtcp; module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp " "module. Set to 1 to turn on, and zero to turn off. Default " "is off."); #define ISCSI_DBG_TCP(_conn, dbg_fmt, arg...) \ do { \ if (iscsi_dbg_libtcp) \ iscsi_conn_printk(KERN_INFO, _conn, \ "%s " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_tcp, \ &(_conn)->cls_conn->dev, \ "%s " dbg_fmt, __func__, ##arg);\ } while (0); static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment); /* * Scatterlist handling: inside the iscsi_segment, we * remember an index into the scatterlist, and set data/size * to the current scatterlist entry. For highmem pages, we * kmap as needed. * * Note that the page is unmapped when we return from * TCP's data_ready handler, so we may end up mapping and * unmapping the same page repeatedly. The whole reason * for this is that we shouldn't keep the page mapped * outside the softirq. */ /** * iscsi_tcp_segment_init_sg - init indicated scatterlist entry * @segment: the buffer object * @sg: scatterlist * @offset: byte offset into that sg entry * * This function sets up the segment so that subsequent * data is copied to the indicated sg entry, at the given * offset. */ static inline void iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, struct scatterlist *sg, unsigned int offset) { segment->sg = sg; segment->sg_offset = offset; segment->size = min(sg->length - offset, segment->total_size - segment->total_copied); segment->data = NULL; } /** * iscsi_tcp_segment_map - map the current S/G page * @segment: iscsi_segment * @recv: 1 if called from recv path * * We only need to possibly kmap data if scatter lists are being used, * because the iscsi passthrough and internal IO paths will never use high * mem pages. */ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) { struct scatterlist *sg; if (segment->data != NULL || !segment->sg) return; sg = segment->sg; BUG_ON(segment->sg_mapped); BUG_ON(sg->length == 0); /* * We always map for the recv path. * * If the page count is greater than one it is ok to send * to the network layer's zero copy send path. If not we * have to go the slow sendmsg path. * * Same goes for slab pages: skb_can_coalesce() allows * coalescing neighboring slab objects into a single frag which * triggers one of hardened usercopy checks. 
*/ if (!recv && sendpage_ok(sg_page(sg))) return; if (recv) { segment->atomic_mapped = true; segment->sg_mapped = kmap_atomic(sg_page(sg)); } else { segment->atomic_mapped = false; /* the xmit path can sleep with the page mapped so use kmap */ segment->sg_mapped = kmap(sg_page(sg)); } segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; } void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) { if (segment->sg_mapped) { if (segment->atomic_mapped) kunmap_atomic(segment->sg_mapped); else kunmap(sg_page(segment->sg)); segment->sg_mapped = NULL; segment->data = NULL; } } EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap); /* * Splice the digest buffer into the buffer */ static inline void iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest) { segment->data = digest; segment->digest_len = ISCSI_DIGEST_SIZE; segment->total_size += ISCSI_DIGEST_SIZE; segment->size = ISCSI_DIGEST_SIZE; segment->copied = 0; segment->sg = NULL; segment->hash = NULL; } /** * iscsi_tcp_segment_done - check whether the segment is complete * @tcp_conn: iscsi tcp connection * @segment: iscsi segment to check * @recv: set to one of this is called from the recv path * @copied: number of bytes copied * * Check if we're done receiving this segment. If the receive * buffer is full but we expect more data, move on to the * next entry in the scatterlist. * * If the amount of data we received isn't a multiple of 4, * we will transparently receive the pad bytes, too. * * This function must be re-entrant. */ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, int recv, unsigned copied) { struct scatterlist sg; unsigned int pad; ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n", segment->copied, copied, segment->size, recv ? "recv" : "xmit"); if (segment->hash && copied) { /* * If a segment is kmapd we must unmap it before sending * to the crypto layer since that will try to kmap it again. */ iscsi_tcp_segment_unmap(segment); if (!segment->data) { sg_init_table(&sg, 1); sg_set_page(&sg, sg_page(segment->sg), copied, segment->copied + segment->sg_offset + segment->sg->offset); } else sg_init_one(&sg, segment->data + segment->copied, copied); ahash_request_set_crypt(segment->hash, &sg, NULL, copied); crypto_ahash_update(segment->hash); } segment->copied += copied; if (segment->copied < segment->size) { iscsi_tcp_segment_map(segment, recv); return 0; } segment->total_copied += segment->copied; segment->copied = 0; segment->size = 0; /* Unmap the current scatterlist page, if there is one. */ iscsi_tcp_segment_unmap(segment); /* Do we have more scatterlist entries? */ ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n", segment->total_copied, segment->total_size); if (segment->total_copied < segment->total_size) { /* Proceed to the next entry in the scatterlist. */ iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), 0); iscsi_tcp_segment_map(segment, recv); BUG_ON(segment->size == 0); return 0; } /* Do we need to handle padding? */ if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) { pad = iscsi_padding(segment->total_copied); if (pad != 0) { ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "consume %d pad bytes\n", pad); segment->total_size += pad; segment->size = pad; segment->data = segment->padbuf; return 0; } } /* * Set us up for transferring the data digest. hdr digest * is completely handled in hdr done function. 
*/ if (segment->hash) { ahash_request_set_crypt(segment->hash, NULL, segment->digest, 0); crypto_ahash_final(segment->hash); iscsi_tcp_segment_splice_digest(segment, recv ? segment->recv_digest : segment->digest); return 0; } return 1; } EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done); /** * iscsi_tcp_segment_recv - copy data to segment * @tcp_conn: the iSCSI TCP connection * @segment: the buffer to copy to * @ptr: data pointer * @len: amount of data available * * This function copies up to @len bytes to the * given buffer, and returns the number of bytes * consumed, which can actually be less than @len. * * If hash digest is enabled, the function will update the * hash while copying. * Combining these two operations doesn't buy us a lot (yet), * but in the future we could implement combined copy+crc, * just way we do for network layer checksums. */ static int iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, const void *ptr, unsigned int len) { unsigned int copy = 0, copied = 0; while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) { if (copied == len) { ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %d bytes\n", len); break; } copy = min(len - copied, segment->size - segment->copied); ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copying %d\n", copy); memcpy(segment->data + segment->copied, ptr + copied, copy); copied += copy; } return copied; } inline void iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr, size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE]) { struct scatterlist sg; sg_init_one(&sg, hdr, hdrlen); ahash_request_set_crypt(hash, &sg, digest, hdrlen); crypto_ahash_digest(hash); } EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header); static inline int iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) { if (!segment->digest_len) return 1; if (memcmp(segment->recv_digest, segment->digest, segment->digest_len)) { ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "digest mismatch\n"); return 0; } return 1; } /* * Helper function to set up segment buffer */ static inline void __iscsi_segment_init(struct iscsi_segment *segment, size_t size, iscsi_segment_done_fn_t *done, struct ahash_request *hash) { memset(segment, 0, sizeof(*segment)); segment->total_size = size; segment->done = done; if (hash) { segment->hash = hash; crypto_ahash_init(hash); } } inline void iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, size_t size, iscsi_segment_done_fn_t *done, struct ahash_request *hash) { __iscsi_segment_init(segment, size, done, hash); segment->data = data; segment->size = size; } EXPORT_SYMBOL_GPL(iscsi_segment_init_linear); inline int iscsi_segment_seek_sg(struct iscsi_segment *segment, struct scatterlist *sg_list, unsigned int sg_count, unsigned int offset, size_t size, iscsi_segment_done_fn_t *done, struct ahash_request *hash) { struct scatterlist *sg; unsigned int i; __iscsi_segment_init(segment, size, done, hash); for_each_sg(sg_list, sg, sg_count, i) { if (offset < sg->length) { iscsi_tcp_segment_init_sg(segment, sg, offset); return 0; } offset -= sg->length; } return ISCSI_ERR_DATA_OFFSET; } EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg); /** * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception * @tcp_conn: iscsi connection to prep for * * This function always passes NULL for the hash argument, because when this * function is called we do not yet know the final size of the header and want * to delay the digest processing until we know that. 
*/ void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn) { ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "(%s)\n", tcp_conn->iscsi_conn->hdrdgst_en ? "digest enabled" : "digest disabled"); iscsi_segment_init_linear(&tcp_conn->in.segment, tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr), iscsi_tcp_hdr_recv_done, NULL); } EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep); /* * Handle incoming reply to any other type of command */ static int iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) { struct iscsi_conn *conn = tcp_conn->iscsi_conn; int rc = 0; if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) return ISCSI_ERR_DATA_DGST; rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, tcp_conn->in.datalen); if (rc) return rc; iscsi_tcp_hdr_recv_prep(tcp_conn); return 0; } static void iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) { struct iscsi_conn *conn = tcp_conn->iscsi_conn; struct ahash_request *rx_hash = NULL; if (conn->datadgst_en && !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) rx_hash = tcp_conn->rx_hash; iscsi_segment_init_linear(&tcp_conn->in.segment, conn->data, tcp_conn->in.datalen, iscsi_tcp_data_recv_done, rx_hash); } /** * iscsi_tcp_cleanup_task - free tcp_task resources * @task: iscsi task * * must be called with session back_lock */ void iscsi_tcp_cleanup_task(struct iscsi_task *task) { struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t; /* nothing to do for mgmt */ if (!task->sc) return; spin_lock_bh(&tcp_task->queue2pool); /* flush task's r2t queues */ while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); } r2t = tcp_task->r2t; if (r2t != NULL) { kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); tcp_task->r2t = NULL; } spin_unlock_bh(&tcp_task->queue2pool); } EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task); /** * iscsi_tcp_data_in - SCSI Data-In Response processing * @conn: iscsi connection * @task: scsi command task */ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; int datasn = be32_to_cpu(rhdr->datasn); unsigned total_in_length = task->sc->sdb.length; /* * lib iscsi will update this in the completion handling if there * is status. 
*/ if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); if (tcp_conn->in.datalen == 0) return 0; if (tcp_task->exp_datasn != datasn) { ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)" "\n", tcp_task->exp_datasn, datasn); return ISCSI_ERR_DATASN; } tcp_task->exp_datasn++; tcp_task->data_offset = be32_to_cpu(rhdr->offset); if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) { ISCSI_DBG_TCP(conn, "data_offset(%d) + data_len(%d) > " "total_length_in(%d)\n", tcp_task->data_offset, tcp_conn->in.datalen, total_in_length); return ISCSI_ERR_DATA_OFFSET; } conn->datain_pdus_cnt++; return 0; } /** * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing * @conn: iscsi connection * @hdr: PDU header */ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) { struct iscsi_session *session = conn->session; struct iscsi_tcp_task *tcp_task; struct iscsi_tcp_conn *tcp_conn; struct iscsi_r2t_rsp *rhdr; struct iscsi_r2t_info *r2t; struct iscsi_task *task; u32 data_length; u32 data_offset; int r2tsn; int rc; spin_lock(&session->back_lock); task = iscsi_itt_to_ctask(conn, hdr->itt); if (!task) { spin_unlock(&session->back_lock); return ISCSI_ERR_BAD_ITT; } else if (task->sc->sc_data_direction != DMA_TO_DEVICE) { spin_unlock(&session->back_lock); return ISCSI_ERR_PROTO; } /* * A bad target might complete the cmd before we have handled R2Ts * so get a ref to the task that will be dropped in the xmit path. */ if (task->state != ISCSI_TASK_RUNNING) { spin_unlock(&session->back_lock); /* Let the path that got the early rsp complete it */ return 0; } task->last_xfer = jiffies; if (!iscsi_get_task(task)) { spin_unlock(&session->back_lock); /* Let the path that got the early rsp complete it */ return 0; } tcp_conn = conn->dd_data; rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; /* fill-in new R2T associated with the task */ iscsi_update_cmdsn(session, (struct iscsi_nopin *)rhdr); spin_unlock(&session->back_lock); if (tcp_conn->in.datalen) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2t with datalen %d\n", tcp_conn->in.datalen); rc = ISCSI_ERR_DATALEN; goto put_task; } tcp_task = task->dd_data; r2tsn = be32_to_cpu(rhdr->r2tsn); if (tcp_task->exp_datasn != r2tsn){ ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n", tcp_task->exp_datasn, r2tsn); rc = ISCSI_ERR_R2TSN; goto put_task; } if (session->state != ISCSI_STATE_LOGGED_IN) { iscsi_conn_printk(KERN_INFO, conn, "dropping R2T itt %d in recovery.\n", task->itt); rc = 0; goto put_task; } data_length = be32_to_cpu(rhdr->data_length); if (data_length == 0) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with zero data len\n"); rc = ISCSI_ERR_DATALEN; goto put_task; } if (data_length > session->max_burst) ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max " "burst %u. Attempting to execute request.\n", data_length, session->max_burst); data_offset = be32_to_cpu(rhdr->data_offset); if (data_offset + data_length > task->sc->sdb.length) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with data len %u at offset %u " "and total length %d\n", data_length, data_offset, task->sc->sdb.length); rc = ISCSI_ERR_DATALEN; goto put_task; } spin_lock(&tcp_task->pool2queue); rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *)); if (!rc) { iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. 
" "Target has sent more R2Ts than it " "negotiated for or driver has leaked.\n"); spin_unlock(&tcp_task->pool2queue); rc = ISCSI_ERR_PROTO; goto put_task; } r2t->exp_statsn = rhdr->statsn; r2t->data_length = data_length; r2t->data_offset = data_offset; r2t->ttt = rhdr->ttt; /* no flip */ r2t->datasn = 0; r2t->sent = 0; tcp_task->exp_datasn = r2tsn + 1; kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); conn->r2t_pdus_cnt++; spin_unlock(&tcp_task->pool2queue); iscsi_requeue_task(task); return 0; put_task: iscsi_put_task(task); return rc; } /* * Handle incoming reply to DataIn command */ static int iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) { struct iscsi_conn *conn = tcp_conn->iscsi_conn; struct iscsi_hdr *hdr = tcp_conn->in.hdr; int rc; if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) return ISCSI_ERR_DATA_DGST; /* check for non-exceptional status */ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); if (rc) return rc; } iscsi_tcp_hdr_recv_prep(tcp_conn); return 0; } /** * iscsi_tcp_hdr_dissect - process PDU header * @conn: iSCSI connection * @hdr: PDU header * * This function analyzes the header of the PDU received, * and performs several sanity checks. If the PDU is accompanied * by data, the receive buffer is set up to copy the incoming data * to the correct location. */ static int iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) { int rc = 0, opcode, ahslen; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_task *task; /* verify PDU length */ tcp_conn->in.datalen = ntoh24(hdr->dlength); if (tcp_conn->in.datalen > conn->max_recv_dlength) { iscsi_conn_printk(KERN_ERR, conn, "iscsi_tcp: datalen %d > %d\n", tcp_conn->in.datalen, conn->max_recv_dlength); return ISCSI_ERR_DATALEN; } /* Additional header segments. So far, we don't * process additional headers. */ ahslen = hdr->hlength << 2; opcode = hdr->opcode & ISCSI_OPCODE_MASK; /* verify itt (itt encoding: age+cid+itt) */ rc = iscsi_verify_itt(conn, hdr->itt); if (rc) return rc; ISCSI_DBG_TCP(conn, "opcode 0x%x ahslen %d datalen %d\n", opcode, ahslen, tcp_conn->in.datalen); switch(opcode) { case ISCSI_OP_SCSI_DATA_IN: spin_lock(&conn->session->back_lock); task = iscsi_itt_to_ctask(conn, hdr->itt); if (!task) rc = ISCSI_ERR_BAD_ITT; else rc = iscsi_tcp_data_in(conn, task); if (rc) { spin_unlock(&conn->session->back_lock); break; } if (tcp_conn->in.datalen) { struct iscsi_tcp_task *tcp_task = task->dd_data; struct ahash_request *rx_hash = NULL; struct scsi_data_buffer *sdb = &task->sc->sdb; /* * Setup copy of Data-In into the struct scsi_cmnd * Scatterlist case: * We set up the iscsi_segment to point to the next * scatterlist entry to copy to. As we go along, * we move on to the next scatterlist entry and * update the digest per-entry. 
*/ if (conn->datadgst_en && !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) rx_hash = tcp_conn->rx_hash; ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( " "offset=%d, datalen=%d)\n", tcp_task->data_offset, tcp_conn->in.datalen); task->last_xfer = jiffies; rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, sdb->table.sgl, sdb->table.nents, tcp_task->data_offset, tcp_conn->in.datalen, iscsi_tcp_process_data_in, rx_hash); spin_unlock(&conn->session->back_lock); return rc; } rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); spin_unlock(&conn->session->back_lock); break; case ISCSI_OP_SCSI_CMD_RSP: if (tcp_conn->in.datalen) { iscsi_tcp_data_recv_prep(tcp_conn); return 0; } rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; case ISCSI_OP_R2T: if (ahslen) { rc = ISCSI_ERR_AHSLEN; break; } rc = iscsi_tcp_r2t_rsp(conn, hdr); break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: case ISCSI_OP_REJECT: case ISCSI_OP_ASYNC_EVENT: /* * It is possible that we could get a PDU with a buffer larger * than 8K, but there are no targets that currently do this. * For now we fail until we find a vendor that needs it */ if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) { iscsi_conn_printk(KERN_ERR, conn, "iscsi_tcp: received buffer of " "len %u but conn buffer is only %u " "(opcode %0x)\n", tcp_conn->in.datalen, ISCSI_DEF_MAX_RECV_SEG_LEN, opcode); rc = ISCSI_ERR_PROTO; break; } /* If there's data coming in with the response, * receive it to the connection's buffer. */ if (tcp_conn->in.datalen) { iscsi_tcp_data_recv_prep(tcp_conn); return 0; } fallthrough; case ISCSI_OP_LOGOUT_RSP: case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; default: rc = ISCSI_ERR_BAD_OPCODE; break; } if (rc == 0) { /* Anything that comes with data should have * been handled above. */ if (tcp_conn->in.datalen) return ISCSI_ERR_PROTO; iscsi_tcp_hdr_recv_prep(tcp_conn); } return rc; } /** * iscsi_tcp_hdr_recv_done - process PDU header * @tcp_conn: iSCSI TCP connection * @segment: the buffer segment being processed * * This is the callback invoked when the PDU header has * been received. If the header is followed by additional * header segments, we go back for more data. */ static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) { struct iscsi_conn *conn = tcp_conn->iscsi_conn; struct iscsi_hdr *hdr; /* Check if there are additional header segments * *prior* to computing the digest, because we * may need to go back to the caller for more. */ hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf; if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) { /* Bump the header length - the caller will * just loop around and get the AHS for us, and * call again. */ unsigned int ahslen = hdr->hlength << 2; /* Make sure we don't overflow */ if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf)) return ISCSI_ERR_AHSLEN; segment->total_size += ahslen; segment->size += ahslen; return 0; } /* We're done processing the header. See if we're doing * header digests; if so, set up the recv_digest buffer * and go back for more. */ if (conn->hdrdgst_en && !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) { if (segment->digest_len == 0) { /* * Even if we offload the digest processing we * splice it in so we can increment the skb/segment * counters in preparation for the data segment. 
*/ iscsi_tcp_segment_splice_digest(segment, segment->recv_digest); return 0; } iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr, segment->total_copied - ISCSI_DIGEST_SIZE, segment->digest); if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) return ISCSI_ERR_HDR_DGST; } tcp_conn->in.hdr = hdr; return iscsi_tcp_hdr_dissect(conn, hdr); } /** * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header * @tcp_conn: iscsi tcp conn * * returns non zero if we are currently processing or setup to process * a header. */ inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn) { return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done; } EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr); /** * iscsi_tcp_recv_skb - Process skb * @conn: iscsi connection * @skb: network buffer with header and/or data segment * @offset: offset in skb * @offloaded: bool indicating if transfer was offloaded * @status: iscsi TCP status result * * Will return status of transfer in @status. And will return * number of bytes copied. */ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, unsigned int offset, bool offloaded, int *status) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_segment *segment = &tcp_conn->in.segment; struct skb_seq_state seq; unsigned int consumed = 0; int rc = 0; ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); /* * Update for each skb instead of pdu, because over slow networks a * data_in's data could take a while to read in. We also want to * account for r2ts. */ conn->last_recv = jiffies; if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { ISCSI_DBG_TCP(conn, "Rx suspended!\n"); *status = ISCSI_TCP_SUSPENDED; return 0; } if (offloaded) { segment->total_copied = segment->total_size; goto segment_done; } skb_prepare_seq_read(skb, offset, skb->len, &seq); while (1) { unsigned int avail; const u8 *ptr; avail = skb_seq_read(consumed, &ptr, &seq); if (avail == 0) { ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n", consumed); *status = ISCSI_TCP_SKB_DONE; goto skb_done; } BUG_ON(segment->copied >= segment->size); ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr, avail); rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail); BUG_ON(rc == 0); consumed += rc; if (segment->total_copied >= segment->total_size) { skb_abort_seq_read(&seq); goto segment_done; } } segment_done: *status = ISCSI_TCP_SEGMENT_DONE; ISCSI_DBG_TCP(conn, "segment done\n"); rc = segment->done(tcp_conn, segment); if (rc != 0) { *status = ISCSI_TCP_CONN_ERR; ISCSI_DBG_TCP(conn, "Error receiving PDU, errno=%d\n", rc); iscsi_conn_failure(conn, rc); return 0; } /* The done() functions sets up the next segment. */ skb_done: conn->rxdata_octets += consumed; return consumed; } EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb); /** * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands * @task: scsi command task */ int iscsi_tcp_task_init(struct iscsi_task *task) { struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc = task->sc; int err; if (!sc) { /* * mgmt tasks do not have a scatterlist since they come * in from the iscsi interface. 
*/ ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt); return conn->session->tt->init_pdu(task, 0, task->data_count); } BUG_ON(kfifo_len(&tcp_task->r2tqueue)); tcp_task->exp_datasn = 0; /* Prepare PDU, optionally w/ immediate data */ ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n", task->itt, task->imm_count, task->unsol_r2t.data_length); err = conn->session->tt->init_pdu(task, 0, task->imm_count); if (err) return err; task->imm_count = 0; return 0; } EXPORT_SYMBOL_GPL(iscsi_tcp_task_init); static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) { struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t = NULL; if (iscsi_task_has_unsol_data(task)) r2t = &task->unsol_r2t; else { spin_lock_bh(&tcp_task->queue2pool); if (tcp_task->r2t) { r2t = tcp_task->r2t; /* Continue with this R2T? */ if (r2t->data_length <= r2t->sent) { ISCSI_DBG_TCP(task->conn, " done with r2t %p\n", r2t); kfifo_in(&tcp_task->r2tpool.queue, (void *)&tcp_task->r2t, sizeof(void *)); tcp_task->r2t = r2t = NULL; } } if (r2t == NULL) { if (kfifo_out(&tcp_task->r2tqueue, (void *)&tcp_task->r2t, sizeof(void *)) != sizeof(void *)) r2t = NULL; else r2t = tcp_task->r2t; } spin_unlock_bh(&tcp_task->queue2pool); } return r2t; } /** * iscsi_tcp_task_xmit - xmit normal PDU task * @task: iscsi command task * * We're expected to return 0 when everything was transmitted successfully, * -EAGAIN if there's still data in the queue, or != 0 for any other kind * of error. */ int iscsi_tcp_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct iscsi_r2t_info *r2t; int rc = 0; flush: /* Flush any pending data first. */ rc = session->tt->xmit_pdu(task); if (rc < 0) return rc; /* mgmt command */ if (!task->sc) { if (task->hdr->itt == RESERVED_ITT) iscsi_put_task(task); return 0; } /* Are we done already? */ if (task->sc->sc_data_direction != DMA_TO_DEVICE) return 0; r2t = iscsi_tcp_get_curr_r2t(task); if (r2t == NULL) { /* Waiting for more R2Ts to arrive. 
*/ ISCSI_DBG_TCP(conn, "no R2Ts yet\n"); return 0; } rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT); if (rc) return rc; iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr); ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", r2t, r2t->datasn - 1, task->hdr->itt, r2t->data_offset + r2t->sent, r2t->data_count); rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent, r2t->data_count); if (rc) { iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED); return rc; } r2t->sent += r2t->data_count; goto flush; } EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit); struct iscsi_cls_conn * iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size, uint32_t conn_idx) { struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; struct iscsi_tcp_conn *tcp_conn; cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn) + dd_data_size, conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; /* * due to strange issues with iser these are not set * in iscsi_conn_setup */ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN; tcp_conn = conn->dd_data; tcp_conn->iscsi_conn = conn; tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn); return cls_conn; } EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup); void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn) { iscsi_conn_teardown(cls_conn); } EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown); int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session) { int i; int cmd_i; /* * initialize per-task: R2T pool and xmit queue */ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { struct iscsi_task *task = session->cmds[cmd_i]; struct iscsi_tcp_task *tcp_task = task->dd_data; /* * pre-allocated x2 as much r2ts to handle race when * target acks DataOut faster than we data_xmit() queues * could replenish r2tqueue. 
*/ /* R2T pool */ if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 2, NULL, sizeof(struct iscsi_r2t_info))) { goto r2t_alloc_fail; } /* R2T xmit queue */ if (kfifo_alloc(&tcp_task->r2tqueue, session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) { iscsi_pool_free(&tcp_task->r2tpool); goto r2t_alloc_fail; } spin_lock_init(&tcp_task->pool2queue); spin_lock_init(&tcp_task->queue2pool); } return 0; r2t_alloc_fail: for (i = 0; i < cmd_i; i++) { struct iscsi_task *task = session->cmds[i]; struct iscsi_tcp_task *tcp_task = task->dd_data; kfifo_free(&tcp_task->r2tqueue); iscsi_pool_free(&tcp_task->r2tpool); } return -ENOMEM; } EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc); void iscsi_tcp_r2tpool_free(struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct iscsi_tcp_task *tcp_task = task->dd_data; kfifo_free(&tcp_task->r2tqueue); iscsi_pool_free(&tcp_task->r2tpool); } } EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free); int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf) { struct iscsi_session *session = conn->session; unsigned short r2ts = 0; sscanf(buf, "%hu", &r2ts); if (session->max_r2t == r2ts) return 0; if (!r2ts || !is_power_of_2(r2ts)) return -EINVAL; session->max_r2t = r2ts; iscsi_tcp_r2tpool_free(session); return iscsi_tcp_r2tpool_alloc(session); } EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t); void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; } EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
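A minimal sketch of how a software iSCSI transport might wire the max_r2t connection parameter through to iscsi_tcp_set_max_r2t(), which rejects non-power-of-two values and reallocates the per-task R2T pools. The function name my_sw_tcp_conn_set_param() is an assumption for illustration only; it is not part of drivers/scsi/libiscsi_tcp.c.

/* Illustrative sketch -- not part of drivers/scsi/libiscsi_tcp.c. */
#include <scsi/libiscsi.h>
#include <scsi/libiscsi_tcp.h>

static int my_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				    enum iscsi_param param, char *buf,
				    int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	switch (param) {
	case ISCSI_PARAM_MAX_R2T:
		/*
		 * iscsi_tcp_set_max_r2t() rejects values that are zero or
		 * not a power of two, and rebuilds the per-task R2T pools
		 * when the value actually changes.
		 */
		return iscsi_tcp_set_max_r2t(conn, buf);
	default:
		/* everything else is handled by the libiscsi default */
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
}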
linux-master
drivers/scsi/libiscsi_tcp.c
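A minimal sketch of a caller of iscsi_tcp_recv_skb(), loosely modeled on how a software iSCSI initiator transport typically feeds received socket data into libiscsi_tcp from a tcp_read_sock()-style actor. The name iscsi_example_sw_recv() and the assumption that the iscsi_conn pointer was stashed in rd_desc->arg.data are illustrative only; they are not part of libiscsi_tcp.c.

/* Illustrative sketch -- not part of drivers/scsi/libiscsi_tcp.c. */
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <scsi/libiscsi_tcp.h>

static int iscsi_example_sw_recv(read_descriptor_t *rd_desc,
				 struct sk_buff *skb,
				 unsigned int offset, size_t len)
{
	/* assumption: the caller stored its iscsi_conn in rd_desc->arg.data */
	struct iscsi_conn *conn = rd_desc->arg.data;
	unsigned int consumed, total_consumed = 0;
	int status;

	/* len is unused here; iscsi_tcp_recv_skb() tracks segment state itself */
	do {
		status = 0;
		/* offloaded == false: copy (and digest) everything in software */
		consumed = iscsi_tcp_recv_skb(conn, skb, offset, false, &status);
		offset += consumed;
		total_consumed += consumed;
		/*
		 * ISCSI_TCP_SUSPENDED and ISCSI_TCP_CONN_ERR both return 0
		 * consumed bytes, so they end the loop as well.
		 */
	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);

	return total_consumed;
}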
// SPDX-License-Identifier: GPL-2.0-only /* * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux. * Amiga Technologies A4000T SCSI controller. * * Written 1997 by Alan Hourihane <[email protected]> * plus modifications of the 53c7xx.c driver to support the Amiga. * * Rewritten to use 53c700.c by Kars de Jong <[email protected]> */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" static struct scsi_host_template a4000t_scsi_driver_template = { .name = "A4000T builtin SCSI", .proc_name = "A4000t", .this_id = 7, .module = THIS_MODULE, }; #define A4000T_SCSI_OFFSET 0x40 static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) { struct resource *res; phys_addr_t scsi_addr; struct NCR_700_Host_Parameters *hostdata; struct Scsi_Host *host; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!request_mem_region(res->start, resource_size(res), "A4000T builtin SCSI")) return -EBUSY; hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); if (!hostdata) { dev_err(&pdev->dev, "Failed to allocate host data\n"); goto out_release; } scsi_addr = res->start + A4000T_SCSI_OFFSET; /* Fill in the required pieces of hostdata */ hostdata->base = ZTWO_VADDR(scsi_addr); hostdata->clock = 50; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; hostdata->dcntl_extra = EA_710; /* and register the chip */ host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, &pdev->dev); if (!host) { dev_err(&pdev->dev, "No host detected; board configuration problem?\n"); goto out_free; } host->this_id = 7; host->base = scsi_addr; host->irq = IRQ_AMIGA_PORTS; if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", host)) { dev_err(&pdev->dev, "request_irq failed\n"); goto out_put_host; } platform_set_drvdata(pdev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_free: kfree(hostdata); out_release: release_mem_region(res->start, resource_size(res)); return -ENODEV; } static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) { struct Scsi_Host *host = platform_get_drvdata(pdev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); scsi_remove_host(host); NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); release_mem_region(res->start, resource_size(res)); return 0; } static struct platform_driver amiga_a4000t_scsi_driver = { .remove = __exit_p(amiga_a4000t_scsi_remove), .driver = { .name = "amiga-a4000t-scsi", }, }; module_platform_driver_probe(amiga_a4000t_scsi_driver, amiga_a4000t_scsi_probe); MODULE_AUTHOR("Alan Hourihane <[email protected]> / " "Kars de Jong <[email protected]>"); MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:amiga-a4000t-scsi");
linux-master
drivers/scsi/a4000t.c
// SPDX-License-Identifier: GPL-2.0 /* * SCSI library functions depending on DMA */ #include <linux/blkdev.h> #include <linux/device.h> #include <linux/export.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> /** * scsi_dma_map - perform DMA mapping against command's sg lists * @cmd: scsi command * * Returns the number of sg lists actually used, zero if the sg lists * is NULL, or -ENOMEM if the mapping failed. */ int scsi_dma_map(struct scsi_cmnd *cmd) { int nseg = 0; if (scsi_sg_count(cmd)) { struct device *dev = cmd->device->host->dma_dev; nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) return -ENOMEM; } return nseg; } EXPORT_SYMBOL(scsi_dma_map); /** * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map * @cmd: scsi command */ void scsi_dma_unmap(struct scsi_cmnd *cmd) { if (scsi_sg_count(cmd)) { struct device *dev = cmd->device->host->dma_dev; dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); } } EXPORT_SYMBOL(scsi_dma_unmap);
linux-master
drivers/scsi/scsi_lib_dma.c
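A minimal sketch of how a low-level SCSI driver typically pairs scsi_dma_map() and scsi_dma_unmap() around building its hardware scatter/gather list. The my_hw_add_sge() helper, the my_lld_* function names and the SCSI_MLQUEUE_HOST_BUSY fallback policy are assumptions for illustration; they are not defined by scsi_lib_dma.c.

/* Illustrative sketch -- not part of drivers/scsi/scsi_lib_dma.c. */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* hypothetical helper: program one S/G element into the adapter */
static void my_hw_add_sge(struct scsi_cmnd *cmd, dma_addr_t addr,
			  unsigned int len)
{
	/* hardware specific; intentionally left empty in this sketch */
}

static int my_lld_map_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)			/* mapping failed (-ENOMEM) */
		return SCSI_MLQUEUE_HOST_BUSY;

	/* nseg == 0 means the command carries no data; nothing to program */
	scsi_for_each_sg(cmd, sg, nseg, i)
		my_hw_add_sge(cmd, sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}

static void my_lld_complete_cmd(struct scsi_cmnd *cmd)
{
	/* tear down the mapping created by scsi_dma_map() before completing */
	scsi_dma_unmap(cmd);
	scsi_done(cmd);
}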
// SPDX-License-Identifier: GPL-2.0-only /* * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux. * Amiga MacroSystemUS WarpEngine SCSI controller. * Amiga Technologies/DKB A4091 SCSI controller. * * Written 1997 by Alan Hourihane <[email protected]> * plus modifications of the 53c7xx.c driver to support the Amiga. * * Rewritten to use 53c700.c by Kars de Jong <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/zorro.h> #include <linux/slab.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("Alan Hourihane <[email protected]> / Kars de Jong <[email protected]>"); MODULE_DESCRIPTION("Amiga Zorro NCR53C710 driver"); MODULE_LICENSE("GPL"); static struct scsi_host_template zorro7xx_scsi_driver_template = { .proc_name = "zorro7xx", .this_id = 7, .module = THIS_MODULE, }; static struct zorro_driver_data { const char *name; unsigned long offset; int absolute; /* offset is absolute address */ } zorro7xx_driver_data[] = { { .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 }, { .name = "WarpEngine 40xx", .offset = 0x40000 }, { .name = "A4091", .offset = 0x800000 }, { .name = "GForce 040/060", .offset = 0x40000 }, { 0 } }; static struct zorro_device_id zorro7xx_zorro_tbl[] = { { .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS, .driver_data = (unsigned long)&zorro7xx_driver_data[0], }, { .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx, .driver_data = (unsigned long)&zorro7xx_driver_data[1], }, { .id = ZORRO_PROD_CBM_A4091_1, .driver_data = (unsigned long)&zorro7xx_driver_data[2], }, { .id = ZORRO_PROD_CBM_A4091_2, .driver_data = (unsigned long)&zorro7xx_driver_data[2], }, { .id = ZORRO_PROD_GVP_GFORCE_040_060, .driver_data = (unsigned long)&zorro7xx_driver_data[3], }, { 0 } }; MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl); static int zorro7xx_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { struct Scsi_Host *host; struct NCR_700_Host_Parameters *hostdata; struct zorro_driver_data *zdd; unsigned long board, ioaddr; board = zorro_resource_start(z); zdd = (struct zorro_driver_data *)ent->driver_data; if (zdd->absolute) { ioaddr = zdd->offset; } else { ioaddr = board + zdd->offset; } if (!zorro_request_device(z, zdd->name)) { printk(KERN_ERR "zorro7xx: cannot reserve region 0x%lx, abort\n", board); return -EBUSY; } hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); if (!hostdata) { printk(KERN_ERR "zorro7xx: Failed to allocate host data\n"); goto out_release; } /* Fill in the required pieces of hostdata */ if (ioaddr > 0x01000000) hostdata->base = ioremap(ioaddr, zorro_resource_len(z)); else hostdata->base = ZTWO_VADDR(ioaddr); hostdata->clock = 50; hostdata->chip710 = 1; /* Settings for at least WarpEngine 40xx */ hostdata->ctest7_extra = CTEST7_TT1; zorro7xx_scsi_driver_template.name = zdd->name; /* and register the chip */ host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata, &z->dev); if (!host) { printk(KERN_ERR "zorro7xx: No host detected; " "board configuration problem?\n"); goto out_free; } host->this_id = 7; host->base = ioaddr; host->irq = IRQ_AMIGA_PORTS; if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi", host)) { printk(KERN_ERR "zorro7xx: request_irq failed\n"); goto out_put_host; } zorro_set_drvdata(z, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_free: if (ioaddr > 0x01000000) 
iounmap(hostdata->base); kfree(hostdata); out_release: zorro_release_device(z); return -ENODEV; } static void zorro7xx_remove_one(struct zorro_dev *z) { struct Scsi_Host *host = zorro_get_drvdata(z); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); scsi_remove_host(host); NCR_700_release(host); if (host->base > 0x01000000) iounmap(hostdata->base); kfree(hostdata); free_irq(host->irq, host); zorro_release_device(z); } static struct zorro_driver zorro7xx_driver = { .name = "zorro7xx-scsi", .id_table = zorro7xx_zorro_tbl, .probe = zorro7xx_init_one, .remove = zorro7xx_remove_one, }; static int __init zorro7xx_scsi_init(void) { return zorro_register_driver(&zorro7xx_driver); } static void __exit zorro7xx_scsi_exit(void) { zorro_unregister_driver(&zorro7xx_driver); } module_init(zorro7xx_scsi_init); module_exit(zorro7xx_scsi_exit);
linux-master
drivers/scsi/zorro7xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * FiberChannel transport specific attributes exported to sysfs. * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2004-2007 James Smart, Emulex Corporation * Rewrite for host, target, device, and remote port attributes, * statistics, and service functions... * Add vports, etc */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/bsg-lib.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_cmnd.h> #include <net/netlink.h> #include <scsi/scsi_netlink_fc.h> #include <scsi/scsi_bsg_fc.h> #include <uapi/scsi/fc/fc_els.h> #include "scsi_priv.h" static int fc_queue_work(struct Scsi_Host *, struct work_struct *); static void fc_vport_sched_delete(struct work_struct *work); static int fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, struct fc_vport_identifiers *ids, struct fc_vport **vport); static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *); static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *); static void fc_bsg_remove(struct request_queue *); static void fc_bsg_goose_queue(struct fc_rport *); static void fc_li_stats_update(u16 event_type, struct fc_fpin_stats *stats); static void fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats); static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats); /* * Module Parameters */ /* * dev_loss_tmo: the default number of seconds that the FC transport * should insulate the loss of a remote port. * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. */ static unsigned int fc_dev_loss_tmo = 60; /* seconds */ module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC transport should" " insulate the loss of a remote port. Once this value is" " exceeded, the scsi target is removed. Value should be" " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if" " fast_io_fail_tmo is not set."); /* * Redefine so that we can have same named attributes in the * sdev/starget/host objects. 
*/ #define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute device_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) #define fc_enum_name_search(title, table_type, table) \ static const char *get_fc_##title##_name(enum table_type table_key) \ { \ int i; \ char *name = NULL; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value == table_key) { \ name = table[i].name; \ break; \ } \ } \ return name; \ } #define fc_enum_name_match(title, table_type, table) \ static int get_fc_##title##_match(const char *table_key, \ enum table_type *value) \ { \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (strncmp(table_key, table[i].name, \ table[i].matchlen) == 0) { \ *value = table[i].value; \ return 0; /* success */ \ } \ } \ return 1; /* failure */ \ } /* Convert fc_port_type values to ascii string name */ static struct { enum fc_port_type value; char *name; } fc_port_type_names[] = { { FC_PORTTYPE_UNKNOWN, "Unknown" }, { FC_PORTTYPE_OTHER, "Other" }, { FC_PORTTYPE_NOTPRESENT, "Not Present" }, { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" }, { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, { FC_PORTTYPE_LPORT, "LPort (private loop)" }, { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" }, { FC_PORTTYPE_NPIV, "NPIV VPORT" }, }; fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) #define FC_PORTTYPE_MAX_NAMELEN 50 /* Reuse fc_port_type enum function for vport_type */ #define get_fc_vport_type_name get_fc_port_type_name /* Convert fc_host_event_code values to ascii string name */ static const struct { enum fc_host_event_code value; char *name; } fc_host_event_code_names[] = { { FCH_EVT_LIP, "lip" }, { FCH_EVT_LINKUP, "link_up" }, { FCH_EVT_LINKDOWN, "link_down" }, { FCH_EVT_LIPRESET, "lip_reset" }, { FCH_EVT_RSCN, "rscn" }, { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" }, { FCH_EVT_PORT_UNKNOWN, "port_unknown" }, { FCH_EVT_PORT_ONLINE, "port_online" }, { FCH_EVT_PORT_OFFLINE, "port_offline" }, { FCH_EVT_PORT_FABRIC, "port_fabric" }, { FCH_EVT_LINK_UNKNOWN, "link_unknown" }, { FCH_EVT_LINK_FPIN, "link_FPIN" }, { FCH_EVT_LINK_FPIN_ACK, "link_FPIN_ACK" }, { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" }, }; fc_enum_name_search(host_event_code, fc_host_event_code, fc_host_event_code_names) #define FC_HOST_EVENT_CODE_MAX_NAMELEN 30 /* Convert fc_port_state values to ascii string name */ static struct { enum fc_port_state value; char *name; int matchlen; } fc_port_state_names[] = { { FC_PORTSTATE_UNKNOWN, "Unknown", 7}, { FC_PORTSTATE_NOTPRESENT, "Not Present", 11 }, { FC_PORTSTATE_ONLINE, "Online", 6 }, { FC_PORTSTATE_OFFLINE, "Offline", 7 }, { FC_PORTSTATE_BLOCKED, "Blocked", 7 }, { FC_PORTSTATE_BYPASSED, "Bypassed", 8 }, { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics", 11 }, { FC_PORTSTATE_LINKDOWN, "Linkdown", 8 }, { FC_PORTSTATE_ERROR, "Error", 5 }, { FC_PORTSTATE_LOOPBACK, "Loopback", 8 }, { FC_PORTSTATE_DELETED, "Deleted", 7 }, { FC_PORTSTATE_MARGINAL, "Marginal", 8 }, }; fc_enum_name_search(port_state, fc_port_state, fc_port_state_names) fc_enum_name_match(port_state, fc_port_state, fc_port_state_names) #define FC_PORTSTATE_MAX_NAMELEN 20 /* Convert fc_vport_state values to ascii string name */ static struct { enum fc_vport_state value; char *name; } fc_vport_state_names[] = { { FC_VPORT_UNKNOWN, "Unknown" }, { FC_VPORT_ACTIVE, "Active" }, { FC_VPORT_DISABLED, "Disabled" }, { FC_VPORT_LINKDOWN, "Linkdown" }, { FC_VPORT_INITIALIZING, "Initializing" }, { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" }, { 
FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" }, { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" }, { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" }, { FC_VPORT_FAILED, "VPort Failed" }, }; fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names) #define FC_VPORTSTATE_MAX_NAMELEN 24 /* Reuse fc_vport_state enum function for vport_last_state */ #define get_fc_vport_last_state_name get_fc_vport_state_name /* Convert fc_tgtid_binding_type values to ascii string name */ static const struct { enum fc_tgtid_binding_type value; char *name; int matchlen; } fc_tgtid_binding_type_names[] = { { FC_TGTID_BIND_NONE, "none", 4 }, { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 }, { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 }, { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 }, }; fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type, fc_tgtid_binding_type_names) fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type, fc_tgtid_binding_type_names) #define FC_BINDTYPE_MAX_NAMELEN 30 #define fc_bitfield_name_search(title, table) \ static ssize_t \ get_fc_##title##_names(u32 table_key, char *buf) \ { \ char *prefix = ""; \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value & table_key) { \ len += sprintf(buf + len, "%s%s", \ prefix, table[i].name); \ prefix = ", "; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } /* Convert FC_COS bit values to ascii string name */ static const struct { u32 value; char *name; } fc_cos_names[] = { { FC_COS_CLASS1, "Class 1" }, { FC_COS_CLASS2, "Class 2" }, { FC_COS_CLASS3, "Class 3" }, { FC_COS_CLASS4, "Class 4" }, { FC_COS_CLASS6, "Class 6" }, }; fc_bitfield_name_search(cos, fc_cos_names) /* Convert FC_PORTSPEED bit values to ascii string name */ static const struct { u32 value; char *name; } fc_port_speed_names[] = { { FC_PORTSPEED_1GBIT, "1 Gbit" }, { FC_PORTSPEED_2GBIT, "2 Gbit" }, { FC_PORTSPEED_4GBIT, "4 Gbit" }, { FC_PORTSPEED_10GBIT, "10 Gbit" }, { FC_PORTSPEED_8GBIT, "8 Gbit" }, { FC_PORTSPEED_16GBIT, "16 Gbit" }, { FC_PORTSPEED_32GBIT, "32 Gbit" }, { FC_PORTSPEED_20GBIT, "20 Gbit" }, { FC_PORTSPEED_40GBIT, "40 Gbit" }, { FC_PORTSPEED_50GBIT, "50 Gbit" }, { FC_PORTSPEED_100GBIT, "100 Gbit" }, { FC_PORTSPEED_25GBIT, "25 Gbit" }, { FC_PORTSPEED_64GBIT, "64 Gbit" }, { FC_PORTSPEED_128GBIT, "128 Gbit" }, { FC_PORTSPEED_256GBIT, "256 Gbit" }, { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, }; fc_bitfield_name_search(port_speed, fc_port_speed_names) static int show_fc_fc4s (char *buf, u8 *fc4_list) { int i, len=0; for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++) len += sprintf(buf + len , "0x%02x ", *fc4_list); len += sprintf(buf + len, "\n"); return len; } /* Convert FC_PORT_ROLE bit values to ascii string name */ static const struct { u32 value; char *name; } fc_port_role_names[] = { { FC_PORT_ROLE_FCP_TARGET, "FCP Target" }, { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, { FC_PORT_ROLE_IP_PORT, "IP Port" }, { FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" }, { FC_PORT_ROLE_NVME_INITIATOR, "NVMe Initiator" }, { FC_PORT_ROLE_NVME_TARGET, "NVMe Target" }, { FC_PORT_ROLE_NVME_DISCOVERY, "NVMe Discovery" }, }; fc_bitfield_name_search(port_roles, fc_port_role_names) /* * Define roles that are specific to port_id. Values are relative to ROLE_MASK. 
*/ #define FC_WELLKNOWN_PORTID_MASK 0xfffff0 #define FC_WELLKNOWN_ROLE_MASK 0x00000f #define FC_FPORT_PORTID 0x00000e #define FC_FABCTLR_PORTID 0x00000d #define FC_DIRSRVR_PORTID 0x00000c #define FC_TIMESRVR_PORTID 0x00000b #define FC_MGMTSRVR_PORTID 0x00000a static void fc_timeout_deleted_rport(struct work_struct *work); static void fc_timeout_fail_rport_io(struct work_struct *work); static void fc_scsi_scan_rport(struct work_struct *work); /* * Attribute counts pre object type... * Increase these values if you add attributes */ #define FC_STARGET_NUM_ATTRS 3 #define FC_RPORT_NUM_ATTRS 10 #define FC_VPORT_NUM_ATTRS 9 #define FC_HOST_NUM_ATTRS 29 struct fc_internal { struct scsi_transport_template t; struct fc_function_template *f; /* * For attributes : each object has : * An array of the actual attributes structures * An array of null-terminated pointers to the attribute * structures - used for mid-layer interaction. * * The attribute containers for the starget and host are are * part of the midlayer. As the remote port is specific to the * fc transport, we must provide the attribute container. */ struct device_attribute private_starget_attrs[ FC_STARGET_NUM_ATTRS]; struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1]; struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS]; struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1]; struct transport_container rport_attr_cont; struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS]; struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1]; struct transport_container vport_attr_cont; struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS]; struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1]; }; #define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) static int fc_target_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct scsi_target *starget = to_scsi_target(dev); struct fc_rport *rport = starget_to_rport(starget); /* * if parent is remote port, use values from remote port. * Otherwise, this host uses the fc_transport, but not the * remote port interface. As such, initialize to known non-values. */ if (rport) { fc_starget_node_name(starget) = rport->node_name; fc_starget_port_name(starget) = rport->port_name; fc_starget_port_id(starget) = rport->port_id; } else { fc_starget_node_name(starget) = -1; fc_starget_port_name(starget) = -1; fc_starget_port_id(starget) = -1; } return 0; } static DECLARE_TRANSPORT_CLASS(fc_transport_class, "fc_transport", fc_target_setup, NULL, NULL); static int fc_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); /* * Set default values easily detected by the midlayer as * failure cases. The scsi lldd is responsible for initializing * all transport attributes to valid values per host. 
*/ fc_host->node_name = -1; fc_host->port_name = -1; fc_host->permanent_port_name = -1; fc_host->supported_classes = FC_COS_UNSPECIFIED; memset(fc_host->supported_fc4s, 0, sizeof(fc_host->supported_fc4s)); fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; fc_host->maxframe_size = -1; fc_host->max_npiv_vports = 0; memset(fc_host->serial_number, 0, sizeof(fc_host->serial_number)); memset(fc_host->manufacturer, 0, sizeof(fc_host->manufacturer)); memset(fc_host->model, 0, sizeof(fc_host->model)); memset(fc_host->model_description, 0, sizeof(fc_host->model_description)); memset(fc_host->hardware_version, 0, sizeof(fc_host->hardware_version)); memset(fc_host->driver_version, 0, sizeof(fc_host->driver_version)); memset(fc_host->firmware_version, 0, sizeof(fc_host->firmware_version)); memset(fc_host->optionrom_version, 0, sizeof(fc_host->optionrom_version)); fc_host->port_id = -1; fc_host->port_type = FC_PORTTYPE_UNKNOWN; fc_host->port_state = FC_PORTSTATE_UNKNOWN; memset(fc_host->active_fc4s, 0, sizeof(fc_host->active_fc4s)); fc_host->speed = FC_PORTSPEED_UNKNOWN; fc_host->fabric_name = -1; memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name)); memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname)); memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats)); fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN; INIT_LIST_HEAD(&fc_host->rports); INIT_LIST_HEAD(&fc_host->rport_bindings); INIT_LIST_HEAD(&fc_host->vports); fc_host->next_rport_number = 0; fc_host->next_target_id = 0; fc_host->next_vport_number = 0; fc_host->npiv_vports_inuse = 0; snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), "fc_wq_%d", shost->host_no); fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name); if (!fc_host->work_q) return -ENOMEM; fc_host->dev_loss_tmo = fc_dev_loss_tmo; snprintf(fc_host->devloss_work_q_name, sizeof(fc_host->devloss_work_q_name), "fc_dl_%d", shost->host_no); fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0, fc_host->devloss_work_q_name); if (!fc_host->devloss_work_q) { destroy_workqueue(fc_host->work_q); fc_host->work_q = NULL; return -ENOMEM; } fc_bsg_hostadd(shost, fc_host); /* ignore any bsg add error - we just can't do sgio */ return 0; } static int fc_host_remove(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); fc_bsg_remove(fc_host->rqst_q); return 0; } static DECLARE_TRANSPORT_CLASS(fc_host_class, "fc_host", fc_host_setup, fc_host_remove, NULL); /* * Setup and Remove actions for remote ports are handled * in the service functions below. */ static DECLARE_TRANSPORT_CLASS(fc_rport_class, "fc_remote_ports", NULL, NULL, NULL); /* * Setup and Remove actions for virtual ports are handled * in the service functions below. */ static DECLARE_TRANSPORT_CLASS(fc_vport_class, "fc_vports", NULL, NULL, NULL); /* * Netlink Infrastructure */ static atomic_t fc_event_seq; /** * fc_get_event_number - Obtain the next sequential FC event number * * Notes: * We could have inlined this, but it would have required fc_event_seq to * be exposed. For now, live with the subroutine call. * Atomic used to avoid lock/unlock... */ u32 fc_get_event_number(void) { return atomic_add_return(1, &fc_event_seq); } EXPORT_SYMBOL(fc_get_event_number); /** * fc_host_post_fc_event - routine to do the work of posting an event * on an fc_host. 
* @shost: host the event occurred on * @event_number: fc event number obtained from get_fc_event_number() * @event_code: fc_host event being posted * @data_len: amount, in bytes, of event data * @data_buf: pointer to event data * @vendor_id: value for Vendor id * * Notes: * This routine assumes no locks are held on entry. */ void fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number, enum fc_host_event_code event_code, u32 data_len, char *data_buf, u64 vendor_id) { struct sk_buff *skb; struct nlmsghdr *nlh; struct fc_nl_event *event; const char *name; size_t len, padding; int err; if (!data_buf || data_len < 4) data_len = 0; if (!scsi_nl_sock) { err = -ENOENT; goto send_fail; } len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len); skb = nlmsg_new(len, GFP_KERNEL); if (!skb) { err = -ENOBUFS; goto send_fail; } nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0); if (!nlh) { err = -ENOBUFS; goto send_fail_skb; } event = nlmsg_data(nlh); INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, FC_NL_ASYNC_EVENT, len); event->seconds = ktime_get_real_seconds(); event->vendor_id = vendor_id; event->host_no = shost->host_no; event->event_datalen = data_len; /* bytes */ event->event_num = event_number; event->event_code = event_code; if (data_len) memcpy(event->event_data_flex, data_buf, data_len); padding = len - offsetof(typeof(*event), event_data_flex) - data_len; memset(event->event_data_flex + data_len, 0, padding); nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS, GFP_KERNEL); return; send_fail_skb: kfree_skb(skb); send_fail: name = get_fc_host_event_code_name(event_code); printk(KERN_WARNING "%s: Dropped Event : host %d %s data 0x%08x - err %d\n", __func__, shost->host_no, (name) ? name : "<unknown>", (data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err); return; } EXPORT_SYMBOL(fc_host_post_fc_event); /** * fc_host_post_event - called to post an even on an fc_host. * @shost: host the event occurred on * @event_number: fc event number obtained from get_fc_event_number() * @event_code: fc_host event being posted * @event_data: 32bits of data for the event being posted * * Notes: * This routine assumes no locks are held on entry. */ void fc_host_post_event(struct Scsi_Host *shost, u32 event_number, enum fc_host_event_code event_code, u32 event_data) { fc_host_post_fc_event(shost, event_number, event_code, (u32)sizeof(u32), (char *)&event_data, 0); } EXPORT_SYMBOL(fc_host_post_event); /** * fc_host_post_vendor_event - called to post a vendor unique event * on an fc_host * @shost: host the event occurred on * @event_number: fc event number obtained from get_fc_event_number() * @data_len: amount, in bytes, of vendor unique data * @data_buf: pointer to vendor unique data * @vendor_id: Vendor id * * Notes: * This routine assumes no locks are held on entry. */ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, u32 data_len, char * data_buf, u64 vendor_id) { fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE, data_len, data_buf, vendor_id); } EXPORT_SYMBOL(fc_host_post_vendor_event); /** * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn * @shost: host the fc_rport is associated with * @wwpn: wwpn of the fc_rport device * * Notes: * This routine assumes no locks are held on entry. 
*/ struct fc_rport * fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn) { struct fc_rport *rport; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(rport, &fc_host_rports(shost), peers) { if (rport->port_state != FC_PORTSTATE_ONLINE) continue; if (rport->port_name == wwpn) { spin_unlock_irqrestore(shost->host_lock, flags); return rport; } } spin_unlock_irqrestore(shost->host_lock, flags); return NULL; } EXPORT_SYMBOL(fc_find_rport_by_wwpn); static void fc_li_stats_update(u16 event_type, struct fc_fpin_stats *stats) { stats->li++; switch (event_type) { case FPIN_LI_UNKNOWN: stats->li_failure_unknown++; break; case FPIN_LI_LINK_FAILURE: stats->li_link_failure_count++; break; case FPIN_LI_LOSS_OF_SYNC: stats->li_loss_of_sync_count++; break; case FPIN_LI_LOSS_OF_SIG: stats->li_loss_of_signals_count++; break; case FPIN_LI_PRIM_SEQ_ERR: stats->li_prim_seq_err_count++; break; case FPIN_LI_INVALID_TX_WD: stats->li_invalid_tx_word_count++; break; case FPIN_LI_INVALID_CRC: stats->li_invalid_crc_count++; break; case FPIN_LI_DEVICE_SPEC: stats->li_device_specific++; break; } } static void fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats) { stats->dn++; switch (reason_code) { case FPIN_DELI_UNKNOWN: stats->dn_unknown++; break; case FPIN_DELI_TIMEOUT: stats->dn_timeout++; break; case FPIN_DELI_UNABLE_TO_ROUTE: stats->dn_unable_to_route++; break; case FPIN_DELI_DEVICE_SPEC: stats->dn_device_specific++; break; } } static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats) { stats->cn++; switch (event_type) { case FPIN_CONGN_CLEAR: stats->cn_clear++; break; case FPIN_CONGN_LOST_CREDIT: stats->cn_lost_credit++; break; case FPIN_CONGN_CREDIT_STALL: stats->cn_credit_stall++; break; case FPIN_CONGN_OVERSUBSCRIPTION: stats->cn_oversubscription++; break; case FPIN_CONGN_DEVICE_SPEC: stats->cn_device_specific++; } } /* * fc_fpin_li_stats_update - routine to update Link Integrity * event statistics. * @shost: host the FPIN was received on * @tlv: pointer to link integrity descriptor * */ static void fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) { u8 i; struct fc_rport *rport = NULL; struct fc_rport *attach_rport = NULL; struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv; u16 event_type = be16_to_cpu(li_desc->event_type); u64 wwpn; rport = fc_find_rport_by_wwpn(shost, be64_to_cpu(li_desc->attached_wwpn)); if (rport && (rport->roles & FC_PORT_ROLE_FCP_TARGET || rport->roles & FC_PORT_ROLE_NVME_TARGET)) { attach_rport = rport; fc_li_stats_update(event_type, &attach_rport->fpin_stats); } if (be32_to_cpu(li_desc->pname_count) > 0) { for (i = 0; i < be32_to_cpu(li_desc->pname_count); i++) { wwpn = be64_to_cpu(li_desc->pname_list[i]); rport = fc_find_rport_by_wwpn(shost, wwpn); if (rport && (rport->roles & FC_PORT_ROLE_FCP_TARGET || rport->roles & FC_PORT_ROLE_NVME_TARGET)) { if (rport == attach_rport) continue; fc_li_stats_update(event_type, &rport->fpin_stats); } } } if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn)) fc_li_stats_update(event_type, &fc_host->fpin_stats); } /* * fc_fpin_delivery_stats_update - routine to update Delivery Notification * event statistics. 
* @shost: host the FPIN was received on * @tlv: pointer to delivery descriptor * */ static void fc_fpin_delivery_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) { struct fc_rport *rport = NULL; struct fc_rport *attach_rport = NULL; struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv; u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code); rport = fc_find_rport_by_wwpn(shost, be64_to_cpu(dn_desc->attached_wwpn)); if (rport && (rport->roles & FC_PORT_ROLE_FCP_TARGET || rport->roles & FC_PORT_ROLE_NVME_TARGET)) { attach_rport = rport; fc_delivery_stats_update(reason_code, &attach_rport->fpin_stats); } if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn)) fc_delivery_stats_update(reason_code, &fc_host->fpin_stats); } /* * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion * event statistics. * @shost: host the FPIN was received on * @tlv: pointer to peer congestion descriptor * */ static void fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) { u8 i; struct fc_rport *rport = NULL; struct fc_rport *attach_rport = NULL; struct fc_fn_peer_congn_desc *pc_desc = (struct fc_fn_peer_congn_desc *)tlv; u16 event_type = be16_to_cpu(pc_desc->event_type); u64 wwpn; rport = fc_find_rport_by_wwpn(shost, be64_to_cpu(pc_desc->attached_wwpn)); if (rport && (rport->roles & FC_PORT_ROLE_FCP_TARGET || rport->roles & FC_PORT_ROLE_NVME_TARGET)) { attach_rport = rport; fc_cn_stats_update(event_type, &attach_rport->fpin_stats); } if (be32_to_cpu(pc_desc->pname_count) > 0) { for (i = 0; i < be32_to_cpu(pc_desc->pname_count); i++) { wwpn = be64_to_cpu(pc_desc->pname_list[i]); rport = fc_find_rport_by_wwpn(shost, wwpn); if (rport && (rport->roles & FC_PORT_ROLE_FCP_TARGET || rport->roles & FC_PORT_ROLE_NVME_TARGET)) { if (rport == attach_rport) continue; fc_cn_stats_update(event_type, &rport->fpin_stats); } } } } /* * fc_fpin_congn_stats_update - routine to update Congestion * event statistics. * @shost: host the FPIN was received on * @tlv: pointer to congestion descriptor * */ static void fc_fpin_congn_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv) { struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv; fc_cn_stats_update(be16_to_cpu(congn->event_type), &fc_host->fpin_stats); } /** * fc_host_fpin_rcv - routine to process a received FPIN. * @shost: host the FPIN was received on * @fpin_len: length of FPIN payload, in bytes * @fpin_buf: pointer to FPIN payload * @event_acknowledge: 1, if LLDD handles this event. * Notes: * This routine assumes no locks are held on entry. */ void fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf, u8 event_acknowledge) { struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf; struct fc_tlv_desc *tlv; u32 bytes_remain; u32 dtag; enum fc_host_event_code event_code = event_acknowledge ? 
FCH_EVT_LINK_FPIN_ACK : FCH_EVT_LINK_FPIN; /* Update Statistics */ tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc); bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); while (bytes_remain >= FC_TLV_DESC_HDR_SZ && bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { dtag = be32_to_cpu(tlv->desc_tag); switch (dtag) { case ELS_DTAG_LNK_INTEGRITY: fc_fpin_li_stats_update(shost, tlv); break; case ELS_DTAG_DELIVERY: fc_fpin_delivery_stats_update(shost, tlv); break; case ELS_DTAG_PEER_CONGEST: fc_fpin_peer_congn_stats_update(shost, tlv); break; case ELS_DTAG_CONGESTION: fc_fpin_congn_stats_update(shost, tlv); } bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); tlv = fc_tlv_next_desc(tlv); } fc_host_post_fc_event(shost, fc_get_event_number(), event_code, fpin_len, fpin_buf, 0); } EXPORT_SYMBOL(fc_host_fpin_rcv); static __init int fc_transport_init(void) { int error; atomic_set(&fc_event_seq, 0); error = transport_class_register(&fc_host_class); if (error) return error; error = transport_class_register(&fc_vport_class); if (error) goto unreg_host_class; error = transport_class_register(&fc_rport_class); if (error) goto unreg_vport_class; error = transport_class_register(&fc_transport_class); if (error) goto unreg_rport_class; return 0; unreg_rport_class: transport_class_unregister(&fc_rport_class); unreg_vport_class: transport_class_unregister(&fc_vport_class); unreg_host_class: transport_class_unregister(&fc_host_class); return error; } static void __exit fc_transport_exit(void) { transport_class_unregister(&fc_transport_class); transport_class_unregister(&fc_rport_class); transport_class_unregister(&fc_host_class); transport_class_unregister(&fc_vport_class); } /* * FC Remote Port Attribute Management */ #define fc_rport_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_rport_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ struct Scsi_Host *shost = rport_to_shost(rport); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ if ((i->f->get_rport_##field) && \ !((rport->port_state == FC_PORTSTATE_BLOCKED) || \ (rport->port_state == FC_PORTSTATE_DELETED) || \ (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \ i->f->get_rport_##field(rport); \ return snprintf(buf, sz, format_string, cast rport->field); \ } #define fc_rport_store_function(field) \ static ssize_t \ store_fc_rport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct fc_rport *rport = transport_class_to_rport(dev); \ struct Scsi_Host *shost = rport_to_shost(rport); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ char *cp; \ if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \ (rport->port_state == FC_PORTSTATE_DELETED) || \ (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \ return -EBUSY; \ val = simple_strtoul(buf, &cp, 0); \ if (*cp && (*cp != '\n')) \ return -EINVAL; \ i->f->set_rport_##field(rport, val); \ return count; \ } #define fc_rport_rd_attr(field, format_string, sz) \ fc_rport_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ show_fc_rport_##field, NULL) #define fc_rport_rd_attr_cast(field, format_string, sz, cast) \ fc_rport_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ show_fc_rport_##field, NULL) #define fc_rport_rw_attr(field, format_string, sz) \ 
fc_rport_show_function(field, format_string, sz, ) \ fc_rport_store_function(field) \ static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \ show_fc_rport_##field, \ store_fc_rport_##field) #define fc_private_rport_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_rport_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ return snprintf(buf, sz, format_string, cast rport->field); \ } #define fc_private_rport_rd_attr(field, format_string, sz) \ fc_private_rport_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ show_fc_rport_##field, NULL) #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \ fc_private_rport_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(rport, field, S_IRUGO, \ show_fc_rport_##field, NULL) #define fc_private_rport_rd_enum_attr(title, maxlen) \ static ssize_t \ show_fc_rport_##title (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ const char *name; \ name = get_fc_##title##_name(rport->title); \ if (!name) \ return -EINVAL; \ return snprintf(buf, maxlen, "%s\n", name); \ } \ static FC_DEVICE_ATTR(rport, title, S_IRUGO, \ show_fc_rport_##title, NULL) #define SETUP_RPORT_ATTRIBUTE_RD(field) \ i->private_rport_attrs[count] = device_attr_rport_##field; \ i->private_rport_attrs[count].attr.mode = S_IRUGO; \ i->private_rport_attrs[count].store = NULL; \ i->rport_attrs[count] = &i->private_rport_attrs[count]; \ if (i->f->show_rport_##field) \ count++ #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \ i->private_rport_attrs[count] = device_attr_rport_##field; \ i->private_rport_attrs[count].attr.mode = S_IRUGO; \ i->private_rport_attrs[count].store = NULL; \ i->rport_attrs[count] = &i->private_rport_attrs[count]; \ count++ #define SETUP_RPORT_ATTRIBUTE_RW(field) \ i->private_rport_attrs[count] = device_attr_rport_##field; \ if (!i->f->set_rport_##field) { \ i->private_rport_attrs[count].attr.mode = S_IRUGO; \ i->private_rport_attrs[count].store = NULL; \ } \ i->rport_attrs[count] = &i->private_rport_attrs[count]; \ if (i->f->show_rport_##field) \ count++ #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \ { \ i->private_rport_attrs[count] = device_attr_rport_##field; \ i->rport_attrs[count] = &i->private_rport_attrs[count]; \ count++; \ } /* The FC Transport Remote Port Attributes: */ /* Fixed Remote Port Attributes */ fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20); static ssize_t show_fc_rport_supported_classes (struct device *dev, struct device_attribute *attr, char *buf) { struct fc_rport *rport = transport_class_to_rport(dev); if (rport->supported_classes == FC_COS_UNSPECIFIED) return snprintf(buf, 20, "unspecified\n"); return get_fc_cos_names(rport->supported_classes, buf); } static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO, show_fc_rport_supported_classes, NULL); /* Dynamic Remote Port Attributes */ /* * dev_loss_tmo attribute */ static int fc_str_to_dev_loss(const char *buf, unsigned long *val) { char *cp; *val = simple_strtoul(buf, &cp, 0); if (*cp && (*cp != '\n')) return -EINVAL; /* * Check for overflow; dev_loss_tmo is u32 */ if (*val > UINT_MAX) return -EINVAL; return 0; } static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport, unsigned long val) { struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); if ((rport->port_state == 
FC_PORTSTATE_BLOCKED) || (rport->port_state == FC_PORTSTATE_DELETED) || (rport->port_state == FC_PORTSTATE_NOTPRESENT)) return -EBUSY; /* * Check for overflow; dev_loss_tmo is u32 */ if (val > UINT_MAX) return -EINVAL; /* * If fast_io_fail is off we have to cap * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT */ if (rport->fast_io_fail_tmo == -1 && val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) return -EINVAL; i->f->set_rport_dev_loss_tmo(rport, val); return 0; } fc_rport_show_function(dev_loss_tmo, "%u\n", 20, ) static ssize_t store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fc_rport *rport = transport_class_to_rport(dev); unsigned long val; int rc; rc = fc_str_to_dev_loss(buf, &val); if (rc) return rc; rc = fc_rport_set_dev_loss_tmo(rport, val); if (rc) return rc; return count; } static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR, show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo); /* Private Remote Port Attributes */ fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); fc_private_rport_rd_attr(port_id, "0x%06x\n", 20); static ssize_t show_fc_rport_roles (struct device *dev, struct device_attribute *attr, char *buf) { struct fc_rport *rport = transport_class_to_rport(dev); /* identify any roles that are port_id specific */ if ((rport->port_id != -1) && (rport->port_id & FC_WELLKNOWN_PORTID_MASK) == FC_WELLKNOWN_PORTID_MASK) { switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) { case FC_FPORT_PORTID: return snprintf(buf, 30, "Fabric Port\n"); case FC_FABCTLR_PORTID: return snprintf(buf, 30, "Fabric Controller\n"); case FC_DIRSRVR_PORTID: return snprintf(buf, 30, "Directory Server\n"); case FC_TIMESRVR_PORTID: return snprintf(buf, 30, "Time Server\n"); case FC_MGMTSRVR_PORTID: return snprintf(buf, 30, "Management Server\n"); default: return snprintf(buf, 30, "Unknown Fabric Entity\n"); } } else { if (rport->roles == FC_PORT_ROLE_UNKNOWN) return snprintf(buf, 20, "unknown\n"); return get_fc_port_roles_names(rport->roles, buf); } } static FC_DEVICE_ATTR(rport, roles, S_IRUGO, show_fc_rport_roles, NULL); static ssize_t fc_rport_set_marginal_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fc_rport *rport = transport_class_to_rport(dev); enum fc_port_state port_state; int ret = 0; ret = get_fc_port_state_match(buf, &port_state); if (ret) return -EINVAL; if (port_state == FC_PORTSTATE_MARGINAL) { /* * Change the state to Marginal only if the * current rport state is Online * Allow only Online->Marginal */ if (rport->port_state == FC_PORTSTATE_ONLINE) rport->port_state = port_state; else return -EINVAL; } else if (port_state == FC_PORTSTATE_ONLINE) { /* * Change the state to Online only if the * current rport state is Marginal * Allow only Marginal->Online */ if (rport->port_state == FC_PORTSTATE_MARGINAL) rport->port_state = port_state; else return -EINVAL; } else return -EINVAL; return count; } static ssize_t show_fc_rport_port_state(struct device *dev, struct device_attribute *attr, char *buf) { const char *name; struct fc_rport *rport = transport_class_to_rport(dev); name = get_fc_port_state_name(rport->port_state); if (!name) return -EINVAL; return snprintf(buf, 20, "%s\n", name); } static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200, show_fc_rport_port_state, fc_rport_set_marginal_state); fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); /* * fast_io_fail_tmo 
attribute */ static ssize_t show_fc_rport_fast_io_fail_tmo (struct device *dev, struct device_attribute *attr, char *buf) { struct fc_rport *rport = transport_class_to_rport(dev); if (rport->fast_io_fail_tmo == -1) return snprintf(buf, 5, "off\n"); return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo); } static ssize_t store_fc_rport_fast_io_fail_tmo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int val; char *cp; struct fc_rport *rport = transport_class_to_rport(dev); if ((rport->port_state == FC_PORTSTATE_BLOCKED) || (rport->port_state == FC_PORTSTATE_DELETED) || (rport->port_state == FC_PORTSTATE_NOTPRESENT)) return -EBUSY; if (strncmp(buf, "off", 3) == 0) rport->fast_io_fail_tmo = -1; else { val = simple_strtoul(buf, &cp, 0); if ((*cp && (*cp != '\n')) || (val < 0)) return -EINVAL; /* * Cap fast_io_fail by dev_loss_tmo or * SCSI_DEVICE_BLOCK_MAX_TIMEOUT. */ if ((val >= rport->dev_loss_tmo) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) return -EINVAL; rport->fast_io_fail_tmo = val; } return count; } static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR, show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo); #define fc_rport_fpin_statistic(name) \ static ssize_t fc_rport_fpinstat_##name(struct device *cd, \ struct device_attribute *attr, \ char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(cd); \ \ return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name); \ } \ static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL) fc_rport_fpin_statistic(dn); fc_rport_fpin_statistic(dn_unknown); fc_rport_fpin_statistic(dn_timeout); fc_rport_fpin_statistic(dn_unable_to_route); fc_rport_fpin_statistic(dn_device_specific); fc_rport_fpin_statistic(cn); fc_rport_fpin_statistic(cn_clear); fc_rport_fpin_statistic(cn_lost_credit); fc_rport_fpin_statistic(cn_credit_stall); fc_rport_fpin_statistic(cn_oversubscription); fc_rport_fpin_statistic(cn_device_specific); fc_rport_fpin_statistic(li); fc_rport_fpin_statistic(li_failure_unknown); fc_rport_fpin_statistic(li_link_failure_count); fc_rport_fpin_statistic(li_loss_of_sync_count); fc_rport_fpin_statistic(li_loss_of_signals_count); fc_rport_fpin_statistic(li_prim_seq_err_count); fc_rport_fpin_statistic(li_invalid_tx_word_count); fc_rport_fpin_statistic(li_invalid_crc_count); fc_rport_fpin_statistic(li_device_specific); static struct attribute *fc_rport_statistics_attrs[] = { &device_attr_rport_fpin_dn.attr, &device_attr_rport_fpin_dn_unknown.attr, &device_attr_rport_fpin_dn_timeout.attr, &device_attr_rport_fpin_dn_unable_to_route.attr, &device_attr_rport_fpin_dn_device_specific.attr, &device_attr_rport_fpin_li.attr, &device_attr_rport_fpin_li_failure_unknown.attr, &device_attr_rport_fpin_li_link_failure_count.attr, &device_attr_rport_fpin_li_loss_of_sync_count.attr, &device_attr_rport_fpin_li_loss_of_signals_count.attr, &device_attr_rport_fpin_li_prim_seq_err_count.attr, &device_attr_rport_fpin_li_invalid_tx_word_count.attr, &device_attr_rport_fpin_li_invalid_crc_count.attr, &device_attr_rport_fpin_li_device_specific.attr, &device_attr_rport_fpin_cn.attr, &device_attr_rport_fpin_cn_clear.attr, &device_attr_rport_fpin_cn_lost_credit.attr, &device_attr_rport_fpin_cn_credit_stall.attr, &device_attr_rport_fpin_cn_oversubscription.attr, &device_attr_rport_fpin_cn_device_specific.attr, NULL }; static struct attribute_group fc_rport_statistics_group = { .name = "statistics", .attrs = fc_rport_statistics_attrs, }; /* * FC SCSI Target Attribute Management */ /* * Note: 
in the target show function we recognize when the remote * port is in the hierarchy and do not allow the driver to get * involved in sysfs functions. The driver only gets involved if * it's the "old" style that doesn't use rports. */ #define fc_starget_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_starget_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ struct fc_rport *rport = starget_to_rport(starget); \ if (rport) \ fc_starget_##field(starget) = rport->field; \ else if (i->f->get_starget_##field) \ i->f->get_starget_##field(starget); \ return snprintf(buf, sz, format_string, \ cast fc_starget_##field(starget)); \ } #define fc_starget_rd_attr(field, format_string, sz) \ fc_starget_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(starget, field, S_IRUGO, \ show_fc_starget_##field, NULL) #define fc_starget_rd_attr_cast(field, format_string, sz, cast) \ fc_starget_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(starget, field, S_IRUGO, \ show_fc_starget_##field, NULL) #define SETUP_STARGET_ATTRIBUTE_RD(field) \ i->private_starget_attrs[count] = device_attr_starget_##field; \ i->private_starget_attrs[count].attr.mode = S_IRUGO; \ i->private_starget_attrs[count].store = NULL; \ i->starget_attrs[count] = &i->private_starget_attrs[count]; \ if (i->f->show_starget_##field) \ count++ #define SETUP_STARGET_ATTRIBUTE_RW(field) \ i->private_starget_attrs[count] = device_attr_starget_##field; \ if (!i->f->set_starget_##field) { \ i->private_starget_attrs[count].attr.mode = S_IRUGO; \ i->private_starget_attrs[count].store = NULL; \ } \ i->starget_attrs[count] = &i->private_starget_attrs[count]; \ if (i->f->show_starget_##field) \ count++ /* The FC Transport SCSI Target Attributes: */ fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); fc_starget_rd_attr(port_id, "0x%06x\n", 20); /* * FC Virtual Port Attribute Management */ #define fc_vport_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_vport_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct fc_vport *vport = transport_class_to_vport(dev); \ struct Scsi_Host *shost = vport_to_shost(vport); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ if ((i->f->get_vport_##field) && \ !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \ i->f->get_vport_##field(vport); \ return snprintf(buf, sz, format_string, cast vport->field); \ } #define fc_vport_store_function(field) \ static ssize_t \ store_fc_vport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct fc_vport *vport = transport_class_to_vport(dev); \ struct Scsi_Host *shost = vport_to_shost(vport); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ char *cp; \ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \ return -EBUSY; \ val = simple_strtoul(buf, &cp, 0); \ if (*cp && (*cp != '\n')) \ return -EINVAL; \ i->f->set_vport_##field(vport, val); \ return count; \ } #define fc_vport_store_str_function(field, slen) \ static ssize_t \ store_fc_vport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct fc_vport *vport = 
transport_class_to_vport(dev); \ struct Scsi_Host *shost = vport_to_shost(vport); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ unsigned int cnt=count; \ \ /* count may include a LF at end of string */ \ if (buf[cnt-1] == '\n') \ cnt--; \ if (cnt > ((slen) - 1)) \ return -EINVAL; \ memcpy(vport->field, buf, cnt); \ i->f->set_vport_##field(vport); \ return count; \ } #define fc_vport_rd_attr(field, format_string, sz) \ fc_vport_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ show_fc_vport_##field, NULL) #define fc_vport_rd_attr_cast(field, format_string, sz, cast) \ fc_vport_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ show_fc_vport_##field, NULL) #define fc_vport_rw_attr(field, format_string, sz) \ fc_vport_show_function(field, format_string, sz, ) \ fc_vport_store_function(field) \ static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \ show_fc_vport_##field, \ store_fc_vport_##field) #define fc_private_vport_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_vport_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct fc_vport *vport = transport_class_to_vport(dev); \ return snprintf(buf, sz, format_string, cast vport->field); \ } #define fc_private_vport_store_u32_function(field) \ static ssize_t \ store_fc_vport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ u32 val; \ struct fc_vport *vport = transport_class_to_vport(dev); \ char *cp; \ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \ return -EBUSY; \ val = simple_strtoul(buf, &cp, 0); \ if (*cp && (*cp != '\n')) \ return -EINVAL; \ vport->field = val; \ return count; \ } #define fc_private_vport_rd_attr(field, format_string, sz) \ fc_private_vport_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ show_fc_vport_##field, NULL) #define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \ fc_private_vport_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(vport, field, S_IRUGO, \ show_fc_vport_##field, NULL) #define fc_private_vport_rw_u32_attr(field, format_string, sz) \ fc_private_vport_show_function(field, format_string, sz, ) \ fc_private_vport_store_u32_function(field) \ static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \ show_fc_vport_##field, \ store_fc_vport_##field) #define fc_private_vport_rd_enum_attr(title, maxlen) \ static ssize_t \ show_fc_vport_##title (struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct fc_vport *vport = transport_class_to_vport(dev); \ const char *name; \ name = get_fc_##title##_name(vport->title); \ if (!name) \ return -EINVAL; \ return snprintf(buf, maxlen, "%s\n", name); \ } \ static FC_DEVICE_ATTR(vport, title, S_IRUGO, \ show_fc_vport_##title, NULL) #define SETUP_VPORT_ATTRIBUTE_RD(field) \ i->private_vport_attrs[count] = device_attr_vport_##field; \ i->private_vport_attrs[count].attr.mode = S_IRUGO; \ i->private_vport_attrs[count].store = NULL; \ i->vport_attrs[count] = &i->private_vport_attrs[count]; \ if (i->f->get_##field) \ count++ /* NOTE: Above MACRO differs: checks function not show bit */ #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \ i->private_vport_attrs[count] = device_attr_vport_##field; \ i->private_vport_attrs[count].attr.mode = S_IRUGO; \ i->private_vport_attrs[count].store = NULL; \ i->vport_attrs[count] = &i->private_vport_attrs[count]; \ count++ 
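/*
 * Editorial example, not part of the original source: the SETUP_*
 * helpers above and below are invoked from fc_attach_transport() with a
 * running "count" index.  For instance,
 * SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state) expands to roughly:
 *
 *	i->private_vport_attrs[count] = device_attr_vport_vport_state;
 *	i->private_vport_attrs[count].attr.mode = S_IRUGO;
 *	i->private_vport_attrs[count].store = NULL;
 *	i->vport_attrs[count] = &i->private_vport_attrs[count];
 *	count++;
 *
 * i.e. a read-only copy of the attribute is placed in the transport's
 * private array and exported through the NULL-terminated vport_attrs list.
 */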
#define SETUP_VPORT_ATTRIBUTE_WR(field) \ i->private_vport_attrs[count] = device_attr_vport_##field; \ i->vport_attrs[count] = &i->private_vport_attrs[count]; \ if (i->f->field) \ count++ /* NOTE: Above MACRO differs: checks function */ #define SETUP_VPORT_ATTRIBUTE_RW(field) \ i->private_vport_attrs[count] = device_attr_vport_##field; \ if (!i->f->set_vport_##field) { \ i->private_vport_attrs[count].attr.mode = S_IRUGO; \ i->private_vport_attrs[count].store = NULL; \ } \ i->vport_attrs[count] = &i->private_vport_attrs[count]; \ count++ /* NOTE: Above MACRO differs: does not check show bit */ #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \ { \ i->private_vport_attrs[count] = device_attr_vport_##field; \ i->vport_attrs[count] = &i->private_vport_attrs[count]; \ count++; \ } /* The FC Transport Virtual Port Attributes: */ /* Fixed Virtual Port Attributes */ /* Dynamic Virtual Port Attributes */ /* Private Virtual Port Attributes */ fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN); fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN); fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); static ssize_t show_fc_vport_roles (struct device *dev, struct device_attribute *attr, char *buf) { struct fc_vport *vport = transport_class_to_vport(dev); if (vport->roles == FC_PORT_ROLE_UNKNOWN) return snprintf(buf, 20, "unknown\n"); return get_fc_port_roles_names(vport->roles, buf); } static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL); fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN); fc_private_vport_show_function(symbolic_name, "%s\n", FC_VPORT_SYMBOLIC_NAMELEN + 1, ) fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN) static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR, show_fc_vport_symbolic_name, store_fc_vport_symbolic_name); static ssize_t store_fc_vport_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fc_vport *vport = transport_class_to_vport(dev); struct Scsi_Host *shost = vport_to_shost(vport); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { spin_unlock_irqrestore(shost->host_lock, flags); return -EBUSY; } vport->flags |= FC_VPORT_DELETING; spin_unlock_irqrestore(shost->host_lock, flags); fc_queue_work(shost, &vport->vport_delete_work); return count; } static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR, NULL, store_fc_vport_delete); /* * Enable/Disable vport * Write "1" to disable, write "0" to enable */ static ssize_t store_fc_vport_disable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fc_vport *vport = transport_class_to_vport(dev); struct Scsi_Host *shost = vport_to_shost(vport); struct fc_internal *i = to_fc_internal(shost->transportt); int stat; if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) return -EBUSY; if (*buf == '0') { if (vport->vport_state != FC_VPORT_DISABLED) return -EALREADY; } else if (*buf == '1') { if (vport->vport_state == FC_VPORT_DISABLED) return -EALREADY; } else return -EINVAL; stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true)); return stat ? 
stat : count; } static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR, NULL, store_fc_vport_disable); /* * Host Attribute Management */ #define fc_host_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_host_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ if (i->f->get_host_##field) \ i->f->get_host_##field(shost); \ return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ } #define fc_host_store_function(field) \ static ssize_t \ store_fc_host_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ char *cp; \ \ val = simple_strtoul(buf, &cp, 0); \ if (*cp && (*cp != '\n')) \ return -EINVAL; \ i->f->set_host_##field(shost, val); \ return count; \ } #define fc_host_store_str_function(field, slen) \ static ssize_t \ store_fc_host_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ unsigned int cnt=count; \ \ /* count may include a LF at end of string */ \ if (buf[cnt-1] == '\n') \ cnt--; \ if (cnt > ((slen) - 1)) \ return -EINVAL; \ memcpy(fc_host_##field(shost), buf, cnt); \ i->f->set_host_##field(shost); \ return count; \ } #define fc_host_rd_attr(field, format_string, sz) \ fc_host_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(host, field, S_IRUGO, \ show_fc_host_##field, NULL) #define fc_host_rd_attr_cast(field, format_string, sz, cast) \ fc_host_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(host, field, S_IRUGO, \ show_fc_host_##field, NULL) #define fc_host_rw_attr(field, format_string, sz) \ fc_host_show_function(field, format_string, sz, ) \ fc_host_store_function(field) \ static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \ show_fc_host_##field, \ store_fc_host_##field) #define fc_host_rd_enum_attr(title, maxlen) \ static ssize_t \ show_fc_host_##title (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ const char *name; \ if (i->f->get_host_##title) \ i->f->get_host_##title(shost); \ name = get_fc_##title##_name(fc_host_##title(shost)); \ if (!name) \ return -EINVAL; \ return snprintf(buf, maxlen, "%s\n", name); \ } \ static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL) #define SETUP_HOST_ATTRIBUTE_RD(field) \ i->private_host_attrs[count] = device_attr_host_##field; \ i->private_host_attrs[count].attr.mode = S_IRUGO; \ i->private_host_attrs[count].store = NULL; \ i->host_attrs[count] = &i->private_host_attrs[count]; \ if (i->f->show_host_##field) \ count++ #define SETUP_HOST_ATTRIBUTE_RD_NS(field) \ i->private_host_attrs[count] = device_attr_host_##field; \ i->private_host_attrs[count].attr.mode = S_IRUGO; \ i->private_host_attrs[count].store = NULL; \ i->host_attrs[count] = &i->private_host_attrs[count]; \ count++ #define SETUP_HOST_ATTRIBUTE_RW(field) \ i->private_host_attrs[count] = device_attr_host_##field; \ if (!i->f->set_host_##field) { \ i->private_host_attrs[count].attr.mode = S_IRUGO; \ i->private_host_attrs[count].store = NULL; \ } \ 
i->host_attrs[count] = &i->private_host_attrs[count]; \ if (i->f->show_host_##field) \ count++ #define fc_private_host_show_function(field, format_string, sz, cast) \ static ssize_t \ show_fc_host_##field (struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ } #define fc_private_host_rd_attr(field, format_string, sz) \ fc_private_host_show_function(field, format_string, sz, ) \ static FC_DEVICE_ATTR(host, field, S_IRUGO, \ show_fc_host_##field, NULL) #define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \ fc_private_host_show_function(field, format_string, sz, (cast)) \ static FC_DEVICE_ATTR(host, field, S_IRUGO, \ show_fc_host_##field, NULL) #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \ i->private_host_attrs[count] = device_attr_host_##field; \ i->private_host_attrs[count].attr.mode = S_IRUGO; \ i->private_host_attrs[count].store = NULL; \ i->host_attrs[count] = &i->private_host_attrs[count]; \ count++ #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \ { \ i->private_host_attrs[count] = device_attr_host_##field; \ i->host_attrs[count] = &i->private_host_attrs[count]; \ count++; \ } /* Fixed Host Attributes */ static ssize_t show_fc_host_supported_classes (struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(dev); if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED) return snprintf(buf, 20, "unspecified\n"); return get_fc_cos_names(fc_host_supported_classes(shost), buf); } static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO, show_fc_host_supported_classes, NULL); static ssize_t show_fc_host_supported_fc4s (struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(dev); return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost)); } static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO, show_fc_host_supported_fc4s, NULL); static ssize_t show_fc_host_supported_speeds (struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(dev); if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN) return snprintf(buf, 20, "unknown\n"); return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf); } static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO, show_fc_host_supported_speeds, NULL); fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, unsigned long long); fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20); fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1); fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1); fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1); fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1); fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1); /* Dynamic Host Attributes */ static ssize_t show_fc_host_active_fc4s (struct device *dev, struct 
device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_internal *i = to_fc_internal(shost->transportt); if (i->f->get_host_active_fc4s) i->f->get_host_active_fc4s(shost); return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost)); } static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO, show_fc_host_active_fc4s, NULL); static ssize_t show_fc_host_speed (struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_internal *i = to_fc_internal(shost->transportt); if (i->f->get_host_speed) i->f->get_host_speed(shost); if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN) return snprintf(buf, 20, "unknown\n"); return get_fc_port_speed_names(fc_host_speed(shost), buf); } static FC_DEVICE_ATTR(host, speed, S_IRUGO, show_fc_host_speed, NULL); fc_host_rd_attr(port_id, "0x%06x\n", 20); fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); fc_private_host_show_function(system_hostname, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1, ) fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE) static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR, show_fc_host_system_hostname, store_fc_host_system_hostname); /* Private Host Attributes */ static ssize_t show_fc_private_host_tgtid_bind_type(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(dev); const char *name; name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost)); if (!name) return -EINVAL; return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name); } #define get_list_head_entry(pos, head, member) \ pos = list_entry((head)->next, typeof(*pos), member) static ssize_t store_fc_private_host_tgtid_bind_type(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_rport *rport; enum fc_tgtid_binding_type val; unsigned long flags; if (get_fc_tgtid_bind_type_match(buf, &val)) return -EINVAL; /* if changing bind type, purge all unused consistent bindings */ if (val != fc_host_tgtid_bind_type(shost)) { spin_lock_irqsave(shost->host_lock, flags); while (!list_empty(&fc_host_rport_bindings(shost))) { get_list_head_entry(rport, &fc_host_rport_bindings(shost), peers); list_del(&rport->peers); rport->port_state = FC_PORTSTATE_DELETED; fc_queue_work(shost, &rport->rport_delete_work); } spin_unlock_irqrestore(shost->host_lock, flags); } fc_host_tgtid_bind_type(shost) = val; return count; } static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR, show_fc_private_host_tgtid_bind_type, store_fc_private_host_tgtid_bind_type); static ssize_t store_fc_private_host_issue_lip(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_internal *i = to_fc_internal(shost->transportt); int ret; /* ignore any data value written to the attribute */ if (i->f->issue_fc_host_lip) { ret = i->f->issue_fc_host_lip(shost); return ret ? 
ret: count; } return -ENOENT; } static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, store_fc_private_host_issue_lip); static ssize_t store_fc_private_host_dev_loss_tmo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_rport *rport; unsigned long val, flags; int rc; rc = fc_str_to_dev_loss(buf, &val); if (rc) return rc; fc_host_dev_loss_tmo(shost) = val; spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(rport, &fc_host->rports, peers) fc_rport_set_dev_loss_tmo(rport, val); spin_unlock_irqrestore(shost->host_lock, flags); return count; } fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, ); static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR, show_fc_host_dev_loss_tmo, store_fc_private_host_dev_loss_tmo); fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); /* * Host Statistics Management */ /* Show a given attribute in the statistics group */ static ssize_t fc_stat_show(const struct device *dev, char *buf, unsigned long offset) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_internal *i = to_fc_internal(shost->transportt); struct fc_host_statistics *stats; ssize_t ret = -ENOENT; if (offset > sizeof(struct fc_host_statistics) || offset % sizeof(u64) != 0) WARN_ON(1); if (i->f->get_fc_host_stats) { stats = (i->f->get_fc_host_stats)(shost); if (stats) ret = snprintf(buf, 20, "0x%llx\n", (unsigned long long)*(u64 *)(((u8 *) stats) + offset)); } return ret; } /* generate a read-only statistics attribute */ #define fc_host_statistic(name) \ static ssize_t show_fcstat_##name(struct device *cd, \ struct device_attribute *attr, \ char *buf) \ { \ return fc_stat_show(cd, buf, \ offsetof(struct fc_host_statistics, name)); \ } \ static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL) fc_host_statistic(seconds_since_last_reset); fc_host_statistic(tx_frames); fc_host_statistic(tx_words); fc_host_statistic(rx_frames); fc_host_statistic(rx_words); fc_host_statistic(lip_count); fc_host_statistic(nos_count); fc_host_statistic(error_frames); fc_host_statistic(dumped_frames); fc_host_statistic(link_failure_count); fc_host_statistic(loss_of_sync_count); fc_host_statistic(loss_of_signal_count); fc_host_statistic(prim_seq_protocol_err_count); fc_host_statistic(invalid_tx_word_count); fc_host_statistic(invalid_crc_count); fc_host_statistic(fcp_input_requests); fc_host_statistic(fcp_output_requests); fc_host_statistic(fcp_control_requests); fc_host_statistic(fcp_input_megabytes); fc_host_statistic(fcp_output_megabytes); fc_host_statistic(fcp_packet_alloc_failures); fc_host_statistic(fcp_packet_aborts); fc_host_statistic(fcp_frame_alloc_failures); fc_host_statistic(fc_no_free_exch); fc_host_statistic(fc_no_free_exch_xid); fc_host_statistic(fc_xid_not_found); fc_host_statistic(fc_xid_busy); fc_host_statistic(fc_seq_not_found); fc_host_statistic(fc_non_bls_resp); fc_host_statistic(cn_sig_warn); fc_host_statistic(cn_sig_alarm); #define fc_host_fpin_statistic(name) \ static ssize_t fc_host_fpinstat_##name(struct device *cd, \ struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = transport_class_to_shost(cd); \ struct fc_host_attrs *fc_host = shost_to_fc_host(shost); \ \ return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name); \ } \ static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL) fc_host_fpin_statistic(dn); fc_host_fpin_statistic(dn_unknown); 
fc_host_fpin_statistic(dn_timeout); fc_host_fpin_statistic(dn_unable_to_route); fc_host_fpin_statistic(dn_device_specific); fc_host_fpin_statistic(cn); fc_host_fpin_statistic(cn_clear); fc_host_fpin_statistic(cn_lost_credit); fc_host_fpin_statistic(cn_credit_stall); fc_host_fpin_statistic(cn_oversubscription); fc_host_fpin_statistic(cn_device_specific); fc_host_fpin_statistic(li); fc_host_fpin_statistic(li_failure_unknown); fc_host_fpin_statistic(li_link_failure_count); fc_host_fpin_statistic(li_loss_of_sync_count); fc_host_fpin_statistic(li_loss_of_signals_count); fc_host_fpin_statistic(li_prim_seq_err_count); fc_host_fpin_statistic(li_invalid_tx_word_count); fc_host_fpin_statistic(li_invalid_crc_count); fc_host_fpin_statistic(li_device_specific); static ssize_t fc_reset_statistics(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_internal *i = to_fc_internal(shost->transportt); /* ignore any data value written to the attribute */ if (i->f->reset_fc_host_stats) { i->f->reset_fc_host_stats(shost); return count; } return -ENOENT; } static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, fc_reset_statistics); static struct attribute *fc_statistics_attrs[] = { &device_attr_host_seconds_since_last_reset.attr, &device_attr_host_tx_frames.attr, &device_attr_host_tx_words.attr, &device_attr_host_rx_frames.attr, &device_attr_host_rx_words.attr, &device_attr_host_lip_count.attr, &device_attr_host_nos_count.attr, &device_attr_host_error_frames.attr, &device_attr_host_dumped_frames.attr, &device_attr_host_link_failure_count.attr, &device_attr_host_loss_of_sync_count.attr, &device_attr_host_loss_of_signal_count.attr, &device_attr_host_prim_seq_protocol_err_count.attr, &device_attr_host_invalid_tx_word_count.attr, &device_attr_host_invalid_crc_count.attr, &device_attr_host_fcp_input_requests.attr, &device_attr_host_fcp_output_requests.attr, &device_attr_host_fcp_control_requests.attr, &device_attr_host_fcp_input_megabytes.attr, &device_attr_host_fcp_output_megabytes.attr, &device_attr_host_fcp_packet_alloc_failures.attr, &device_attr_host_fcp_packet_aborts.attr, &device_attr_host_fcp_frame_alloc_failures.attr, &device_attr_host_fc_no_free_exch.attr, &device_attr_host_fc_no_free_exch_xid.attr, &device_attr_host_fc_xid_not_found.attr, &device_attr_host_fc_xid_busy.attr, &device_attr_host_fc_seq_not_found.attr, &device_attr_host_fc_non_bls_resp.attr, &device_attr_host_cn_sig_warn.attr, &device_attr_host_cn_sig_alarm.attr, &device_attr_host_reset_statistics.attr, &device_attr_host_fpin_dn.attr, &device_attr_host_fpin_dn_unknown.attr, &device_attr_host_fpin_dn_timeout.attr, &device_attr_host_fpin_dn_unable_to_route.attr, &device_attr_host_fpin_dn_device_specific.attr, &device_attr_host_fpin_li.attr, &device_attr_host_fpin_li_failure_unknown.attr, &device_attr_host_fpin_li_link_failure_count.attr, &device_attr_host_fpin_li_loss_of_sync_count.attr, &device_attr_host_fpin_li_loss_of_signals_count.attr, &device_attr_host_fpin_li_prim_seq_err_count.attr, &device_attr_host_fpin_li_invalid_tx_word_count.attr, &device_attr_host_fpin_li_invalid_crc_count.attr, &device_attr_host_fpin_li_device_specific.attr, &device_attr_host_fpin_cn.attr, &device_attr_host_fpin_cn_clear.attr, &device_attr_host_fpin_cn_lost_credit.attr, &device_attr_host_fpin_cn_credit_stall.attr, &device_attr_host_fpin_cn_oversubscription.attr, &device_attr_host_fpin_cn_device_specific.attr, NULL }; static struct attribute_group 
fc_statistics_group = { .name = "statistics", .attrs = fc_statistics_attrs, }; /* Host Vport Attributes */ static int fc_parse_wwn(const char *ns, u64 *nm) { unsigned int i, j; u8 wwn[8]; memset(wwn, 0, sizeof(wwn)); /* Validate and store the new name */ for (i=0, j=0; i < 16; i++) { int value; value = hex_to_bin(*ns++); if (value >= 0) j = (j << 4) | value; else return -EINVAL; if (i % 2) { wwn[i/2] = j & 0xff; j = 0; } } *nm = wwn_to_u64(wwn); return 0; } /* * "Short-cut" sysfs variable to create a new vport on a FC Host. * Input is a string of the form "<WWPN>:<WWNN>". Other attributes * will default to a NPIV-based FCP_Initiator; The WWNs are specified * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc) */ static ssize_t store_fc_host_vport_create(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_vport_identifiers vid; struct fc_vport *vport; unsigned int cnt=count; int stat; memset(&vid, 0, sizeof(vid)); /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; /* validate we have enough characters for WWPN */ if ((cnt != (16+1+16)) || (buf[16] != ':')) return -EINVAL; stat = fc_parse_wwn(&buf[0], &vid.port_name); if (stat) return stat; stat = fc_parse_wwn(&buf[17], &vid.node_name); if (stat) return stat; vid.roles = FC_PORT_ROLE_FCP_INITIATOR; vid.vport_type = FC_PORTTYPE_NPIV; /* vid.symbolic_name is already zero/NULL's */ vid.disable = false; /* always enabled */ /* we only allow support on Channel 0 !!! */ stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport); return stat ? stat : count; } static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, store_fc_host_vport_create); /* * "Short-cut" sysfs variable to delete a vport on a FC Host. * Vport is identified by a string containing "<WWPN>:<WWNN>". * The WWNs are specified as hex characters, and may *not* contain * any prefixes (e.g. 0x, x, etc) */ static ssize_t store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_vport *vport; u64 wwpn, wwnn; unsigned long flags; unsigned int cnt=count; int stat, match; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; /* validate we have enough characters for WWPN */ if ((cnt != (16+1+16)) || (buf[16] != ':')) return -EINVAL; stat = fc_parse_wwn(&buf[0], &wwpn); if (stat) return stat; stat = fc_parse_wwn(&buf[17], &wwnn); if (stat) return stat; spin_lock_irqsave(shost->host_lock, flags); match = 0; /* we only allow support on Channel 0 !!! */ list_for_each_entry(vport, &fc_host->vports, peers) { if ((vport->channel == 0) && (vport->port_name == wwpn) && (vport->node_name == wwnn)) { if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) break; vport->flags |= FC_VPORT_DELETING; match = 1; break; } } spin_unlock_irqrestore(shost->host_lock, flags); if (!match) return -ENODEV; stat = fc_vport_terminate(vport); return stat ? 
stat : count; } static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL, store_fc_host_vport_delete); static int fc_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct fc_internal *i; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &fc_host_class.class) return 0; i = to_fc_internal(shost->transportt); return &i->t.host_attrs.ac == cont; } static int fc_target_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct fc_internal *i; if (!scsi_is_target_device(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &fc_host_class.class) return 0; i = to_fc_internal(shost->transportt); return &i->t.target_attrs.ac == cont; } static void fc_rport_dev_release(struct device *dev) { struct fc_rport *rport = dev_to_rport(dev); put_device(dev->parent); kfree(rport); } int scsi_is_fc_rport(const struct device *dev) { return dev->release == fc_rport_dev_release; } EXPORT_SYMBOL(scsi_is_fc_rport); static int fc_rport_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct fc_internal *i; if (!scsi_is_fc_rport(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &fc_host_class.class) return 0; i = to_fc_internal(shost->transportt); return &i->rport_attr_cont.ac == cont; } static void fc_vport_dev_release(struct device *dev) { struct fc_vport *vport = dev_to_vport(dev); put_device(dev->parent); /* release kobj parent */ kfree(vport); } static int scsi_is_fc_vport(const struct device *dev) { return dev->release == fc_vport_dev_release; } static int fc_vport_match(struct attribute_container *cont, struct device *dev) { struct fc_vport *vport; struct Scsi_Host *shost; struct fc_internal *i; if (!scsi_is_fc_vport(dev)) return 0; vport = dev_to_vport(dev); shost = vport_to_shost(vport); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &fc_host_class.class) return 0; i = to_fc_internal(shost->transportt); return &i->vport_attr_cont.ac == cont; } /** * fc_eh_timed_out - FC Transport I/O timeout intercept handler * @scmd: The SCSI command which timed out * * This routine protects against error handlers getting invoked while a * rport is in a blocked state, typically due to a temporarily loss of * connectivity. If the error handlers are allowed to proceed, requests * to abort i/o, reset the target, etc will likely fail as there is no way * to communicate with the device to perform the requested function. These * failures may result in the midlayer taking the device offline, requiring * manual intervention to restore operation. * * This routine, called whenever an i/o times out, validates the state of * the underlying rport. If the rport is blocked, it returns * EH_RESET_TIMER, which will continue to reschedule the timeout. * Eventually, either the device will return, or devloss_tmo will fire, * and when the timeout then fires, it will be handled normally. * If the rport is not blocked, normal error handling continues. * * Notes: * This routine assumes no locks are held on entry. 
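 *
 * Example (illustrative, not from the original source): FC LLDDs
 * typically wire this handler into their SCSI host template so the
 * midlayer consults the rport state on every command timeout, e.g.
 *
 *	.eh_timed_out	= fc_eh_timed_out,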
*/ enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd) { struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); if (rport->port_state == FC_PORTSTATE_BLOCKED) return SCSI_EH_RESET_TIMER; return SCSI_EH_NOT_HANDLED; } EXPORT_SYMBOL(fc_eh_timed_out); /* * Called by fc_user_scan to locate an rport on the shost that * matches the channel and target id, and invoke scsi_scan_target() * on the rport. */ static void fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct fc_rport *rport; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(rport, &fc_host_rports(shost), peers) { if (rport->scsi_target_id == -1) continue; if ((rport->port_state != FC_PORTSTATE_ONLINE) && (rport->port_state != FC_PORTSTATE_MARGINAL)) continue; if ((channel == rport->channel) && (id == rport->scsi_target_id)) { spin_unlock_irqrestore(shost->host_lock, flags); scsi_scan_target(&rport->dev, channel, id, lun, SCSI_SCAN_MANUAL); return; } } spin_unlock_irqrestore(shost->host_lock, flags); } /* * Called via sysfs scan routines. Necessary, as the FC transport * wants to place all target objects below the rport object. So this * routine must invoke the scsi_scan_target() routine with the rport * object as the parent. */ static int fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { uint chlo, chhi; uint tgtlo, tgthi; if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) return -EINVAL; if (channel == SCAN_WILD_CARD) { chlo = 0; chhi = shost->max_channel + 1; } else { chlo = channel; chhi = channel + 1; } if (id == SCAN_WILD_CARD) { tgtlo = 0; tgthi = shost->max_id; } else { tgtlo = id; tgthi = id + 1; } for ( ; chlo < chhi; chlo++) for ( ; tgtlo < tgthi; tgtlo++) fc_user_scan_tgt(shost, chlo, tgtlo, lun); return 0; } struct scsi_transport_template * fc_attach_transport(struct fc_function_template *ft) { int count; struct fc_internal *i = kzalloc(sizeof(struct fc_internal), GFP_KERNEL); if (unlikely(!i)) return NULL; i->t.target_attrs.ac.attrs = &i->starget_attrs[0]; i->t.target_attrs.ac.class = &fc_transport_class.class; i->t.target_attrs.ac.match = fc_target_match; i->t.target_size = sizeof(struct fc_starget_attrs); transport_container_register(&i->t.target_attrs); i->t.host_attrs.ac.attrs = &i->host_attrs[0]; i->t.host_attrs.ac.class = &fc_host_class.class; i->t.host_attrs.ac.match = fc_host_match; i->t.host_size = sizeof(struct fc_host_attrs); if (ft->get_fc_host_stats) i->t.host_attrs.statistics = &fc_statistics_group; transport_container_register(&i->t.host_attrs); i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; i->rport_attr_cont.ac.class = &fc_rport_class.class; i->rport_attr_cont.ac.match = fc_rport_match; i->rport_attr_cont.statistics = &fc_rport_statistics_group; transport_container_register(&i->rport_attr_cont); i->vport_attr_cont.ac.attrs = &i->vport_attrs[0]; i->vport_attr_cont.ac.class = &fc_vport_class.class; i->vport_attr_cont.ac.match = fc_vport_match; transport_container_register(&i->vport_attr_cont); i->f = ft; /* Transport uses the shost workq for scsi scanning */ i->t.create_work_queue = 1; i->t.user_scan = fc_user_scan; /* * Setup SCSI Target Attributes. 
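 *
 * Editorial note, not in the original source: each SETUP_* invocation
 * below copies its attribute template into the transport-private array,
 * forces read-only mode where the LLDD supplies no set_*() callback,
 * and advances "count"; the arrays are NULL terminated and the BUG_ON()
 * checks guard against exceeding the FC_*_NUM_ATTRS bounds.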
*/ count = 0; SETUP_STARGET_ATTRIBUTE_RD(node_name); SETUP_STARGET_ATTRIBUTE_RD(port_name); SETUP_STARGET_ATTRIBUTE_RD(port_id); BUG_ON(count > FC_STARGET_NUM_ATTRS); i->starget_attrs[count] = NULL; /* * Setup SCSI Host Attributes. */ count=0; SETUP_HOST_ATTRIBUTE_RD(node_name); SETUP_HOST_ATTRIBUTE_RD(port_name); SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); SETUP_HOST_ATTRIBUTE_RD(supported_classes); SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); SETUP_HOST_ATTRIBUTE_RD(supported_speeds); SETUP_HOST_ATTRIBUTE_RD(maxframe_size); if (ft->vport_create) { SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports); SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse); } SETUP_HOST_ATTRIBUTE_RD(serial_number); SETUP_HOST_ATTRIBUTE_RD(manufacturer); SETUP_HOST_ATTRIBUTE_RD(model); SETUP_HOST_ATTRIBUTE_RD(model_description); SETUP_HOST_ATTRIBUTE_RD(hardware_version); SETUP_HOST_ATTRIBUTE_RD(driver_version); SETUP_HOST_ATTRIBUTE_RD(firmware_version); SETUP_HOST_ATTRIBUTE_RD(optionrom_version); SETUP_HOST_ATTRIBUTE_RD(port_id); SETUP_HOST_ATTRIBUTE_RD(port_type); SETUP_HOST_ATTRIBUTE_RD(port_state); SETUP_HOST_ATTRIBUTE_RD(active_fc4s); SETUP_HOST_ATTRIBUTE_RD(speed); SETUP_HOST_ATTRIBUTE_RD(fabric_name); SETUP_HOST_ATTRIBUTE_RD(symbolic_name); SETUP_HOST_ATTRIBUTE_RW(system_hostname); /* Transport-managed attributes */ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo); SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); if (ft->issue_fc_host_lip) SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); if (ft->vport_create) SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create); if (ft->vport_delete) SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete); BUG_ON(count > FC_HOST_NUM_ATTRS); i->host_attrs[count] = NULL; /* * Setup Remote Port Attributes. */ count=0; SETUP_RPORT_ATTRIBUTE_RD(maxframe_size); SETUP_RPORT_ATTRIBUTE_RD(supported_classes); SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo); BUG_ON(count > FC_RPORT_NUM_ATTRS); i->rport_attrs[count] = NULL; /* * Setup Virtual Port Attributes. */ count=0; SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state); SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state); SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name); SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name); SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles); SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type); SETUP_VPORT_ATTRIBUTE_RW(symbolic_name); SETUP_VPORT_ATTRIBUTE_WR(vport_delete); SETUP_VPORT_ATTRIBUTE_WR(vport_disable); BUG_ON(count > FC_VPORT_NUM_ATTRS); i->vport_attrs[count] = NULL; return &i->t; } EXPORT_SYMBOL(fc_attach_transport); void fc_release_transport(struct scsi_transport_template *t) { struct fc_internal *i = to_fc_internal(t); transport_container_unregister(&i->t.target_attrs); transport_container_unregister(&i->t.host_attrs); transport_container_unregister(&i->rport_attr_cont); transport_container_unregister(&i->vport_attr_cont); kfree(i); } EXPORT_SYMBOL(fc_release_transport); /** * fc_queue_work - Queue work to the fc_host workqueue. * @shost: Pointer to Scsi_Host bound to fc_host. * @work: Work to queue for execution. 
* * Return value: * 1 - work queued for execution * 0 - work is already queued * -EINVAL - work queue doesn't exist */ static int fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) { if (unlikely(!fc_host_work_q(shost))) { printk(KERN_ERR "ERROR: FC host '%s' attempted to queue work, " "when no workqueue created.\n", shost->hostt->name); dump_stack(); return -EINVAL; } return queue_work(fc_host_work_q(shost), work); } /** * fc_flush_work - Flush a fc_host's workqueue. * @shost: Pointer to Scsi_Host bound to fc_host. */ static void fc_flush_work(struct Scsi_Host *shost) { if (!fc_host_work_q(shost)) { printk(KERN_ERR "ERROR: FC host '%s' attempted to flush work, " "when no workqueue created.\n", shost->hostt->name); dump_stack(); return; } flush_workqueue(fc_host_work_q(shost)); } /** * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue. * @shost: Pointer to Scsi_Host bound to fc_host. * @work: Work to queue for execution. * @delay: jiffies to delay the work queuing * * Return value: * 1 on success / 0 already queued / < 0 for error */ static int fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, unsigned long delay) { if (unlikely(!fc_host_devloss_work_q(shost))) { printk(KERN_ERR "ERROR: FC host '%s' attempted to queue work, " "when no workqueue created.\n", shost->hostt->name); dump_stack(); return -EINVAL; } return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); } /** * fc_flush_devloss - Flush a fc_host's devloss workqueue. * @shost: Pointer to Scsi_Host bound to fc_host. */ static void fc_flush_devloss(struct Scsi_Host *shost) { if (!fc_host_devloss_work_q(shost)) { printk(KERN_ERR "ERROR: FC host '%s' attempted to flush work, " "when no workqueue created.\n", shost->hostt->name); dump_stack(); return; } flush_workqueue(fc_host_devloss_work_q(shost)); } /** * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host. * @shost: Which &Scsi_Host * * This routine is expected to be called immediately preceding the * a driver's call to scsi_remove_host(). * * WARNING: A driver utilizing the fc_transport, which fails to call * this routine prior to scsi_remove_host(), will leave dangling * objects in /sys/class/fc_remote_ports. Access to any of these * objects can result in a system crash !!! * * Notes: * This routine assumes no locks are held on entry. 
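 *
 * Illustrative call order in a hypothetical LLDD remove path (example
 * only, not from the original source):
 *
 *	fc_remove_host(shost);
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);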
*/ void fc_remove_host(struct Scsi_Host *shost) { struct fc_vport *vport = NULL, *next_vport = NULL; struct fc_rport *rport = NULL, *next_rport = NULL; struct workqueue_struct *work_q; struct fc_host_attrs *fc_host = shost_to_fc_host(shost); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); /* Remove any vports */ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { vport->flags |= FC_VPORT_DELETING; fc_queue_work(shost, &vport->vport_delete_work); } /* Remove any remote ports */ list_for_each_entry_safe(rport, next_rport, &fc_host->rports, peers) { list_del(&rport->peers); rport->port_state = FC_PORTSTATE_DELETED; fc_queue_work(shost, &rport->rport_delete_work); } list_for_each_entry_safe(rport, next_rport, &fc_host->rport_bindings, peers) { list_del(&rport->peers); rport->port_state = FC_PORTSTATE_DELETED; fc_queue_work(shost, &rport->rport_delete_work); } spin_unlock_irqrestore(shost->host_lock, flags); /* flush all scan work items */ scsi_flush_work(shost); /* flush all stgt delete, and rport delete work items, then kill it */ if (fc_host->work_q) { work_q = fc_host->work_q; fc_host->work_q = NULL; destroy_workqueue(work_q); } /* flush all devloss work items, then kill it */ if (fc_host->devloss_work_q) { work_q = fc_host->devloss_work_q; fc_host->devloss_work_q = NULL; destroy_workqueue(work_q); } } EXPORT_SYMBOL(fc_remove_host); static void fc_terminate_rport_io(struct fc_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); /* Involve the LLDD if possible to terminate all io on the rport. */ if (i->f->terminate_rport_io) i->f->terminate_rport_io(rport); /* * Must unblock to flush queued IO. scsi-ml will fail incoming reqs. */ scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); } /** * fc_starget_delete - called to delete the scsi descendants of an rport * @work: remote port to be operated on. * * Deletes target and all sdevs. */ static void fc_starget_delete(struct work_struct *work) { struct fc_rport *rport = container_of(work, struct fc_rport, stgt_delete_work); fc_terminate_rport_io(rport); scsi_remove_target(&rport->dev); } /** * fc_rport_final_delete - finish rport termination and delete it. * @work: remote port to be deleted. */ static void fc_rport_final_delete(struct work_struct *work) { struct fc_rport *rport = container_of(work, struct fc_rport, rport_delete_work); struct device *dev = &rport->dev; struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); unsigned long flags; int do_callback = 0; fc_terminate_rport_io(rport); /* * if a scan is pending, flush the SCSI Host work_q so that * that we can reclaim the rport scan work element. */ if (rport->flags & FC_RPORT_SCAN_PENDING) scsi_flush_work(shost); /* * Cancel any outstanding timers. 
These should really exist * only when rmmod'ing the LLDD and we're asking for * immediate termination of the rports */ spin_lock_irqsave(shost->host_lock, flags); if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { spin_unlock_irqrestore(shost->host_lock, flags); if (!cancel_delayed_work(&rport->fail_io_work)) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); cancel_work_sync(&rport->scan_work); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; } spin_unlock_irqrestore(shost->host_lock, flags); /* Delete SCSI target and sdevs */ if (rport->scsi_target_id != -1) fc_starget_delete(&rport->stgt_delete_work); /* * Notify the driver that the rport is now dead. The LLDD will * also guarantee that any communication to the rport is terminated * * Avoid this call if we already called it when we preserved the * rport for the binding. */ spin_lock_irqsave(shost->host_lock, flags); if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && (i->f->dev_loss_tmo_callbk)) { rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; do_callback = 1; } spin_unlock_irqrestore(shost->host_lock, flags); if (do_callback) i->f->dev_loss_tmo_callbk(rport); fc_bsg_remove(rport->rqst_q); transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); scsi_host_put(shost); /* for fc_host->rport list */ put_device(dev); /* for self-reference */ } /** * fc_remote_port_create - allocates and creates a remote FC port. * @shost: scsi host the remote port is connected to. * @channel: Channel on shost port connected to. * @ids: The world wide names, fc address, and FC4 port * roles for the remote port. * * Allocates and creates the remoter port structure, including the * class and sysfs creation. * * Notes: * This routine assumes no locks are held on entry. 
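 *
 * Example of the identifiers an LLDD might pass down (illustrative
 * placeholder values only; node_wwn/port_wwn are hypothetical locals):
 *
 *	struct fc_rport_identifiers ids = {
 *		.node_name = wwn_to_u64(node_wwn),
 *		.port_name = wwn_to_u64(port_wwn),
 *		.port_id   = 0x010200,
 *		.roles     = FC_PORT_ROLE_FCP_TARGET,
 *	};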
*/ static struct fc_rport * fc_remote_port_create(struct Scsi_Host *shost, int channel, struct fc_rport_identifiers *ids) { struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_internal *fci = to_fc_internal(shost->transportt); struct fc_rport *rport; struct device *dev; unsigned long flags; int error; size_t size; size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); rport = kzalloc(size, GFP_KERNEL); if (unlikely(!rport)) { printk(KERN_ERR "%s: allocation failure\n", __func__); return NULL; } rport->maxframe_size = -1; rport->supported_classes = FC_COS_UNSPECIFIED; rport->dev_loss_tmo = fc_host->dev_loss_tmo; memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; rport->roles = ids->roles; rport->port_state = FC_PORTSTATE_ONLINE; if (fci->f->dd_fcrport_size) rport->dd_data = &rport[1]; rport->channel = channel; rport->fast_io_fail_tmo = -1; INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); spin_lock_irqsave(shost->host_lock, flags); rport->number = fc_host->next_rport_number++; if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) || (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR)) rport->scsi_target_id = fc_host->next_target_id++; else rport->scsi_target_id = -1; list_add_tail(&rport->peers, &fc_host->rports); scsi_host_get(shost); /* for fc_host->rport list */ spin_unlock_irqrestore(shost->host_lock, flags); dev = &rport->dev; device_initialize(dev); /* takes self reference */ dev->parent = get_device(&shost->shost_gendev); /* parent reference */ dev->release = fc_rport_dev_release; dev_set_name(dev, "rport-%d:%d-%d", shost->host_no, channel, rport->number); transport_setup_device(dev); error = device_add(dev); if (error) { printk(KERN_ERR "FC Remote Port device_add failed\n"); goto delete_rport; } transport_add_device(dev); transport_configure_device(dev); fc_bsg_rportadd(shost, rport); /* ignore any bsg add error - we just can't do sgio */ if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { /* initiate a scan of the target */ rport->flags |= FC_RPORT_SCAN_PENDING; scsi_queue_work(shost, &rport->scan_work); } return rport; delete_rport: transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); list_del(&rport->peers); scsi_host_put(shost); /* for fc_host->rport list */ spin_unlock_irqrestore(shost->host_lock, flags); put_device(dev->parent); kfree(rport); return NULL; } /** * fc_remote_port_add - notify fc transport of the existence of a remote FC port. * @shost: scsi host the remote port is connected to. * @channel: Channel on shost port connected to. * @ids: The world wide names, fc address, and FC4 port * roles for the remote port. * * The LLDD calls this routine to notify the transport of the existence * of a remote port. The LLDD provides the unique identifiers (wwpn,wwn) * of the port, it's FC address (port_id), and the FC4 roles that are * active for the port. * * For ports that are FCP targets (aka scsi targets), the FC transport * maintains consistent target id bindings on behalf of the LLDD. * A consistent target id binding is an assignment of a target id to * a remote port identifier, which persists while the scsi host is * attached. 
The remote port can disappear, then later reappear, and * it's target id assignment remains the same. This allows for shifts * in FC addressing (if binding by wwpn or wwnn) with no apparent * changes to the scsi subsystem which is based on scsi host number and * target id values. Bindings are only valid during the attachment of * the scsi host. If the host detaches, then later re-attaches, target * id bindings may change. * * This routine is responsible for returning a remote port structure. * The routine will search the list of remote ports it maintains * internally on behalf of consistent target id mappings. If found, the * remote port structure will be reused. Otherwise, a new remote port * structure will be allocated. * * Whenever a remote port is allocated, a new fc_remote_port class * device is created. * * Should not be called from interrupt context. * * Notes: * This routine assumes no locks are held on entry. */ struct fc_rport * fc_remote_port_add(struct Scsi_Host *shost, int channel, struct fc_rport_identifiers *ids) { struct fc_internal *fci = to_fc_internal(shost->transportt); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_rport *rport; unsigned long flags; int match = 0; /* ensure any stgt delete functions are done */ fc_flush_work(shost); /* * Search the list of "active" rports, for an rport that has been * deleted, but we've held off the real delete while the target * is in a "blocked" state. */ spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(rport, &fc_host->rports, peers) { if ((rport->port_state == FC_PORTSTATE_BLOCKED || rport->port_state == FC_PORTSTATE_NOTPRESENT) && (rport->channel == channel)) { switch (fc_host->tgtid_bind_type) { case FC_TGTID_BIND_BY_WWPN: case FC_TGTID_BIND_NONE: if (rport->port_name == ids->port_name) match = 1; break; case FC_TGTID_BIND_BY_WWNN: if (rport->node_name == ids->node_name) match = 1; break; case FC_TGTID_BIND_BY_ID: if (rport->port_id == ids->port_id) match = 1; break; } if (match) { memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; rport->port_state = FC_PORTSTATE_ONLINE; rport->roles = ids->roles; spin_unlock_irqrestore(shost->host_lock, flags); if (fci->f->dd_fcrport_size) memset(rport->dd_data, 0, fci->f->dd_fcrport_size); /* * If we were not a target, cancel the * io terminate and rport timers, and * we're done. * * If we were a target, but our new role * doesn't indicate a target, leave the * timers running expecting the role to * change as the target fully logs in. If * it doesn't, the target will be torn down. * * If we were a target, and our role shows * we're still a target, cancel the timers * and kick off a scan. */ /* was a target, not in roles */ if ((rport->scsi_target_id != -1) && (!(ids->roles & FC_PORT_ROLE_FCP_TARGET))) return rport; /* * Stop the fail io and dev_loss timers. * If they flush, the port_state will * be checked and will NOOP the function. 
*/ if (!cancel_delayed_work(&rport->fail_io_work)) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | FC_RPORT_DEVLOSS_PENDING | FC_RPORT_DEVLOSS_CALLBK_DONE); spin_unlock_irqrestore(shost->host_lock, flags); /* if target, initiate a scan */ if (rport->scsi_target_id != -1) { scsi_target_unblock(&rport->dev, SDEV_RUNNING); spin_lock_irqsave(shost->host_lock, flags); rport->flags |= FC_RPORT_SCAN_PENDING; scsi_queue_work(shost, &rport->scan_work); spin_unlock_irqrestore(shost->host_lock, flags); } fc_bsg_goose_queue(rport); return rport; } } } /* * Search the bindings array * Note: if never a FCP target, you won't be on this list */ if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) { /* search for a matching consistent binding */ list_for_each_entry(rport, &fc_host->rport_bindings, peers) { if (rport->channel != channel) continue; switch (fc_host->tgtid_bind_type) { case FC_TGTID_BIND_BY_WWPN: if (rport->port_name == ids->port_name) match = 1; break; case FC_TGTID_BIND_BY_WWNN: if (rport->node_name == ids->node_name) match = 1; break; case FC_TGTID_BIND_BY_ID: if (rport->port_id == ids->port_id) match = 1; break; case FC_TGTID_BIND_NONE: /* to keep compiler happy */ break; } if (match) { list_move_tail(&rport->peers, &fc_host->rports); break; } } if (match) { memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; rport->port_state = FC_PORTSTATE_ONLINE; rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; if (fci->f->dd_fcrport_size) memset(rport->dd_data, 0, fci->f->dd_fcrport_size); spin_unlock_irqrestore(shost->host_lock, flags); fc_remote_port_rolechg(rport, ids->roles); return rport; } } spin_unlock_irqrestore(shost->host_lock, flags); /* No consistent binding found - create new remote port entry */ rport = fc_remote_port_create(shost, channel, ids); return rport; } EXPORT_SYMBOL(fc_remote_port_add); /** * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence. * @rport: The remote port that no longer exists * * The LLDD calls this routine to notify the transport that a remote * port is no longer part of the topology. Note: Although a port * may no longer be part of the topology, it may persist in the remote * ports displayed by the fc_host. We do this under 2 conditions: * * 1) If the port was a scsi target, we delay its deletion by "blocking" it. * This allows the port to temporarily disappear, then reappear without * disrupting the SCSI device tree attached to it. During the "blocked" * period the port will still exist. * * 2) If the port was a scsi target and disappears for longer than we * expect, we'll delete the port and the tear down the SCSI device tree * attached to it. However, we want to semi-persist the target id assigned * to that port if it eventually does exist. The port structure will * remain (although with minimal information) so that the target id * bindings also remain. * * If the remote port is not an FCP Target, it will be fully torn down * and deallocated, including the fc_remote_port class device. * * If the remote port is an FCP Target, the port will be placed in a * temporary blocked state. From the LLDD's perspective, the rport no * longer exists. From the SCSI midlayer's perspective, the SCSI target * exists, but all sdevs on it are blocked from further I/O. 
The following
 * is then expected.
 *
 * If the remote port does not return (signaled by a LLDD call to
 * fc_remote_port_add()) within the dev_loss_tmo timeout, then the
 * scsi target is removed - killing all outstanding i/o and removing the
 * scsi devices attached to it. The port structure will be marked Not
 * Present and be partially cleared, leaving only enough information to
 * recognize the remote port relative to the scsi target id binding if
 * it later appears. The port will remain as long as there is a valid
 * binding (e.g. until the user changes the binding type or unloads the
 * scsi host with the binding).
 *
 * If the remote port returns within the dev_loss_tmo value (and matches
 * according to the target id binding type), the port structure will be
 * reused. If it is no longer a SCSI target, the target will be torn
 * down. If it continues to be a SCSI target, then the target will be
 * unblocked (allowing i/o to be resumed), and a scan will be activated
 * to ensure that all luns are detected.
 *
 * Called from normal process context only - cannot be called from interrupt.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_remote_port_delete(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	unsigned long timeout = rport->dev_loss_tmo;
	unsigned long flags;

	/*
	 * No need to flush the fc_host work_q's, as all adds are synchronous.
	 *
	 * We do need to reclaim the rport scan work element, so eventually
	 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
	 * there's still a scan pending.
	 */

	spin_lock_irqsave(shost->host_lock, flags);

	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
	    (rport->port_state != FC_PORTSTATE_MARGINAL)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	/*
	 * In the past, if this was not an FCP-Target, we would
	 * unconditionally just jump to deleting the rport.
	 * However, rports can be used as node containers by the LLDD,
	 * and it's not appropriate to just terminate the rport at the
	 * first sign of a loss in connectivity. The LLDD may want to
	 * send ELS traffic to re-validate the login. If the rport is
	 * immediately deleted, it makes it inappropriate for a node
	 * container.
	 * So... we now unconditionally wait dev_loss_tmo before
	 * destroying an rport.
	 */
	rport->port_state = FC_PORTSTATE_BLOCKED;

	rport->flags |= FC_RPORT_DEVLOSS_PENDING;

	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_block_targets(shost, &rport->dev);

	/* see if we need to kill io faster than waiting for device loss */
	if ((rport->fast_io_fail_tmo != -1) &&
	    (rport->fast_io_fail_tmo < timeout))
		fc_queue_devloss_work(shost, &rport->fail_io_work,
					rport->fast_io_fail_tmo * HZ);

	/* cap the length the devices can be blocked until they are deleted */
	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
}
EXPORT_SYMBOL(fc_remote_port_delete);

/**
 * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
 * @rport:	The remote port that changed.
 * @roles:	New roles for this port.
 *
 * Description: The LLDD calls this routine to notify the transport that the
 * roles on a remote port may have changed. The largest effect of this is
 * if a port now becomes a FCP Target, it must be allocated a
 * scsi target id. If the port is no longer a FCP target, any
 * scsi target id value assigned to it will persist in case the
 * role changes back to include FCP Target. No changes in the scsi
 * midlayer will be invoked if the role changes (in the expectation
 * that the role will be resumed.
If it doesn't normal error processing * will take place). * * Should not be called from interrupt context. * * Notes: * This routine assumes no locks are held on entry. */ void fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) { struct Scsi_Host *shost = rport_to_shost(rport); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); unsigned long flags; int create = 0; spin_lock_irqsave(shost->host_lock, flags); if (roles & FC_PORT_ROLE_FCP_TARGET) { if (rport->scsi_target_id == -1) { rport->scsi_target_id = fc_host->next_target_id++; create = 1; } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) create = 1; } rport->roles = roles; spin_unlock_irqrestore(shost->host_lock, flags); if (create) { /* * There may have been a delete timer running on the * port. Ensure that it is cancelled as we now know * the port is an FCP Target. * Note: we know the rport exists and is in an online * state as the LLDD would not have had an rport * reference to pass us. * * Take no action on the del_timer failure as the state * machine state change will validate the * transaction. */ if (!cancel_delayed_work(&rport->fail_io_work)) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | FC_RPORT_DEVLOSS_PENDING | FC_RPORT_DEVLOSS_CALLBK_DONE); spin_unlock_irqrestore(shost->host_lock, flags); /* ensure any stgt delete functions are done */ fc_flush_work(shost); scsi_target_unblock(&rport->dev, SDEV_RUNNING); /* initiate a scan of the target */ spin_lock_irqsave(shost->host_lock, flags); rport->flags |= FC_RPORT_SCAN_PENDING; scsi_queue_work(shost, &rport->scan_work); spin_unlock_irqrestore(shost->host_lock, flags); } } EXPORT_SYMBOL(fc_remote_port_rolechg); /** * fc_timeout_deleted_rport - Timeout handler for a deleted remote port. * @work: rport target that failed to reappear in the allotted time. * * Description: An attempt to delete a remote port blocks, and if it fails * to return in the allotted time this gets called. */ static void fc_timeout_deleted_rport(struct work_struct *work) { struct fc_rport *rport = container_of(work, struct fc_rport, dev_loss_work.work); struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); unsigned long flags; int do_callback = 0; spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; /* * If the port is ONLINE, then it came back. If it was a SCSI * target, validate it still is. If not, tear down the * scsi_target on it. */ if (((rport->port_state == FC_PORTSTATE_ONLINE) || (rport->port_state == FC_PORTSTATE_MARGINAL)) && (rport->scsi_target_id != -1) && !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) { dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: no longer" " a FCP target, removing starget\n"); spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); fc_queue_work(shost, &rport->stgt_delete_work); return; } /* NOOP state - we're flushing workq's */ if (rport->port_state != FC_PORTSTATE_BLOCKED) { spin_unlock_irqrestore(shost->host_lock, flags); dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: leaving" " rport%s alone\n", (rport->scsi_target_id != -1) ? 
" and starget" : ""); return; } if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) || (rport->scsi_target_id == -1)) { list_del(&rport->peers); rport->port_state = FC_PORTSTATE_DELETED; dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: removing" " rport%s\n", (rport->scsi_target_id != -1) ? " and starget" : ""); fc_queue_work(shost, &rport->rport_delete_work); spin_unlock_irqrestore(shost->host_lock, flags); return; } dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: removing target and " "saving binding\n"); list_move_tail(&rport->peers, &fc_host->rport_bindings); /* * Note: We do not remove or clear the hostdata area. This allows * host-specific target data to persist along with the * scsi_target_id. It's up to the host to manage it's hostdata area. */ /* * Reinitialize port attributes that may change if the port comes back. */ rport->maxframe_size = -1; rport->supported_classes = FC_COS_UNSPECIFIED; rport->roles = FC_PORT_ROLE_UNKNOWN; rport->port_state = FC_PORTSTATE_NOTPRESENT; rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; /* * Pre-emptively kill I/O rather than waiting for the work queue * item to teardown the starget. (FCOE libFC folks prefer this * and to have the rport_port_id still set when it's done). */ spin_unlock_irqrestore(shost->host_lock, flags); fc_terminate_rport_io(rport); spin_lock_irqsave(shost->host_lock, flags); if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */ /* remove the identifiers that aren't used in the consisting binding */ switch (fc_host->tgtid_bind_type) { case FC_TGTID_BIND_BY_WWPN: rport->node_name = -1; rport->port_id = -1; break; case FC_TGTID_BIND_BY_WWNN: rport->port_name = -1; rport->port_id = -1; break; case FC_TGTID_BIND_BY_ID: rport->node_name = -1; rport->port_name = -1; break; case FC_TGTID_BIND_NONE: /* to keep compiler happy */ break; } /* * As this only occurs if the remote port (scsi target) * went away and didn't come back - we'll remove * all attached scsi devices. */ rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; fc_queue_work(shost, &rport->stgt_delete_work); do_callback = 1; } spin_unlock_irqrestore(shost->host_lock, flags); /* * Notify the driver that the rport is now dead. The LLDD will * also guarantee that any communication to the rport is terminated * * Note: we set the CALLBK_DONE flag above to correspond */ if (do_callback && i->f->dev_loss_tmo_callbk) i->f->dev_loss_tmo_callbk(rport); } /** * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target. * @work: rport to terminate io on. * * Notes: Only requests the failure of the io, not that all are flushed * prior to returning. */ static void fc_timeout_fail_rport_io(struct work_struct *work) { struct fc_rport *rport = container_of(work, struct fc_rport, fail_io_work.work); if (rport->port_state != FC_PORTSTATE_BLOCKED) return; rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT; fc_terminate_rport_io(rport); } /** * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. * @work: remote port to be scanned. 
*/ static void fc_scsi_scan_rport(struct work_struct *work) { struct fc_rport *rport = container_of(work, struct fc_rport, scan_work); struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); unsigned long flags; if (((rport->port_state == FC_PORTSTATE_ONLINE) || (rport->port_state == FC_PORTSTATE_MARGINAL)) && (rport->roles & FC_PORT_ROLE_FCP_TARGET) && !(i->f->disable_target_scan)) { scsi_scan_target(&rport->dev, rport->channel, rport->scsi_target_id, SCAN_WILD_CARD, SCSI_SCAN_RESCAN); } spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_SCAN_PENDING; spin_unlock_irqrestore(shost->host_lock, flags); } /** * fc_block_rport() - Block SCSI eh thread for blocked fc_rport. * @rport: Remote port that scsi_eh is trying to recover. * * This routine can be called from a FC LLD scsi_eh callback. It * blocks the scsi_eh thread until the fc_rport leaves the * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is * necessary to avoid the scsi_eh failing recovery actions for blocked * rports which would lead to offlined SCSI devices. * * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be * passed back to scsi_eh. */ int fc_block_rport(struct fc_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); while (rport->port_state == FC_PORTSTATE_BLOCKED && !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) { spin_unlock_irqrestore(shost->host_lock, flags); msleep(1000); spin_lock_irqsave(shost->host_lock, flags); } spin_unlock_irqrestore(shost->host_lock, flags); if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT) return FAST_IO_FAIL; return 0; } EXPORT_SYMBOL(fc_block_rport); /** * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport * @cmnd: SCSI command that scsi_eh is trying to recover * * This routine can be called from a FC LLD scsi_eh callback. It * blocks the scsi_eh thread until the fc_rport leaves the * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is * necessary to avoid the scsi_eh failing recovery actions for blocked * rports which would lead to offlined SCSI devices. * * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be * passed back to scsi_eh. */ int fc_block_scsi_eh(struct scsi_cmnd *cmnd) { struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); if (WARN_ON_ONCE(!rport)) return FAST_IO_FAIL; return fc_block_rport(rport); } EXPORT_SYMBOL(fc_block_scsi_eh); /* * fc_eh_should_retry_cmd - Checks if the cmd should be retried or not * @scmd: The SCSI command to be checked * * This checks the rport state to decide if a cmd is * retryable. * * Returns: true if the rport state is not in marginal state. */ bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd) { struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); if ((rport->port_state != FC_PORTSTATE_ONLINE) && (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) { set_host_byte(scmd, DID_TRANSPORT_MARGINAL); return false; } return true; } EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd); /** * fc_vport_setup - allocates and creates a FC virtual port. * @shost: scsi host the virtual port is connected to. * @channel: Channel on shost port connected to. * @pdev: parent device for vport * @ids: The world wide names, FC4 port roles, etc for * the virtual port. * @ret_vport: The pointer to the created vport. 
* * Allocates and creates the vport structure, calls the parent host * to instantiate the vport, this completes w/ class and sysfs creation. * * Notes: * This routine assumes no locks are held on entry. */ static int fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) { struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_internal *fci = to_fc_internal(shost->transportt); struct fc_vport *vport; struct device *dev; unsigned long flags; size_t size; int error; *ret_vport = NULL; if ( ! fci->f->vport_create) return -ENOENT; size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); vport = kzalloc(size, GFP_KERNEL); if (unlikely(!vport)) { printk(KERN_ERR "%s: allocation failure\n", __func__); return -ENOMEM; } vport->vport_state = FC_VPORT_UNKNOWN; vport->vport_last_state = FC_VPORT_UNKNOWN; vport->node_name = ids->node_name; vport->port_name = ids->port_name; vport->roles = ids->roles; vport->vport_type = ids->vport_type; if (fci->f->dd_fcvport_size) vport->dd_data = &vport[1]; vport->shost = shost; vport->channel = channel; vport->flags = FC_VPORT_CREATING; INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete); spin_lock_irqsave(shost->host_lock, flags); if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) { spin_unlock_irqrestore(shost->host_lock, flags); kfree(vport); return -ENOSPC; } fc_host->npiv_vports_inuse++; vport->number = fc_host->next_vport_number++; list_add_tail(&vport->peers, &fc_host->vports); scsi_host_get(shost); /* for fc_host->vport list */ spin_unlock_irqrestore(shost->host_lock, flags); dev = &vport->dev; device_initialize(dev); /* takes self reference */ dev->parent = get_device(pdev); /* takes parent reference */ dev->release = fc_vport_dev_release; dev_set_name(dev, "vport-%d:%d-%d", shost->host_no, channel, vport->number); transport_setup_device(dev); error = device_add(dev); if (error) { printk(KERN_ERR "FC Virtual Port device_add failed\n"); goto delete_vport; } transport_add_device(dev); transport_configure_device(dev); error = fci->f->vport_create(vport, ids->disable); if (error) { printk(KERN_ERR "FC Virtual Port LLDD Create failed\n"); goto delete_vport_all; } /* * if the parent isn't the physical adapter's Scsi_Host, ensure * the Scsi_Host at least contains a symlink to the vport. */ if (pdev != &shost->shost_gendev) { error = sysfs_create_link(&shost->shost_gendev.kobj, &dev->kobj, dev_name(dev)); if (error) printk(KERN_ERR "%s: Cannot create vport symlinks for " "%s, err=%d\n", __func__, dev_name(dev), error); } spin_lock_irqsave(shost->host_lock, flags); vport->flags &= ~FC_VPORT_CREATING; spin_unlock_irqrestore(shost->host_lock, flags); dev_printk(KERN_NOTICE, pdev, "%s created via shost%d channel %d\n", dev_name(dev), shost->host_no, channel); *ret_vport = vport; return 0; delete_vport_all: transport_remove_device(dev); device_del(dev); delete_vport: transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); list_del(&vport->peers); scsi_host_put(shost); /* for fc_host->vport list */ fc_host->npiv_vports_inuse--; spin_unlock_irqrestore(shost->host_lock, flags); put_device(dev->parent); kfree(vport); return error; } /** * fc_vport_create - Admin App or LLDD requests creation of a vport * @shost: scsi host the virtual port is connected to. * @channel: channel on shost port connected to. * @ids: The world wide names, FC4 port roles, etc for * the virtual port. * * Notes: * This routine assumes no locks are held on entry. 
*/ struct fc_vport * fc_vport_create(struct Scsi_Host *shost, int channel, struct fc_vport_identifiers *ids) { int stat; struct fc_vport *vport; stat = fc_vport_setup(shost, channel, &shost->shost_gendev, ids, &vport); return stat ? NULL : vport; } EXPORT_SYMBOL(fc_vport_create); /** * fc_vport_terminate - Admin App or LLDD requests termination of a vport * @vport: fc_vport to be terminated * * Calls the LLDD vport_delete() function, then deallocates and removes * the vport from the shost and object tree. * * Notes: * This routine assumes no locks are held on entry. */ int fc_vport_terminate(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); struct fc_internal *i = to_fc_internal(shost->transportt); struct device *dev = &vport->dev; unsigned long flags; int stat; if (i->f->vport_delete) stat = i->f->vport_delete(vport); else stat = -ENOENT; spin_lock_irqsave(shost->host_lock, flags); vport->flags &= ~FC_VPORT_DELETING; if (!stat) { vport->flags |= FC_VPORT_DELETED; list_del(&vport->peers); fc_host->npiv_vports_inuse--; scsi_host_put(shost); /* for fc_host->vport list */ } spin_unlock_irqrestore(shost->host_lock, flags); if (stat) return stat; if (dev->parent != &shost->shost_gendev) sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev)); transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); /* * Removing our self-reference should mean our * release function gets called, which will drop the remaining * parent reference and free the data structure. */ put_device(dev); /* for self-reference */ return 0; /* SUCCESS */ } EXPORT_SYMBOL(fc_vport_terminate); /** * fc_vport_sched_delete - workq-based delete request for a vport * @work: vport to be deleted. 
*/ static void fc_vport_sched_delete(struct work_struct *work) { struct fc_vport *vport = container_of(work, struct fc_vport, vport_delete_work); int stat; stat = fc_vport_terminate(vport); if (stat) dev_printk(KERN_ERR, vport->dev.parent, "%s: %s could not be deleted created via " "shost%d channel %d - error %d\n", __func__, dev_name(&vport->dev), vport->shost->host_no, vport->channel, stat); } /* * BSG support */ /** * fc_bsg_job_timeout - handler for when a bsg request timesout * @req: request that timed out */ static enum blk_eh_timer_return fc_bsg_job_timeout(struct request *req) { struct bsg_job *job = blk_mq_rq_to_pdu(req); struct Scsi_Host *shost = fc_bsg_to_shost(job); struct fc_rport *rport = fc_bsg_to_rport(job); struct fc_internal *i = to_fc_internal(shost->transportt); int err = 0, inflight = 0; if (rport && rport->port_state == FC_PORTSTATE_BLOCKED) return BLK_EH_RESET_TIMER; inflight = bsg_job_get(job); if (inflight && i->f->bsg_timeout) { /* call LLDD to abort the i/o as it has timed out */ err = i->f->bsg_timeout(job); if (err == -EAGAIN) { bsg_job_put(job); return BLK_EH_RESET_TIMER; } else if (err) printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " "abort failed with status %d\n", err); } /* the blk_end_sync_io() doesn't check the error */ if (inflight) blk_mq_end_request(req, BLK_STS_IOERR); return BLK_EH_DONE; } /** * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD * @shost: scsi host rport attached to * @job: bsg job to be processed */ static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job) { struct fc_internal *i = to_fc_internal(shost->transportt); struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; /* check if we really have all the request data needed */ if (job->request_len < cmdlen) { ret = -ENOMSG; goto fail_host_msg; } /* Validate the host command */ switch (bsg_request->msgcode) { case FC_BSG_HST_ADD_RPORT: cmdlen += sizeof(struct fc_bsg_host_add_rport); break; case FC_BSG_HST_DEL_RPORT: cmdlen += sizeof(struct fc_bsg_host_del_rport); break; case FC_BSG_HST_ELS_NOLOGIN: cmdlen += sizeof(struct fc_bsg_host_els); /* there better be a xmt and rcv payloads */ if ((!job->request_payload.payload_len) || (!job->reply_payload.payload_len)) { ret = -EINVAL; goto fail_host_msg; } break; case FC_BSG_HST_CT: cmdlen += sizeof(struct fc_bsg_host_ct); /* there better be xmt and rcv payloads */ if ((!job->request_payload.payload_len) || (!job->reply_payload.payload_len)) { ret = -EINVAL; goto fail_host_msg; } break; case FC_BSG_HST_VENDOR: cmdlen += sizeof(struct fc_bsg_host_vendor); if ((shost->hostt->vendor_id == 0L) || (bsg_request->rqst_data.h_vendor.vendor_id != shost->hostt->vendor_id)) { ret = -ESRCH; goto fail_host_msg; } break; default: ret = -EBADR; goto fail_host_msg; } ret = i->f->bsg_request(job); if (!ret) return 0; fail_host_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); bsg_reply->reply_payload_rcv_len = 0; bsg_reply->result = ret; job->reply_len = sizeof(uint32_t); bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } /* * fc_bsg_goose_queue - restart rport queue in case it was stopped * @rport: rport to be restarted */ static void fc_bsg_goose_queue(struct fc_rport *rport) { struct request_queue *q = rport->rqst_q; if (q) blk_mq_run_hw_queues(q, true); } /** * fc_bsg_rport_dispatch - process rport bsg 
requests and dispatch to LLDD * @shost: scsi host rport attached to * @job: bsg job to be processed */ static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job) { struct fc_internal *i = to_fc_internal(shost->transportt); struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; /* check if we really have all the request data needed */ if (job->request_len < cmdlen) { ret = -ENOMSG; goto fail_rport_msg; } /* Validate the rport command */ switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: cmdlen += sizeof(struct fc_bsg_rport_els); goto check_bidi; case FC_BSG_RPT_CT: cmdlen += sizeof(struct fc_bsg_rport_ct); check_bidi: /* there better be xmt and rcv payloads */ if ((!job->request_payload.payload_len) || (!job->reply_payload.payload_len)) { ret = -EINVAL; goto fail_rport_msg; } break; default: ret = -EBADR; goto fail_rport_msg; } ret = i->f->bsg_request(job); if (!ret) return 0; fail_rport_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); bsg_reply->reply_payload_rcv_len = 0; bsg_reply->result = ret; job->reply_len = sizeof(uint32_t); bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int fc_bsg_dispatch(struct bsg_job *job) { struct Scsi_Host *shost = fc_bsg_to_shost(job); if (scsi_is_fc_rport(job->dev)) return fc_bsg_rport_dispatch(shost, job); else return fc_bsg_host_dispatch(shost, job); } static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport) { if (rport->port_state == FC_PORTSTATE_BLOCKED && !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) return BLK_STS_RESOURCE; if ((rport->port_state != FC_PORTSTATE_ONLINE) && (rport->port_state != FC_PORTSTATE_MARGINAL)) return BLK_STS_IOERR; return BLK_STS_OK; } static int fc_bsg_dispatch_prep(struct bsg_job *job) { struct fc_rport *rport = fc_bsg_to_rport(job); blk_status_t ret; ret = fc_bsg_rport_prep(rport); switch (ret) { case BLK_STS_OK: break; case BLK_STS_RESOURCE: return -EAGAIN; default: return -EIO; } return fc_bsg_dispatch(job); } /** * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests * @shost: shost for fc_host * @fc_host: fc_host adding the structures to */ static int fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) { struct device *dev = &shost->shost_gendev; struct fc_internal *i = to_fc_internal(shost->transportt); struct request_queue *q; char bsg_name[20]; fc_host->rqst_q = NULL; if (!i->f->bsg_request) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "fc_host%d", shost->host_no); q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout, i->f->dd_bsg_size); if (IS_ERR(q)) { dev_err(dev, "fc_host%d: bsg interface failed to initialize - setup queue\n", shost->host_no); return PTR_ERR(q); } __scsi_init_queue(shost, q); blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); fc_host->rqst_q = q; return 0; } /** * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests * @shost: shost that rport is attached to * @rport: rport that the bsg hooks are being attached to */ static int fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) { struct device *dev = &rport->dev; struct fc_internal *i = to_fc_internal(shost->transportt); struct request_queue *q; rport->rqst_q = NULL; if (!i->f->bsg_request) return -ENOTSUPP; q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep, fc_bsg_job_timeout, i->f->dd_bsg_size); if 
(IS_ERR(q)) {
		dev_err(dev, "failed to setup bsg queue\n");
		return PTR_ERR(q);
	}
	__scsi_init_queue(shost, q);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
	rport->rqst_q = q;
	return 0;
}

/**
 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
 * @q:	the request_queue that is to be torn down.
 *
 * Notes:
 *	Before unregistering the queue, empty any requests that are blocked.
 */
static void
fc_bsg_remove(struct request_queue *q)
{
	bsg_remove_queue(q);
}

/* Original Author:  Martin Hicks */
MODULE_AUTHOR("James Smart");
MODULE_DESCRIPTION("FC Transport Attributes");
MODULE_LICENSE("GPL");

module_init(fc_transport_init);
module_exit(fc_transport_exit);
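/*
 * Illustrative sketch (not part of the transport class itself): how a
 * hypothetical FC LLDD might drive the remote-port lifecycle documented
 * above. The function name, channel number, and identifier values are
 * assumptions for illustration only; fc_remote_port_add(),
 * fc_remote_port_rolechg() and fc_remote_port_delete() are the real
 * interfaces defined in this file.
 */
static struct fc_rport * __maybe_unused
example_lldd_port_discovered(struct Scsi_Host *shost, u64 wwnn, u64 wwpn,
			     u32 port_id)
{
	struct fc_rport_identifiers ids = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_id = port_id,
		.roles = FC_PORT_ROLE_UNKNOWN,
	};
	struct fc_rport *rport;

	/*
	 * Register (or re-register) the port; the transport reuses an
	 * existing consistent target id binding when one matches.
	 */
	rport = fc_remote_port_add(shost, 0, &ids);
	if (!rport)
		return NULL;

	/*
	 * Once the LLDD completes PRLI and knows the port is an FCP target,
	 * report the role change so the transport assigns a scsi target id
	 * and schedules a scan.
	 */
	fc_remote_port_rolechg(rport, FC_PORT_ROLE_FCP_TARGET);
	return rport;
}

/*
 * On loss of connectivity the LLDD would simply call
 * fc_remote_port_delete(rport); the transport blocks the target and waits
 * dev_loss_tmo (optionally failing I/O after fast_io_fail_tmo) before
 * tearing it down, as described in the comments above.
 */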
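/*
 * Similarly, a minimal sketch of NPIV vport creation through the interfaces
 * above. The wrapper name and identifier values are assumptions for
 * illustration; fc_vport_create() and fc_vport_terminate() are the real
 * entry points, and FC_PORTTYPE_NPIV is the vport_type typically used for
 * NPIV ports.
 */
static int __maybe_unused
example_lldd_create_npiv_port(struct Scsi_Host *shost, u64 wwnn, u64 wwpn)
{
	struct fc_vport_identifiers vids = {
		.node_name = wwnn,
		.port_name = wwpn,
		.roles = FC_PORT_ROLE_FCP_INITIATOR,
		.vport_type = FC_PORTTYPE_NPIV,
		.disable = false,
	};
	struct fc_vport *vport;

	/* Instantiates the vport via the LLDD's vport_create() callback. */
	vport = fc_vport_create(shost, 0, &vids);
	if (!vport)
		return -ENOMEM;

	/* Teardown calls the LLDD's vport_delete() and removes the sysfs node. */
	return fc_vport_terminate(vport);
}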
linux-master
drivers/scsi/scsi_transport_fc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2009, Microsoft Corporation. * * Authors: * Haiyang Zhang <[email protected]> * Hank Janssen <[email protected]> * K. Y. Srinivasan <[email protected]> */ #include <linux/kernel.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/completion.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/device.h> #include <linux/hyperv.h> #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_devinfo.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport.h> /* * All wire protocol details (storage protocol between the guest and the host) * are consolidated here. * * Begin protocol definitions. */ /* * Version history: * V1 Beta: 0.1 * V1 RC < 2008/1/31: 1.0 * V1 RC > 2008/1/31: 2.0 * Win7: 4.2 * Win8: 5.1 * Win8.1: 6.0 * Win10: 6.2 */ #define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \ (((MINOR_) & 0xff))) #define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0) #define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2) #define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1) #define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0) #define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2) /* channel callback timeout in ms */ #define CALLBACK_TIMEOUT 2 /* Packet structure describing virtual storage requests. */ enum vstor_packet_operation { VSTOR_OPERATION_COMPLETE_IO = 1, VSTOR_OPERATION_REMOVE_DEVICE = 2, VSTOR_OPERATION_EXECUTE_SRB = 3, VSTOR_OPERATION_RESET_LUN = 4, VSTOR_OPERATION_RESET_ADAPTER = 5, VSTOR_OPERATION_RESET_BUS = 6, VSTOR_OPERATION_BEGIN_INITIALIZATION = 7, VSTOR_OPERATION_END_INITIALIZATION = 8, VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9, VSTOR_OPERATION_QUERY_PROPERTIES = 10, VSTOR_OPERATION_ENUMERATE_BUS = 11, VSTOR_OPERATION_FCHBA_DATA = 12, VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13, VSTOR_OPERATION_MAXIMUM = 13 }; /* * WWN packet for Fibre Channel HBA */ struct hv_fc_wwn_packet { u8 primary_active; u8 reserved1[3]; u8 primary_port_wwn[8]; u8 primary_node_wwn[8]; u8 secondary_port_wwn[8]; u8 secondary_node_wwn[8]; }; /* * SRB Flag Bits */ #define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002 #define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004 #define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008 #define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010 #define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020 #define SRB_FLAGS_DATA_IN 0x00000040 #define SRB_FLAGS_DATA_OUT 0x00000080 #define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000 #define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT) #define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100 #define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200 #define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400 /* * This flag indicates the request is part of the workflow for processing a D3. 
*/ #define SRB_FLAGS_D3_PROCESSING 0x00000800 #define SRB_FLAGS_IS_ACTIVE 0x00010000 #define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000 #define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000 #define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000 #define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000 #define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000 #define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000 #define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000 #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 #define SP_UNTAGGED ((unsigned char) ~0) #define SRB_SIMPLE_TAG_REQUEST 0x20 /* * Platform neutral description of a scsi request - * this remains the same across the write regardless of 32/64 bit * note: it's patterned off the SCSI_PASS_THROUGH structure */ #define STORVSC_MAX_CMD_LEN 0x10 /* Sense buffer size is the same for all versions since Windows 8 */ #define STORVSC_SENSE_BUFFER_SIZE 0x14 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14 /* * The storage protocol version is determined during the * initial exchange with the host. It will indicate which * storage functionality is available in the host. */ static int vmstor_proto_version; #define STORVSC_LOGGING_NONE 0 #define STORVSC_LOGGING_ERROR 1 #define STORVSC_LOGGING_WARN 2 static int logging_level = STORVSC_LOGGING_ERROR; module_param(logging_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(logging_level, "Logging level, 0 - None, 1 - Error (default), 2 - Warning."); static inline bool do_logging(int level) { return logging_level >= level; } #define storvsc_log(dev, level, fmt, ...) \ do { \ if (do_logging(level)) \ dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ } while (0) struct vmscsi_request { u16 length; u8 srb_status; u8 scsi_status; u8 port_number; u8 path_id; u8 target_id; u8 lun; u8 cdb_length; u8 sense_info_length; u8 data_in; u8 reserved; u32 data_transfer_length; union { u8 cdb[STORVSC_MAX_CMD_LEN]; u8 sense_data[STORVSC_SENSE_BUFFER_SIZE]; u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING]; }; /* * The following was added in win8. */ u16 reserve; u8 queue_tag; u8 queue_action; u32 srb_flags; u32 time_out_value; u32 queue_sort_ey; } __attribute((packed)); /* * The list of windows version in order of preference. */ static const int protocol_version[] = { VMSTOR_PROTO_VERSION_WIN10, VMSTOR_PROTO_VERSION_WIN8_1, VMSTOR_PROTO_VERSION_WIN8, }; /* * This structure is sent during the initialization phase to get the different * properties of the channel. */ #define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1 struct vmstorage_channel_properties { u32 reserved; u16 max_channel_cnt; u16 reserved1; u32 flags; u32 max_transfer_bytes; u64 reserved2; } __packed; /* This structure is sent during the storage protocol negotiations. */ struct vmstorage_protocol_version { /* Major (MSW) and minor (LSW) version numbers. */ u16 major_minor; /* * Revision number is auto-incremented whenever this file is changed * (See FILL_VMSTOR_REVISION macro above). Mismatch does not * definitely indicate incompatibility--but it does indicate mismatched * builds. * This is only used on the windows side. Just set it to 0. */ u16 revision; } __packed; /* Channel Property Flags */ #define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1 #define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2 struct vstor_packet { /* Requested operation type */ enum vstor_packet_operation operation; /* Flags - see below for values */ u32 flags; /* Status of the request returned from the server side. 
*/ u32 status; /* Data payload area */ union { /* * Structure used to forward SCSI commands from the * client to the server. */ struct vmscsi_request vm_srb; /* Structure used to query channel properties. */ struct vmstorage_channel_properties storage_channel_properties; /* Used during version negotiations. */ struct vmstorage_protocol_version version; /* Fibre channel address packet */ struct hv_fc_wwn_packet wwn_packet; /* Number of sub-channels to create */ u16 sub_channel_count; /* This will be the maximum of the union members */ u8 buffer[0x34]; }; } __packed; /* * Packet Flags: * * This flag indicates that the server should send back a completion for this * packet. */ #define REQUEST_COMPLETION_FLAG 0x1 /* Matches Windows-end */ enum storvsc_request_type { WRITE_TYPE = 0, READ_TYPE, UNKNOWN_TYPE, }; /* * SRB status codes and masks. In the 8-bit field, the two high order bits * are flags, while the remaining 6 bits are an integer status code. The * definitions here include only the subset of the integer status codes that * are tested for in this driver. */ #define SRB_STATUS_AUTOSENSE_VALID 0x80 #define SRB_STATUS_QUEUE_FROZEN 0x40 /* SRB status integer codes */ #define SRB_STATUS_SUCCESS 0x01 #define SRB_STATUS_ABORTED 0x02 #define SRB_STATUS_ERROR 0x04 #define SRB_STATUS_INVALID_REQUEST 0x06 #define SRB_STATUS_TIMEOUT 0x09 #define SRB_STATUS_SELECTION_TIMEOUT 0x0A #define SRB_STATUS_BUS_RESET 0x0E #define SRB_STATUS_DATA_OVERRUN 0x12 #define SRB_STATUS_INVALID_LUN 0x20 #define SRB_STATUS_INTERNAL_ERROR 0x30 #define SRB_STATUS(status) \ (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) /* * This is the end of Protocol specific defines. */ static int storvsc_ringbuffer_size = (128 * 1024); static u32 max_outstanding_req_per_channel; static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth); static int storvsc_vcpus_per_sub_channel = 4; static unsigned int storvsc_max_hw_queues; module_param(storvsc_ringbuffer_size, int, S_IRUGO); MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); module_param(storvsc_max_hw_queues, uint, 0644); MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues"); module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); static int ring_avail_percent_lowater = 10; module_param(ring_avail_percent_lowater, int, S_IRUGO); MODULE_PARM_DESC(ring_avail_percent_lowater, "Select a channel if available ring size > this in percent"); /* * Timeout in seconds for all devices managed by this driver. */ static int storvsc_timeout = 180; #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) static struct scsi_transport_template *fc_transport_template; #endif static struct scsi_host_template scsi_driver; static void storvsc_on_channel_callback(void *context); #define STORVSC_MAX_LUNS_PER_TARGET 255 #define STORVSC_MAX_TARGETS 2 #define STORVSC_MAX_CHANNELS 8 #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 #define STORVSC_FC_MAX_TARGETS 128 #define STORVSC_FC_MAX_CHANNELS 8 #define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024)) #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 #define STORVSC_IDE_MAX_TARGETS 1 #define STORVSC_IDE_MAX_CHANNELS 1 /* * Upper bound on the size of a storvsc packet. 
*/ #define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\ sizeof(struct vstor_packet)) struct storvsc_cmd_request { struct scsi_cmnd *cmd; struct hv_device *device; /* Synchronize the request/response if needed */ struct completion wait_event; struct vmbus_channel_packet_multipage_buffer mpb; struct vmbus_packet_mpb_array *payload; u32 payload_sz; struct vstor_packet vstor_packet; }; /* A storvsc device is a device object that contains a vmbus channel */ struct storvsc_device { struct hv_device *device; bool destroy; bool drain_notify; atomic_t num_outstanding_req; struct Scsi_Host *host; wait_queue_head_t waiting_to_drain; /* * Each unique Port/Path/Target represents 1 channel ie scsi * controller. In reality, the pathid, targetid is always 0 * and the port is set by us */ unsigned int port_number; unsigned char path_id; unsigned char target_id; /* * Max I/O, the device can support. */ u32 max_transfer_bytes; /* * Number of sub-channels we will open. */ u16 num_sc; struct vmbus_channel **stor_chns; /* * Mask of CPUs bound to subchannels. */ struct cpumask alloced_cpus; /* * Serializes modifications of stor_chns[] from storvsc_do_io() * and storvsc_change_target_cpu(). */ spinlock_t lock; /* Used for vsc/vsp channel reset process */ struct storvsc_cmd_request init_request; struct storvsc_cmd_request reset_request; /* * Currently active port and node names for FC devices. */ u64 node_name; u64 port_name; #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) struct fc_rport *rport; #endif }; struct hv_host_device { struct hv_device *dev; unsigned int port; unsigned char path; unsigned char target; struct workqueue_struct *handle_error_wq; struct work_struct host_scan_work; struct Scsi_Host *host; }; struct storvsc_scan_work { struct work_struct work; struct Scsi_Host *host; u8 lun; u8 tgt_id; }; static void storvsc_device_scan(struct work_struct *work) { struct storvsc_scan_work *wrk; struct scsi_device *sdev; wrk = container_of(work, struct storvsc_scan_work, work); sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); if (!sdev) goto done; scsi_rescan_device(sdev); scsi_device_put(sdev); done: kfree(wrk); } static void storvsc_host_scan(struct work_struct *work) { struct Scsi_Host *host; struct scsi_device *sdev; struct hv_host_device *host_device = container_of(work, struct hv_host_device, host_scan_work); host = host_device->host; /* * Before scanning the host, first check to see if any of the * currently known devices have been hot removed. We issue a * "unit ready" command against all currently known devices. * This I/O will result in an error for devices that have been * removed. As part of handling the I/O error, we remove the device. * * When a LUN is added or removed, the host sends us a signal to * scan the host. Thus we are forced to discover the LUNs that * may have been removed this way. */ mutex_lock(&host->scan_mutex); shost_for_each_device(sdev, host) scsi_test_unit_ready(sdev, 1, 1, NULL); mutex_unlock(&host->scan_mutex); /* * Now scan the host to discover LUNs that may have been added. 
*/ scsi_scan_host(host); } static void storvsc_remove_lun(struct work_struct *work) { struct storvsc_scan_work *wrk; struct scsi_device *sdev; wrk = container_of(work, struct storvsc_scan_work, work); if (!scsi_host_get(wrk->host)) goto done; sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } scsi_host_put(wrk->host); done: kfree(wrk); } /* * We can get incoming messages from the host that are not in response to * messages that we have sent out. An example of this would be messages * received by the guest to notify dynamic addition/removal of LUNs. To * deal with potential race conditions where the driver may be in the * midst of being unloaded when we might receive an unsolicited message * from the host, we have implemented a mechanism to gurantee sequential * consistency: * * 1) Once the device is marked as being destroyed, we will fail all * outgoing messages. * 2) We permit incoming messages when the device is being destroyed, * only to properly account for messages already sent out. */ static inline struct storvsc_device *get_out_stor_device( struct hv_device *device) { struct storvsc_device *stor_device; stor_device = hv_get_drvdata(device); if (stor_device && stor_device->destroy) stor_device = NULL; return stor_device; } static inline void storvsc_wait_to_drain(struct storvsc_device *dev) { dev->drain_notify = true; wait_event(dev->waiting_to_drain, atomic_read(&dev->num_outstanding_req) == 0); dev->drain_notify = false; } static inline struct storvsc_device *get_in_stor_device( struct hv_device *device) { struct storvsc_device *stor_device; stor_device = hv_get_drvdata(device); if (!stor_device) goto get_in_err; /* * If the device is being destroyed; allow incoming * traffic only to cleanup outstanding requests. */ if (stor_device->destroy && (atomic_read(&stor_device->num_outstanding_req) == 0)) stor_device = NULL; get_in_err: return stor_device; } static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old, u32 new) { struct storvsc_device *stor_device; struct vmbus_channel *cur_chn; bool old_is_alloced = false; struct hv_device *device; unsigned long flags; int cpu; device = channel->primary_channel ? channel->primary_channel->device_obj : channel->device_obj; stor_device = get_out_stor_device(device); if (!stor_device) return; /* See storvsc_do_io() -> get_og_chn(). */ spin_lock_irqsave(&stor_device->lock, flags); /* * Determines if the storvsc device has other channels assigned to * the "old" CPU to update the alloced_cpus mask and the stor_chns * array. */ if (device->channel != channel && device->channel->target_cpu == old) { cur_chn = device->channel; old_is_alloced = true; goto old_is_alloced; } list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) { if (cur_chn == channel) continue; if (cur_chn->target_cpu == old) { old_is_alloced = true; goto old_is_alloced; } } old_is_alloced: if (old_is_alloced) WRITE_ONCE(stor_device->stor_chns[old], cur_chn); else cpumask_clear_cpu(old, &stor_device->alloced_cpus); /* "Flush" the stor_chns array. 
*/ for_each_possible_cpu(cpu) { if (stor_device->stor_chns[cpu] && !cpumask_test_cpu( cpu, &stor_device->alloced_cpus)) WRITE_ONCE(stor_device->stor_chns[cpu], NULL); } WRITE_ONCE(stor_device->stor_chns[new], channel); cpumask_set_cpu(new, &stor_device->alloced_cpus); spin_unlock_irqrestore(&stor_device->lock, flags); } static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr) { struct storvsc_cmd_request *request = (struct storvsc_cmd_request *)(unsigned long)rqst_addr; if (rqst_addr == VMBUS_RQST_INIT) return VMBUS_RQST_INIT; if (rqst_addr == VMBUS_RQST_RESET) return VMBUS_RQST_RESET; /* * Cannot return an ID of 0, which is reserved for an unsolicited * message from Hyper-V. */ return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1; } static void handle_sc_creation(struct vmbus_channel *new_sc) { struct hv_device *device = new_sc->primary_channel->device_obj; struct device *dev = &device->device; struct storvsc_device *stor_device; struct vmstorage_channel_properties props; int ret; stor_device = get_out_stor_device(device); if (!stor_device) return; memset(&props, 0, sizeof(struct vmstorage_channel_properties)); new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE; new_sc->next_request_id_callback = storvsc_next_request_id; ret = vmbus_open(new_sc, storvsc_ringbuffer_size, storvsc_ringbuffer_size, (void *)&props, sizeof(struct vmstorage_channel_properties), storvsc_on_channel_callback, new_sc); /* In case vmbus_open() fails, we don't use the sub-channel. */ if (ret != 0) { dev_err(dev, "Failed to open sub-channel: err=%d\n", ret); return; } new_sc->change_target_cpu_callback = storvsc_change_target_cpu; /* Add the sub-channel to the array of available channels. */ stor_device->stor_chns[new_sc->target_cpu] = new_sc; cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus); } static void handle_multichannel_storage(struct hv_device *device, int max_chns) { struct device *dev = &device->device; struct storvsc_device *stor_device; int num_sc; struct storvsc_cmd_request *request; struct vstor_packet *vstor_packet; int ret, t; /* * If the number of CPUs is artificially restricted, such as * with maxcpus=1 on the kernel boot line, Hyper-V could offer * sub-channels >= the number of CPUs. These sub-channels * should not be created. The primary channel is already created * and assigned to one CPU, so check against # CPUs - 1. */ num_sc = min((int)(num_online_cpus() - 1), max_chns); if (!num_sc) return; stor_device = get_out_stor_device(device); if (!stor_device) return; stor_device->num_sc = num_sc; request = &stor_device->init_request; vstor_packet = &request->vstor_packet; /* * Establish a handler for dealing with subchannels. */ vmbus_set_sc_create_callback(device->channel, handle_sc_creation); /* * Request the host to create sub-channels. 
*/ memset(request, 0, sizeof(struct storvsc_cmd_request)); init_completion(&request->wait_event); vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->sub_channel_count = num_sc; ret = vmbus_sendpacket(device->channel, vstor_packet, sizeof(struct vstor_packet), VMBUS_RQST_INIT, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { dev_err(dev, "Failed to create sub-channel: err=%d\n", ret); return; } t = wait_for_completion_timeout(&request->wait_event, 10*HZ); if (t == 0) { dev_err(dev, "Failed to create sub-channel: timed out\n"); return; } if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || vstor_packet->status != 0) { dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n", vstor_packet->operation, vstor_packet->status); return; } /* * We need to do nothing here, because vmbus_process_offer() * invokes channel->sc_creation_callback, which will open and use * the sub-channel(s). */ } static void cache_wwn(struct storvsc_device *stor_device, struct vstor_packet *vstor_packet) { /* * Cache the currently active port and node ww names. */ if (vstor_packet->wwn_packet.primary_active) { stor_device->node_name = wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn); stor_device->port_name = wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn); } else { stor_device->node_name = wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn); stor_device->port_name = wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn); } } static int storvsc_execute_vstor_op(struct hv_device *device, struct storvsc_cmd_request *request, bool status_check) { struct storvsc_device *stor_device; struct vstor_packet *vstor_packet; int ret, t; stor_device = get_out_stor_device(device); if (!stor_device) return -ENODEV; vstor_packet = &request->vstor_packet; init_completion(&request->wait_event); vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = vmbus_sendpacket(device->channel, vstor_packet, sizeof(struct vstor_packet), VMBUS_RQST_INIT, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) return ret; t = wait_for_completion_timeout(&request->wait_event, 5*HZ); if (t == 0) return -ETIMEDOUT; if (!status_check) return ret; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || vstor_packet->status != 0) return -EINVAL; return ret; } static int storvsc_channel_init(struct hv_device *device, bool is_fc) { struct storvsc_device *stor_device; struct storvsc_cmd_request *request; struct vstor_packet *vstor_packet; int ret, i; int max_chns; bool process_sub_channels = false; stor_device = get_out_stor_device(device); if (!stor_device) return -ENODEV; request = &stor_device->init_request; vstor_packet = &request->vstor_packet; /* * Now, initiate the vsc/vsp initialization protocol on the open * channel */ memset(request, 0, sizeof(struct storvsc_cmd_request)); vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION; ret = storvsc_execute_vstor_op(device, request, true); if (ret) return ret; /* * Query host supported protocol version. */ for (i = 0; i < ARRAY_SIZE(protocol_version); i++) { /* reuse the packet for version range supported */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION; vstor_packet->version.major_minor = protocol_version[i]; /* * The revision number is only used in Windows; set it to 0. 
*/ vstor_packet->version.revision = 0; ret = storvsc_execute_vstor_op(device, request, false); if (ret != 0) return ret; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) return -EINVAL; if (vstor_packet->status == 0) { vmstor_proto_version = protocol_version[i]; break; } } if (vstor_packet->status != 0) { dev_err(&device->device, "Obsolete Hyper-V version\n"); return -EINVAL; } memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES; ret = storvsc_execute_vstor_op(device, request, true); if (ret != 0) return ret; /* * Check to see if multi-channel support is there. * Hosts that implement protocol version of 5.1 and above * support multi-channel. */ max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; /* * Allocate state to manage the sub-channels. * We allocate an array based on the numbers of possible CPUs * (Hyper-V does not support cpu online/offline). * This Array will be sparseley populated with unique * channels - primary + sub-channels. * We will however populate all the slots to evenly distribute * the load. */ stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *), GFP_KERNEL); if (stor_device->stor_chns == NULL) return -ENOMEM; device->channel->change_target_cpu_callback = storvsc_change_target_cpu; stor_device->stor_chns[device->channel->target_cpu] = device->channel; cpumask_set_cpu(device->channel->target_cpu, &stor_device->alloced_cpus); if (vstor_packet->storage_channel_properties.flags & STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) process_sub_channels = true; stor_device->max_transfer_bytes = vstor_packet->storage_channel_properties.max_transfer_bytes; if (!is_fc) goto done; /* * For FC devices retrieve FC HBA data. */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA; ret = storvsc_execute_vstor_op(device, request, true); if (ret != 0) return ret; /* * Cache the currently active port and node ww names. */ cache_wwn(stor_device, vstor_packet); done: memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; ret = storvsc_execute_vstor_op(device, request, true); if (ret != 0) return ret; if (process_sub_channels) handle_multichannel_storage(device, max_chns); return ret; } static void storvsc_handle_error(struct vmscsi_request *vm_srb, struct scsi_cmnd *scmnd, struct Scsi_Host *host, u8 asc, u8 ascq) { struct storvsc_scan_work *wrk; void (*process_err_fn)(struct work_struct *work); struct hv_host_device *host_dev = shost_priv(host); switch (SRB_STATUS(vm_srb->srb_status)) { case SRB_STATUS_ERROR: case SRB_STATUS_ABORTED: case SRB_STATUS_INVALID_REQUEST: case SRB_STATUS_INTERNAL_ERROR: case SRB_STATUS_TIMEOUT: case SRB_STATUS_SELECTION_TIMEOUT: case SRB_STATUS_BUS_RESET: case SRB_STATUS_DATA_OVERRUN: if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) { /* Check for capacity change */ if ((asc == 0x2a) && (ascq == 0x9)) { process_err_fn = storvsc_device_scan; /* Retry the I/O that triggered this. */ set_host_byte(scmnd, DID_REQUEUE); goto do_work; } /* * Check for "Operating parameters have changed" * due to Hyper-V changing the VHD/VHDX BlockSize * when adding/removing a differencing disk. This * causes discard_granularity to change, so do a * rescan to pick up the new granularity. We don't * want scsi_report_sense() to output a message * that a sysadmin wouldn't know what to do with. 
*/ if ((asc == 0x3f) && (ascq != 0x03) && (ascq != 0x0e)) { process_err_fn = storvsc_device_scan; set_host_byte(scmnd, DID_REQUEUE); goto do_work; } /* * Otherwise, let upper layer deal with the * error when sense message is present */ return; } /* * If there is an error; offline the device since all * error recovery strategies would have already been * deployed on the host side. However, if the command * were a pass-through command deal with it appropriately. */ switch (scmnd->cmnd[0]) { case ATA_16: case ATA_12: set_host_byte(scmnd, DID_PASSTHROUGH); break; /* * On some Hyper-V hosts TEST_UNIT_READY command can * return SRB_STATUS_ERROR. Let the upper level code * deal with it based on the sense information. */ case TEST_UNIT_READY: break; default: set_host_byte(scmnd, DID_ERROR); } return; case SRB_STATUS_INVALID_LUN: set_host_byte(scmnd, DID_NO_CONNECT); process_err_fn = storvsc_remove_lun; goto do_work; } return; do_work: /* * We need to schedule work to process this error; schedule it. */ wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); if (!wrk) { set_host_byte(scmnd, DID_BAD_TARGET); return; } wrk->host = host; wrk->lun = vm_srb->lun; wrk->tgt_id = vm_srb->target_id; INIT_WORK(&wrk->work, process_err_fn); queue_work(host_dev->handle_error_wq, &wrk->work); } static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, struct storvsc_device *stor_dev) { struct scsi_cmnd *scmnd = cmd_request->cmd; struct scsi_sense_hdr sense_hdr; struct vmscsi_request *vm_srb; u32 data_transfer_length; struct Scsi_Host *host; u32 payload_sz = cmd_request->payload_sz; void *payload = cmd_request->payload; bool sense_ok; host = stor_dev->host; vm_srb = &cmd_request->vstor_packet.vm_srb; data_transfer_length = vm_srb->data_transfer_length; scmnd->result = vm_srb->scsi_status; if (scmnd->result) { sense_ok = scsi_normalize_sense(scmnd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (sense_ok && do_logging(STORVSC_LOGGING_WARN)) scsi_print_sense_hdr(scmnd->device, "storvsc", &sense_hdr); } if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, sense_hdr.ascq); /* * The Windows driver set data_transfer_length on * SRB_STATUS_DATA_OVERRUN. On other errors, this value * is untouched. In these cases we set it to 0. */ if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) data_transfer_length = 0; } /* Validate data_transfer_length (from Hyper-V) */ if (data_transfer_length > cmd_request->payload->range.len) data_transfer_length = cmd_request->payload->range.len; scsi_set_resid(scmnd, cmd_request->payload->range.len - data_transfer_length); scsi_done(scmnd); if (payload_sz > sizeof(struct vmbus_channel_packet_multipage_buffer)) kfree(payload); } static void storvsc_on_io_completion(struct storvsc_device *stor_device, struct vstor_packet *vstor_packet, struct storvsc_cmd_request *request) { struct vstor_packet *stor_pkt; struct hv_device *device = stor_device->device; stor_pkt = &request->vstor_packet; /* * The current SCSI handling on the host side does * not correctly handle: * INQUIRY command with page code parameter set to 0x80 * MODE_SENSE command with cmd[2] == 0x1c * * Setup srb and scsi status so this won't be fatal. * We do this so we can distinguish truly fatal failues * (srb status == 0x4) and off-line the device in that case. 
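	 *
	 * Illustrative CDB decode (standard SPC field layout, given as
	 * background rather than taken from this driver):
	 *
	 *   INQUIRY (0x12):     cdb[1] bit 0 = EVPD, cdb[2] = VPD page;
	 *                       page 0x80 is the Unit Serial Number page.
	 *   MODE_SENSE (0x1a):  cdb[2] bits 5:0 = page code; 0x1c is the
	 *                       Informational Exceptions Control page.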
*/ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { vstor_packet->vm_srb.scsi_status = 0; vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; } /* Copy over the status...etc */ stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status; stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status; /* * Copy over the sense_info_length, but limit to the known max * size if Hyper-V returns a bad value. */ stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE, vstor_packet->vm_srb.sense_info_length); if (vstor_packet->vm_srb.scsi_status != 0 || vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) { /* * Log TEST_UNIT_READY errors only as warnings. Hyper-V can * return errors when detecting devices using TEST_UNIT_READY, * and logging these as errors produces unhelpful noise. */ int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; storvsc_log(device, loglevel, "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", scsi_cmd_to_rq(request->cmd)->tag, stor_pkt->vm_srb.cdb[0], vstor_packet->vm_srb.scsi_status, vstor_packet->vm_srb.srb_status, vstor_packet->status); } if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION && (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID)) memcpy(request->cmd->sense_buffer, vstor_packet->vm_srb.sense_data, stor_pkt->vm_srb.sense_info_length); stor_pkt->vm_srb.data_transfer_length = vstor_packet->vm_srb.data_transfer_length; storvsc_command_completion(request, stor_device); if (atomic_dec_and_test(&stor_device->num_outstanding_req) && stor_device->drain_notify) wake_up(&stor_device->waiting_to_drain); } static void storvsc_on_receive(struct storvsc_device *stor_device, struct vstor_packet *vstor_packet, struct storvsc_cmd_request *request) { struct hv_host_device *host_dev; switch (vstor_packet->operation) { case VSTOR_OPERATION_COMPLETE_IO: storvsc_on_io_completion(stor_device, vstor_packet, request); break; case VSTOR_OPERATION_REMOVE_DEVICE: case VSTOR_OPERATION_ENUMERATE_BUS: host_dev = shost_priv(stor_device->host); queue_work( host_dev->handle_error_wq, &host_dev->host_scan_work); break; case VSTOR_OPERATION_FCHBA_DATA: cache_wwn(stor_device, vstor_packet); #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) fc_host_node_name(stor_device->host) = stor_device->node_name; fc_host_port_name(stor_device->host) = stor_device->port_name; #endif break; default: break; } } static void storvsc_on_channel_callback(void *context) { struct vmbus_channel *channel = (struct vmbus_channel *)context; const struct vmpacket_descriptor *desc; struct hv_device *device; struct storvsc_device *stor_device; struct Scsi_Host *shost; unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT); if (channel->primary_channel != NULL) device = channel->primary_channel->device_obj; else device = channel->device_obj; stor_device = get_in_stor_device(device); if (!stor_device) return; shost = stor_device->host; foreach_vmbus_pkt(desc, channel) { struct vstor_packet *packet = hv_pkt_data(desc); struct storvsc_cmd_request *request = NULL; u32 pktlen = hv_pkt_datalen(desc); u64 rqst_id = desc->trans_id; u32 minlen = rqst_id ? 
sizeof(struct vstor_packet) : sizeof(enum vstor_packet_operation); if (unlikely(time_after(jiffies, time_limit))) { hv_pkt_iter_close(channel); return; } if (pktlen < minlen) { dev_err(&device->device, "Invalid pkt: id=%llu, len=%u, minlen=%u\n", rqst_id, pktlen, minlen); continue; } if (rqst_id == VMBUS_RQST_INIT) { request = &stor_device->init_request; } else if (rqst_id == VMBUS_RQST_RESET) { request = &stor_device->reset_request; } else { /* Hyper-V can send an unsolicited message with ID of 0 */ if (rqst_id == 0) { /* * storvsc_on_receive() looks at the vstor_packet in the message * from the ring buffer. * * - If the operation in the vstor_packet is COMPLETE_IO, then * we call storvsc_on_io_completion(), and dereference the * guest memory address. Make sure we don't call * storvsc_on_io_completion() with a guest memory address * that is zero if Hyper-V were to construct and send such * a bogus packet. * * - If the operation in the vstor_packet is FCHBA_DATA, then * we call cache_wwn(), and access the data payload area of * the packet (wwn_packet); however, there is no guarantee * that the packet is big enough to contain such area. * Future-proof the code by rejecting such a bogus packet. */ if (packet->operation == VSTOR_OPERATION_COMPLETE_IO || packet->operation == VSTOR_OPERATION_FCHBA_DATA) { dev_err(&device->device, "Invalid packet with ID of 0\n"); continue; } } else { struct scsi_cmnd *scmnd; /* Transaction 'rqst_id' corresponds to tag 'rqst_id - 1' */ scmnd = scsi_host_find_tag(shost, rqst_id - 1); if (scmnd == NULL) { dev_err(&device->device, "Incorrect transaction ID\n"); continue; } request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd); scsi_dma_unmap(scmnd); } storvsc_on_receive(stor_device, packet, request); continue; } memcpy(&request->vstor_packet, packet, sizeof(struct vstor_packet)); complete(&request->wait_event); } } static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, bool is_fc) { struct vmstorage_channel_properties props; int ret; memset(&props, 0, sizeof(struct vmstorage_channel_properties)); device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE; device->channel->next_request_id_callback = storvsc_next_request_id; ret = vmbus_open(device->channel, ring_size, ring_size, (void *)&props, sizeof(struct vmstorage_channel_properties), storvsc_on_channel_callback, device->channel); if (ret != 0) return ret; ret = storvsc_channel_init(device, is_fc); return ret; } static int storvsc_dev_remove(struct hv_device *device) { struct storvsc_device *stor_device; stor_device = hv_get_drvdata(device); stor_device->destroy = true; /* Make sure flag is set before waiting */ wmb(); /* * At this point, all outbound traffic should be disable. We * only allow inbound traffic (responses) to proceed so that * outstanding requests can be completed. */ storvsc_wait_to_drain(stor_device); /* * Since we have already drained, we don't need to busy wait * as was done in final_release_stor_device() * Note that we cannot set the ext pointer to NULL until * we have drained - to drain the outgoing packets, we need to * allow incoming packets. 
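	 *
	 * A minimal sketch of the drain handshake, assuming the helper is
	 * the usual wait_event() pairing with the wake_up() performed in
	 * storvsc_on_io_completion(); the real storvsc_wait_to_drain() is
	 * defined earlier in this file:
	 *
	 *   stor_device->drain_notify = true;
	 *   wait_event(stor_device->waiting_to_drain,
	 *              atomic_read(&stor_device->num_outstanding_req) == 0);
	 *   stor_device->drain_notify = false;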
*/ hv_set_drvdata(device, NULL); /* Close the channel */ vmbus_close(device->channel); kfree(stor_device->stor_chns); kfree(stor_device); return 0; } static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device, u16 q_num) { u16 slot = 0; u16 hash_qnum; const struct cpumask *node_mask; int num_channels, tgt_cpu; if (stor_device->num_sc == 0) { stor_device->stor_chns[q_num] = stor_device->device->channel; return stor_device->device->channel; } /* * Our channel array is sparsley populated and we * initiated I/O on a processor/hw-q that does not * currently have a designated channel. Fix this. * The strategy is simple: * I. Ensure NUMA locality * II. Distribute evenly (best effort) */ node_mask = cpumask_of_node(cpu_to_node(q_num)); num_channels = 0; for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { if (cpumask_test_cpu(tgt_cpu, node_mask)) num_channels++; } if (num_channels == 0) { stor_device->stor_chns[q_num] = stor_device->device->channel; return stor_device->device->channel; } hash_qnum = q_num; while (hash_qnum >= num_channels) hash_qnum -= num_channels; for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { if (!cpumask_test_cpu(tgt_cpu, node_mask)) continue; if (slot == hash_qnum) break; slot++; } stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu]; return stor_device->stor_chns[q_num]; } static int storvsc_do_io(struct hv_device *device, struct storvsc_cmd_request *request, u16 q_num) { struct storvsc_device *stor_device; struct vstor_packet *vstor_packet; struct vmbus_channel *outgoing_channel, *channel; unsigned long flags; int ret = 0; const struct cpumask *node_mask; int tgt_cpu; vstor_packet = &request->vstor_packet; stor_device = get_out_stor_device(device); if (!stor_device) return -ENODEV; request->device = device; /* * Select an appropriate channel to send the request out. */ /* See storvsc_change_target_cpu(). */ outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]); if (outgoing_channel != NULL) { if (outgoing_channel->target_cpu == q_num) { /* * Ideally, we want to pick a different channel if * available on the same NUMA node. */ node_mask = cpumask_of_node(cpu_to_node(q_num)); for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus, q_num + 1) { if (!cpumask_test_cpu(tgt_cpu, node_mask)) continue; if (tgt_cpu == q_num) continue; channel = READ_ONCE( stor_device->stor_chns[tgt_cpu]); if (channel == NULL) continue; if (hv_get_avail_to_write_percent( &channel->outbound) > ring_avail_percent_lowater) { outgoing_channel = channel; goto found_channel; } } /* * All the other channels on the same NUMA node are * busy. Try to use the channel on the current CPU */ if (hv_get_avail_to_write_percent( &outgoing_channel->outbound) > ring_avail_percent_lowater) goto found_channel; /* * If we reach here, all the channels on the current * NUMA node are busy. 
Try to find a channel in * other NUMA nodes */ for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { if (cpumask_test_cpu(tgt_cpu, node_mask)) continue; channel = READ_ONCE( stor_device->stor_chns[tgt_cpu]); if (channel == NULL) continue; if (hv_get_avail_to_write_percent( &channel->outbound) > ring_avail_percent_lowater) { outgoing_channel = channel; goto found_channel; } } } } else { spin_lock_irqsave(&stor_device->lock, flags); outgoing_channel = stor_device->stor_chns[q_num]; if (outgoing_channel != NULL) { spin_unlock_irqrestore(&stor_device->lock, flags); goto found_channel; } outgoing_channel = get_og_chn(stor_device, q_num); spin_unlock_irqrestore(&stor_device->lock, flags); } found_channel: vstor_packet->flags |= REQUEST_COMPLETION_FLAG; vstor_packet->vm_srb.length = sizeof(struct vmscsi_request); vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE; vstor_packet->vm_srb.data_transfer_length = request->payload->range.len; vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB; if (request->payload->range.len) { ret = vmbus_sendpacket_mpb_desc(outgoing_channel, request->payload, request->payload_sz, vstor_packet, sizeof(struct vstor_packet), (unsigned long)request); } else { ret = vmbus_sendpacket(outgoing_channel, vstor_packet, sizeof(struct vstor_packet), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); } if (ret != 0) return ret; atomic_inc(&stor_device->num_outstanding_req); return ret; } static int storvsc_device_alloc(struct scsi_device *sdevice) { /* * Set blist flag to permit the reading of the VPD pages even when * the target may claim SPC-2 compliance. MSFT targets currently * claim SPC-2 compliance while they implement post SPC-2 features. * With this flag we can correctly handle WRITE_SAME_16 issues. * * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but * still supports REPORT LUN. */ sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES; return 0; } static int storvsc_device_configure(struct scsi_device *sdevice) { blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */ sdevice->no_report_opcodes = 1; sdevice->no_write_same = 1; /* * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 * if the device is a MSFT virtual device. If the host is * WIN10 or newer, allow write_same. */ if (!strncmp(sdevice->vendor, "Msft", 4)) { switch (vmstor_proto_version) { case VMSTOR_PROTO_VERSION_WIN8: case VMSTOR_PROTO_VERSION_WIN8_1: sdevice->scsi_level = SCSI_SPC_3; break; } if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10) sdevice->no_write_same = 0; } return 0; } static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, sector_t capacity, int *info) { sector_t nsect = capacity; sector_t cylinders = nsect; int heads, sectors_pt; /* * We are making up these values; let us keep it simple. 
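	 *
	 * Worked example (illustrative numbers only): a 10 GiB disk has
	 * 20971520 512-byte sectors; with 255 heads and 63 sectors per
	 * track, one cylinder covers 255 * 63 = 16065 sectors, so
	 * sector_div() below yields 20971520 / 16065 = 1305 cylinders.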
 */
	heads = 0xff;
	sectors_pt = 0x3f;      /* Sectors per track */

	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}

static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct hv_device *device = host_dev->dev;
	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return FAILED;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;
	memset(vstor_packet, 0, sizeof(struct vstor_packet));

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       VMBUS_RQST_RESET,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0)
		return TIMEOUT_ERROR;

	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 * There is a potential race here where the host may be in
	 * the process of responding when we return from here.
	 * Just wait for all in-transit packets to be accounted for
	 * before we return from here.
	 */
	storvsc_wait_to_drain(stor_device);

	return SUCCESS;
}

/*
 * The host guarantees to respond to each command, although I/O latencies might
 * be unbounded on Azure.  Reset the timer unconditionally to give the host a
 * chance to perform EH.
 */
static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}

static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
	bool allowed = true;
	u8 scsi_op = scmnd->cmnd[0];

	switch (scsi_op) {
	/* the host does not handle WRITE_SAME, log accidental usage */
	case WRITE_SAME:
	/*
	 * smartd sends this command and the host does not handle
	 * this. So, don't send it.
	 */
	case SET_WINDOW:
		set_host_byte(scmnd, DID_ERROR);
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}

static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev = shost_priv(host);
	struct hv_device *dev = host_dev->dev;
	struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
	struct scatterlist *sgl;
	struct vmscsi_request *vm_srb;
	struct vmbus_packet_mpb_array *payload;
	u32 payload_sz;
	u32 length;

	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
		/*
		 * On legacy hosts filter unimplemented commands.
		 * Future hosts are expected to correctly handle
		 * unsupported commands. Furthermore, it is
		 * possible that some of the currently
		 * unsupported commands may be supported in
		 * future versions of the host.
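		 *
		 * With the storvsc_scsi_cmd_ok() filter above, this currently
		 * means WRITE_SAME and SET_WINDOW are completed locally with
		 * DID_ERROR instead of being sent to a WIN8-or-older host.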
*/ if (!storvsc_scsi_cmd_ok(scmnd)) { scsi_done(scmnd); return 0; } } /* Setup the cmd request */ cmd_request->cmd = scmnd; memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet)); vm_srb = &cmd_request->vstor_packet.vm_srb; vm_srb->time_out_value = 60; vm_srb->srb_flags |= SRB_FLAGS_DISABLE_SYNCH_TRANSFER; if (scmnd->device->tagged_supported) { vm_srb->srb_flags |= (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE); vm_srb->queue_tag = SP_UNTAGGED; vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST; } /* Build the SRB */ switch (scmnd->sc_data_direction) { case DMA_TO_DEVICE: vm_srb->data_in = WRITE_TYPE; vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT; break; case DMA_FROM_DEVICE: vm_srb->data_in = READ_TYPE; vm_srb->srb_flags |= SRB_FLAGS_DATA_IN; break; case DMA_NONE: vm_srb->data_in = UNKNOWN_TYPE; vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; break; default: /* * This is DMA_BIDIRECTIONAL or something else we are never * supposed to see here. */ WARN(1, "Unexpected data direction: %d\n", scmnd->sc_data_direction); return -EINVAL; } vm_srb->port_number = host_dev->port; vm_srb->path_id = scmnd->device->channel; vm_srb->target_id = scmnd->device->id; vm_srb->lun = scmnd->device->lun; vm_srb->cdb_length = scmnd->cmd_len; memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); sgl = (struct scatterlist *)scsi_sglist(scmnd); length = scsi_bufflen(scmnd); payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; payload_sz = 0; if (scsi_sg_count(scmnd)) { unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset); unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length); struct scatterlist *sg; unsigned long hvpfn, hvpfns_to_add; int j, i = 0, sg_count; payload_sz = (hvpg_count * sizeof(u64) + sizeof(struct vmbus_packet_mpb_array)); if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { payload = kzalloc(payload_sz, GFP_ATOMIC); if (!payload) return SCSI_MLQUEUE_DEVICE_BUSY; } payload->range.len = length; payload->range.offset = offset_in_hvpg; sg_count = scsi_dma_map(scmnd); if (sg_count < 0) { ret = SCSI_MLQUEUE_DEVICE_BUSY; goto err_free_payload; } for_each_sg(sgl, sg, sg_count, j) { /* * Init values for the current sgl entry. hvpfns_to_add * is in units of Hyper-V size pages. Handling the * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles * values of sgl->offset that are larger than PAGE_SIZE. * Such offsets are handled even on other than the first * sgl entry, provided they are a multiple of PAGE_SIZE. */ hvpfn = HVPFN_DOWN(sg_dma_address(sg)); hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) + sg_dma_len(sg)) - hvpfn; /* * Fill the next portion of the PFN array with * sequential Hyper-V PFNs for the continguous physical * memory described by the sgl entry. The end of the * last sgl should be reached at the same time that * the PFN array is filled. 
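				 *
				 * Worked example (illustrative addresses): an
				 * sgl entry with dma address 0x12345600 and
				 * length 0x2400 ends at 0x12347a00, so with a
				 * 4 KiB Hyper-V page,
				 * hvpfn = HVPFN_DOWN(0x12345600) = 0x12345 and
				 * hvpfns_to_add = HVPFN_UP(0x12347a00) - 0x12345 = 3,
				 * i.e. PFNs 0x12345, 0x12346 and 0x12347 are
				 * appended to the array.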
*/ while (hvpfns_to_add--) payload->range.pfn_array[i++] = hvpfn++; } } cmd_request->payload = payload; cmd_request->payload_sz = payload_sz; /* Invokes the vsc to start an IO */ ret = storvsc_do_io(dev, cmd_request, get_cpu()); put_cpu(); if (ret) scsi_dma_unmap(scmnd); if (ret == -EAGAIN) { /* no more space */ ret = SCSI_MLQUEUE_DEVICE_BUSY; goto err_free_payload; } return 0; err_free_payload: if (payload_sz > sizeof(cmd_request->mpb)) kfree(payload); return ret; } static struct scsi_host_template scsi_driver = { .module = THIS_MODULE, .name = "storvsc_host_t", .cmd_size = sizeof(struct storvsc_cmd_request), .bios_param = storvsc_get_chs, .queuecommand = storvsc_queuecommand, .eh_host_reset_handler = storvsc_host_reset_handler, .proc_name = "storvsc_host", .eh_timed_out = storvsc_eh_timed_out, .slave_alloc = storvsc_device_alloc, .slave_configure = storvsc_device_configure, .cmd_per_lun = 2048, .this_id = -1, /* Ensure there are no gaps in presented sgls */ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1, .no_write_same = 1, .track_queue_depth = 1, .change_queue_depth = storvsc_change_queue_depth, }; enum { SCSI_GUID, IDE_GUID, SFC_GUID, }; static const struct hv_vmbus_device_id id_table[] = { /* SCSI guid */ { HV_SCSI_GUID, .driver_data = SCSI_GUID }, /* IDE guid */ { HV_IDE_GUID, .driver_data = IDE_GUID }, /* Fibre Channel GUID */ { HV_SYNTHFC_GUID, .driver_data = SFC_GUID }, { }, }; MODULE_DEVICE_TABLE(vmbus, id_table); static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID }; static bool hv_dev_is_fc(struct hv_device *hv_dev) { return guid_equal(&fc_guid.guid, &hv_dev->dev_type); } static int storvsc_probe(struct hv_device *device, const struct hv_vmbus_device_id *dev_id) { int ret; int num_cpus = num_online_cpus(); int num_present_cpus = num_present_cpus(); struct Scsi_Host *host; struct hv_host_device *host_dev; bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false); int target = 0; struct storvsc_device *stor_device; int max_sub_channels = 0; u32 max_xfer_bytes; /* * We support sub-channels for storage on SCSI and FC controllers. * The number of sub-channels offerred is based on the number of * VCPUs in the guest. 
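	 *
	 * Worked example (the default of 4 vCPUs per sub-channel is an
	 * assumption taken from the storvsc_vcpus_per_sub_channel module
	 * parameter defined earlier in this file): a 16-vCPU guest gets
	 * max_sub_channels = (16 - 1) / 4 = 3, i.e. four channels in total
	 * counting the primary.  can_queue below is then scaled by
	 * (max_sub_channels + 1) and discounted by the
	 * ring_avail_percent_lowater slice of each ring that is kept free.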
*/ if (!dev_is_ide) max_sub_channels = (num_cpus - 1) / storvsc_vcpus_per_sub_channel; scsi_driver.can_queue = max_outstanding_req_per_channel * (max_sub_channels + 1) * (100 - ring_avail_percent_lowater) / 100; host = scsi_host_alloc(&scsi_driver, sizeof(struct hv_host_device)); if (!host) return -ENOMEM; host_dev = shost_priv(host); memset(host_dev, 0, sizeof(struct hv_host_device)); host_dev->port = host->host_no; host_dev->dev = device; host_dev->host = host; stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL); if (!stor_device) { ret = -ENOMEM; goto err_out0; } stor_device->destroy = false; init_waitqueue_head(&stor_device->waiting_to_drain); stor_device->device = device; stor_device->host = host; spin_lock_init(&stor_device->lock); hv_set_drvdata(device, stor_device); dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1); stor_device->port_number = host->host_no; ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); if (ret) goto err_out1; host_dev->path = stor_device->path_id; host_dev->target = stor_device->target_id; switch (dev_id->driver_data) { case SFC_GUID: host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; host->max_id = STORVSC_FC_MAX_TARGETS; host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) host->transportt = fc_transport_template; #endif break; case SCSI_GUID: host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; host->max_id = STORVSC_MAX_TARGETS; host->max_channel = STORVSC_MAX_CHANNELS - 1; break; default: host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET; host->max_id = STORVSC_IDE_MAX_TARGETS; host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1; break; } /* max cmd length */ host->max_cmd_len = STORVSC_MAX_CMD_LEN; /* * Any reasonable Hyper-V configuration should provide * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE, * protecting it from any weird value. */ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); if (is_fc) max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE); /* max_hw_sectors_kb */ host->max_sectors = max_xfer_bytes >> 9; /* * There are 2 requirements for Hyper-V storvsc sgl segments, * based on which the below calculation for max segments is * done: * * 1. Except for the first and last sgl segment, all sgl segments * should be align to HV_HYP_PAGE_SIZE, that also means the * maximum number of segments in a sgl can be calculated by * dividing the total max transfer length by HV_HYP_PAGE_SIZE. * * 2. Except for the first and last, each entry in the SGL must * have an offset that is a multiple of HV_HYP_PAGE_SIZE. */ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1; /* * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. */ if (!dev_is_ide) { if (storvsc_max_hw_queues > num_present_cpus) { storvsc_max_hw_queues = 0; storvsc_log(device, STORVSC_LOGGING_WARN, "Resetting invalid storvsc_max_hw_queues value to default.\n"); } if (storvsc_max_hw_queues) host->nr_hw_queues = storvsc_max_hw_queues; else host->nr_hw_queues = num_present_cpus; } /* * Set the error handler work queue. 
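	 *
	 * alloc_ordered_workqueue() below executes at most one work item at
	 * a time, so the rescan/remove-lun items queued from
	 * storvsc_handle_error() and the host-scan items queued from
	 * storvsc_on_receive() run strictly one after another.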
*/ host_dev->handle_error_wq = alloc_ordered_workqueue("storvsc_error_wq_%d", 0, host->host_no); if (!host_dev->handle_error_wq) { ret = -ENOMEM; goto err_out2; } INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan); /* Register the HBA and start the scsi bus scan */ ret = scsi_add_host(host, &device->device); if (ret != 0) goto err_out3; if (!dev_is_ide) { scsi_scan_host(host); } else { target = (device->dev_instance.b[5] << 8 | device->dev_instance.b[4]); ret = scsi_add_device(host, 0, target, 0); if (ret) goto err_out4; } #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) if (host->transportt == fc_transport_template) { struct fc_rport_identifiers ids = { .roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR, }; fc_host_node_name(host) = stor_device->node_name; fc_host_port_name(host) = stor_device->port_name; stor_device->rport = fc_remote_port_add(host, 0, &ids); if (!stor_device->rport) { ret = -ENOMEM; goto err_out4; } } #endif return 0; err_out4: scsi_remove_host(host); err_out3: destroy_workqueue(host_dev->handle_error_wq); err_out2: /* * Once we have connected with the host, we would need to * invoke storvsc_dev_remove() to rollback this state and * this call also frees up the stor_device; hence the jump around * err_out1 label. */ storvsc_dev_remove(device); goto err_out0; err_out1: kfree(stor_device->stor_chns); kfree(stor_device); err_out0: scsi_host_put(host); return ret; } /* Change a scsi target's queue depth */ static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth) { if (queue_depth > scsi_driver.can_queue) queue_depth = scsi_driver.can_queue; return scsi_change_queue_depth(sdev, queue_depth); } static void storvsc_remove(struct hv_device *dev) { struct storvsc_device *stor_device = hv_get_drvdata(dev); struct Scsi_Host *host = stor_device->host; struct hv_host_device *host_dev = shost_priv(host); #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) if (host->transportt == fc_transport_template) { fc_remote_port_delete(stor_device->rport); fc_remove_host(host); } #endif destroy_workqueue(host_dev->handle_error_wq); scsi_remove_host(host); storvsc_dev_remove(dev); scsi_host_put(host); } static int storvsc_suspend(struct hv_device *hv_dev) { struct storvsc_device *stor_device = hv_get_drvdata(hv_dev); struct Scsi_Host *host = stor_device->host; struct hv_host_device *host_dev = shost_priv(host); storvsc_wait_to_drain(stor_device); drain_workqueue(host_dev->handle_error_wq); vmbus_close(hv_dev->channel); kfree(stor_device->stor_chns); stor_device->stor_chns = NULL; cpumask_clear(&stor_device->alloced_cpus); return 0; } static int storvsc_resume(struct hv_device *hv_dev) { int ret; ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size, hv_dev_is_fc(hv_dev)); return ret; } static struct hv_driver storvsc_drv = { .name = KBUILD_MODNAME, .id_table = id_table, .probe = storvsc_probe, .remove = storvsc_remove, .suspend = storvsc_suspend, .resume = storvsc_resume, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) static struct fc_function_template fc_transport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, }; #endif static int __init storvsc_drv_init(void) { int ret; /* * Divide the ring buffer data size (which is 1 page less * than the ring buffer size since that page is reserved for * the ring buffer indices) by the max request size (which is * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) */ max_outstanding_req_per_channel = ((storvsc_ringbuffer_size - PAGE_SIZE) / ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + 
sizeof(struct vstor_packet) + sizeof(u64), sizeof(u64))); #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) fc_transport_template = fc_attach_transport(&fc_transport_functions); if (!fc_transport_template) return -ENODEV; #endif ret = vmbus_driver_register(&storvsc_drv); #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) if (ret) fc_release_transport(fc_transport_template); #endif return ret; } static void __exit storvsc_drv_exit(void) { vmbus_driver_unregister(&storvsc_drv); #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) fc_release_transport(fc_transport_template); #endif } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver"); module_init(storvsc_drv_init); module_exit(storvsc_drv_exit);
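/*
 * Illustrative sketch only (not part of the upstream driver, and the helper
 * name is made up): the ring-buffer sizing arithmetic from
 * storvsc_drv_init() written out as a standalone helper.  One page of the
 * ring buffer is reserved for the read/write indices, and each request
 * consumes one ALIGN()ed slot of the remaining data area.
 */
static u32 __maybe_unused storvsc_example_max_requests(u32 ring_size)
{
	u32 bytes_per_request = ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
				      sizeof(struct vstor_packet) +
				      sizeof(u64), sizeof(u64));

	/* Data area is the ring size minus the page reserved for indices. */
	return (ring_size - PAGE_SIZE) / bytes_per_request;
}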
linux-master
drivers/scsi/storvsc_drv.c