Dataset columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 classes), file_path (string, lengths 5 to 99).
// SPDX-License-Identifier: GPL-2.0+ /* * PCI HotPlug Controller Core * * Copyright (C) 2001-2002 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001-2002 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * * Authors: * Greg Kroah-Hartman <[email protected]> * Scott Murray <[email protected]> */ #include <linux/module.h> /* try_module_get & module_put */ #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/list.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/uaccess.h> #include "../pci.h" #include "cpci_hotplug.h" #define MY_NAME "pci_hotplug" #define dbg(fmt, arg...) do { if (debug) printk(KERN_DEBUG "%s: %s: " fmt, MY_NAME, __func__, ## arg); } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME, ## arg) /* local variables */ static bool debug; static LIST_HEAD(pci_hotplug_slot_list); static DEFINE_MUTEX(pci_hp_mutex); /* Weee, fun with macros... */ #define GET_STATUS(name, type) \ static int get_##name(struct hotplug_slot *slot, type *value) \ { \ const struct hotplug_slot_ops *ops = slot->ops; \ int retval = 0; \ if (!try_module_get(slot->owner)) \ return -ENODEV; \ if (ops->get_##name) \ retval = ops->get_##name(slot, value); \ module_put(slot->owner); \ return retval; \ } GET_STATUS(power_status, u8) GET_STATUS(attention_status, u8) GET_STATUS(latch_status, u8) GET_STATUS(adapter_status, u8) static ssize_t power_read_file(struct pci_slot *pci_slot, char *buf) { int retval; u8 value; retval = get_power_status(pci_slot->hotplug, &value); if (retval) return retval; return sysfs_emit(buf, "%d\n", value); } static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { struct hotplug_slot *slot = pci_slot->hotplug; unsigned long lpower; u8 power; int retval = 0; lpower = simple_strtoul(buf, NULL, 10); power = (u8)(lpower & 0xff); dbg("power = %d\n", power); if (!try_module_get(slot->owner)) { retval = -ENODEV; goto exit; } switch (power) { case 0: if (slot->ops->disable_slot) retval = slot->ops->disable_slot(slot); break; case 1: if (slot->ops->enable_slot) retval = slot->ops->enable_slot(slot); break; default: err("Illegal value specified for power\n"); retval = -EINVAL; } module_put(slot->owner); exit: if (retval) return retval; return count; } static struct pci_slot_attribute hotplug_slot_attr_power = { .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .show = power_read_file, .store = power_write_file }; static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf) { int retval; u8 value; retval = get_attention_status(pci_slot->hotplug, &value); if (retval) return retval; return sysfs_emit(buf, "%d\n", value); } static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { struct hotplug_slot *slot = pci_slot->hotplug; const struct hotplug_slot_ops *ops = slot->ops; unsigned long lattention; u8 attention; int retval = 0; lattention = simple_strtoul(buf, NULL, 10); attention = (u8)(lattention & 0xff); dbg(" - attention = %d\n", attention); if (!try_module_get(slot->owner)) { retval = -ENODEV; goto exit; } if (ops->set_attention_status) retval 
= ops->set_attention_status(slot, attention); module_put(slot->owner); exit: if (retval) return retval; return count; } static struct pci_slot_attribute hotplug_slot_attr_attention = { .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .show = attention_read_file, .store = attention_write_file }; static ssize_t latch_read_file(struct pci_slot *pci_slot, char *buf) { int retval; u8 value; retval = get_latch_status(pci_slot->hotplug, &value); if (retval) return retval; return sysfs_emit(buf, "%d\n", value); } static struct pci_slot_attribute hotplug_slot_attr_latch = { .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO}, .show = latch_read_file, }; static ssize_t presence_read_file(struct pci_slot *pci_slot, char *buf) { int retval; u8 value; retval = get_adapter_status(pci_slot->hotplug, &value); if (retval) return retval; return sysfs_emit(buf, "%d\n", value); } static struct pci_slot_attribute hotplug_slot_attr_presence = { .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO}, .show = presence_read_file, }; static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { struct hotplug_slot *slot = pci_slot->hotplug; unsigned long ltest; u32 test; int retval = 0; ltest = simple_strtoul(buf, NULL, 10); test = (u32)(ltest & 0xffffffff); dbg("test = %d\n", test); if (!try_module_get(slot->owner)) { retval = -ENODEV; goto exit; } if (slot->ops->hardware_test) retval = slot->ops->hardware_test(slot, test); module_put(slot->owner); exit: if (retval) return retval; return count; } static struct pci_slot_attribute hotplug_slot_attr_test = { .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .store = test_write_file }; static bool has_power_file(struct pci_slot *pci_slot) { struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return false; if ((slot->ops->enable_slot) || (slot->ops->disable_slot) || (slot->ops->get_power_status)) return true; return false; } static bool has_attention_file(struct pci_slot *pci_slot) { struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return false; if ((slot->ops->set_attention_status) || (slot->ops->get_attention_status)) return true; return false; } static bool has_latch_file(struct pci_slot *pci_slot) { struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return false; if (slot->ops->get_latch_status) return true; return false; } static bool has_adapter_file(struct pci_slot *pci_slot) { struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return false; if (slot->ops->get_adapter_status) return true; return false; } static bool has_test_file(struct pci_slot *pci_slot) { struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return false; if (slot->ops->hardware_test) return true; return false; } static int fs_add_slot(struct pci_slot *pci_slot) { int retval = 0; /* Create symbolic link to the hotplug driver module */ pci_hp_create_module_link(pci_slot); if (has_power_file(pci_slot)) { retval = sysfs_create_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr); if (retval) goto exit_power; } if (has_attention_file(pci_slot)) { retval = sysfs_create_file(&pci_slot->kobj, &hotplug_slot_attr_attention.attr); if (retval) goto exit_attention; } if (has_latch_file(pci_slot)) { retval = sysfs_create_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr); if (retval) goto exit_latch; } if (has_adapter_file(pci_slot)) { retval = sysfs_create_file(&pci_slot->kobj, &hotplug_slot_attr_presence.attr); if 
(retval) goto exit_adapter; } if (has_test_file(pci_slot)) { retval = sysfs_create_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr); if (retval) goto exit_test; } goto exit; exit_test: if (has_adapter_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_presence.attr); exit_adapter: if (has_latch_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr); exit_latch: if (has_attention_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_attention.attr); exit_attention: if (has_power_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr); exit_power: pci_hp_remove_module_link(pci_slot); exit: return retval; } static void fs_remove_slot(struct pci_slot *pci_slot) { if (has_power_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr); if (has_attention_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_attention.attr); if (has_latch_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr); if (has_adapter_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_presence.attr); if (has_test_file(pci_slot)) sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr); pci_hp_remove_module_link(pci_slot); } static struct hotplug_slot *get_slot_from_name(const char *name) { struct hotplug_slot *slot; list_for_each_entry(slot, &pci_hotplug_slot_list, slot_list) { if (strcmp(hotplug_slot_name(slot), name) == 0) return slot; } return NULL; } /** * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem * @bus: bus this slot is on * @slot: pointer to the &struct hotplug_slot to register * @devnr: device number * @name: name registered with kobject core * @owner: caller module owner * @mod_name: caller module name * * Prepares a hotplug slot for in-kernel use and immediately publishes it to * user space in one go. Drivers may alternatively carry out the two steps * separately by invoking pci_hp_initialize() and pci_hp_add(). * * Returns 0 if successful, anything else for an error. */ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int devnr, const char *name, struct module *owner, const char *mod_name) { int result; result = __pci_hp_initialize(slot, bus, devnr, name, owner, mod_name); if (result) return result; result = pci_hp_add(slot); if (result) pci_hp_destroy(slot); return result; } EXPORT_SYMBOL_GPL(__pci_hp_register); /** * __pci_hp_initialize - prepare hotplug slot for in-kernel use * @slot: pointer to the &struct hotplug_slot to initialize * @bus: bus this slot is on * @devnr: slot number * @name: name registered with kobject core * @owner: caller module owner * @mod_name: caller module name * * Allocate and fill in a PCI slot for use by a hotplug driver. Once this has * been called, the driver may invoke hotplug_slot_name() to get the slot's * unique name. The driver must be prepared to handle a ->reset_slot callback * from this point on. * * Returns 0 on success or a negative int on error. */ int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int devnr, const char *name, struct module *owner, const char *mod_name) { struct pci_slot *pci_slot; if (slot == NULL) return -ENODEV; if (slot->ops == NULL) return -EINVAL; slot->owner = owner; slot->mod_name = mod_name; /* * No problems if we call this interface from both ACPI_PCI_SLOT * driver and call it here again. If we've already created the * pci_slot, the interface will simply bump the refcount. 
*/ pci_slot = pci_create_slot(bus, devnr, name, slot); if (IS_ERR(pci_slot)) return PTR_ERR(pci_slot); slot->pci_slot = pci_slot; pci_slot->hotplug = slot; return 0; } EXPORT_SYMBOL_GPL(__pci_hp_initialize); /** * pci_hp_add - publish hotplug slot to user space * @slot: pointer to the &struct hotplug_slot to publish * * Make a hotplug slot's sysfs interface available and inform user space of its * addition by sending a uevent. The hotplug driver must be prepared to handle * all &struct hotplug_slot_ops callbacks from this point on. * * Returns 0 on success or a negative int on error. */ int pci_hp_add(struct hotplug_slot *slot) { struct pci_slot *pci_slot = slot->pci_slot; int result; result = fs_add_slot(pci_slot); if (result) return result; kobject_uevent(&pci_slot->kobj, KOBJ_ADD); mutex_lock(&pci_hp_mutex); list_add(&slot->slot_list, &pci_hotplug_slot_list); mutex_unlock(&pci_hp_mutex); dbg("Added slot %s to the list\n", hotplug_slot_name(slot)); return 0; } EXPORT_SYMBOL_GPL(pci_hp_add); /** * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem * @slot: pointer to the &struct hotplug_slot to deregister * * The @slot must have been registered with the pci hotplug subsystem * previously with a call to pci_hp_register(). * * Returns 0 if successful, anything else for an error. */ void pci_hp_deregister(struct hotplug_slot *slot) { pci_hp_del(slot); pci_hp_destroy(slot); } EXPORT_SYMBOL_GPL(pci_hp_deregister); /** * pci_hp_del - unpublish hotplug slot from user space * @slot: pointer to the &struct hotplug_slot to unpublish * * Remove a hotplug slot's sysfs interface. * * Returns 0 on success or a negative int on error. */ void pci_hp_del(struct hotplug_slot *slot) { struct hotplug_slot *temp; if (WARN_ON(!slot)) return; mutex_lock(&pci_hp_mutex); temp = get_slot_from_name(hotplug_slot_name(slot)); if (WARN_ON(temp != slot)) { mutex_unlock(&pci_hp_mutex); return; } list_del(&slot->slot_list); mutex_unlock(&pci_hp_mutex); dbg("Removed slot %s from the list\n", hotplug_slot_name(slot)); fs_remove_slot(slot->pci_slot); } EXPORT_SYMBOL_GPL(pci_hp_del); /** * pci_hp_destroy - remove hotplug slot from in-kernel use * @slot: pointer to the &struct hotplug_slot to destroy * * Destroy a PCI slot used by a hotplug driver. Once this has been called, * the driver may no longer invoke hotplug_slot_name() to get the slot's * unique name. The driver no longer needs to handle a ->reset_slot callback * from this point on. * * Returns 0 on success or a negative int on error. */ void pci_hp_destroy(struct hotplug_slot *slot) { struct pci_slot *pci_slot = slot->pci_slot; slot->pci_slot = NULL; pci_slot->hotplug = NULL; pci_destroy_slot(pci_slot); } EXPORT_SYMBOL_GPL(pci_hp_destroy); static int __init pci_hotplug_init(void) { int result; result = cpci_hotplug_init(debug); if (result) { err("cpci_hotplug_init with error %d\n", result); return result; } return result; } device_initcall(pci_hotplug_init); /* * not really modular, but the easiest way to keep compat with existing * bootargs behaviour is to continue using module_param here. */ module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
linux-master
drivers/pci/hotplug/pci_hotplug_core.c
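A minimal sketch (not from the kernel tree) of how a hotplug controller driver might use the registration interface implemented in pci_hotplug_core.c above. The hotplug_slot_ops callbacks, pci_hp_register() and pci_hp_deregister() come from that file; the my_* identifiers, the device number 0 and the slot name are hypothetical placeholders, and the callbacks only stub out what real hardware access would do.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static int my_enable_slot(struct hotplug_slot *slot)
{
	/* a real driver would power up and scan the slot here */
	return 0;
}

static int my_get_power_status(struct hotplug_slot *slot, u8 *value)
{
	*value = 1;	/* report "on"; a real driver reads the hardware */
	return 0;
}

static const struct hotplug_slot_ops my_slot_ops = {
	.enable_slot      = my_enable_slot,
	.get_power_status = my_get_power_status,
};

static struct hotplug_slot my_slot = { .ops = &my_slot_ops };

static int my_probe(struct pci_bus *bus)
{
	/* creates the pci_slot, its sysfs files and sends the KOBJ_ADD uevent */
	return pci_hp_register(&my_slot, bus, 0 /* devnr */, "my_slot0");
}

static void my_remove(void)
{
	pci_hp_deregister(&my_slot);
}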
// SPDX-License-Identifier: GPL-2.0+ /* * ACPI PCI Hot Plug IBM Extension * * Copyright (C) 2004 Vernon Mauery <[email protected]> * Copyright (C) 2004 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #define pr_fmt(fmt) "acpiphp_ibm: " fmt #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/uaccess.h> #include "acpiphp.h" #include "../pci.h" #define DRIVER_VERSION "1.0.1" #define DRIVER_AUTHOR "Irene Zubarev <[email protected]>, Vernon Mauery <[email protected]>" #define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); #define FOUND_APCI 0x61504349 /* these are the names for the IBM ACPI pseudo-device */ #define IBM_HARDWARE_ID1 "IBM37D0" #define IBM_HARDWARE_ID2 "IBM37D4" #define hpslot_to_sun(A) (to_slot(A)->sun) /* union apci_descriptor - allows access to the * various device descriptors that are embedded in the * aPCI table */ union apci_descriptor { struct { char sig[4]; u8 len; } header; struct { u8 type; u8 len; u16 slot_id; u8 bus_id; u8 dev_num; u8 slot_num; u8 slot_attr[2]; u8 attn; u8 status[2]; u8 sun; u8 res[3]; } slot; struct { u8 type; u8 len; } generic; }; /* struct notification - keeps info about the device * that cause the ACPI notification event */ struct notification { struct acpi_device *device; u8 event; }; static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status); static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status); static void ibm_handle_events(acpi_handle handle, u32 event, void *context); static int ibm_get_table_from_acpi(char **bufp); static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t size); static acpi_status __init ibm_find_acpi_device(acpi_handle handle, u32 lvl, void *context, void **rv); static int __init ibm_acpiphp_init(void); static void __exit ibm_acpiphp_exit(void); static acpi_handle ibm_acpi_handle; static struct notification ibm_note; static struct bin_attribute ibm_apci_table_attr __ro_after_init = { .attr = { .name = "apci_table", .mode = S_IRUGO, }, .read = ibm_read_apci_table, .write = NULL, }; static struct acpiphp_attention_info ibm_attention_info = { .set_attn = ibm_set_attention_status, .get_attn = ibm_get_attention_status, .owner = THIS_MODULE, }; /** * ibm_slot_from_id - workaround for bad ibm hardware * @id: the slot number that linux refers to the slot by * * Description: This method returns the aCPI slot descriptor * corresponding to the Linux slot number. This descriptor * has info about the aPCI slot id and attention status. * This descriptor must be freed using kfree when done. 
*/ static union apci_descriptor *ibm_slot_from_id(int id) { int ind = 0, size; union apci_descriptor *ret = NULL, *des; char *table; size = ibm_get_table_from_acpi(&table); if (size < 0) return NULL; des = (union apci_descriptor *)table; if (memcmp(des->header.sig, "aPCI", 4) != 0) goto ibm_slot_done; des = (union apci_descriptor *)&table[ind += des->header.len]; while (ind < size && (des->generic.type != 0x82 || des->slot.slot_num != id)) { des = (union apci_descriptor *)&table[ind += des->generic.len]; } if (ind < size && des->slot.slot_num == id) ret = des; ibm_slot_done: if (ret) { ret = kmalloc(sizeof(union apci_descriptor), GFP_KERNEL); if (ret) memcpy(ret, des, sizeof(union apci_descriptor)); } kfree(table); return ret; } /** * ibm_set_attention_status - callback method to set the attention LED * @slot: the hotplug_slot to work with * @status: what to set the LED to (0 or 1) * * Description: This method is registered with the acpiphp module as a * callback to do the device specific task of setting the LED status. */ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) { union acpi_object args[2]; struct acpi_object_list params = { .pointer = args, .count = 2 }; acpi_status stat; unsigned long long rc; union apci_descriptor *ibm_slot; int id = hpslot_to_sun(slot); ibm_slot = ibm_slot_from_id(id); if (!ibm_slot) { pr_err("APLS null ACPI descriptor for slot %d\n", id); return -ENODEV; } pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__, ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, (status ? 1 : 0)); args[0].type = ACPI_TYPE_INTEGER; args[0].integer.value = ibm_slot->slot.slot_id; args[1].type = ACPI_TYPE_INTEGER; args[1].integer.value = (status) ? 1 : 0; kfree(ibm_slot); stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc); if (ACPI_FAILURE(stat)) { pr_err("APLS evaluation failed: 0x%08x\n", stat); return -ENODEV; } else if (!rc) { pr_err("APLS method failed: 0x%08llx\n", rc); return -ERANGE; } return 0; } /** * ibm_get_attention_status - callback method to get attention LED status * @slot: the hotplug_slot to work with * @status: returns what the LED is set to (0 or 1) * * Description: This method is registered with the acpiphp module as a * callback to do the device specific task of getting the LED status. * * Because there is no direct method of getting the LED status directly * from an ACPI call, we read the aPCI table and parse out our * slot descriptor to read the status from that. */ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status) { union apci_descriptor *ibm_slot; int id = hpslot_to_sun(slot); ibm_slot = ibm_slot_from_id(id); if (!ibm_slot) { pr_err("APLS null ACPI descriptor for slot %d\n", id); return -ENODEV; } if (ibm_slot->slot.attn & 0xa0 || ibm_slot->slot.status[1] & 0x08) *status = 1; else *status = 0; pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__, ibm_slot->slot.slot_num, ibm_slot->slot.slot_id, *status); kfree(ibm_slot); return 0; } /** * ibm_handle_events - listens for ACPI events for the IBM37D0 device * @handle: an ACPI handle to the device that caused the event * @event: the event info (device specific) * @context: passed context (our notification struct) * * Description: This method is registered as a callback with the ACPI * subsystem it is called when this device has an event to notify the OS of. * * The events actually come from the device as two events that get * synthesized into one event with data by this function. 
The event * ID comes first and then the slot number that caused it. We report * this as one event to the OS. * * From section 5.6.2.2 of the ACPI 2.0 spec, I understand that the OSPM will * only re-enable the interrupt that causes this event AFTER this method * has returned, thereby enforcing serial access for the notification struct. */ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) { u8 detail = event & 0x0f; u8 subevent = event & 0xf0; struct notification *note = context; pr_debug("%s: Received notification %02x\n", __func__, event); if (subevent == 0x80) { pr_debug("%s: generating bus event\n", __func__); acpi_bus_generate_netlink_event(note->device->pnp.device_class, dev_name(&note->device->dev), note->event, detail); } else note->event = event; } /** * ibm_get_table_from_acpi - reads the APLS buffer from ACPI * @bufp: address to pointer to allocate for the table * * Description: This method reads the APLS buffer in from ACPI and * stores the "stripped" table into a single buffer * it allocates and passes the address back in bufp. * * If NULL is passed in as buffer, this method only calculates * the size of the table and returns that without filling * in the buffer. * * Returns < 0 on error or the size of the table on success. */ static int ibm_get_table_from_acpi(char **bufp) { union acpi_object *package; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; char *lbuf = NULL; int i, size = -EIO; status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer); if (ACPI_FAILURE(status)) { pr_err("%s: APCI evaluation failed\n", __func__); return -ENODEV; } package = (union acpi_object *) buffer.pointer; if (!(package) || (package->type != ACPI_TYPE_PACKAGE) || !(package->package.elements)) { pr_err("%s: Invalid APCI object\n", __func__); goto read_table_done; } for (size = 0, i = 0; i < package->package.count; i++) { if (package->package.elements[i].type != ACPI_TYPE_BUFFER) { pr_err("%s: Invalid APCI element %d\n", __func__, i); goto read_table_done; } size += package->package.elements[i].buffer.length; } if (bufp == NULL) goto read_table_done; lbuf = kzalloc(size, GFP_KERNEL); pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n", __func__, package->package.count, size, lbuf); if (lbuf) { *bufp = lbuf; } else { size = -ENOMEM; goto read_table_done; } size = 0; for (i = 0; i < package->package.count; i++) { memcpy(&lbuf[size], package->package.elements[i].buffer.pointer, package->package.elements[i].buffer.length); size += package->package.elements[i].buffer.length; } read_table_done: kfree(buffer.pointer); return size; } /** * ibm_read_apci_table - callback for the sysfs apci_table file * @filp: the open sysfs file * @kobj: the kobject this binary attribute is a part of * @bin_attr: struct bin_attribute for this file * @buffer: the kernel space buffer to fill * @pos: the offset into the file * @size: the number of bytes requested * * Description: Gets registered with sysfs as the reader callback * to be executed when /sys/bus/pci/slots/apci_table gets read. * * Since we don't get notified on open and close for this file, * things get really tricky here... * our solution is to only allow reading the table in all at once. 
*/ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t size) { int bytes_read = -EINVAL; char *table = NULL; pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size); if (pos == 0) { bytes_read = ibm_get_table_from_acpi(&table); if (bytes_read > 0 && bytes_read <= size) memcpy(buffer, table, bytes_read); kfree(table); } return bytes_read; } /** * ibm_find_acpi_device - callback to find our ACPI device * @handle: the ACPI handle of the device we are inspecting * @lvl: depth into the namespace tree * @context: a pointer to our handle to fill when we find the device * @rv: a return value to fill if desired * * Description: Used as a callback when calling acpi_walk_namespace * to find our device. When this method returns non-zero * acpi_walk_namespace quits its search and returns our value. */ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_handle *phandle = (acpi_handle *)context; unsigned long long current_status = 0; acpi_status status; struct acpi_device_info *info; int retval = 0; status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { pr_err("%s: Failed to get device information status=0x%x\n", __func__, status); return retval; } acpi_bus_get_status_handle(handle, &current_status); if (current_status && (info->valid & ACPI_VALID_HID) && (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) { pr_debug("found hardware: %s, handle: %p\n", info->hardware_id.string, handle); *phandle = handle; /* returning non-zero causes the search to stop * and returns this value to the caller of * acpi_walk_namespace, but it also causes some warnings * in the acpi debug code to print... 
*/ retval = FOUND_APCI; } kfree(info); return retval; } static int __init ibm_acpiphp_init(void) { int retval = 0; acpi_status status; struct acpi_device *device; struct kobject *sysdir = &pci_slots_kset->kobj; pr_debug("%s\n", __func__); if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, ibm_find_acpi_device, NULL, &ibm_acpi_handle, NULL) != FOUND_APCI) { pr_err("%s: acpi_walk_namespace failed\n", __func__); retval = -ENODEV; goto init_return; } pr_debug("%s: found IBM aPCI device\n", __func__); device = acpi_fetch_acpi_dev(ibm_acpi_handle); if (!device) { pr_err("%s: acpi_fetch_acpi_dev failed\n", __func__); retval = -ENODEV; goto init_return; } if (acpiphp_register_attention(&ibm_attention_info)) { retval = -ENODEV; goto init_return; } ibm_note.device = device; status = acpi_install_notify_handler(ibm_acpi_handle, ACPI_DEVICE_NOTIFY, ibm_handle_events, &ibm_note); if (ACPI_FAILURE(status)) { pr_err("%s: Failed to register notification handler\n", __func__); retval = -EBUSY; goto init_cleanup; } ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL); retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr); return retval; init_cleanup: acpiphp_unregister_attention(&ibm_attention_info); init_return: return retval; } static void __exit ibm_acpiphp_exit(void) { acpi_status status; struct kobject *sysdir = &pci_slots_kset->kobj; pr_debug("%s\n", __func__); if (acpiphp_unregister_attention(&ibm_attention_info)) pr_err("%s: attention info deregistration failed", __func__); status = acpi_remove_notify_handler( ibm_acpi_handle, ACPI_DEVICE_NOTIFY, ibm_handle_events); if (ACPI_FAILURE(status)) pr_err("%s: Notification handler removal failed\n", __func__); /* remove the /sys entries */ sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr); } module_init(ibm_acpiphp_init); module_exit(ibm_acpiphp_exit);
linux-master
drivers/pci/hotplug/acpiphp_ibm.c
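A small user-space sketch (not part of the driver) that walks the raw aPCI table which acpiphp_ibm.c above exports at /sys/bus/pci/slots/apci_table. The "aPCI" signature, the 0x82 slot-descriptor type and the byte offsets follow union apci_descriptor in that file; the fixed 4096-byte buffer and the assumption that the whole table fits in a single read are simplifications for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4096];
	size_t size, ind;
	FILE *f = fopen("/sys/bus/pci/slots/apci_table", "rb");

	if (!f)
		return 1;
	size = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	/* the table starts with the signature "aPCI" followed by a length byte */
	if (size < 5 || memcmp(buf, "aPCI", 4) != 0)
		return 1;

	ind = buf[4];			/* skip the table header */
	while (ind + 2 <= size) {
		unsigned char type = buf[ind];
		unsigned char len  = buf[ind + 1];

		if (type == 0x82 && ind + 10 <= size)	/* slot descriptor */
			printf("slot_num %u bus_id %u dev_num %u attn 0x%02x\n",
			       buf[ind + 6], buf[ind + 4], buf[ind + 5],
			       buf[ind + 9]);
		if (len == 0)		/* malformed entry, stop walking */
			break;
		ind += len;
	}
	return 0;
}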
// SPDX-License-Identifier: GPL-2.0+ /* * Standard PCI Hot Plug Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>,<[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/interrupt.h> #include "shpchp.h" /* Slot Available Register I field definition */ #define SLOT_33MHZ 0x0000001f #define SLOT_66MHZ_PCIX 0x00001f00 #define SLOT_100MHZ_PCIX 0x001f0000 #define SLOT_133MHZ_PCIX 0x1f000000 /* Slot Available Register II field definition */ #define SLOT_66MHZ 0x0000001f #define SLOT_66MHZ_PCIX_266 0x00000f00 #define SLOT_100MHZ_PCIX_266 0x0000f000 #define SLOT_133MHZ_PCIX_266 0x000f0000 #define SLOT_66MHZ_PCIX_533 0x00f00000 #define SLOT_100MHZ_PCIX_533 0x0f000000 #define SLOT_133MHZ_PCIX_533 0xf0000000 /* Slot Configuration */ #define SLOT_NUM 0x0000001F #define FIRST_DEV_NUM 0x00001F00 #define PSN 0x07FF0000 #define UPDOWN 0x20000000 #define MRLSENSOR 0x40000000 #define ATTN_BUTTON 0x80000000 /* * Interrupt Locator Register definitions */ #define CMD_INTR_PENDING (1 << 0) #define SLOT_INTR_PENDING(i) (1 << (i + 1)) /* * Controller SERR-INT Register */ #define GLOBAL_INTR_MASK (1 << 0) #define GLOBAL_SERR_MASK (1 << 1) #define COMMAND_INTR_MASK (1 << 2) #define ARBITER_SERR_MASK (1 << 3) #define COMMAND_DETECTED (1 << 16) #define ARBITER_DETECTED (1 << 17) #define SERR_INTR_RSVDZ_MASK 0xfffc0000 /* * Logical Slot Register definitions */ #define SLOT_REG(i) (SLOT1 + (4 * i)) #define SLOT_STATE_SHIFT (0) #define SLOT_STATE_MASK (3 << 0) #define SLOT_STATE_PWRONLY (1) #define SLOT_STATE_ENABLED (2) #define SLOT_STATE_DISABLED (3) #define PWR_LED_STATE_SHIFT (2) #define PWR_LED_STATE_MASK (3 << 2) #define ATN_LED_STATE_SHIFT (4) #define ATN_LED_STATE_MASK (3 << 4) #define ATN_LED_STATE_ON (1) #define ATN_LED_STATE_BLINK (2) #define ATN_LED_STATE_OFF (3) #define POWER_FAULT (1 << 6) #define ATN_BUTTON (1 << 7) #define MRL_SENSOR (1 << 8) #define MHZ66_CAP (1 << 9) #define PRSNT_SHIFT (10) #define PRSNT_MASK (3 << 10) #define PCIX_CAP_SHIFT (12) #define PCIX_CAP_MASK_PI1 (3 << 12) #define PCIX_CAP_MASK_PI2 (7 << 12) #define PRSNT_CHANGE_DETECTED (1 << 16) #define ISO_PFAULT_DETECTED (1 << 17) #define BUTTON_PRESS_DETECTED (1 << 18) #define MRL_CHANGE_DETECTED (1 << 19) #define CON_PFAULT_DETECTED (1 << 20) #define PRSNT_CHANGE_INTR_MASK (1 << 24) #define ISO_PFAULT_INTR_MASK (1 << 25) #define BUTTON_PRESS_INTR_MASK (1 << 26) #define MRL_CHANGE_INTR_MASK (1 << 27) #define CON_PFAULT_INTR_MASK (1 << 28) #define MRL_CHANGE_SERR_MASK (1 << 29) #define CON_PFAULT_SERR_MASK (1 << 30) #define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21)) /* * SHPC Command Code definitions * * Slot Operation 00h - 3Fh * Set Bus Segment Speed/Mode A 40h - 47h * Power-Only All Slots 48h * Enable All Slots 49h * Set Bus Segment Speed/Mode B (PI=2) 50h - 5Fh * Reserved Command Codes 60h - BFh * Vendor Specific Commands C0h - FFh */ #define SET_SLOT_PWR 0x01 /* Slot Operation */ #define SET_SLOT_ENABLE 0x02 #define SET_SLOT_DISABLE 0x03 #define SET_PWR_ON 0x04 #define SET_PWR_BLINK 0x08 #define SET_PWR_OFF 0x0c #define SET_ATTN_ON 0x10 #define SET_ATTN_BLINK 0x20 #define SET_ATTN_OFF 0x30 #define SETA_PCI_33MHZ 0x40 /* Set Bus Segment Speed/Mode A */ #define SETA_PCI_66MHZ 0x41 #define SETA_PCIX_66MHZ 0x42 #define SETA_PCIX_100MHZ 0x43 
#define SETA_PCIX_133MHZ 0x44 #define SETA_RESERVED1 0x45 #define SETA_RESERVED2 0x46 #define SETA_RESERVED3 0x47 #define SET_PWR_ONLY_ALL 0x48 /* Power-Only All Slots */ #define SET_ENABLE_ALL 0x49 /* Enable All Slots */ #define SETB_PCI_33MHZ 0x50 /* Set Bus Segment Speed/Mode B */ #define SETB_PCI_66MHZ 0x51 #define SETB_PCIX_66MHZ_PM 0x52 #define SETB_PCIX_100MHZ_PM 0x53 #define SETB_PCIX_133MHZ_PM 0x54 #define SETB_PCIX_66MHZ_EM 0x55 #define SETB_PCIX_100MHZ_EM 0x56 #define SETB_PCIX_133MHZ_EM 0x57 #define SETB_PCIX_66MHZ_266 0x58 #define SETB_PCIX_100MHZ_266 0x59 #define SETB_PCIX_133MHZ_266 0x5a #define SETB_PCIX_66MHZ_533 0x5b #define SETB_PCIX_100MHZ_533 0x5c #define SETB_PCIX_133MHZ_533 0x5d #define SETB_RESERVED1 0x5e #define SETB_RESERVED2 0x5f /* * SHPC controller command error code */ #define SWITCH_OPEN 0x1 #define INVALID_CMD 0x2 #define INVALID_SPEED_MODE 0x4 /* * For accessing SHPC Working Register Set via PCI Configuration Space */ #define DWORD_SELECT 0x2 #define DWORD_DATA 0x4 /* Field Offset in Logical Slot Register - byte boundary */ #define SLOT_EVENT_LATCH 0x2 #define SLOT_SERR_INT_MASK 0x3 static irqreturn_t shpc_isr(int irq, void *dev_id); static void start_int_poll_timer(struct controller *ctrl, int sec); static int hpc_check_cmd_status(struct controller *ctrl); static inline u8 shpc_readb(struct controller *ctrl, int reg) { return readb(ctrl->creg + reg); } static inline u16 shpc_readw(struct controller *ctrl, int reg) { return readw(ctrl->creg + reg); } static inline void shpc_writew(struct controller *ctrl, int reg, u16 val) { writew(val, ctrl->creg + reg); } static inline u32 shpc_readl(struct controller *ctrl, int reg) { return readl(ctrl->creg + reg); } static inline void shpc_writel(struct controller *ctrl, int reg, u32 val) { writel(val, ctrl->creg + reg); } static inline int shpc_indirect_read(struct controller *ctrl, int index, u32 *value) { int rc; u32 cap_offset = ctrl->cap_offset; struct pci_dev *pdev = ctrl->pci_dev; rc = pci_write_config_byte(pdev, cap_offset + DWORD_SELECT, index); if (rc) return rc; return pci_read_config_dword(pdev, cap_offset + DWORD_DATA, value); } /* * This is the interrupt polling timeout function. */ static void int_poll_timeout(struct timer_list *t) { struct controller *ctrl = from_timer(ctrl, t, poll_timer); /* Poll for interrupt events. regs == NULL => polling */ shpc_isr(0, ctrl); if (!shpchp_poll_time) shpchp_poll_time = 2; /* default polling interval is 2 sec */ start_int_poll_timer(ctrl, shpchp_poll_time); } /* * This function starts the interrupt polling timer. */ static void start_int_poll_timer(struct controller *ctrl, int sec) { /* Clamp to sane value */ if ((sec <= 0) || (sec > 60)) sec = 2; ctrl->poll_timer.expires = jiffies + sec * HZ; add_timer(&ctrl->poll_timer); } static inline int is_ctrl_busy(struct controller *ctrl) { u16 cmd_status = shpc_readw(ctrl, CMD_STATUS); return cmd_status & 0x1; } /* * Returns 1 if SHPC finishes executing a command within 1 sec, * otherwise returns 0. 
*/ static inline int shpc_poll_ctrl_busy(struct controller *ctrl) { int i; if (!is_ctrl_busy(ctrl)) return 1; /* Check every 0.1 sec for a total of 1 sec */ for (i = 0; i < 10; i++) { msleep(100); if (!is_ctrl_busy(ctrl)) return 1; } return 0; } static inline int shpc_wait_cmd(struct controller *ctrl) { int retval = 0; unsigned long timeout = msecs_to_jiffies(1000); int rc; if (shpchp_poll_mode) rc = shpc_poll_ctrl_busy(ctrl); else rc = wait_event_interruptible_timeout(ctrl->queue, !is_ctrl_busy(ctrl), timeout); if (!rc && is_ctrl_busy(ctrl)) { retval = -EIO; ctrl_err(ctrl, "Command not completed in 1000 msec\n"); } else if (rc < 0) { retval = -EINTR; ctrl_info(ctrl, "Command was interrupted by a signal\n"); } return retval; } static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) { struct controller *ctrl = slot->ctrl; u16 cmd_status; int retval = 0; u16 temp_word; mutex_lock(&slot->ctrl->cmd_lock); if (!shpc_poll_ctrl_busy(ctrl)) { /* After 1 sec and the controller is still busy */ ctrl_err(ctrl, "Controller is still busy after 1 sec\n"); retval = -EBUSY; goto out; } ++t_slot; temp_word = (t_slot << 8) | (cmd & 0xFF); ctrl_dbg(ctrl, "%s: t_slot %x cmd %x\n", __func__, t_slot, cmd); /* To make sure the Controller Busy bit is 0 before we send out the * command. */ shpc_writew(ctrl, CMD, temp_word); /* * Wait for command completion. */ retval = shpc_wait_cmd(slot->ctrl); if (retval) goto out; cmd_status = hpc_check_cmd_status(slot->ctrl); if (cmd_status) { ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n", cmd, cmd_status); retval = -EIO; } out: mutex_unlock(&slot->ctrl->cmd_lock); return retval; } static int hpc_check_cmd_status(struct controller *ctrl) { int retval = 0; u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F; switch (cmd_status >> 1) { case 0: retval = 0; break; case 1: retval = SWITCH_OPEN; ctrl_err(ctrl, "Switch opened!\n"); break; case 2: retval = INVALID_CMD; ctrl_err(ctrl, "Invalid HPC command!\n"); break; case 4: retval = INVALID_SPEED_MODE; ctrl_err(ctrl, "Invalid bus speed/mode!\n"); break; default: retval = cmd_status; } return retval; } static int hpc_get_attention_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); u8 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT; switch (state) { case ATN_LED_STATE_ON: *status = 1; /* On */ break; case ATN_LED_STATE_BLINK: *status = 2; /* Blink */ break; case ATN_LED_STATE_OFF: *status = 0; /* Off */ break; default: *status = 0xFF; /* Reserved */ break; } return 0; } static int hpc_get_power_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); u8 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT; switch (state) { case SLOT_STATE_PWRONLY: *status = 2; /* Powered only */ break; case SLOT_STATE_ENABLED: *status = 1; /* Enabled */ break; case SLOT_STATE_DISABLED: *status = 0; /* Disabled */ break; default: *status = 0xFF; /* Reserved */ break; } return 0; } static int hpc_get_latch_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); *status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */ return 0; } static int hpc_get_adapter_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); u8 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT; *status = (state 
!= 0x3) ? 1 : 0; return 0; } static int hpc_get_prog_int(struct slot *slot, u8 *prog_int) { struct controller *ctrl = slot->ctrl; *prog_int = shpc_readb(ctrl, PROG_INTERFACE); return 0; } static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) { int retval = 0; struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); u8 m66_cap = !!(slot_reg & MHZ66_CAP); u8 pi, pcix_cap; retval = hpc_get_prog_int(slot, &pi); if (retval) return retval; switch (pi) { case 1: pcix_cap = (slot_reg & PCIX_CAP_MASK_PI1) >> PCIX_CAP_SHIFT; break; case 2: pcix_cap = (slot_reg & PCIX_CAP_MASK_PI2) >> PCIX_CAP_SHIFT; break; default: return -ENODEV; } ctrl_dbg(ctrl, "%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n", __func__, slot_reg, pcix_cap, m66_cap); switch (pcix_cap) { case 0x0: *value = m66_cap ? PCI_SPEED_66MHz : PCI_SPEED_33MHz; break; case 0x1: *value = PCI_SPEED_66MHz_PCIX; break; case 0x3: *value = PCI_SPEED_133MHz_PCIX; break; case 0x4: *value = PCI_SPEED_133MHz_PCIX_266; break; case 0x5: *value = PCI_SPEED_133MHz_PCIX_533; break; case 0x2: default: *value = PCI_SPEED_UNKNOWN; retval = -ENODEV; break; } ctrl_dbg(ctrl, "Adapter speed = %d\n", *value); return retval; } static int hpc_query_power_fault(struct slot *slot) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); /* Note: Logic 0 => fault */ return !(slot_reg & POWER_FAULT); } static int hpc_set_attention_status(struct slot *slot, u8 value) { u8 slot_cmd = 0; switch (value) { case 0: slot_cmd = SET_ATTN_OFF; /* OFF */ break; case 1: slot_cmd = SET_ATTN_ON; /* ON */ break; case 2: slot_cmd = SET_ATTN_BLINK; /* BLINK */ break; default: return -1; } return shpc_write_cmd(slot, slot->hp_slot, slot_cmd); } static void hpc_set_green_led_on(struct slot *slot) { shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON); } static void hpc_set_green_led_off(struct slot *slot) { shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF); } static void hpc_set_green_led_blink(struct slot *slot) { shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK); } static void hpc_release_ctlr(struct controller *ctrl) { int i; u32 slot_reg, serr_int; /* * Mask event interrupts and SERRs of all slots */ for (i = 0; i < ctrl->num_slots; i++) { slot_reg = shpc_readl(ctrl, SLOT_REG(i)); slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK | CON_PFAULT_SERR_MASK); slot_reg &= ~SLOT_REG_RSVDZ_MASK; shpc_writel(ctrl, SLOT_REG(i), slot_reg); } cleanup_slots(ctrl); /* * Mask SERR and System Interrupt generation */ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE); serr_int |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK | COMMAND_INTR_MASK | ARBITER_SERR_MASK); serr_int &= ~SERR_INTR_RSVDZ_MASK; shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); if (shpchp_poll_mode) del_timer(&ctrl->poll_timer); else { free_irq(ctrl->pci_dev->irq, ctrl); pci_disable_msi(ctrl->pci_dev); } iounmap(ctrl->creg); release_mem_region(ctrl->mmio_base, ctrl->mmio_size); } static int hpc_power_on_slot(struct slot *slot) { int retval; retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR); if (retval) ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__); return retval; } static int hpc_slot_enable(struct slot *slot) { int retval; /* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */ retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF); if (retval) ctrl_err(slot->ctrl, 
"%s: Write command failed!\n", __func__); return retval; } static int hpc_slot_disable(struct slot *slot) { int retval; /* Slot - Disable, Power Indicator - Off, Attention Indicator - On */ retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON); if (retval) ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__); return retval; } static int shpc_get_cur_bus_speed(struct controller *ctrl) { int retval = 0; struct pci_bus *bus = ctrl->pci_dev->subordinate; enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); u8 pi = shpc_readb(ctrl, PROG_INTERFACE); u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); if ((pi == 1) && (speed_mode > 4)) { retval = -ENODEV; goto out; } switch (speed_mode) { case 0x0: bus_speed = PCI_SPEED_33MHz; break; case 0x1: bus_speed = PCI_SPEED_66MHz; break; case 0x2: bus_speed = PCI_SPEED_66MHz_PCIX; break; case 0x3: bus_speed = PCI_SPEED_100MHz_PCIX; break; case 0x4: bus_speed = PCI_SPEED_133MHz_PCIX; break; case 0x5: bus_speed = PCI_SPEED_66MHz_PCIX_ECC; break; case 0x6: bus_speed = PCI_SPEED_100MHz_PCIX_ECC; break; case 0x7: bus_speed = PCI_SPEED_133MHz_PCIX_ECC; break; case 0x8: bus_speed = PCI_SPEED_66MHz_PCIX_266; break; case 0x9: bus_speed = PCI_SPEED_100MHz_PCIX_266; break; case 0xa: bus_speed = PCI_SPEED_133MHz_PCIX_266; break; case 0xb: bus_speed = PCI_SPEED_66MHz_PCIX_533; break; case 0xc: bus_speed = PCI_SPEED_100MHz_PCIX_533; break; case 0xd: bus_speed = PCI_SPEED_133MHz_PCIX_533; break; default: retval = -ENODEV; break; } out: bus->cur_bus_speed = bus_speed; dbg("Current bus speed = %d\n", bus_speed); return retval; } static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value) { int retval; struct controller *ctrl = slot->ctrl; u8 pi, cmd; pi = shpc_readb(ctrl, PROG_INTERFACE); if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX)) return -EINVAL; switch (value) { case PCI_SPEED_33MHz: cmd = SETA_PCI_33MHZ; break; case PCI_SPEED_66MHz: cmd = SETA_PCI_66MHZ; break; case PCI_SPEED_66MHz_PCIX: cmd = SETA_PCIX_66MHZ; break; case PCI_SPEED_100MHz_PCIX: cmd = SETA_PCIX_100MHZ; break; case PCI_SPEED_133MHz_PCIX: cmd = SETA_PCIX_133MHZ; break; case PCI_SPEED_66MHz_PCIX_ECC: cmd = SETB_PCIX_66MHZ_EM; break; case PCI_SPEED_100MHz_PCIX_ECC: cmd = SETB_PCIX_100MHZ_EM; break; case PCI_SPEED_133MHz_PCIX_ECC: cmd = SETB_PCIX_133MHZ_EM; break; case PCI_SPEED_66MHz_PCIX_266: cmd = SETB_PCIX_66MHZ_266; break; case PCI_SPEED_100MHz_PCIX_266: cmd = SETB_PCIX_100MHZ_266; break; case PCI_SPEED_133MHz_PCIX_266: cmd = SETB_PCIX_133MHZ_266; break; case PCI_SPEED_66MHz_PCIX_533: cmd = SETB_PCIX_66MHZ_533; break; case PCI_SPEED_100MHz_PCIX_533: cmd = SETB_PCIX_100MHZ_533; break; case PCI_SPEED_133MHz_PCIX_533: cmd = SETB_PCIX_133MHZ_533; break; default: return -EINVAL; } retval = shpc_write_cmd(slot, 0, cmd); if (retval) ctrl_err(ctrl, "%s: Write command failed!\n", __func__); else shpc_get_cur_bus_speed(ctrl); return retval; } static irqreturn_t shpc_isr(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; u32 serr_int, slot_reg, intr_loc, intr_loc2; int hp_slot; /* Check to see if it was our interrupt */ intr_loc = shpc_readl(ctrl, INTR_LOC); if (!intr_loc) return IRQ_NONE; ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc); if (!shpchp_poll_mode) { /* * Mask Global Interrupt Mask - see implementation * note on p. 
139 of SHPC spec rev 1.0 */ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE); serr_int |= GLOBAL_INTR_MASK; serr_int &= ~SERR_INTR_RSVDZ_MASK; shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); intr_loc2 = shpc_readl(ctrl, INTR_LOC); ctrl_dbg(ctrl, "%s: intr_loc2 = %x\n", __func__, intr_loc2); } if (intr_loc & CMD_INTR_PENDING) { /* * Command Complete Interrupt Pending * RO only - clear by writing 1 to the Command Completion * Detect bit in Controller SERR-INT register */ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE); serr_int &= ~SERR_INTR_RSVDZ_MASK; shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); wake_up_interruptible(&ctrl->queue); } if (!(intr_loc & ~CMD_INTR_PENDING)) goto out; for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { /* To find out which slot has interrupt pending */ if (!(intr_loc & SLOT_INTR_PENDING(hp_slot))) continue; slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); ctrl_dbg(ctrl, "Slot %x with intr, slot register = %x\n", hp_slot, slot_reg); if (slot_reg & MRL_CHANGE_DETECTED) shpchp_handle_switch_change(hp_slot, ctrl); if (slot_reg & BUTTON_PRESS_DETECTED) shpchp_handle_attention_button(hp_slot, ctrl); if (slot_reg & PRSNT_CHANGE_DETECTED) shpchp_handle_presence_change(hp_slot, ctrl); if (slot_reg & (ISO_PFAULT_DETECTED | CON_PFAULT_DETECTED)) shpchp_handle_power_fault(hp_slot, ctrl); /* Clear all slot events */ slot_reg &= ~SLOT_REG_RSVDZ_MASK; shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg); } out: if (!shpchp_poll_mode) { /* Unmask Global Interrupt Mask */ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE); serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK); shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); } return IRQ_HANDLED; } static int shpc_get_max_bus_speed(struct controller *ctrl) { int retval = 0; struct pci_bus *bus = ctrl->pci_dev->subordinate; enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; u8 pi = shpc_readb(ctrl, PROG_INTERFACE); u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2); if (pi == 2) { if (slot_avail2 & SLOT_133MHZ_PCIX_533) bus_speed = PCI_SPEED_133MHz_PCIX_533; else if (slot_avail2 & SLOT_100MHZ_PCIX_533) bus_speed = PCI_SPEED_100MHz_PCIX_533; else if (slot_avail2 & SLOT_66MHZ_PCIX_533) bus_speed = PCI_SPEED_66MHz_PCIX_533; else if (slot_avail2 & SLOT_133MHZ_PCIX_266) bus_speed = PCI_SPEED_133MHz_PCIX_266; else if (slot_avail2 & SLOT_100MHZ_PCIX_266) bus_speed = PCI_SPEED_100MHz_PCIX_266; else if (slot_avail2 & SLOT_66MHZ_PCIX_266) bus_speed = PCI_SPEED_66MHz_PCIX_266; } if (bus_speed == PCI_SPEED_UNKNOWN) { if (slot_avail1 & SLOT_133MHZ_PCIX) bus_speed = PCI_SPEED_133MHz_PCIX; else if (slot_avail1 & SLOT_100MHZ_PCIX) bus_speed = PCI_SPEED_100MHz_PCIX; else if (slot_avail1 & SLOT_66MHZ_PCIX) bus_speed = PCI_SPEED_66MHz_PCIX; else if (slot_avail2 & SLOT_66MHZ) bus_speed = PCI_SPEED_66MHz; else if (slot_avail1 & SLOT_33MHZ) bus_speed = PCI_SPEED_33MHz; else retval = -ENODEV; } bus->max_bus_speed = bus_speed; ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); return retval; } static const struct hpc_ops shpchp_hpc_ops = { .power_on_slot = hpc_power_on_slot, .slot_enable = hpc_slot_enable, .slot_disable = hpc_slot_disable, .set_bus_speed_mode = hpc_set_bus_speed_mode, .set_attention_status = hpc_set_attention_status, .get_power_status = hpc_get_power_status, .get_attention_status = hpc_get_attention_status, .get_latch_status = hpc_get_latch_status, .get_adapter_status = hpc_get_adapter_status, .get_adapter_speed = hpc_get_adapter_speed, .get_prog_int = hpc_get_prog_int, .query_power_fault = 
hpc_query_power_fault, .green_led_on = hpc_set_green_led_on, .green_led_off = hpc_set_green_led_off, .green_led_blink = hpc_set_green_led_blink, .release_ctlr = hpc_release_ctlr, }; int shpc_init(struct controller *ctrl, struct pci_dev *pdev) { int rc = -1, num_slots = 0; u8 hp_slot; u32 shpc_base_offset; u32 tempdword, slot_reg, slot_config; u8 i; ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ ctrl_dbg(ctrl, "Hotplug Controller:\n"); if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) { /* amd shpc driver doesn't use Base Offset; assume 0 */ ctrl->mmio_base = pci_resource_start(pdev, 0); ctrl->mmio_size = pci_resource_len(pdev, 0); } else { ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC); if (!ctrl->cap_offset) { ctrl_err(ctrl, "Cannot find PCI capability\n"); goto abort; } ctrl_dbg(ctrl, " cap_offset = %x\n", ctrl->cap_offset); rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset); if (rc) { ctrl_err(ctrl, "Cannot read base_offset\n"); goto abort; } rc = shpc_indirect_read(ctrl, 3, &tempdword); if (rc) { ctrl_err(ctrl, "Cannot read slot config\n"); goto abort; } num_slots = tempdword & SLOT_NUM; ctrl_dbg(ctrl, " num_slots (indirect) %x\n", num_slots); for (i = 0; i < 9 + num_slots; i++) { rc = shpc_indirect_read(ctrl, i, &tempdword); if (rc) { ctrl_err(ctrl, "Cannot read creg (index = %d)\n", i); goto abort; } ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword); } ctrl->mmio_base = pci_resource_start(pdev, 0) + shpc_base_offset; ctrl->mmio_size = 0x24 + 0x4 * num_slots; } ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); rc = pci_enable_device(pdev); if (rc) { ctrl_err(ctrl, "pci_enable_device failed\n"); goto abort; } if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) { ctrl_err(ctrl, "Cannot reserve MMIO region\n"); rc = -1; goto abort; } ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size); if (!ctrl->creg) { ctrl_err(ctrl, "Cannot remap MMIO region %lx @ %lx\n", ctrl->mmio_size, ctrl->mmio_base); release_mem_region(ctrl->mmio_base, ctrl->mmio_size); rc = -1; goto abort; } ctrl_dbg(ctrl, "ctrl->creg %p\n", ctrl->creg); mutex_init(&ctrl->crit_sect); mutex_init(&ctrl->cmd_lock); /* Setup wait queue */ init_waitqueue_head(&ctrl->queue); ctrl->hpc_ops = &shpchp_hpc_ops; /* Return PCI Controller Info */ slot_config = shpc_readl(ctrl, SLOT_CONFIG); ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8; ctrl->num_slots = slot_config & SLOT_NUM; ctrl->first_slot = (slot_config & PSN) >> 16; ctrl->slot_num_inc = ((slot_config & UPDOWN) >> 29) ? 
1 : -1; /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword); tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK | COMMAND_INTR_MASK | ARBITER_SERR_MASK); tempdword &= ~SERR_INTR_RSVDZ_MASK; shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword); /* Mask the MRL sensor SERR Mask of individual slot in * Slot SERR-INT Mask & clear all the existing event if any */ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n", hp_slot, slot_reg); slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK | CON_PFAULT_SERR_MASK); slot_reg &= ~SLOT_REG_RSVDZ_MASK; shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg); } if (shpchp_poll_mode) { /* Install interrupt polling timer. Start with 10 sec delay */ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0); start_int_poll_timer(ctrl, 10); } else { /* Installs the interrupt handler */ rc = pci_enable_msi(pdev); if (rc) { ctrl_info(ctrl, "Can't get msi for the hotplug controller\n"); ctrl_info(ctrl, "Use INTx for the hotplug controller\n"); } else { pci_set_master(pdev); } rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, MY_NAME, (void *)ctrl); ctrl_dbg(ctrl, "request_irq %d (returns %d)\n", ctrl->pci_dev->irq, rc); if (rc) { ctrl_err(ctrl, "Can't get irq %d for the hotplug controller\n", ctrl->pci_dev->irq); goto abort_iounmap; } } ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); shpc_get_max_bus_speed(ctrl); shpc_get_cur_bus_speed(ctrl); /* * Unmask all event interrupts of all slots */ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n", hp_slot, slot_reg); slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK); shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg); } if (!shpchp_poll_mode) { /* Unmask all general input interrupts and SERR */ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); tempdword &= ~(GLOBAL_INTR_MASK | COMMAND_INTR_MASK | SERR_INTR_RSVDZ_MASK); shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword); } return 0; /* We end up here for the many possible ways to fail this API. */ abort_iounmap: iounmap(ctrl->creg); abort: return rc; }
linux-master
drivers/pci/hotplug/shpchp_hpc.c
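A standalone sketch that decodes one SHPC Logical Slot Register value the same way the hpc_get_*_status() helpers in shpchp_hpc.c above do. The masks and field encodings are copied from that file's definitions; the sample register value is made up.

#include <stdio.h>
#include <stdint.h>

#define SLOT_STATE_MASK		(3 << 0)
#define ATN_LED_STATE_SHIFT	4
#define ATN_LED_STATE_MASK	(3 << 4)
#define POWER_FAULT		(1 << 6)
#define MRL_SENSOR		(1 << 8)
#define PRSNT_SHIFT		10
#define PRSNT_MASK		(3 << 10)

static void decode_slot_reg(uint32_t slot_reg)
{
	static const char *slot_state[] = { "reserved", "power-only",
					    "enabled", "disabled" };
	static const char *led_state[]  = { "reserved", "on", "blink", "off" };

	printf("slot state  : %s\n", slot_state[slot_reg & SLOT_STATE_MASK]);
	printf("attn LED    : %s\n",
	       led_state[(slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT]);
	/* logic 0 in the POWER_FAULT bit means a fault is present */
	printf("power fault : %s\n", (slot_reg & POWER_FAULT) ? "no" : "yes");
	printf("MRL latch   : %s\n", (slot_reg & MRL_SENSOR) ? "open" : "closed");
	/* 0x3 in the present field means the slot is empty */
	printf("adapter     : %s\n",
	       ((slot_reg & PRSNT_MASK) >> PRSNT_SHIFT) != 0x3 ? "present" : "empty");
}

int main(void)
{
	decode_slot_reg(0x00000072);	/* hypothetical register snapshot */
	return 0;
}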
// SPDX-License-Identifier: GPL-2.0+ /* * RPA Virtual I/O device functions * Copyright (C) 2004 Linda Xie <[email protected]> * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sysfs.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/rtas.h> #include "rpaphp.h" /* free up the memory used by a slot */ void dealloc_slot_struct(struct slot *slot) { of_node_put(slot->dn); kfree(slot->name); kfree(slot); } struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain) { struct slot *slot; slot = kzalloc(sizeof(struct slot), GFP_KERNEL); if (!slot) goto error_nomem; slot->name = kstrdup(drc_name, GFP_KERNEL); if (!slot->name) goto error_slot; slot->dn = of_node_get(dn); slot->index = drc_index; slot->power_domain = power_domain; slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops; return (slot); error_slot: kfree(slot); error_nomem: return NULL; } static int is_registered(struct slot *slot) { struct slot *tmp_slot; list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) { if (!strcmp(tmp_slot->name, slot->name)) return 1; } return 0; } int rpaphp_deregister_slot(struct slot *slot) { int retval = 0; struct hotplug_slot *php_slot = &slot->hotplug_slot; dbg("%s - Entry: deregistering slot=%s\n", __func__, slot->name); list_del(&slot->rpaphp_slot_list); pci_hp_deregister(php_slot); dealloc_slot_struct(slot); dbg("%s - Exit: rc[%d]\n", __func__, retval); return retval; } EXPORT_SYMBOL_GPL(rpaphp_deregister_slot); int rpaphp_register_slot(struct slot *slot) { struct hotplug_slot *php_slot = &slot->hotplug_slot; struct device_node *child; u32 my_index; int retval; int slotno = -1; dbg("%s registering slot:path[%pOF] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn, slot->index, slot->name, slot->power_domain, slot->type); /* should not try to register the same slot twice */ if (is_registered(slot)) { err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name); return -EAGAIN; } for_each_child_of_node(slot->dn, child) { retval = of_property_read_u32(child, "ibm,my-drc-index", &my_index); if (my_index == slot->index) { slotno = PCI_SLOT(PCI_DN(child)->devfn); of_node_put(child); break; } } retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name); if (retval) { err("pci_hp_register failed with error %d\n", retval); return retval; } /* add slot to our internal list */ list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); info("Slot [%s] registered\n", slot->name); return 0; }
linux-master
drivers/pci/hotplug/rpaphp_slot.c
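A hedged sketch of how a caller such as the rpaphp core might use the helpers in rpaphp_slot.c above; it assumes the struct slot declaration from rpaphp.h, and the my_add_slot() name and its arguments are placeholders. Note that rpaphp_register_slot() expects slot->bus (and slot->type) to have been filled in, since it passes slot->bus to pci_hp_register().

static int my_add_slot(struct device_node *dn, int drc_index, char *drc_name,
		       int power_domain, struct pci_bus *bus)
{
	struct slot *slot;
	int rc;

	slot = alloc_slot_struct(dn, drc_index, drc_name, power_domain);
	if (!slot)
		return -ENOMEM;

	slot->bus = bus;		/* consumed by pci_hp_register() */

	rc = rpaphp_register_slot(slot);
	if (rc)
		dealloc_slot_struct(slot);	/* registration failed, free it */
	return rc;
}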
// SPDX-License-Identifier: GPL-2.0+ /* * IBM Hot Plug Controller Driver * * Written By: Irene Zubarev, IBM Corporation * * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001,2002 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/init.h> #include "ibmphp.h" static int flags = 0; /* for testing */ static void update_resources(struct bus_node *bus_cur, int type, int rangeno); static int once_over(void); static int remove_ranges(struct bus_node *, struct bus_node *); static int update_bridge_ranges(struct bus_node **); static int add_bus_range(int type, struct range_node *, struct bus_node *); static void fix_resources(struct bus_node *); static struct bus_node *find_bus_wprev(u8, struct bus_node **, u8); static LIST_HEAD(gbuses); static struct bus_node * __init alloc_error_bus(struct ebda_pci_rsrc *curr, u8 busno, int flag) { struct bus_node *newbus; if (!(curr) && !(flag)) { err("NULL pointer passed\n"); return NULL; } newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); if (!newbus) return NULL; if (flag) newbus->busno = busno; else newbus->busno = curr->bus_num; list_add_tail(&newbus->bus_list, &gbuses); return newbus; } static struct resource_node * __init alloc_resources(struct ebda_pci_rsrc *curr) { struct resource_node *rs; if (!curr) { err("NULL passed to allocate\n"); return NULL; } rs = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!rs) return NULL; rs->busno = curr->bus_num; rs->devfunc = curr->dev_fun; rs->start = curr->start_addr; rs->end = curr->end_addr; rs->len = curr->end_addr - curr->start_addr + 1; return rs; } static int __init alloc_bus_range(struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus) { struct bus_node *newbus; struct range_node *newrange; u8 num_ranges = 0; if (first_bus) { newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); if (!newbus) return -ENOMEM; newbus->busno = curr->bus_num; } else { newbus = *new_bus; switch (flag) { case MEM: num_ranges = newbus->noMemRanges; break; case PFMEM: num_ranges = newbus->noPFMemRanges; break; case IO: num_ranges = newbus->noIORanges; break; } } newrange = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!newrange) { if (first_bus) kfree(newbus); return -ENOMEM; } newrange->start = curr->start_addr; newrange->end = curr->end_addr; if (first_bus || (!num_ranges)) newrange->rangeno = 1; else { /* need to insert our range */ add_bus_range(flag, newrange, newbus); debug("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); } switch (flag) { case MEM: newbus->rangeMem = newrange; if (first_bus) newbus->noMemRanges = 1; else { debug("First Memory Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noMemRanges; fix_resources(newbus); } break; case IO: newbus->rangeIO = newrange; if (first_bus) newbus->noIORanges = 1; else { debug("First IO Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noIORanges; fix_resources(newbus); } break; case PFMEM: newbus->rangePFMem = newrange; if (first_bus) newbus->noPFMemRanges = 1; else { debug("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noPFMemRanges; fix_resources(newbus); } break; } *new_bus = newbus; *new_range = newrange; return 0; } /* Notes: * 1. 
The ranges are ordered. The buses are not ordered. (First come) * * 2. If cannot allocate out of PFMem range, allocate from Mem ranges. PFmemFromMem * are not sorted. (no need since use mem node). To not change the entire code, we * also add mem node whenever this case happens so as not to change * ibmphp_check_mem_resource etc(and since it really is taking Mem resource) */ /***************************************************************************** * This is the Resource Management initialization function. It will go through * the Resource list taken from EBDA and fill in this module's data structures * * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, * SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW * * Input: ptr to the head of the resource list from EBDA * Output: 0, -1 or error codes ***************************************************************************/ int __init ibmphp_rsrc_init(void) { struct ebda_pci_rsrc *curr; struct range_node *newrange = NULL; struct bus_node *newbus = NULL; struct bus_node *bus_cur; struct bus_node *bus_prev; struct resource_node *new_io = NULL; struct resource_node *new_mem = NULL; struct resource_node *new_pfmem = NULL; int rc; list_for_each_entry(curr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { if (!(curr->rsrc_type & PCIDEVMASK)) { /* EBDA still lists non PCI devices, so ignore... */ debug("this is not a PCI DEVICE in rsrc_init, please take care\n"); // continue; } /* this is a primary bus resource */ if (curr->rsrc_type & PRIMARYBUSMASK) { /* memory */ if ((curr->rsrc_type & RESTYPE) == MMASK) { /* no bus structure exists in place yet */ if (list_empty(&gbuses)) { rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1); if (rc) return rc; list_add_tail(&newbus->bus_list, &gbuses); debug("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } else { bus_cur = find_bus_wprev(curr->bus_num, &bus_prev, 1); /* found our bus */ if (bus_cur) { rc = alloc_bus_range(&bus_cur, &newrange, curr, MEM, 0); if (rc) return rc; } else { /* went through all the buses and didn't find ours, need to create a new bus node */ rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1); if (rc) return rc; list_add_tail(&newbus->bus_list, &gbuses); debug("New Bus, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } } } else if ((curr->rsrc_type & RESTYPE) == PFMASK) { /* prefetchable memory */ if (list_empty(&gbuses)) { /* no bus structure exists in place yet */ rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1); if (rc) return rc; list_add_tail(&newbus->bus_list, &gbuses); debug("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } else { bus_cur = find_bus_wprev(curr->bus_num, &bus_prev, 1); if (bus_cur) { /* found our bus */ rc = alloc_bus_range(&bus_cur, &newrange, curr, PFMEM, 0); if (rc) return rc; } else { /* went through all the buses and didn't find ours, need to create a new bus node */ rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1); if (rc) return rc; list_add_tail(&newbus->bus_list, &gbuses); debug("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } } } else if ((curr->rsrc_type & RESTYPE) == IOMASK) { /* IO */ if (list_empty(&gbuses)) { /* no bus structure exists in place yet */ rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1); if (rc) return rc; list_add_tail(&newbus->bus_list, &gbuses); debug("gbuses = NULL, 
IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } else { bus_cur = find_bus_wprev(curr->bus_num, &bus_prev, 1); if (bus_cur) { rc = alloc_bus_range(&bus_cur, &newrange, curr, IO, 0); if (rc) return rc; } else { /* went through all the buses and didn't find ours, need to create a new bus node */ rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1); if (rc) return rc; list_add_tail(&newbus->bus_list, &gbuses); debug("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } } } else { ; /* type is reserved WHAT TO DO IN THIS CASE??? NOTHING TO DO??? */ } } else { /* regular pci device resource */ if ((curr->rsrc_type & RESTYPE) == MMASK) { /* Memory resource */ new_mem = alloc_resources(curr); if (!new_mem) return -ENOMEM; new_mem->type = MEM; /* * if it didn't find the bus, means PCI dev * came b4 the Primary Bus info, so need to * create a bus rangeno becomes a problem... * assign a -1 and then update once the range * actually appears... */ if (ibmphp_add_resource(new_mem) < 0) { newbus = alloc_error_bus(curr, 0, 0); if (!newbus) return -ENOMEM; newbus->firstMem = new_mem; ++newbus->needMemUpdate; new_mem->rangeno = -1; } debug("Memory resource for device %x, bus %x, [%x - %x]\n", new_mem->devfunc, new_mem->busno, new_mem->start, new_mem->end); } else if ((curr->rsrc_type & RESTYPE) == PFMASK) { /* PFMemory resource */ new_pfmem = alloc_resources(curr); if (!new_pfmem) return -ENOMEM; new_pfmem->type = PFMEM; new_pfmem->fromMem = 0; if (ibmphp_add_resource(new_pfmem) < 0) { newbus = alloc_error_bus(curr, 0, 0); if (!newbus) return -ENOMEM; newbus->firstPFMem = new_pfmem; ++newbus->needPFMemUpdate; new_pfmem->rangeno = -1; } debug("PFMemory resource for device %x, bus %x, [%x - %x]\n", new_pfmem->devfunc, new_pfmem->busno, new_pfmem->start, new_pfmem->end); } else if ((curr->rsrc_type & RESTYPE) == IOMASK) { /* IO resource */ new_io = alloc_resources(curr); if (!new_io) return -ENOMEM; new_io->type = IO; /* * if it didn't find the bus, means PCI dev * came b4 the Primary Bus info, so need to * create a bus rangeno becomes a problem... * Can assign a -1 and then update once the * range actually appears... 
*/ if (ibmphp_add_resource(new_io) < 0) { newbus = alloc_error_bus(curr, 0, 0); if (!newbus) return -ENOMEM; newbus->firstIO = new_io; ++newbus->needIOUpdate; new_io->rangeno = -1; } debug("IO resource for device %x, bus %x, [%x - %x]\n", new_io->devfunc, new_io->busno, new_io->start, new_io->end); } } } list_for_each_entry(bus_cur, &gbuses, bus_list) { /* This is to get info about PPB resources, since EBDA doesn't put this info into the primary bus info */ rc = update_bridge_ranges(&bus_cur); if (rc) return rc; } return once_over(); /* This is to align ranges (so no -1) */ } /******************************************************************************** * This function adds a range into a sorted list of ranges per bus for a particular * range type, it then calls another routine to update the range numbers on the * pci devices' resources for the appropriate resource * * Input: type of the resource, range to add, current bus * Output: 0 or -1, bus and range ptrs ********************************************************************************/ static int add_bus_range(int type, struct range_node *range, struct bus_node *bus_cur) { struct range_node *range_cur = NULL; struct range_node *range_prev; int count = 0, i_init; int noRanges = 0; switch (type) { case MEM: range_cur = bus_cur->rangeMem; noRanges = bus_cur->noMemRanges; break; case PFMEM: range_cur = bus_cur->rangePFMem; noRanges = bus_cur->noPFMemRanges; break; case IO: range_cur = bus_cur->rangeIO; noRanges = bus_cur->noIORanges; break; } range_prev = NULL; while (range_cur) { if (range->start < range_cur->start) break; range_prev = range_cur; range_cur = range_cur->next; count = count + 1; } if (!count) { /* our range will go at the beginning of the list */ switch (type) { case MEM: bus_cur->rangeMem = range; break; case PFMEM: bus_cur->rangePFMem = range; break; case IO: bus_cur->rangeIO = range; break; } range->next = range_cur; range->rangeno = 1; i_init = 0; } else if (!range_cur) { /* our range will go at the end of the list */ range->next = NULL; range_prev->next = range; range->rangeno = range_prev->rangeno + 1; return 0; } else { /* the range is in the middle */ range_prev->next = range; range->next = range_cur; range->rangeno = range_cur->rangeno; i_init = range_prev->rangeno; } for (count = i_init; count < noRanges; ++count) { ++range_cur->rangeno; range_cur = range_cur->next; } update_resources(bus_cur, type, i_init + 1); return 0; } /******************************************************************************* * This routine goes through the list of resources of type 'type' and updates * the range numbers that they correspond to. 
It was called from add_bus_range fnc * * Input: bus, type of the resource, the rangeno starting from which to update ******************************************************************************/ static void update_resources(struct bus_node *bus_cur, int type, int rangeno) { struct resource_node *res = NULL; u8 eol = 0; /* end of list indicator */ switch (type) { case MEM: if (bus_cur->firstMem) res = bus_cur->firstMem; break; case PFMEM: if (bus_cur->firstPFMem) res = bus_cur->firstPFMem; break; case IO: if (bus_cur->firstIO) res = bus_cur->firstIO; break; } if (res) { while (res) { if (res->rangeno == rangeno) break; if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else { eol = 1; break; } } if (!eol) { /* found the range */ while (res) { ++res->rangeno; res = res->next; } } } } static void fix_me(struct resource_node *res, struct bus_node *bus_cur, struct range_node *range) { char *str = ""; switch (res->type) { case IO: str = "io"; break; case MEM: str = "mem"; break; case PFMEM: str = "pfmem"; break; } while (res) { if (res->rangeno == -1) { while (range) { if ((res->start >= range->start) && (res->end <= range->end)) { res->rangeno = range->rangeno; debug("%s->rangeno in fix_resources is %d\n", str, res->rangeno); switch (res->type) { case IO: --bus_cur->needIOUpdate; break; case MEM: --bus_cur->needMemUpdate; break; case PFMEM: --bus_cur->needPFMemUpdate; break; } break; } range = range->next; } } if (res->next) res = res->next; else res = res->nextRange; } } /***************************************************************************** * This routine reassigns the range numbers to the resources that had a -1 * This case can happen only if upon initialization, resources taken by pci dev * appear in EBDA before the resources allocated for that bus, since we don't * know the range, we assign -1, and this routine is called after a new range * is assigned to see the resources with unknown range belong to the added range * * Input: current bus * Output: none, list of resources for that bus are fixed if can be *******************************************************************************/ static void fix_resources(struct bus_node *bus_cur) { struct range_node *range; struct resource_node *res; debug("%s - bus_cur->busno = %d\n", __func__, bus_cur->busno); if (bus_cur->needIOUpdate) { res = bus_cur->firstIO; range = bus_cur->rangeIO; fix_me(res, bus_cur, range); } if (bus_cur->needMemUpdate) { res = bus_cur->firstMem; range = bus_cur->rangeMem; fix_me(res, bus_cur, range); } if (bus_cur->needPFMemUpdate) { res = bus_cur->firstPFMem; range = bus_cur->rangePFMem; fix_me(res, bus_cur, range); } } /******************************************************************************* * This routine adds a resource to the list of resources to the appropriate bus * based on their resource type and sorted by their starting addresses. It assigns * the ptrs to next and nextRange if needed. * * Input: resource ptr * Output: ptrs assigned (to the node) * 0 or -1 *******************************************************************************/ int ibmphp_add_resource(struct resource_node *res) { struct resource_node *res_cur; struct resource_node *res_prev; struct bus_node *bus_cur; struct range_node *range_cur = NULL; struct resource_node *res_start = NULL; debug("%s - enter\n", __func__); if (!res) { err("NULL passed to add\n"); return -ENODEV; } bus_cur = find_bus_wprev(res->busno, NULL, 0); if (!bus_cur) { /* didn't find a bus, something's wrong!!! 
*/ debug("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -ENODEV; } /* Normal case */ switch (res->type) { case IO: range_cur = bus_cur->rangeIO; res_start = bus_cur->firstIO; break; case MEM: range_cur = bus_cur->rangeMem; res_start = bus_cur->firstMem; break; case PFMEM: range_cur = bus_cur->rangePFMem; res_start = bus_cur->firstPFMem; break; default: err("cannot read the type of the resource to add... problem\n"); return -EINVAL; } while (range_cur) { if ((res->start >= range_cur->start) && (res->end <= range_cur->end)) { res->rangeno = range_cur->rangeno; break; } range_cur = range_cur->next; } /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! * this is again the case of rangeno = -1 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */ if (!range_cur) { switch (res->type) { case IO: ++bus_cur->needIOUpdate; break; case MEM: ++bus_cur->needMemUpdate; break; case PFMEM: ++bus_cur->needPFMemUpdate; break; } res->rangeno = -1; } debug("The range is %d\n", res->rangeno); if (!res_start) { /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */ switch (res->type) { case IO: bus_cur->firstIO = res; break; case MEM: bus_cur->firstMem = res; break; case PFMEM: bus_cur->firstPFMem = res; break; } res->next = NULL; res->nextRange = NULL; } else { res_cur = res_start; res_prev = NULL; debug("res_cur->rangeno is %d\n", res_cur->rangeno); while (res_cur) { if (res_cur->rangeno >= res->rangeno) break; res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { /* at the end of the resource list */ debug("i should be here, [%x - %x]\n", res->start, res->end); res_prev->nextRange = res; res->next = NULL; res->nextRange = NULL; } else if (res_cur->rangeno == res->rangeno) { /* in the same range */ while (res_cur) { if (res->start < res_cur->start) break; res_prev = res_cur; res_cur = res_cur->next; } if (!res_cur) { /* the last resource in this range */ res_prev->next = res; res->next = NULL; res->nextRange = res_prev->nextRange; res_prev->nextRange = NULL; } else if (res->start < res_cur->start) { /* at the beginning or middle of the range */ if (!res_prev) { switch (res->type) { case IO: bus_cur->firstIO = res; break; case MEM: bus_cur->firstMem = res; break; case PFMEM: bus_cur->firstPFMem = res; break; } } else if (res_prev->rangeno == res_cur->rangeno) res_prev->next = res; else res_prev->nextRange = res; res->next = res_cur; res->nextRange = NULL; } } else { /* this is the case where it is 1st occurrence of the range */ if (!res_prev) { /* at the beginning of the resource list */ res->next = NULL; switch (res->type) { case IO: res->nextRange = bus_cur->firstIO; bus_cur->firstIO = res; break; case MEM: res->nextRange = bus_cur->firstMem; bus_cur->firstMem = res; break; case PFMEM: res->nextRange = bus_cur->firstPFMem; bus_cur->firstPFMem = res; break; } } else if (res_cur->rangeno > res->rangeno) { /* in the middle of the resource list */ res_prev->nextRange = res; res->next = NULL; res->nextRange = res_cur; } } } debug("%s - exit\n", __func__); return 0; } /**************************************************************************** * This routine will remove the resource from the list of resources * * Input: io, mem, and/or pfmem resource to be deleted * Output: modified resource list * 0 or error code ****************************************************************************/ int ibmphp_remove_resource(struct resource_node *res) { struct bus_node 
*bus_cur; struct resource_node *res_cur = NULL; struct resource_node *res_prev; struct resource_node *mem_cur; char *type = ""; if (!res) { err("resource to remove is NULL\n"); return -ENODEV; } bus_cur = find_bus_wprev(res->busno, NULL, 0); if (!bus_cur) { err("cannot find corresponding bus of the io resource to remove bailing out...\n"); return -ENODEV; } switch (res->type) { case IO: res_cur = bus_cur->firstIO; type = "io"; break; case MEM: res_cur = bus_cur->firstMem; type = "mem"; break; case PFMEM: res_cur = bus_cur->firstPFMem; type = "pfmem"; break; default: err("unknown type for resource to remove\n"); return -EINVAL; } res_prev = NULL; while (res_cur) { if ((res_cur->start == res->start) && (res_cur->end == res->end)) break; res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { if (res->type == PFMEM) { /* * case where pfmem might be in the PFMemFromMem list * so will also need to remove the corresponding mem * entry */ res_cur = bus_cur->firstPFMemFromMem; res_prev = NULL; while (res_cur) { if ((res_cur->start == res->start) && (res_cur->end == res->end)) { mem_cur = bus_cur->firstMem; while (mem_cur) { if ((mem_cur->start == res_cur->start) && (mem_cur->end == res_cur->end)) break; if (mem_cur->next) mem_cur = mem_cur->next; else mem_cur = mem_cur->nextRange; } if (!mem_cur) { err("cannot find corresponding mem node for pfmem...\n"); return -EINVAL; } ibmphp_remove_resource(mem_cur); if (!res_prev) bus_cur->firstPFMemFromMem = res_cur->next; else res_prev->next = res_cur->next; kfree(res_cur); return 0; } res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { err("cannot find pfmem to delete...\n"); return -EINVAL; } } else { err("the %s resource is not in the list to be deleted...\n", type); return -EINVAL; } } if (!res_prev) { /* first device to be deleted */ if (res_cur->next) { switch (res->type) { case IO: bus_cur->firstIO = res_cur->next; break; case MEM: bus_cur->firstMem = res_cur->next; break; case PFMEM: bus_cur->firstPFMem = res_cur->next; break; } } else if (res_cur->nextRange) { switch (res->type) { case IO: bus_cur->firstIO = res_cur->nextRange; break; case MEM: bus_cur->firstMem = res_cur->nextRange; break; case PFMEM: bus_cur->firstPFMem = res_cur->nextRange; break; } } else { switch (res->type) { case IO: bus_cur->firstIO = NULL; break; case MEM: bus_cur->firstMem = NULL; break; case PFMEM: bus_cur->firstPFMem = NULL; break; } } kfree(res_cur); return 0; } else { if (res_cur->next) { if (res_prev->rangeno == res_cur->rangeno) res_prev->next = res_cur->next; else res_prev->nextRange = res_cur->next; } else if (res_cur->nextRange) { res_prev->next = NULL; res_prev->nextRange = res_cur->nextRange; } else { res_prev->next = NULL; res_prev->nextRange = NULL; } kfree(res_cur); return 0; } return 0; } static struct range_node *find_range(struct bus_node *bus_cur, struct resource_node *res) { struct range_node *range = NULL; switch (res->type) { case IO: range = bus_cur->rangeIO; break; case MEM: range = bus_cur->rangeMem; break; case PFMEM: range = bus_cur->rangePFMem; break; default: err("cannot read resource type in find_range\n"); } while (range) { if (res->rangeno == range->rangeno) break; range = range->next; } return range; } /***************************************************************************** * This routine will check to make sure the io/mem/pfmem->len that the device asked for * can fit w/i our list of available IO/MEM/PFMEM 
resources. If cannot, returns -EINVAL, * otherwise, returns 0 * * Input: resource * Output: the correct start and end address are inputted into the resource node, * 0 or -EINVAL *****************************************************************************/ int ibmphp_check_resource(struct resource_node *res, u8 bridge) { struct bus_node *bus_cur; struct range_node *range = NULL; struct resource_node *res_prev; struct resource_node *res_cur = NULL; u32 len_cur = 0, start_cur = 0, len_tmp = 0; int noranges = 0; u32 tmp_start; /* this is to make sure start address is divisible by the length needed */ u32 tmp_divide; u8 flag = 0; if (!res) return -EINVAL; if (bridge) { /* The rules for bridges are different, 4K divisible for IO, 1M for (pf)mem*/ if (res->type == IO) tmp_divide = IOBRIDGE; else tmp_divide = MEMBRIDGE; } else tmp_divide = res->len; bus_cur = find_bus_wprev(res->busno, NULL, 0); if (!bus_cur) { /* didn't find a bus, something's wrong!!! */ debug("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -EINVAL; } debug("%s - enter\n", __func__); debug("bus_cur->busno is %d\n", bus_cur->busno); /* This is a quick fix to not mess up with the code very much. i.e., * 2000-2fff, len = 1000, but when we compare, we need it to be fff */ res->len -= 1; switch (res->type) { case IO: res_cur = bus_cur->firstIO; noranges = bus_cur->noIORanges; break; case MEM: res_cur = bus_cur->firstMem; noranges = bus_cur->noMemRanges; break; case PFMEM: res_cur = bus_cur->firstPFMem; noranges = bus_cur->noPFMemRanges; break; default: err("wrong type of resource to check\n"); return -EINVAL; } res_prev = NULL; while (res_cur) { range = find_range(bus_cur, res_cur); debug("%s - rangeno = %d\n", __func__, res_cur->rangeno); if (!range) { err("no range for the device exists... 
bailing out...\n"); return -EINVAL; } /* found our range */ if (!res_prev) { /* first time in the loop */ len_tmp = res_cur->start - 1 - range->start; if ((res_cur->start != range->start) && (len_tmp >= res->len)) { debug("len_tmp = %x\n", len_tmp); if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= res_cur->start - 1) break; } } if (flag && len_cur == res->len) { debug("but we are not here, right?\n"); res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } if (!res_cur->next) { /* last device on the range */ len_tmp = range->end - (res_cur->end + 1); if ((range->end != res_cur->end) && (len_tmp >= res->len)) { debug("len_tmp = %x\n", len_tmp); if ((len_tmp < len_cur) || (len_cur == 0)) { if (((res_cur->end + 1) % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; start_cur = res_cur->end + 1; } else { /* Needs adjusting */ tmp_start = res_cur->end + 1; flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= range->end) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } if (res_prev) { if (res_prev->rangeno != res_cur->rangeno) { /* 1st device on this range */ len_tmp = res_cur->start - 1 - range->start; if ((res_cur->start != range->start) && (len_tmp >= res->len)) { if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= res_cur->start - 1) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } else { /* in the same range */ len_tmp = res_cur->start - 1 - res_prev->end - 1; if (len_tmp >= res->len) { if ((len_tmp < len_cur) || (len_cur == 0)) { if (((res_prev->end + 1) % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ flag = 1; len_cur = len_tmp; start_cur = res_prev->end + 1; } else { /* Needs adjusting */ tmp_start = res_prev->end + 1; flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= res_cur->start - 1) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } } 
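/*
 * The three checks above, applied to every resource as the loop walks the
 * list, measure the free gap before the first resource of a range, between
 * consecutive resources of the same range, and after the last resource.
 * A gap is a candidate only if it can hold res->len and its start can be
 * rounded up to a multiple of tmp_divide (the required alignment); the
 * best (smallest) candidate so far is kept in start_cur/len_cur, and an
 * exact fit returns immediately.
 */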
/* end if (res_prev) */ res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } /* end of while */ if (!res_prev) { /* 1st device ever */ /* need to find appropriate range */ switch (res->type) { case IO: range = bus_cur->rangeIO; break; case MEM: range = bus_cur->rangeMem; break; case PFMEM: range = bus_cur->rangePFMem; break; } while (range) { len_tmp = range->end - range->start; if (len_tmp >= res->len) { if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= range->end) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } range = range->next; } /* end of while */ if ((!range) && (len_cur == 0)) { /* have gone through the list of devices and ranges and haven't found n.e.thing */ err("no appropriate range.. bailing out...\n"); return -EINVAL; } else if (len_cur) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } if (!res_cur) { debug("prev->rangeno = %d, noranges = %d\n", res_prev->rangeno, noranges); if (res_prev->rangeno < noranges) { /* if there're more ranges out there to check */ switch (res->type) { case IO: range = bus_cur->rangeIO; break; case MEM: range = bus_cur->rangeMem; break; case PFMEM: range = bus_cur->rangePFMem; break; } while (range) { len_tmp = range->end - range->start; if (len_tmp >= res->len) { if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= range->end) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } range = range->next; } /* end of while */ if ((!range) && (len_cur == 0)) { /* have gone through the list of devices and ranges and haven't found n.e.thing */ err("no appropriate range.. bailing out...\n"); return -EINVAL; } else if (len_cur) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } else { /* no more ranges to check on */ if (len_cur) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } else { /* have gone through the list of devices and haven't found n.e.thing */ err("no appropriate range.. bailing out...\n"); return -EINVAL; } } } /* end if (!res_cur) */ return -EINVAL; } /******************************************************************************** * This routine is called from remove_card if the card contained PPB. 
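* (PPB = PCI-to-PCI bridge, i.e. the removed card brought a secondary bus with it.)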
* It will remove all the resources on the bus as well as the bus itself * Input: Bus * Output: 0, -ENODEV ********************************************************************************/ int ibmphp_remove_bus(struct bus_node *bus, u8 parent_busno) { struct resource_node *res_cur; struct resource_node *res_tmp; struct bus_node *prev_bus; int rc; prev_bus = find_bus_wprev(parent_busno, NULL, 0); if (!prev_bus) { debug("something terribly wrong. Cannot find parent bus to the one to remove\n"); return -ENODEV; } debug("In ibmphp_remove_bus... prev_bus->busno is %x\n", prev_bus->busno); rc = remove_ranges(bus, prev_bus); if (rc) return rc; if (bus->firstIO) { res_cur = bus->firstIO; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree(res_tmp); res_tmp = NULL; } bus->firstIO = NULL; } if (bus->firstMem) { res_cur = bus->firstMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree(res_tmp); res_tmp = NULL; } bus->firstMem = NULL; } if (bus->firstPFMem) { res_cur = bus->firstPFMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree(res_tmp); res_tmp = NULL; } bus->firstPFMem = NULL; } if (bus->firstPFMemFromMem) { res_cur = bus->firstPFMemFromMem; while (res_cur) { res_tmp = res_cur; res_cur = res_cur->next; kfree(res_tmp); res_tmp = NULL; } bus->firstPFMemFromMem = NULL; } list_del(&bus->bus_list); kfree(bus); return 0; } /****************************************************************************** * This routine deletes the ranges from a given bus, and the entries from the * parent's bus in the resources * Input: current bus, previous bus * Output: 0, -EINVAL ******************************************************************************/ static int remove_ranges(struct bus_node *bus_cur, struct bus_node *bus_prev) { struct range_node *range_cur; struct range_node *range_tmp; int i; struct resource_node *res = NULL; if (bus_cur->noIORanges) { range_cur = bus_cur->rangeIO; for (i = 0; i < bus_cur->noIORanges; i++) { if (ibmphp_find_resource(bus_prev, range_cur->start, &res, IO) < 0) return -EINVAL; ibmphp_remove_resource(res); range_tmp = range_cur; range_cur = range_cur->next; kfree(range_tmp); range_tmp = NULL; } bus_cur->rangeIO = NULL; } if (bus_cur->noMemRanges) { range_cur = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { if (ibmphp_find_resource(bus_prev, range_cur->start, &res, MEM) < 0) return -EINVAL; ibmphp_remove_resource(res); range_tmp = range_cur; range_cur = range_cur->next; kfree(range_tmp); range_tmp = NULL; } bus_cur->rangeMem = NULL; } if (bus_cur->noPFMemRanges) { range_cur = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { if (ibmphp_find_resource(bus_prev, range_cur->start, &res, PFMEM) < 0) return -EINVAL; ibmphp_remove_resource(res); range_tmp = range_cur; range_cur = range_cur->next; kfree(range_tmp); range_tmp = NULL; } bus_cur->rangePFMem = NULL; } return 0; } /* * find the resource node in the bus * Input: Resource needed, start address of the resource, type of resource */ int ibmphp_find_resource(struct bus_node *bus, u32 start_address, struct resource_node **res, int flag) { struct resource_node *res_cur = NULL; char *type = ""; if (!bus) { err("The bus passed in NULL to find resource\n"); return -ENODEV; } switch (flag) { case IO: res_cur = bus->firstIO; type = "io"; break; case MEM: res_cur = bus->firstMem; type = 
"mem"; break; case PFMEM: res_cur = bus->firstPFMem; type = "pfmem"; break; default: err("wrong type of flag\n"); return -EINVAL; } while (res_cur) { if (res_cur->start == start_address) { *res = res_cur; break; } if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { if (flag == PFMEM) { res_cur = bus->firstPFMemFromMem; while (res_cur) { if (res_cur->start == start_address) { *res = res_cur; break; } res_cur = res_cur->next; } if (!res_cur) { debug("SOS...cannot find %s resource in the bus.\n", type); return -EINVAL; } } else { debug("SOS... cannot find %s resource in the bus.\n", type); return -EINVAL; } } if (*res) debug("*res->start = %x\n", (*res)->start); return 0; } /*********************************************************************** * This routine will free the resource structures used by the * system. It is called from cleanup routine for the module * Parameters: none * Returns: none ***********************************************************************/ void ibmphp_free_resources(void) { struct bus_node *bus_cur = NULL, *next; struct bus_node *bus_tmp; struct range_node *range_cur; struct range_node *range_tmp; struct resource_node *res_cur; struct resource_node *res_tmp; int i = 0; flags = 1; list_for_each_entry_safe(bus_cur, next, &gbuses, bus_list) { if (bus_cur->noIORanges) { range_cur = bus_cur->rangeIO; for (i = 0; i < bus_cur->noIORanges; i++) { if (!range_cur) break; range_tmp = range_cur; range_cur = range_cur->next; kfree(range_tmp); range_tmp = NULL; } } if (bus_cur->noMemRanges) { range_cur = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { if (!range_cur) break; range_tmp = range_cur; range_cur = range_cur->next; kfree(range_tmp); range_tmp = NULL; } } if (bus_cur->noPFMemRanges) { range_cur = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { if (!range_cur) break; range_tmp = range_cur; range_cur = range_cur->next; kfree(range_tmp); range_tmp = NULL; } } if (bus_cur->firstIO) { res_cur = bus_cur->firstIO; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree(res_tmp); res_tmp = NULL; } bus_cur->firstIO = NULL; } if (bus_cur->firstMem) { res_cur = bus_cur->firstMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree(res_tmp); res_tmp = NULL; } bus_cur->firstMem = NULL; } if (bus_cur->firstPFMem) { res_cur = bus_cur->firstPFMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree(res_tmp); res_tmp = NULL; } bus_cur->firstPFMem = NULL; } if (bus_cur->firstPFMemFromMem) { res_cur = bus_cur->firstPFMemFromMem; while (res_cur) { res_tmp = res_cur; res_cur = res_cur->next; kfree(res_tmp); res_tmp = NULL; } bus_cur->firstPFMemFromMem = NULL; } bus_tmp = bus_cur; list_del(&bus_cur->bus_list); kfree(bus_tmp); bus_tmp = NULL; } } /********************************************************************************* * This function will go over the PFmem resources to check if the EBDA allocated * pfmem out of memory buckets of the bus. If so, it will change the range numbers * and a flag to indicate that this resource is out of memory. 
It will also move the * Pfmem out of the pfmem resource list to the PFMemFromMem list, and will create * a new Mem node * This routine is called right after initialization *******************************************************************************/ static int __init once_over(void) { struct resource_node *pfmem_cur; struct resource_node *pfmem_prev; struct resource_node *mem; struct bus_node *bus_cur; list_for_each_entry(bus_cur, &gbuses, bus_list) { if ((!bus_cur->rangePFMem) && (bus_cur->firstPFMem)) { for (pfmem_cur = bus_cur->firstPFMem, pfmem_prev = NULL; pfmem_cur; pfmem_prev = pfmem_cur, pfmem_cur = pfmem_cur->next) { pfmem_cur->fromMem = 1; if (pfmem_prev) pfmem_prev->next = pfmem_cur->next; else bus_cur->firstPFMem = pfmem_cur->next; if (!bus_cur->firstPFMemFromMem) pfmem_cur->next = NULL; else /* we don't need to sort PFMemFromMem since we're using mem node for all the real work anyways, so just insert at the beginning of the list */ pfmem_cur->next = bus_cur->firstPFMemFromMem; bus_cur->firstPFMemFromMem = pfmem_cur; mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem) return -ENOMEM; mem->type = MEM; mem->busno = pfmem_cur->busno; mem->devfunc = pfmem_cur->devfunc; mem->start = pfmem_cur->start; mem->end = pfmem_cur->end; mem->len = pfmem_cur->len; if (ibmphp_add_resource(mem) < 0) err("Trouble...trouble... EBDA allocated pfmem from mem, but system doesn't display it has this space... unless not PCI device...\n"); pfmem_cur->rangeno = mem->rangeno; } /* end for pfmem */ } /* end if */ } /* end list_for_each bus */ return 0; } int ibmphp_add_pfmem_from_mem(struct resource_node *pfmem) { struct bus_node *bus_cur = find_bus_wprev(pfmem->busno, NULL, 0); if (!bus_cur) { err("cannot find bus of pfmem to add...\n"); return -ENODEV; } if (bus_cur->firstPFMemFromMem) pfmem->next = bus_cur->firstPFMemFromMem; else pfmem->next = NULL; bus_cur->firstPFMemFromMem = pfmem; return 0; } /* This routine just goes through the buses to see if the bus already exists. * It is called from ibmphp_find_sec_number, to find out a secondary bus number for * bridged cards * Parameters: bus_number * Returns: Bus pointer or NULL */ struct bus_node *ibmphp_find_res_bus(u8 bus_number) { return find_bus_wprev(bus_number, NULL, 0); } static struct bus_node *find_bus_wprev(u8 bus_number, struct bus_node **prev, u8 flag) { struct bus_node *bus_cur; list_for_each_entry(bus_cur, &gbuses, bus_list) { if (flag) *prev = list_prev_entry(bus_cur, bus_list); if (bus_cur->busno == bus_number) return bus_cur; } return NULL; } void ibmphp_print_test(void) { int i = 0; struct bus_node *bus_cur = NULL; struct range_node *range; struct resource_node *res; debug_pci("*****************START**********************\n"); if ((!list_empty(&gbuses)) && flags) { err("The GBUSES is not NULL?!?!?!?!?\n"); return; } list_for_each_entry(bus_cur, &gbuses, bus_list) { debug_pci ("This is bus # %d. 
There are\n", bus_cur->busno); debug_pci ("IORanges = %d\t", bus_cur->noIORanges); debug_pci ("MemRanges = %d\t", bus_cur->noMemRanges); debug_pci ("PFMemRanges = %d\n", bus_cur->noPFMemRanges); debug_pci ("The IO Ranges are as follows:\n"); if (bus_cur->rangeIO) { range = bus_cur->rangeIO; for (i = 0; i < bus_cur->noIORanges; i++) { debug_pci("rangeno is %d\n", range->rangeno); debug_pci("[%x - %x]\n", range->start, range->end); range = range->next; } } debug_pci("The Mem Ranges are as follows:\n"); if (bus_cur->rangeMem) { range = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { debug_pci("rangeno is %d\n", range->rangeno); debug_pci("[%x - %x]\n", range->start, range->end); range = range->next; } } debug_pci("The PFMem Ranges are as follows:\n"); if (bus_cur->rangePFMem) { range = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { debug_pci("rangeno is %d\n", range->rangeno); debug_pci("[%x - %x]\n", range->start, range->end); range = range->next; } } debug_pci("The resources on this bus are as follows\n"); debug_pci("IO...\n"); if (bus_cur->firstIO) { res = bus_cur->firstIO; while (res) { debug_pci("The range # is %d\n", res->rangeno); debug_pci("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else break; } } debug_pci("Mem...\n"); if (bus_cur->firstMem) { res = bus_cur->firstMem; while (res) { debug_pci("The range # is %d\n", res->rangeno); debug_pci("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else break; } } debug_pci("PFMem...\n"); if (bus_cur->firstPFMem) { res = bus_cur->firstPFMem; while (res) { debug_pci("The range # is %d\n", res->rangeno); debug_pci("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else break; } } debug_pci("PFMemFromMem...\n"); if (bus_cur->firstPFMemFromMem) { res = bus_cur->firstPFMemFromMem; while (res) { debug_pci("The range # is %d\n", res->rangeno); debug_pci("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci("[%x - %x], len=%x\n", res->start, res->end, res->len); res = res->next; } } } debug_pci("***********************END***********************\n"); } static int range_exists_already(struct range_node *range, struct bus_node *bus_cur, u8 type) { struct range_node *range_cur = NULL; switch (type) { case IO: range_cur = bus_cur->rangeIO; break; case MEM: range_cur = bus_cur->rangeMem; break; case PFMEM: range_cur = bus_cur->rangePFMem; break; default: err("wrong type passed to find out if range already exists\n"); return -ENODEV; } while (range_cur) { if ((range_cur->start == range->start) && (range_cur->end == range->end)) return 1; range_cur = range_cur->next; } return 0; } /* This routine will read the windows for any PPB we have and update the * range info for the secondary bus, and will also input this info into * primary bus, since BIOS doesn't. This is for PPB that are in the system * on bootup. 
For bridged cards that were added during previous load of the * driver, only the ranges and the bus structure are added, the devices are * added from NVRAM * Input: primary busno * Returns: none * Note: this function doesn't take into account IO restrictions etc, * so will only work for bridges with no video/ISA devices behind them It * also will not work for onboard PPBs that can have more than 1 *bus * behind them All these are TO DO. * Also need to add more error checkings... (from fnc returns etc) */ static int __init update_bridge_ranges(struct bus_node **bus) { u8 sec_busno, device, function, hdr_type, start_io_address, end_io_address; u16 vendor_id, upper_io_start, upper_io_end, start_mem_address, end_mem_address; u32 start_address, end_address, upper_start, upper_end; struct bus_node *bus_sec; struct bus_node *bus_cur; struct resource_node *io; struct resource_node *mem; struct resource_node *pfmem; struct range_node *range; unsigned int devfn; bus_cur = *bus; if (!bus_cur) return -ENODEV; ibmphp_pci_bus->number = bus_cur->busno; debug("inside %s\n", __func__); debug("bus_cur->busno = %x\n", bus_cur->busno); for (device = 0; device < 32; device++) { for (function = 0x00; function < 0x08; function++) { devfn = PCI_DEVFN(device, function); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id); if (vendor_id != PCI_VENDOR_ID_NOTVALID) { /* found correct device!!! */ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type); switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: function = 0x8; break; case PCI_HEADER_TYPE_MULTIDEVICE: break; case PCI_HEADER_TYPE_BRIDGE: function = 0x8; fallthrough; case PCI_HEADER_TYPE_MULTIBRIDGE: /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: temp = secondary; while (temp < subordinate) { ... 
temp++; } */ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno); bus_sec = find_bus_wprev(sec_busno, NULL, 0); /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */ if (!bus_sec) { alloc_error_bus(NULL, sec_busno, 1); /* the rest will be populated during NVRAM call */ return 0; } pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_IO_BASE, &start_io_address); pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_IO_LIMIT, &end_io_address); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_IO_BASE_UPPER16, &upper_io_start); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_IO_LIMIT_UPPER16, &upper_io_end); start_address = (start_io_address & PCI_IO_RANGE_MASK) << 8; start_address |= (upper_io_start << 16); end_address = (end_io_address & PCI_IO_RANGE_MASK) << 8; end_address |= (upper_io_end << 16); if ((start_address) && (start_address <= end_address)) { range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) return -ENOMEM; range->start = start_address; range->end = end_address + 0xfff; if (bus_sec->noIORanges > 0) { if (!range_exists_already(range, bus_sec, IO)) { add_bus_range(IO, range, bus_sec); ++bus_sec->noIORanges; } else { kfree(range); range = NULL; } } else { /* 1st IO Range on the bus */ range->rangeno = 1; bus_sec->rangeIO = range; ++bus_sec->noIORanges; } fix_resources(bus_sec); if (ibmphp_find_resource(bus_cur, start_address, &io, IO)) { io = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!io) { kfree(range); return -ENOMEM; } io->type = IO; io->busno = bus_cur->busno; io->devfunc = ((device << 3) | (function & 0x7)); io->start = start_address; io->end = end_address + 0xfff; io->len = io->end - io->start + 1; ibmphp_add_resource(io); } } pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address); start_address = 0x00000000 | (start_mem_address & PCI_MEMORY_RANGE_MASK) << 16; end_address = 0x00000000 | (end_mem_address & PCI_MEMORY_RANGE_MASK) << 16; if ((start_address) && (start_address <= end_address)) { range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) return -ENOMEM; range->start = start_address; range->end = end_address + 0xfffff; if (bus_sec->noMemRanges > 0) { if (!range_exists_already(range, bus_sec, MEM)) { add_bus_range(MEM, range, bus_sec); ++bus_sec->noMemRanges; } else { kfree(range); range = NULL; } } else { /* 1st Mem Range on the bus */ range->rangeno = 1; bus_sec->rangeMem = range; ++bus_sec->noMemRanges; } fix_resources(bus_sec); if (ibmphp_find_resource(bus_cur, start_address, &mem, MEM)) { mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem) { kfree(range); return -ENOMEM; } mem->type = MEM; mem->busno = bus_cur->busno; mem->devfunc = ((device << 3) | (function & 0x7)); mem->start = start_address; mem->end = end_address + 0xfffff; mem->len = mem->end - mem->start + 1; ibmphp_add_resource(mem); } } pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &start_mem_address); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &end_mem_address); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, PCI_PREF_BASE_UPPER32, &upper_start); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, PCI_PREF_LIMIT_UPPER32, &upper_end); start_address = 0x00000000 | (start_mem_address & PCI_MEMORY_RANGE_MASK) << 16; end_address = 0x00000000 | (end_mem_address & PCI_MEMORY_RANGE_MASK) << 16; #if BITS_PER_LONG == 64 
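/*
 * On 64-bit kernels, fold the upper 32 bits of the prefetchable window
 * (read just above from PCI_PREF_BASE_UPPER32/PCI_PREF_LIMIT_UPPER32)
 * into the computed start and end addresses.
 */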
start_address |= ((long) upper_start) << 32; end_address |= ((long) upper_end) << 32; #endif if ((start_address) && (start_address <= end_address)) { range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) return -ENOMEM; range->start = start_address; range->end = end_address + 0xfffff; if (bus_sec->noPFMemRanges > 0) { if (!range_exists_already(range, bus_sec, PFMEM)) { add_bus_range(PFMEM, range, bus_sec); ++bus_sec->noPFMemRanges; } else { kfree(range); range = NULL; } } else { /* 1st PFMem Range on the bus */ range->rangeno = 1; bus_sec->rangePFMem = range; ++bus_sec->noPFMemRanges; } fix_resources(bus_sec); if (ibmphp_find_resource(bus_cur, start_address, &pfmem, PFMEM)) { pfmem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!pfmem) { kfree(range); return -ENOMEM; } pfmem->type = PFMEM; pfmem->busno = bus_cur->busno; pfmem->devfunc = ((device << 3) | (function & 0x7)); pfmem->start = start_address; pfmem->end = end_address + 0xfffff; pfmem->len = pfmem->end - pfmem->start + 1; pfmem->fromMem = 0; ibmphp_add_resource(pfmem); } } break; } /* end of switch */ } /* end if vendor */ } /* end for function */ } /* end for device */ return 0; }
linux-master
drivers/pci/hotplug/ibmphp_res.c
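The add_bus_range()/update_resources() pair in ibmphp_res.c above keeps each bus's ranges of a given type as a singly linked list sorted by start address; inserting a new range renumbers everything after the insertion point and then bumps the resources that referenced the old numbers. A minimal standalone sketch of just the list-insertion step, using simplified stand-in types rather than the driver's struct range_node (an illustration, not the driver's code), could look like:

struct range {				/* simplified stand-in for struct range_node */
	unsigned int start, end;
	int rangeno;			/* 1-based position in the sorted list */
	struct range *next;
};

/*
 * Insert 'r' into the list headed at *head, keeping the list sorted by
 * start address, and renumber every range from the insertion point on.
 * The real driver additionally calls update_resources() afterwards so the
 * resources pointing at renumbered ranges are bumped as well.
 */
static void insert_range(struct range **head, struct range *r)
{
	struct range *cur = *head, *prev = NULL;
	int n = 1;

	while (cur && cur->start < r->start) {
		prev = cur;
		cur = cur->next;
		n++;
	}

	r->next = cur;
	if (prev)
		prev->next = r;
	else
		*head = r;

	/* 'r' takes slot n; every range after it shifts up by one */
	for (r->rangeno = n, cur = r->next; cur; cur = cur->next)
		cur->rangeno = ++n;
}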
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Express PCI Hot Plug Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>,<[email protected]> */ #define dev_fmt(fmt) "pciehp: " fmt #include <linux/dmi.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/jiffies.h> #include <linux/kthread.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/interrupt.h> #include <linux/slab.h> #include "../pci.h" #include "pciehp.h" static const struct dmi_system_id inband_presence_disabled_dmi_table[] = { /* * Match all Dell systems, as some Dell systems have inband * presence disabled on NVMe slots (but don't support the bit to * report it). Setting inband presence disabled should have no * negative effect, except on broken hotplug slots that never * assert presence detect--and those will still work, they will * just have a bit of extra delay before being probed. */ { .ident = "Dell System", .matches = { DMI_MATCH(DMI_OEM_STRING, "Dell System"), }, }, {} }; static inline struct pci_dev *ctrl_dev(struct controller *ctrl) { return ctrl->pcie->port; } static irqreturn_t pciehp_isr(int irq, void *dev_id); static irqreturn_t pciehp_ist(int irq, void *dev_id); static int pciehp_poll(void *data); static inline int pciehp_request_irq(struct controller *ctrl) { int retval, irq = ctrl->pcie->irq; if (pciehp_poll_mode) { ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl, "pciehp_poll-%s", slot_name(ctrl)); return PTR_ERR_OR_ZERO(ctrl->poll_thread); } /* Installs the interrupt handler */ retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist, IRQF_SHARED, "pciehp", ctrl); if (retval) ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", irq); return retval; } static inline void pciehp_free_irq(struct controller *ctrl) { if (pciehp_poll_mode) kthread_stop(ctrl->poll_thread); else free_irq(ctrl->pcie->irq, ctrl); } static int pcie_poll_cmd(struct controller *ctrl, int timeout) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; do { pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (PCI_POSSIBLE_ERROR(slot_status)) { ctrl_info(ctrl, "%s: no response from device\n", __func__); return 0; } if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); ctrl->cmd_busy = 0; smp_mb(); return 1; } msleep(10); timeout -= 10; } while (timeout >= 0); return 0; /* timeout */ } static void pcie_wait_cmd(struct controller *ctrl) { unsigned int msecs = pciehp_poll_mode ? 2500 : 1000; unsigned long duration = msecs_to_jiffies(msecs); unsigned long cmd_timeout = ctrl->cmd_started + duration; unsigned long now, timeout; int rc; /* * If the controller does not generate notifications for command * completions, we never need to wait between writes. */ if (NO_CMD_CMPL(ctrl)) return; if (!ctrl->cmd_busy) return; /* * Even if the command has already timed out, we want to call * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC. 
*/ now = jiffies; if (time_before_eq(cmd_timeout, now)) timeout = 1; else timeout = cmd_timeout - now; if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE && ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); else rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout)); if (!rc) ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n", ctrl->slot_ctrl, jiffies_to_msecs(jiffies - ctrl->cmd_started)); } #define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC | \ PCI_EXP_SLTCTL_PIC | \ PCI_EXP_SLTCTL_AIC | \ PCI_EXP_SLTCTL_EIC) static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, u16 mask, bool wait) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_ctrl_orig, slot_ctrl; mutex_lock(&ctrl->ctrl_lock); /* * Always wait for any previous command that might still be in progress */ pcie_wait_cmd(ctrl); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); if (PCI_POSSIBLE_ERROR(slot_ctrl)) { ctrl_info(ctrl, "%s: no response from device\n", __func__); goto out; } slot_ctrl_orig = slot_ctrl; slot_ctrl &= ~mask; slot_ctrl |= (cmd & mask); ctrl->cmd_busy = 1; smp_mb(); ctrl->slot_ctrl = slot_ctrl; pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); ctrl->cmd_started = jiffies; /* * Controllers with the Intel CF118 and similar errata advertise * Command Completed support, but they only set Command Completed * if we change the "Control" bits for power, power indicator, * attention indicator, or interlock. If we only change the * "Enable" bits, they never set the Command Completed bit. */ if (pdev->broken_cmd_compl && (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK)) ctrl->cmd_busy = 0; /* * Optionally wait for the hardware to be ready for a new command, * indicating completion of the above issued command. */ if (wait) pcie_wait_cmd(ctrl); out: mutex_unlock(&ctrl->ctrl_lock); } /** * pcie_write_cmd - Issue controller command * @ctrl: controller to which the command is issued * @cmd: command value written to slot control register * @mask: bitmask of slot control register to be modified */ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) { pcie_do_write_cmd(ctrl, cmd, mask, true); } /* Same as above without waiting for the hardware to latch */ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask) { pcie_do_write_cmd(ctrl, cmd, mask, false); } /** * pciehp_check_link_active() - Is the link active * @ctrl: PCIe hotplug controller * * Check whether the downstream link is currently active. Note it is * possible that the card is removed immediately after this so the * caller may need to take it into account. * * If the hotplug controller itself is not available anymore returns * %-ENODEV. 
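* Otherwise returns %1 if the link is active and %0 if it is not.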
*/ int pciehp_check_link_active(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 lnk_status; int ret; ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status)) return -ENODEV; ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); return ret; } static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) { u32 l; int count = 0; int delay = 1000, step = 20; bool found = false; do { found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0); count++; if (found) break; msleep(step); delay -= step; } while (delay > 0); if (count > 1) pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), count, step, l); return found; } static void pcie_wait_for_presence(struct pci_dev *pdev) { int timeout = 1250; u16 slot_status; do { pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_PDS) return; msleep(10); timeout -= 10; } while (timeout > 0); } int pciehp_check_link_status(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); bool found; u16 lnk_status; if (!pcie_wait_for_link(pdev, true)) { ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl)); return -1; } if (ctrl->inband_presence_disabled) pcie_wait_for_presence(pdev); found = pci_bus_check_dev(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); /* ignore link or presence changes up to this point */ if (found) atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), &ctrl->pending_events); pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); if ((lnk_status & PCI_EXP_LNKSTA_LT) || !(lnk_status & PCI_EXP_LNKSTA_NLW)) { ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n", slot_name(ctrl), lnk_status); return -1; } pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); if (!found) { ctrl_info(ctrl, "Slot(%s): No device found\n", slot_name(ctrl)); return -1; } return 0; } static int __pciehp_link_set(struct controller *ctrl, bool enable) { struct pci_dev *pdev = ctrl_dev(ctrl); pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD, enable ? 
0 : PCI_EXP_LNKCTL_LD); return 0; } static int pciehp_link_enable(struct controller *ctrl) { return __pciehp_link_set(ctrl, true); } int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot, u8 *status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_ctrl; pci_config_pm_runtime_get(pdev); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); pci_config_pm_runtime_put(pdev); *status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6; return 0; } int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_ctrl; pci_config_pm_runtime_get(pdev); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); pci_config_pm_runtime_put(pdev); ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) { case PCI_EXP_SLTCTL_ATTN_IND_ON: *status = 1; /* On */ break; case PCI_EXP_SLTCTL_ATTN_IND_BLINK: *status = 2; /* Blink */ break; case PCI_EXP_SLTCTL_ATTN_IND_OFF: *status = 0; /* Off */ break; default: *status = 0xFF; break; } return 0; } void pciehp_get_power_status(struct controller *ctrl, u8 *status) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_ctrl; pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) { case PCI_EXP_SLTCTL_PWR_ON: *status = 1; /* On */ break; case PCI_EXP_SLTCTL_PWR_OFF: *status = 0; /* Off */ break; default: *status = 0xFF; break; } } void pciehp_get_latch_status(struct controller *ctrl, u8 *status) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); } /** * pciehp_card_present() - Is the card present * @ctrl: PCIe hotplug controller * * Function checks whether the card is currently present in the slot and * in that case returns true. Note it is possible that the card is * removed immediately after the check so the caller may need to take * this into account. * * It the hotplug controller itself is not available anymore returns * %-ENODEV. */ int pciehp_card_present(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; int ret; ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status)) return -ENODEV; return !!(slot_status & PCI_EXP_SLTSTA_PDS); } /** * pciehp_card_present_or_link_active() - whether given slot is occupied * @ctrl: PCIe hotplug controller * * Unlike pciehp_card_present(), which determines presence solely from the * Presence Detect State bit, this helper also returns true if the Link Active * bit is set. This is a concession to broken hotplug ports which hardwire * Presence Detect State to zero, such as Wilocity's [1ae9:0200]. * * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug * port is not present anymore returns %-ENODEV. 
*/ int pciehp_card_present_or_link_active(struct controller *ctrl) { int ret; ret = pciehp_card_present(ctrl); if (ret) return ret; return pciehp_check_link_active(ctrl); } int pciehp_query_power_fault(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); return !!(slot_status & PCI_EXP_SLTSTA_PFD); } int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot, u8 status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl_dev(ctrl); pci_config_pm_runtime_get(pdev); pcie_write_cmd_nowait(ctrl, status << 6, PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC); pci_config_pm_runtime_put(pdev); return 0; } /** * pciehp_set_indicators() - set attention indicator, power indicator, or both * @ctrl: PCIe hotplug controller * @pwr: one of: * PCI_EXP_SLTCTL_PWR_IND_ON * PCI_EXP_SLTCTL_PWR_IND_BLINK * PCI_EXP_SLTCTL_PWR_IND_OFF * @attn: one of: * PCI_EXP_SLTCTL_ATTN_IND_ON * PCI_EXP_SLTCTL_ATTN_IND_BLINK * PCI_EXP_SLTCTL_ATTN_IND_OFF * * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator * unchanged. */ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn) { u16 cmd = 0, mask = 0; if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) { cmd |= (pwr & PCI_EXP_SLTCTL_PIC); mask |= PCI_EXP_SLTCTL_PIC; } if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) { cmd |= (attn & PCI_EXP_SLTCTL_AIC); mask |= PCI_EXP_SLTCTL_AIC; } if (cmd) { pcie_write_cmd_nowait(ctrl, cmd, mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); } } int pciehp_power_on_slot(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; int retval; /* Clear power-fault bit from previous power failures */ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_PFD) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PFD); ctrl->power_fault_detected = 0; pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PWR_ON); retval = pciehp_link_enable(ctrl); if (retval) ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__); return retval; } void pciehp_power_off_slot(struct controller *ctrl) { pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PWR_OFF); } static void pciehp_ignore_dpc_link_change(struct controller *ctrl, struct pci_dev *pdev, int irq) { /* * Ignore link changes which occurred while waiting for DPC recovery. * Could be several if DPC triggered multiple times consecutively. */ synchronize_hardirq(irq); atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events); if (pciehp_poll_mode) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_DLLSC); ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n", slot_name(ctrl)); /* * If the link is unexpectedly down after successful recovery, * the corresponding link change may have been ignored above. * Synthesize it to ensure that it is acted on. 
*/ down_read_nested(&ctrl->reset_lock, ctrl->depth); if (!pciehp_check_link_active(ctrl)) pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); up_read(&ctrl->reset_lock); } static irqreturn_t pciehp_isr(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); struct device *parent = pdev->dev.parent; u16 status, events = 0; /* * Interrupts only occur in D3hot or shallower and only if enabled * in the Slot Control register (PCIe r4.0, sec 6.7.3.4). */ if (pdev->current_state == PCI_D3cold || (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode)) return IRQ_NONE; /* * Keep the port accessible by holding a runtime PM ref on its parent. * Defer resume of the parent to the IRQ thread if it's suspended. * Mask the interrupt until then. */ if (parent) { pm_runtime_get_noresume(parent); if (!pm_runtime_active(parent)) { pm_runtime_put(parent); disable_irq_nosync(irq); atomic_or(RERUN_ISR, &ctrl->pending_events); return IRQ_WAKE_THREAD; } } read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); if (PCI_POSSIBLE_ERROR(status)) { ctrl_info(ctrl, "%s: no response from device\n", __func__); if (parent) pm_runtime_put(parent); return IRQ_NONE; } /* * Slot Status contains plain status bits as well as event * notification bits; right now we only want the event bits. */ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC; /* * If we've already reported a power fault, don't report it again * until we've done something to handle it. */ if (ctrl->power_fault_detected) status &= ~PCI_EXP_SLTSTA_PFD; else if (status & PCI_EXP_SLTSTA_PFD) ctrl->power_fault_detected = true; events |= status; if (!events) { if (parent) pm_runtime_put(parent); return IRQ_NONE; } if (status) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status); /* * In MSI mode, all event bits must be zero before the port * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). * So re-read the Slot Status register in case a bit was set * between read and write. */ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) goto read_status; } ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); if (parent) pm_runtime_put(parent); /* * Command Completed notifications are not deferred to the * IRQ thread because it may be waiting for their arrival. */ if (events & PCI_EXP_SLTSTA_CC) { ctrl->cmd_busy = 0; smp_mb(); wake_up(&ctrl->queue); if (events == PCI_EXP_SLTSTA_CC) return IRQ_HANDLED; events &= ~PCI_EXP_SLTSTA_CC; } if (pdev->ignore_hotplug) { ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events); return IRQ_HANDLED; } /* Save pending events for consumption by IRQ thread. 
*/ atomic_or(events, &ctrl->pending_events); return IRQ_WAKE_THREAD; } static irqreturn_t pciehp_ist(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); irqreturn_t ret; u32 events; ctrl->ist_running = true; pci_config_pm_runtime_get(pdev); /* rerun pciehp_isr() if the port was inaccessible on interrupt */ if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) { ret = pciehp_isr(irq, dev_id); enable_irq(irq); if (ret != IRQ_WAKE_THREAD) goto out; } synchronize_hardirq(irq); events = atomic_xchg(&ctrl->pending_events, 0); if (!events) { ret = IRQ_NONE; goto out; } /* Check Attention Button Pressed */ if (events & PCI_EXP_SLTSTA_ABP) pciehp_handle_button_press(ctrl); /* Check Power Fault Detected */ if (events & PCI_EXP_SLTSTA_PFD) { ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_ON); } /* * Ignore Link Down/Up events caused by Downstream Port Containment * if recovery from the error succeeded. */ if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) && ctrl->state == ON_STATE) { events &= ~PCI_EXP_SLTSTA_DLLSC; pciehp_ignore_dpc_link_change(ctrl, pdev, irq); } /* * Disable requests have higher priority than Presence Detect Changed * or Data Link Layer State Changed events. */ down_read_nested(&ctrl->reset_lock, ctrl->depth); if (events & DISABLE_SLOT) pciehp_handle_disable_request(ctrl); else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) pciehp_handle_presence_or_link_change(ctrl, events); up_read(&ctrl->reset_lock); ret = IRQ_HANDLED; out: pci_config_pm_runtime_put(pdev); ctrl->ist_running = false; wake_up(&ctrl->requester); return ret; } static int pciehp_poll(void *data) { struct controller *ctrl = data; schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */ while (!kthread_should_stop()) { /* poll for interrupt events or user requests */ while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD || atomic_read(&ctrl->pending_events)) pciehp_ist(IRQ_NOTCONNECTED, ctrl); if (pciehp_poll_time <= 0 || pciehp_poll_time > 60) pciehp_poll_time = 2; /* clamp to sane value */ schedule_timeout_idle(pciehp_poll_time * HZ); } return 0; } static void pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; /* * TBD: Power fault detected software notification support. * * Power fault detected software notification is not enabled * now, because it caused power fault detected interrupt storm * on some machines. On those machines, power fault detected * bit in the slot status register was set again immediately * when it is cleared in the interrupt service routine, and * next power fault detected interrupt was notified again. */ /* * Always enable link events: thus link-up and link-down shall * always be treated as hotplug and unplug respectively. Enable * presence detect only if Attention Button is not present. 
*/ cmd = PCI_EXP_SLTCTL_DLLSCE; if (ATTN_BUTTN(ctrl)) cmd |= PCI_EXP_SLTCTL_ABPE; else cmd |= PCI_EXP_SLTCTL_PDCE; if (!pciehp_poll_mode) cmd |= PCI_EXP_SLTCTL_HPIE; if (!pciehp_poll_mode && !NO_CMD_CMPL(ctrl)) cmd |= PCI_EXP_SLTCTL_CCIE; mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_DLLSCE); pcie_write_cmd_nowait(ctrl, cmd, mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); } static void pcie_disable_notification(struct controller *ctrl) { u16 mask; mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_DLLSCE); pcie_write_cmd(ctrl, 0, mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0); } void pcie_clear_hotplug_events(struct controller *ctrl) { pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); } void pcie_enable_interrupt(struct controller *ctrl) { u16 mask; mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; pcie_write_cmd(ctrl, mask, mask); } void pcie_disable_interrupt(struct controller *ctrl) { u16 mask; /* * Mask hot-plug interrupt to prevent it triggering immediately * when the link goes inactive (we still get PME when any of the * enabled events is detected). Same goes with Link Layer State * changed event which generates PME immediately when the link goes * inactive so mask it as well. */ mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; pcie_write_cmd(ctrl, 0, mask); } /** * pciehp_slot_reset() - ignore link event caused by error-induced hot reset * @dev: PCI Express port service device * * Called from pcie_portdrv_slot_reset() after AER or DPC initiated a reset * further up in the hierarchy to recover from an error. The reset was * propagated down to this hotplug port. Ignore the resulting link flap. * If the link failed to retrain successfully, synthesize the ignored event. * Surprise removal during reset is detected through Presence Detect Changed. */ int pciehp_slot_reset(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); if (ctrl->state != ON_STATE) return 0; pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_DLLSC); if (!pciehp_check_link_active(ctrl)) pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); return 0; } /* * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary * bus reset of the bridge, but at the same time we want to ensure that it is * not seen as a hot-unplug, followed by the hot-plug of the device. Thus, * disable link state notification and presence detection change notification * momentarily, if we see that they could interfere. Also, clear any spurious * events after. 
*/ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl_dev(ctrl); u16 stat_mask = 0, ctrl_mask = 0; int rc; if (probe) return 0; down_write_nested(&ctrl->reset_lock, ctrl->depth); if (!ATTN_BUTTN(ctrl)) { ctrl_mask |= PCI_EXP_SLTCTL_PDCE; stat_mask |= PCI_EXP_SLTSTA_PDC; } ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE; stat_mask |= PCI_EXP_SLTSTA_DLLSC; pcie_write_cmd(ctrl, 0, ctrl_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0); rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask); up_write(&ctrl->reset_lock); return rc; } int pcie_init_notification(struct controller *ctrl) { if (pciehp_request_irq(ctrl)) return -1; pcie_enable_notification(ctrl); ctrl->notification_enabled = 1; return 0; } void pcie_shutdown_notification(struct controller *ctrl) { if (ctrl->notification_enabled) { pcie_disable_notification(ctrl); pciehp_free_irq(ctrl); ctrl->notification_enabled = 0; } } static inline void dbg_ctrl(struct controller *ctrl) { struct pci_dev *pdev = ctrl->pcie->port; u16 reg16; ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16); ctrl_dbg(ctrl, "Slot Status : 0x%04x\n", reg16); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16); ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16); } #define FLAG(x, y) (((x) & (y)) ? '+' : '-') static inline int pcie_hotplug_depth(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; int depth = 0; while (bus->parent) { bus = bus->parent; if (bus->self && bus->self->is_hotplug_bridge) depth++; } return depth; } struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; u32 slot_cap, slot_cap2; u8 poweron; struct pci_dev *pdev = dev->port; struct pci_bus *subordinate = pdev->subordinate; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return NULL; ctrl->pcie = dev; ctrl->depth = pcie_hotplug_depth(dev->port); pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); if (pdev->hotplug_user_indicators) slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP); /* * We assume no Thunderbolt controllers support Command Complete events, * but some controllers falsely claim they do. */ if (pdev->is_thunderbolt) slot_cap |= PCI_EXP_SLTCAP_NCCS; ctrl->slot_cap = slot_cap; mutex_init(&ctrl->ctrl_lock); mutex_init(&ctrl->state_lock); init_rwsem(&ctrl->reset_lock); init_waitqueue_head(&ctrl->requester); init_waitqueue_head(&ctrl->queue); INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work); dbg_ctrl(ctrl); down_read(&pci_bus_sem); ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE; up_read(&pci_bus_sem); pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2); if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) { pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE, PCI_EXP_SLTCTL_IBPD_DISABLE); ctrl->inband_presence_disabled = 1; } if (dmi_first_match(inband_presence_disabled_dmi_table)) ctrl->inband_presence_disabled = 1; /* Clear all remaining event bits in Slot Status register. 
*/ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC); ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n", (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), FLAG(slot_cap, PCI_EXP_SLTCAP_PCP), FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP), FLAG(slot_cap, PCI_EXP_SLTCAP_AIP), FLAG(slot_cap, PCI_EXP_SLTCAP_PIP), FLAG(slot_cap, PCI_EXP_SLTCAP_HPC), FLAG(slot_cap, PCI_EXP_SLTCAP_HPS), FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD), FLAG(pdev->link_active_reporting, true), pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : ""); /* * If empty slot's power status is on, turn power off. The IRQ isn't * requested yet, so avoid triggering a notification with this command. */ if (POWER_CTRL(ctrl)) { pciehp_get_power_status(ctrl, &poweron); if (!pciehp_card_present_or_link_active(ctrl) && poweron) { pcie_disable_notification(ctrl); pciehp_power_off_slot(ctrl); } } return ctrl; } void pciehp_release_ctrl(struct controller *ctrl) { cancel_delayed_work_sync(&ctrl->button_work); kfree(ctrl); } static void quirk_cmd_compl(struct pci_dev *pdev) { u32 slot_cap; if (pci_is_pcie(pdev)) { pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); if (slot_cap & PCI_EXP_SLTCAP_HPC && !(slot_cap & PCI_EXP_SLTCAP_NCCS)) pdev->broken_cmd_compl = 1; } } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
linux-master
drivers/pci/hotplug/pciehp_hpc.c
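The indicator handling in pciehp_hpc.c above packs both Slot Control indicator fields into one small value by shifting right by 6, and decodes the attention field with a switch on its encoded states. Below is a minimal standalone sketch (not part of the kernel sources) of that decoding; the locally defined constants mirror the PCI Express Slot Control bit layout the driver relies on and are included only so the snippet compiles on its own.

/* Standalone sketch: decode Slot Control indicator fields the way the
 * driver above does, including the ">> 6" packing used by the raw
 * indicator interface. Constants mirror the PCI Express encodings. */
#include <stdint.h>
#include <stdio.h>

#define SLTCTL_AIC            0x00c0  /* Attention Indicator Control (bits 7:6) */
#define SLTCTL_ATTN_IND_ON    0x0040
#define SLTCTL_ATTN_IND_BLINK 0x0080
#define SLTCTL_ATTN_IND_OFF   0x00c0
#define SLTCTL_PIC            0x0300  /* Power Indicator Control (bits 9:8) */

static uint8_t attention_status(uint16_t slot_ctrl)
{
        switch (slot_ctrl & SLTCTL_AIC) {
        case SLTCTL_ATTN_IND_ON:    return 1;   /* On */
        case SLTCTL_ATTN_IND_BLINK: return 2;   /* Blink */
        case SLTCTL_ATTN_IND_OFF:   return 0;   /* Off */
        default:                    return 0xFF;
        }
}

static uint8_t raw_indicator_status(uint16_t slot_ctrl)
{
        /* Both indicator fields occupy bits 9:6, so one right shift by 6
         * yields the compact value returned by the raw-indicator helper. */
        return (slot_ctrl & (SLTCTL_AIC | SLTCTL_PIC)) >> 6;
}

int main(void)
{
        uint16_t slot_ctrl = 0x0140;    /* power indicator on, attention on */

        printf("attention=%u raw=0x%x\n",
               attention_status(slot_ctrl), raw_indicator_status(slot_ctrl));
        return 0;
}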
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Hotplug Driver for PowerPC PowerNV platform. * * Copyright Gavin Shan, IBM Corporation 2016. */ #include <linux/libfdt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/of_fdt.h> #include <asm/opal.h> #include <asm/pnv-pci.h> #include <asm/ppc-pci.h> #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Gavin Shan, IBM Corporation" #define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver" #define SLOT_WARN(sl, x...) \ ((sl)->pdev ? pci_warn((sl)->pdev, x) : dev_warn(&(sl)->bus->dev, x)) struct pnv_php_event { bool added; struct pnv_php_slot *php_slot; struct work_struct work; }; static LIST_HEAD(pnv_php_slot_list); static DEFINE_SPINLOCK(pnv_php_lock); static void pnv_php_register(struct device_node *dn); static void pnv_php_unregister_one(struct device_node *dn); static void pnv_php_unregister(struct device_node *dn); static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, bool disable_device) { struct pci_dev *pdev = php_slot->pdev; int irq = php_slot->irq; u16 ctrl; if (php_slot->irq > 0) { pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); ctrl &= ~(PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_DLLSCE); pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); free_irq(php_slot->irq, php_slot); php_slot->irq = 0; } if (php_slot->wq) { destroy_workqueue(php_slot->wq); php_slot->wq = NULL; } if (disable_device || irq > 0) { if (pdev->msix_enabled) pci_disable_msix(pdev); else if (pdev->msi_enabled) pci_disable_msi(pdev); pci_disable_device(pdev); } } static void pnv_php_free_slot(struct kref *kref) { struct pnv_php_slot *php_slot = container_of(kref, struct pnv_php_slot, kref); WARN_ON(!list_empty(&php_slot->children)); pnv_php_disable_irq(php_slot, false); kfree(php_slot->name); kfree(php_slot); } static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot) { if (!php_slot) return; kref_put(&php_slot->kref, pnv_php_free_slot); } static struct pnv_php_slot *pnv_php_match(struct device_node *dn, struct pnv_php_slot *php_slot) { struct pnv_php_slot *target, *tmp; if (php_slot->dn == dn) { kref_get(&php_slot->kref); return php_slot; } list_for_each_entry(tmp, &php_slot->children, link) { target = pnv_php_match(dn, tmp); if (target) return target; } return NULL; } struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn) { struct pnv_php_slot *php_slot, *tmp; unsigned long flags; spin_lock_irqsave(&pnv_php_lock, flags); list_for_each_entry(tmp, &pnv_php_slot_list, link) { php_slot = pnv_php_match(dn, tmp); if (php_slot) { spin_unlock_irqrestore(&pnv_php_lock, flags); return php_slot; } } spin_unlock_irqrestore(&pnv_php_lock, flags); return NULL; } EXPORT_SYMBOL_GPL(pnv_php_find_slot); /* * Remove pdn for all children of the indicated device node. * The function should remove pdn in a depth-first manner. */ static void pnv_php_rmv_pdns(struct device_node *dn) { struct device_node *child; for_each_child_of_node(dn, child) { pnv_php_rmv_pdns(child); pci_remove_device_node_info(child); } } /* * Detach all child nodes of the indicated device nodes. The * function should handle device nodes in depth-first manner. * * We should not invoke of_node_release() as the memory for * individual device node is part of large memory block. The * large block is allocated from memblock (system bootup) or * kmalloc() when unflattening the device tree by OF changeset. * We can not free the large block allocated from memblock. For * later case, it should be released at once. 
*/ static void pnv_php_detach_device_nodes(struct device_node *parent) { struct device_node *dn; for_each_child_of_node(parent, dn) { pnv_php_detach_device_nodes(dn); of_node_put(dn); of_detach_node(dn); } } static void pnv_php_rmv_devtree(struct pnv_php_slot *php_slot) { pnv_php_rmv_pdns(php_slot->dn); /* * Decrease the refcount if the device nodes were created * through OF changeset before detaching them. */ if (php_slot->fdt) of_changeset_destroy(&php_slot->ocs); pnv_php_detach_device_nodes(php_slot->dn); if (php_slot->fdt) { kfree(php_slot->dt); kfree(php_slot->fdt); php_slot->dt = NULL; php_slot->dn->child = NULL; php_slot->fdt = NULL; } } /* * As the nodes in OF changeset are applied in reverse order, we * need revert the nodes in advance so that we have correct node * order after the changeset is applied. */ static void pnv_php_reverse_nodes(struct device_node *parent) { struct device_node *child, *next; /* In-depth first */ for_each_child_of_node(parent, child) pnv_php_reverse_nodes(child); /* Reverse the nodes in the child list */ child = parent->child; parent->child = NULL; while (child) { next = child->sibling; child->sibling = parent->child; parent->child = child; child = next; } } static int pnv_php_populate_changeset(struct of_changeset *ocs, struct device_node *dn) { struct device_node *child; int ret = 0; for_each_child_of_node(dn, child) { ret = of_changeset_attach_node(ocs, child); if (ret) { of_node_put(child); break; } ret = pnv_php_populate_changeset(ocs, child); if (ret) { of_node_put(child); break; } } return ret; } static void *pnv_php_add_one_pdn(struct device_node *dn, void *data) { struct pci_controller *hose = (struct pci_controller *)data; struct pci_dn *pdn; pdn = pci_add_device_node_info(hose, dn); if (!pdn) return ERR_PTR(-ENOMEM); return NULL; } static void pnv_php_add_pdns(struct pnv_php_slot *slot) { struct pci_controller *hose = pci_bus_to_host(slot->bus); pci_traverse_device_nodes(slot->dn, pnv_php_add_one_pdn, hose); } static int pnv_php_add_devtree(struct pnv_php_slot *php_slot) { void *fdt, *fdt1, *dt; int ret; /* We don't know the FDT blob size. We try to get it through * maximal memory chunk and then copy it to another chunk that * fits the real size. 
*/ fdt1 = kzalloc(0x10000, GFP_KERNEL); if (!fdt1) { ret = -ENOMEM; goto out; } ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000); if (ret) { SLOT_WARN(php_slot, "Error %d getting FDT blob\n", ret); goto free_fdt1; } fdt = kmemdup(fdt1, fdt_totalsize(fdt1), GFP_KERNEL); if (!fdt) { ret = -ENOMEM; goto free_fdt1; } /* Unflatten device tree blob */ dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL); if (!dt) { ret = -EINVAL; SLOT_WARN(php_slot, "Cannot unflatten FDT\n"); goto free_fdt; } /* Initialize and apply the changeset */ of_changeset_init(&php_slot->ocs); pnv_php_reverse_nodes(php_slot->dn); ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn); if (ret) { pnv_php_reverse_nodes(php_slot->dn); SLOT_WARN(php_slot, "Error %d populating changeset\n", ret); goto free_dt; } php_slot->dn->child = NULL; ret = of_changeset_apply(&php_slot->ocs); if (ret) { SLOT_WARN(php_slot, "Error %d applying changeset\n", ret); goto destroy_changeset; } /* Add device node firmware data */ pnv_php_add_pdns(php_slot); php_slot->fdt = fdt; php_slot->dt = dt; kfree(fdt1); goto out; destroy_changeset: of_changeset_destroy(&php_slot->ocs); free_dt: kfree(dt); php_slot->dn->child = NULL; free_fdt: kfree(fdt); free_fdt1: kfree(fdt1); out: return ret; } static inline struct pnv_php_slot *to_pnv_php_slot(struct hotplug_slot *slot) { return container_of(slot, struct pnv_php_slot, slot); } int pnv_php_set_slot_power_state(struct hotplug_slot *slot, uint8_t state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); struct opal_msg msg; int ret; ret = pnv_pci_set_power_state(php_slot->id, state, &msg); if (ret > 0) { if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle || be64_to_cpu(msg.params[2]) != state) { SLOT_WARN(php_slot, "Wrong msg (%lld, %lld, %lld)\n", be64_to_cpu(msg.params[1]), be64_to_cpu(msg.params[2]), be64_to_cpu(msg.params[3])); return -ENOMSG; } if (be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) { ret = -ENODEV; goto error; } } else if (ret < 0) { goto error; } if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE) pnv_php_rmv_devtree(php_slot); else ret = pnv_php_add_devtree(php_slot); return ret; error: SLOT_WARN(php_slot, "Error %d powering %s\n", ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off"); return ret; } EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state); static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); uint8_t power_state = OPAL_PCI_SLOT_POWER_ON; int ret; /* * Retrieve power status from firmware. If we fail * getting that, the power status fails back to * be on. */ ret = pnv_pci_get_power_state(php_slot->id, &power_state); if (ret) { SLOT_WARN(php_slot, "Error %d getting power status\n", ret); } else { *state = power_state; } return 0; } static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); uint8_t presence = OPAL_PCI_SLOT_EMPTY; int ret; /* * Retrieve presence status from firmware. If we can't * get that, it will fail back to be empty. 
*/ ret = pnv_pci_get_presence_state(php_slot->id, &presence); if (ret >= 0) { *state = presence; ret = 0; } else { SLOT_WARN(php_slot, "Error %d getting presence\n", ret); } return ret; } static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); *state = php_slot->attention_state; return 0; } static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); struct pci_dev *bridge = php_slot->pdev; u16 new, mask; php_slot->attention_state = state; if (!bridge) return 0; mask = PCI_EXP_SLTCTL_AIC; if (state) new = PCI_EXP_SLTCTL_ATTN_IND_ON; else new = PCI_EXP_SLTCTL_ATTN_IND_OFF; pcie_capability_clear_and_set_word(bridge, PCI_EXP_SLTCTL, mask, new); return 0; } static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) { struct hotplug_slot *slot = &php_slot->slot; uint8_t presence = OPAL_PCI_SLOT_EMPTY; uint8_t power_status = OPAL_PCI_SLOT_POWER_ON; int ret; /* Check if the slot has been configured */ if (php_slot->state != PNV_PHP_STATE_REGISTERED) return 0; /* Retrieve slot presence status */ ret = pnv_php_get_adapter_state(slot, &presence); if (ret) return ret; /* * Proceed if there have nothing behind the slot. However, * we should leave the slot in registered state at the * beginning. Otherwise, the PCI devices inserted afterwards * won't be probed and populated. */ if (presence == OPAL_PCI_SLOT_EMPTY) { if (!php_slot->power_state_check) { php_slot->power_state_check = true; return 0; } goto scan; } /* * If the power supply to the slot is off, we can't detect * adapter presence state. That means we have to turn the * slot on before going to probe slot's presence state. * * On the first time, we don't change the power status to * boost system boot with assumption that the firmware * supplies consistent slot power status: empty slot always * has its power off and non-empty slot has its power on. */ if (!php_slot->power_state_check) { php_slot->power_state_check = true; ret = pnv_php_get_power_state(slot, &power_status); if (ret) return ret; if (power_status != OPAL_PCI_SLOT_POWER_ON) return 0; } /* Check the power status. Scan the slot if it is already on */ ret = pnv_php_get_power_state(slot, &power_status); if (ret) return ret; if (power_status == OPAL_PCI_SLOT_POWER_ON) goto scan; /* Power is off, turn it on and then scan the slot */ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON); if (ret) return ret; scan: if (presence == OPAL_PCI_SLOT_PRESENT) { if (rescan) { pci_lock_rescan_remove(); pci_hp_add_devices(php_slot->bus); pci_unlock_rescan_remove(); } /* Rescan for child hotpluggable slots */ php_slot->state = PNV_PHP_STATE_POPULATED; if (rescan) pnv_php_register(php_slot->dn); } else { php_slot->state = PNV_PHP_STATE_POPULATED; } return 0; } static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); struct pci_dev *bridge = php_slot->pdev; uint16_t sts; /* * The CAPI folks want pnv_php to drive OpenCAPI slots * which don't have a bridge. Only claim to support * reset_slot() if we have a bridge device (for now...) 
*/ if (probe) return !bridge; /* mask our interrupt while resetting the bridge */ if (php_slot->irq > 0) disable_irq(php_slot->irq); pci_bridge_secondary_bus_reset(bridge); /* clear any state changes that happened due to the reset */ pcie_capability_read_word(php_slot->pdev, PCI_EXP_SLTSTA, &sts); sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(php_slot->pdev, PCI_EXP_SLTSTA, sts); if (php_slot->irq > 0) enable_irq(php_slot->irq); return 0; } static int pnv_php_enable_slot(struct hotplug_slot *slot) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); return pnv_php_enable(php_slot, true); } static int pnv_php_disable_slot(struct hotplug_slot *slot) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); int ret; /* * Allow to disable a slot already in the registered state to * cover cases where the slot couldn't be enabled and never * reached the populated state */ if (php_slot->state != PNV_PHP_STATE_POPULATED && php_slot->state != PNV_PHP_STATE_REGISTERED) return 0; /* Remove all devices behind the slot */ pci_lock_rescan_remove(); pci_hp_remove_devices(php_slot->bus); pci_unlock_rescan_remove(); /* Detach the child hotpluggable slots */ pnv_php_unregister(php_slot->dn); /* Notify firmware and remove device nodes */ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_OFF); php_slot->state = PNV_PHP_STATE_REGISTERED; return ret; } static const struct hotplug_slot_ops php_slot_ops = { .get_power_status = pnv_php_get_power_state, .get_adapter_status = pnv_php_get_adapter_state, .get_attention_status = pnv_php_get_attention_state, .set_attention_status = pnv_php_set_attention_state, .enable_slot = pnv_php_enable_slot, .disable_slot = pnv_php_disable_slot, .reset_slot = pnv_php_reset_slot, }; static void pnv_php_release(struct pnv_php_slot *php_slot) { unsigned long flags; /* Remove from global or child list */ spin_lock_irqsave(&pnv_php_lock, flags); list_del(&php_slot->link); spin_unlock_irqrestore(&pnv_php_lock, flags); /* Detach from parent */ pnv_php_put_slot(php_slot); pnv_php_put_slot(php_slot->parent); } static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn) { struct pnv_php_slot *php_slot; struct pci_bus *bus; const char *label; uint64_t id; int ret; ret = of_property_read_string(dn, "ibm,slot-label", &label); if (ret) return NULL; if (pnv_pci_get_slot_id(dn, &id)) return NULL; bus = pci_find_bus_by_node(dn); if (!bus) return NULL; php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL); if (!php_slot) return NULL; php_slot->name = kstrdup(label, GFP_KERNEL); if (!php_slot->name) { kfree(php_slot); return NULL; } if (dn->child && PCI_DN(dn->child)) php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn); else php_slot->slot_no = -1; /* Placeholder slot */ kref_init(&php_slot->kref); php_slot->state = PNV_PHP_STATE_INITIALIZED; php_slot->dn = dn; php_slot->pdev = bus->self; php_slot->bus = bus; php_slot->id = id; php_slot->power_state_check = false; php_slot->slot.ops = &php_slot_ops; INIT_LIST_HEAD(&php_slot->children); INIT_LIST_HEAD(&php_slot->link); return php_slot; } static int pnv_php_register_slot(struct pnv_php_slot *php_slot) { struct pnv_php_slot *parent; struct device_node *dn = php_slot->dn; unsigned long flags; int ret; /* Check if the slot is registered or not */ parent = pnv_php_find_slot(php_slot->dn); if (parent) { pnv_php_put_slot(parent); return -EEXIST; } /* Register PCI slot */ ret = pci_hp_register(&php_slot->slot, php_slot->bus, php_slot->slot_no, php_slot->name); if (ret) { SLOT_WARN(php_slot, "Error %d 
registering slot\n", ret); return ret; } /* Attach to the parent's child list or global list */ while ((dn = of_get_parent(dn))) { if (!PCI_DN(dn)) { of_node_put(dn); break; } parent = pnv_php_find_slot(dn); if (parent) { of_node_put(dn); break; } of_node_put(dn); } spin_lock_irqsave(&pnv_php_lock, flags); php_slot->parent = parent; if (parent) list_add_tail(&php_slot->link, &parent->children); else list_add_tail(&php_slot->link, &pnv_php_slot_list); spin_unlock_irqrestore(&pnv_php_lock, flags); php_slot->state = PNV_PHP_STATE_REGISTERED; return 0; } static int pnv_php_enable_msix(struct pnv_php_slot *php_slot) { struct pci_dev *pdev = php_slot->pdev; struct msix_entry entry; int nr_entries, ret; u16 pcie_flag; /* Get total number of MSIx entries */ nr_entries = pci_msix_vec_count(pdev); if (nr_entries < 0) return nr_entries; /* Check hotplug MSIx entry is in range */ pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &pcie_flag); entry.entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9; if (entry.entry >= nr_entries) return -ERANGE; /* Enable MSIx */ ret = pci_enable_msix_exact(pdev, &entry, 1); if (ret) { SLOT_WARN(php_slot, "Error %d enabling MSIx\n", ret); return ret; } return entry.vector; } static void pnv_php_event_handler(struct work_struct *work) { struct pnv_php_event *event = container_of(work, struct pnv_php_event, work); struct pnv_php_slot *php_slot = event->php_slot; if (event->added) pnv_php_enable_slot(&php_slot->slot); else pnv_php_disable_slot(&php_slot->slot); kfree(event); } static irqreturn_t pnv_php_interrupt(int irq, void *data) { struct pnv_php_slot *php_slot = data; struct pci_dev *pchild, *pdev = php_slot->pdev; struct eeh_dev *edev; struct eeh_pe *pe; struct pnv_php_event *event; u16 sts, lsts; u8 presence; bool added; unsigned long flags; int ret; pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); pci_dbg(pdev, "PCI slot [%s]: HP int! DLAct: %d, PresDet: %d\n", php_slot->name, !!(sts & PCI_EXP_SLTSTA_DLLSC), !!(sts & PCI_EXP_SLTSTA_PDC)); if (sts & PCI_EXP_SLTSTA_DLLSC) { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); } else if (!(php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) && (sts & PCI_EXP_SLTSTA_PDC)) { ret = pnv_pci_get_presence_state(php_slot->id, &presence); if (ret) { SLOT_WARN(php_slot, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n", php_slot->name, ret, sts); return IRQ_HANDLED; } added = !!(presence == OPAL_PCI_SLOT_PRESENT); } else { pci_dbg(pdev, "PCI slot [%s]: Spurious IRQ?\n", php_slot->name); return IRQ_NONE; } /* Freeze the removed PE to avoid unexpected error reporting */ if (!added) { pchild = list_first_entry_or_null(&php_slot->bus->devices, struct pci_dev, bus_list); edev = pchild ? pci_dev_to_eeh_dev(pchild) : NULL; pe = edev ? edev->pe : NULL; if (pe) { eeh_serialize_lock(&flags); eeh_pe_mark_isolated(pe); eeh_serialize_unlock(flags); eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE); } } /* * The PE is left in frozen state if the event is missed. It's * fine as the PCI devices (PE) aren't functional any more. */ event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) { SLOT_WARN(php_slot, "PCI slot [%s] missed hotplug event 0x%04x\n", php_slot->name, sts); return IRQ_HANDLED; } pci_info(pdev, "PCI slot [%s] %s (IRQ: %d)\n", php_slot->name, added ? 
"added" : "removed", irq); INIT_WORK(&event->work, pnv_php_event_handler); event->added = added; event->php_slot = php_slot; queue_work(php_slot->wq, &event->work); return IRQ_HANDLED; } static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) { struct pci_dev *pdev = php_slot->pdev; u32 broken_pdc = 0; u16 sts, ctrl; int ret; /* Allocate workqueue */ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); if (!php_slot->wq) { SLOT_WARN(php_slot, "Cannot alloc workqueue\n"); pnv_php_disable_irq(php_slot, true); return; } /* Check PDC (Presence Detection Change) is broken or not */ ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc", &broken_pdc); if (!ret && broken_pdc) php_slot->flags |= PNV_PHP_FLAG_BROKEN_PDC; /* Clear pending interrupts */ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) sts |= PCI_EXP_SLTSTA_DLLSC; else sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); /* Request the interrupt */ ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, php_slot->name, php_slot); if (ret) { pnv_php_disable_irq(php_slot, true); SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq); return; } /* Enable the interrupts */ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) { ctrl &= ~PCI_EXP_SLTCTL_PDCE; ctrl |= (PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE); } else { ctrl |= (PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_DLLSCE); } pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); /* The interrupt is initialized successfully when @irq is valid */ php_slot->irq = irq; } static void pnv_php_enable_irq(struct pnv_php_slot *php_slot) { struct pci_dev *pdev = php_slot->pdev; int irq, ret; /* * The MSI/MSIx interrupt might have been occupied by other * drivers. Don't populate the surprise hotplug capability * in that case. */ if (pci_dev_msi_enabled(pdev)) return; ret = pci_enable_device(pdev); if (ret) { SLOT_WARN(php_slot, "Error %d enabling device\n", ret); return; } pci_set_master(pdev); /* Enable MSIx interrupt */ irq = pnv_php_enable_msix(php_slot); if (irq > 0) { pnv_php_init_irq(php_slot, irq); return; } /* * Use MSI if MSIx doesn't work. Fail back to legacy INTx * if MSI doesn't work either */ ret = pci_enable_msi(pdev); if (!ret || pdev->irq) { irq = pdev->irq; pnv_php_init_irq(php_slot, irq); } } static int pnv_php_register_one(struct device_node *dn) { struct pnv_php_slot *php_slot; u32 prop32; int ret; /* Check if it's hotpluggable slot */ ret = of_property_read_u32(dn, "ibm,slot-pluggable", &prop32); if (ret || !prop32) return -ENXIO; ret = of_property_read_u32(dn, "ibm,reset-by-firmware", &prop32); if (ret || !prop32) return -ENXIO; php_slot = pnv_php_alloc_slot(dn); if (!php_slot) return -ENODEV; ret = pnv_php_register_slot(php_slot); if (ret) goto free_slot; ret = pnv_php_enable(php_slot, false); if (ret) goto unregister_slot; /* Enable interrupt if the slot supports surprise hotplug */ ret = of_property_read_u32(dn, "ibm,slot-surprise-pluggable", &prop32); if (!ret && prop32) pnv_php_enable_irq(php_slot); return 0; unregister_slot: pnv_php_unregister_one(php_slot->dn); free_slot: pnv_php_put_slot(php_slot); return ret; } static void pnv_php_register(struct device_node *dn) { struct device_node *child; /* * The parent slots should be registered before their * child slots. 
*/ for_each_child_of_node(dn, child) { pnv_php_register_one(child); pnv_php_register(child); } } static void pnv_php_unregister_one(struct device_node *dn) { struct pnv_php_slot *php_slot; php_slot = pnv_php_find_slot(dn); if (!php_slot) return; php_slot->state = PNV_PHP_STATE_OFFLINE; pci_hp_deregister(&php_slot->slot); pnv_php_release(php_slot); pnv_php_put_slot(php_slot); } static void pnv_php_unregister(struct device_node *dn) { struct device_node *child; /* The child slots should go before their parent slots */ for_each_child_of_node(dn, child) { pnv_php_unregister(child); pnv_php_unregister_one(child); } } static int __init pnv_php_init(void) { struct device_node *dn; pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") pnv_php_register(dn); for_each_compatible_node(dn, NULL, "ibm,ioda3-phb") pnv_php_register(dn); for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb") pnv_php_register_one(dn); /* slot directly under the PHB */ return 0; } static void __exit pnv_php_exit(void) { struct device_node *dn; for_each_compatible_node(dn, NULL, "ibm,ioda2-phb") pnv_php_unregister(dn); for_each_compatible_node(dn, NULL, "ibm,ioda3-phb") pnv_php_unregister(dn); for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb") pnv_php_unregister_one(dn); /* slot directly under the PHB */ } module_init(pnv_php_init); module_exit(pnv_php_exit); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/pci/hotplug/pnv_php.c
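pnv_php_reverse_nodes() above depends on a classic in-place reversal of a singly linked sibling list, so that the OF changeset (which is applied in reverse order) ends up attaching the nodes in their original order. The following standalone sketch shows the same pointer walk on a plain list type; the struct and function names are illustrative, not taken from the driver.

/* Standalone sketch: in-place reversal of a sibling list, the technique
 * pnv_php_reverse_nodes() applies to a device node's children. */
#include <stdio.h>

struct node {
        int id;
        struct node *sibling;
};

static struct node *reverse(struct node *head)
{
        struct node *reversed = NULL;

        while (head) {
                struct node *next = head->sibling;

                head->sibling = reversed;       /* push onto the new head */
                reversed = head;
                head = next;
        }
        return reversed;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *n = reverse(&a);

        for (; n; n = n->sibling)
                printf("%d ", n->id);           /* prints: 3 2 1 */
        printf("\n");
        return 0;
}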
// SPDX-License-Identifier: GPL-2.0+ /* * ACPI PCI Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2002 Hiroshi Aono ([email protected]) * Copyright (C) 2002,2003 Takayoshi Kochi ([email protected]) * Copyright (C) 2002,2003 NEC Corporation * Copyright (C) 2003-2005 Matthew Wilcox ([email protected]) * Copyright (C) 2003-2005 Hewlett Packard * * All rights reserved. * * Send feedback to <[email protected]> * */ #define pr_fmt(fmt) "acpiphp: " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci_hotplug.h> #include <linux/slab.h> #include <linux/smp.h> #include "acpiphp.h" /* name size which is used for entries in pcihpfs */ #define SLOT_NAME_SIZE 21 /* {_SUN} */ bool acpiphp_disabled; /* local variables */ static struct acpiphp_attention_info *attention_info; #define DRIVER_VERSION "0.5" #define DRIVER_AUTHOR "Greg Kroah-Hartman <[email protected]>, Takayoshi Kochi <[email protected]>, Matthew Wilcox <[email protected]>" #define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_PARM_DESC(disable, "disable acpiphp driver"); module_param_named(disable, acpiphp_disabled, bool, 0444); static int enable_slot(struct hotplug_slot *slot); static int disable_slot(struct hotplug_slot *slot); static int set_attention_status(struct hotplug_slot *slot, u8 value); static int get_power_status(struct hotplug_slot *slot, u8 *value); static int get_attention_status(struct hotplug_slot *slot, u8 *value); static int get_latch_status(struct hotplug_slot *slot, u8 *value); static int get_adapter_status(struct hotplug_slot *slot, u8 *value); static const struct hotplug_slot_ops acpi_hotplug_slot_ops = { .enable_slot = enable_slot, .disable_slot = disable_slot, .set_attention_status = set_attention_status, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, }; /** * acpiphp_register_attention - set attention LED callback * @info: must be completely filled with LED callbacks * * Description: This is used to register a hardware specific ACPI * driver that manipulates the attention LED. All the fields in * info must be set. */ int acpiphp_register_attention(struct acpiphp_attention_info *info) { int retval = -EINVAL; if (info && info->owner && info->set_attn && info->get_attn && !attention_info) { retval = 0; attention_info = info; } return retval; } EXPORT_SYMBOL_GPL(acpiphp_register_attention); /** * acpiphp_unregister_attention - unset attention LED callback * @info: must match the pointer used to register * * Description: This is used to un-register a hardware specific acpi * driver that manipulates the attention LED. The pointer to the * info struct must be the same as the one used to set it. 
*/ int acpiphp_unregister_attention(struct acpiphp_attention_info *info) { int retval = -EINVAL; if (info && attention_info == info) { attention_info = NULL; retval = 0; } return retval; } EXPORT_SYMBOL_GPL(acpiphp_unregister_attention); /** * enable_slot - power on and enable a slot * @hotplug_slot: slot to enable * * Actual tasks are done in acpiphp_enable_slot() */ static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); /* enable the specified slot */ return acpiphp_enable_slot(slot->acpi_slot); } /** * disable_slot - disable and power off a slot * @hotplug_slot: slot to disable * * Actual tasks are done in acpiphp_disable_slot() */ static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); /* disable the specified slot */ return acpiphp_disable_slot(slot->acpi_slot); } /** * set_attention_status - set attention LED * @hotplug_slot: slot to set attention LED on * @status: value to set attention LED to (0 or 1) * * attention status LED, so we use a callback that * was registered with us. This allows hardware specific * ACPI implementations to blink the light for us. */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { int retval = -ENODEV; pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); if (attention_info && try_module_get(attention_info->owner)) { retval = attention_info->set_attn(hotplug_slot, status); module_put(attention_info->owner); } else attention_info = NULL; return retval; } /** * get_power_status - get power status of a slot * @hotplug_slot: slot to get status * @value: pointer to store status * * Some platforms may not implement _STA method properly. * In that case, the value returned may not be reliable. */ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = acpiphp_get_power_status(slot->acpi_slot); return 0; } /** * get_attention_status - get attention LED status * @hotplug_slot: slot to get status from * @value: returns with value of attention LED * * ACPI doesn't have known method to determine the state * of the attention status LED, so we use a callback that * was registered with us. This allows hardware specific * ACPI implementations to determine its state. */ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { int retval = -EINVAL; pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); if (attention_info && try_module_get(attention_info->owner)) { retval = attention_info->get_attn(hotplug_slot, value); module_put(attention_info->owner); } else attention_info = NULL; return retval; } /** * get_latch_status - get latch status of a slot * @hotplug_slot: slot to get status * @value: pointer to store status * * ACPI doesn't provide any formal means to access latch status. * Instead, we fake latch status from _STA. 
*/ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = acpiphp_get_latch_status(slot->acpi_slot); return 0; } /** * get_adapter_status - get adapter status of a slot * @hotplug_slot: slot to get status * @value: pointer to store status * * ACPI doesn't provide any formal means to access adapter status. * Instead, we fake adapter status from _STA. */ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = acpiphp_get_adapter_status(slot->acpi_slot); return 0; } /* callback routine to initialize 'struct slot' for each slot */ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot, unsigned int sun) { struct slot *slot; int retval = -ENOMEM; char name[SLOT_NAME_SIZE]; slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) goto error; slot->hotplug_slot.ops = &acpi_hotplug_slot_ops; slot->acpi_slot = acpiphp_slot; acpiphp_slot->slot = slot; slot->sun = sun; snprintf(name, SLOT_NAME_SIZE, "%u", sun); retval = pci_hp_register(&slot->hotplug_slot, acpiphp_slot->bus, acpiphp_slot->device, name); if (retval == -EBUSY) goto error_slot; if (retval) { pr_err("pci_hp_register failed with error %d\n", retval); goto error_slot; } pr_info("Slot [%s] registered\n", slot_name(slot)); return 0; error_slot: kfree(slot); error: return retval; } void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot) { struct slot *slot = acpiphp_slot->slot; pr_info("Slot [%s] unregistered\n", slot_name(slot)); pci_hp_deregister(&slot->hotplug_slot); kfree(slot); } void __init acpiphp_init(void) { pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n", acpiphp_disabled ? ", disabled by user; please report a bug" : ""); }
linux-master
drivers/pci/hotplug/acpiphp_core.c
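acpiphp_register_attention() and acpiphp_unregister_attention() above implement a single-provider callback registry: registration succeeds only when every callback is supplied and no provider is installed yet, and unregistration must pass the same pointer that was registered. The sketch below restates that pattern in standalone C with hypothetical names; it is an illustration of the idiom, not the driver's API.

/* Standalone sketch of a single-provider callback registry, modelled on
 * the attention-LED registration pattern used above. */
#include <stddef.h>
#include <stdio.h>

struct attention_ops {
        int (*set_attn)(unsigned int slot, unsigned char status);
        int (*get_attn)(unsigned int slot, unsigned char *status);
};

static struct attention_ops *attention_provider;

static int register_attention(struct attention_ops *ops)
{
        if (!ops || !ops->set_attn || !ops->get_attn || attention_provider)
                return -1;      /* incomplete ops, or a provider already exists */
        attention_provider = ops;
        return 0;
}

static int unregister_attention(struct attention_ops *ops)
{
        if (!ops || attention_provider != ops)
                return -1;      /* must match the registered pointer */
        attention_provider = NULL;
        return 0;
}

static int demo_set(unsigned int slot, unsigned char status)
{
        printf("slot %u attention LED -> %u\n", slot, status);
        return 0;
}

static int demo_get(unsigned int slot, unsigned char *status)
{
        *status = 0;
        return 0;
}

int main(void)
{
        struct attention_ops ops = { demo_set, demo_get };

        if (!register_attention(&ops) && attention_provider)
                attention_provider->set_attn(3, 1);
        unregister_attention(&ops);
        return 0;
}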
// SPDX-License-Identifier: GPL-2.0+ /* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001,2003 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/mutex.h> #include <linux/debugfs.h> #include "cpqphp.h" static DEFINE_MUTEX(cpqphp_mutex); static int show_ctrl(struct controller *ctrl, char *buf) { char *out = buf; int index; struct pci_resource *res; out += sprintf(buf, "Free resources: memory\n"); index = 11; res = ctrl->mem_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } out += sprintf(out, "Free resources: prefetchable memory\n"); index = 11; res = ctrl->p_mem_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } out += sprintf(out, "Free resources: IO\n"); index = 11; res = ctrl->io_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } out += sprintf(out, "Free resources: bus numbers\n"); index = 11; res = ctrl->bus_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } return out - buf; } static int show_dev(struct controller *ctrl, char *buf) { char *out = buf; int index; struct pci_resource *res; struct pci_func *new_slot; struct slot *slot; slot = ctrl->slot; while (slot) { new_slot = cpqhp_slot_find(slot->bus, slot->device, 0); if (!new_slot) break; out += sprintf(out, "assigned resources: memory\n"); index = 11; res = new_slot->mem_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } out += sprintf(out, "assigned resources: prefetchable memory\n"); index = 11; res = new_slot->p_mem_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } out += sprintf(out, "assigned resources: IO\n"); index = 11; res = new_slot->io_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } out += sprintf(out, "assigned resources: bus numbers\n"); index = 11; res = new_slot->bus_head; while (res && index--) { out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length); res = res->next; } slot = slot->next; } return out - buf; } static int spew_debug_info(struct controller *ctrl, char *data, int size) { int used; used = size - show_ctrl(ctrl, data); used = (size - used) - show_dev(ctrl, &data[used]); return used; } struct ctrl_dbg { int size; char *data; struct controller *ctrl; }; #define MAX_OUTPUT (4*PAGE_SIZE) static int open(struct inode *inode, struct file *file) { struct controller *ctrl = inode->i_private; struct ctrl_dbg *dbg; int retval = -ENOMEM; mutex_lock(&cpqphp_mutex); dbg = kmalloc(sizeof(*dbg), GFP_KERNEL); if (!dbg) goto exit; dbg->data = kmalloc(MAX_OUTPUT, GFP_KERNEL); if (!dbg->data) { kfree(dbg); goto exit; } dbg->size = spew_debug_info(ctrl, dbg->data, MAX_OUTPUT); file->private_data = dbg; retval = 0; exit: mutex_unlock(&cpqphp_mutex); return retval; } static loff_t 
lseek(struct file *file, loff_t off, int whence) { struct ctrl_dbg *dbg = file->private_data; return fixed_size_llseek(file, off, whence, dbg->size); } static ssize_t read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct ctrl_dbg *dbg = file->private_data; return simple_read_from_buffer(buf, nbytes, ppos, dbg->data, dbg->size); } static int release(struct inode *inode, struct file *file) { struct ctrl_dbg *dbg = file->private_data; kfree(dbg->data); kfree(dbg); return 0; } static const struct file_operations debug_ops = { .owner = THIS_MODULE, .open = open, .llseek = lseek, .read = read, .release = release, }; static struct dentry *root; void cpqhp_initialize_debugfs(void) { if (!root) root = debugfs_create_dir("cpqhp", NULL); } void cpqhp_shutdown_debugfs(void) { debugfs_remove(root); } void cpqhp_create_debugfs_files(struct controller *ctrl) { ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev), S_IRUGO, root, ctrl, &debug_ops); } void cpqhp_remove_debugfs_files(struct controller *ctrl) { debugfs_remove(ctrl->dentry); ctrl->dentry = NULL; }
linux-master
drivers/pci/hotplug/cpqphp_sysfs.c
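The cpqphp debugfs file above renders its whole report into a buffer once at open() time and then serves every read() from that snapshot. The standalone sketch below illustrates the same snapshot-then-read pattern; buffer sizes, field names, and the resource list are illustrative, and read_snapshot() is a userspace stand-in for what simple_read_from_buffer() does in the kernel.

/* Standalone sketch: build a text snapshot once, then serve partial reads
 * from it by offset, as the debugfs file above does. */
#include <stdio.h>
#include <string.h>

struct resource_entry {
        unsigned int base;
        unsigned int length;
};

static size_t render_snapshot(char *buf, size_t size,
                              const struct resource_entry *res, int count)
{
        size_t used = 0;
        int i;

        used += snprintf(buf + used, size - used, "Free resources: memory\n");
        for (i = 0; i < count && used < size; i++)
                used += snprintf(buf + used, size - used,
                                 "start = %8.8x, length = %8.8x\n",
                                 res[i].base, res[i].length);
        return used < size ? used : size - 1;
}

/* Copy at most @nbytes from the snapshot, starting at *@ppos. */
static size_t read_snapshot(const char *snap, size_t snap_len,
                            char *out, size_t nbytes, size_t *ppos)
{
        size_t avail = *ppos < snap_len ? snap_len - *ppos : 0;
        size_t n = nbytes < avail ? nbytes : avail;

        memcpy(out, snap + *ppos, n);
        *ppos += n;
        return n;
}

int main(void)
{
        struct resource_entry res[] = { { 0xfed00000, 0x1000 } };
        char snap[256], chunk[16];
        size_t len = render_snapshot(snap, sizeof(snap), res, 1);
        size_t pos = 0, n;

        while ((n = read_snapshot(snap, len, chunk, sizeof(chunk) - 1, &pos))) {
                chunk[n] = '\0';
                fputs(chunk, stdout);
        }
        return 0;
}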
// SPDX-License-Identifier: GPL-2.0+ /* * ACPI PCI HotPlug glue functions to ACPI CA subsystem * * Copyright (C) 2002,2003 Takayoshi Kochi ([email protected]) * Copyright (C) 2002 Hiroshi Aono ([email protected]) * Copyright (C) 2002,2003 NEC Corporation * Copyright (C) 2003-2005 Matthew Wilcox ([email protected]) * Copyright (C) 2003-2005 Hewlett Packard * Copyright (C) 2005 Rajesh Shah ([email protected]) * Copyright (C) 2005 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]> * */ /* * Lifetime rules for pci_dev: * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot() * when the bridge is scanned and it loses a refcount when the bridge * is removed. * - When a P2P bridge is present, we elevate the refcount on the subordinate * bus. It loses the refcount when the driver unloads. */ #define pr_fmt(fmt) "acpiphp_glue: " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/acpi.h> #include "../pci.h" #include "acpiphp.h" static LIST_HEAD(bridge_list); static DEFINE_MUTEX(bridge_mutex); static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type); static void acpiphp_post_dock_fixup(struct acpi_device *adev); static void acpiphp_sanitize_bus(struct pci_bus *bus); static void hotplug_event(u32 type, struct acpiphp_context *context); static void free_bridge(struct kref *kref); /** * acpiphp_init_context - Create hotplug context and grab a reference to it. * @adev: ACPI device object to create the context for. * * Call under acpi_hp_context_lock. */ static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev) { struct acpiphp_context *context; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return NULL; context->refcount = 1; context->hp.notify = acpiphp_hotplug_notify; context->hp.fixup = acpiphp_post_dock_fixup; acpi_set_hp_context(adev, &context->hp); return context; } /** * acpiphp_get_context - Get hotplug context and grab a reference to it. * @adev: ACPI device object to get the context for. * * Call under acpi_hp_context_lock. */ static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev) { struct acpiphp_context *context; if (!adev->hp) return NULL; context = to_acpiphp_context(adev->hp); context->refcount++; return context; } /** * acpiphp_put_context - Drop a reference to ACPI hotplug context. * @context: ACPI hotplug context to drop a reference to. * * The context object is removed if there are no more references to it. * * Call under acpi_hp_context_lock. 
*/ static void acpiphp_put_context(struct acpiphp_context *context) { if (--context->refcount) return; WARN_ON(context->bridge); context->hp.self->hp = NULL; kfree(context); } static inline void get_bridge(struct acpiphp_bridge *bridge) { kref_get(&bridge->ref); } static inline void put_bridge(struct acpiphp_bridge *bridge) { kref_put(&bridge->ref, free_bridge); } static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev) { struct acpiphp_context *context; acpi_lock_hp_context(); context = acpiphp_get_context(adev); if (!context) goto unlock; if (context->func.parent->is_going_away) { acpiphp_put_context(context); context = NULL; goto unlock; } get_bridge(context->func.parent); acpiphp_put_context(context); unlock: acpi_unlock_hp_context(); return context; } static void acpiphp_let_context_go(struct acpiphp_context *context) { put_bridge(context->func.parent); } static void free_bridge(struct kref *kref) { struct acpiphp_context *context; struct acpiphp_bridge *bridge; struct acpiphp_slot *slot, *next; struct acpiphp_func *func, *tmp; acpi_lock_hp_context(); bridge = container_of(kref, struct acpiphp_bridge, ref); list_for_each_entry_safe(slot, next, &bridge->slots, node) { list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) acpiphp_put_context(func_to_context(func)); kfree(slot); } context = bridge->context; /* Root bridges will not have hotplug context. */ if (context) { /* Release the reference taken by acpiphp_enumerate_slots(). */ put_bridge(context->func.parent); context->bridge = NULL; acpiphp_put_context(context); } put_device(&bridge->pci_bus->dev); pci_dev_put(bridge->pci_dev); kfree(bridge); acpi_unlock_hp_context(); } /** * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices. * @adev: ACPI device object corresponding to a PCI device. * * TBD - figure out a way to only call fixups for systems that require them. */ static void acpiphp_post_dock_fixup(struct acpi_device *adev) { struct acpiphp_context *context = acpiphp_grab_context(adev); struct pci_bus *bus; u32 buses; if (!context) return; bus = context->func.slot->bus; if (!bus->self) goto out; /* fixup bad _DCK function that rewrites * secondary bridge on slot */ pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses); if (((buses >> 8) & 0xff) != bus->busn_res.start) { buses = (buses & 0xff000000) | ((unsigned int)(bus->primary) << 0) | ((unsigned int)(bus->busn_res.start) << 8) | ((unsigned int)(bus->busn_res.end) << 16); pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses); } out: acpiphp_let_context_go(context); } /** * acpiphp_add_context - Add ACPIPHP context to an ACPI device object. * @handle: ACPI handle of the object to add a context to. * @lvl: Not used. * @data: The object's parent ACPIPHP bridge. * @rv: Not used. 
*/ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data, void **rv) { struct acpi_device *adev = acpi_fetch_acpi_dev(handle); struct acpiphp_bridge *bridge = data; struct acpiphp_context *context; struct acpiphp_slot *slot; struct acpiphp_func *newfunc; acpi_status status = AE_OK; unsigned long long adr; int device, function; struct pci_bus *pbus = bridge->pci_bus; struct pci_dev *pdev = bridge->pci_dev; u32 val; if (!adev) return AE_OK; status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); return AE_OK; } device = (adr >> 16) & 0xffff; function = adr & 0xffff; acpi_lock_hp_context(); context = acpiphp_init_context(adev); if (!context) { acpi_unlock_hp_context(); acpi_handle_err(handle, "No hotplug context\n"); return AE_NOT_EXIST; } newfunc = &context->func; newfunc->function = function; newfunc->parent = bridge; acpi_unlock_hp_context(); /* * If this is a dock device, its _EJ0 should be executed by the dock * notify handler after calling _DCK. */ if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0")) newfunc->flags = FUNC_HAS_EJ0; if (acpi_has_method(handle, "_STA")) newfunc->flags |= FUNC_HAS_STA; /* search for objects that share the same slot */ list_for_each_entry(slot, &bridge->slots, node) if (slot->device == device) goto slot_found; slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); if (!slot) { acpi_lock_hp_context(); acpiphp_put_context(context); acpi_unlock_hp_context(); return AE_NO_MEMORY; } slot->bus = bridge->pci_bus; slot->device = device; INIT_LIST_HEAD(&slot->funcs); list_add_tail(&slot->node, &bridge->slots); /* * Expose slots to user space for functions that have _EJ0 or _RMV or * are located in dock stations. Do not expose them for devices handled * by the native PCIe hotplug (PCIeHP) or standard PCI hotplug * (SHPCHP), because that code is supposed to expose slots to user * space in those cases. */ if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev)) && !(pdev && hotplug_is_native(pdev))) { unsigned long long sun; int retval; bridge->nr_slots++; status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun); if (ACPI_FAILURE(status)) sun = bridge->nr_slots; pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", sun, pci_domain_nr(pbus), pbus->number, device); retval = acpiphp_register_hotplug_slot(slot, sun); if (retval) { slot->slot = NULL; bridge->nr_slots--; if (retval == -EBUSY) pr_warn("Slot %llu already registered by another hotplug driver\n", sun); else pr_warn("acpiphp_register_hotplug_slot failed (err code = 0x%x)\n", retval); } /* Even if the slot registration fails, we can still use it. 
*/ } slot_found: newfunc->slot = slot; list_add_tail(&newfunc->sibling, &slot->funcs); if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function), &val, 60*1000)) slot->flags |= SLOT_ENABLED; return AE_OK; } static void cleanup_bridge(struct acpiphp_bridge *bridge) { struct acpiphp_slot *slot; struct acpiphp_func *func; list_for_each_entry(slot, &bridge->slots, node) { list_for_each_entry(func, &slot->funcs, sibling) { struct acpi_device *adev = func_to_acpi_device(func); acpi_lock_hp_context(); adev->hp->notify = NULL; adev->hp->fixup = NULL; acpi_unlock_hp_context(); } slot->flags |= SLOT_IS_GOING_AWAY; if (slot->slot) acpiphp_unregister_hotplug_slot(slot); } mutex_lock(&bridge_mutex); list_del(&bridge->list); mutex_unlock(&bridge_mutex); acpi_lock_hp_context(); bridge->is_going_away = true; acpi_unlock_hp_context(); } /** * acpiphp_max_busnr - return the highest reserved bus number under the given bus. * @bus: bus to start search with */ static unsigned char acpiphp_max_busnr(struct pci_bus *bus) { struct pci_bus *tmp; unsigned char max, n; /* * pci_bus_max_busnr will return the highest * reserved busnr for all these children. * that is equivalent to the bus->subordinate * value. We don't want to use the parent's * bus->subordinate value because it could have * padding in it. */ max = bus->busn_res.start; list_for_each_entry(tmp, &bus->children, node) { n = pci_bus_max_busnr(tmp); if (n > max) max = n; } return max; } static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) { struct acpiphp_func *func; list_for_each_entry(func, &slot->funcs, sibling) { /* _REG is optional, we don't care about if there is failure */ acpi_evaluate_reg(func_to_handle(func), ACPI_ADR_SPACE_PCI_CONFIG, ACPI_REG_CONNECT); } } static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev) { struct acpiphp_func *func; /* quirk, or pcie could set it already */ if (dev->is_hotplug_bridge) return; /* * In the PCIe case, only Root Ports and Downstream Ports are capable of * accommodating hotplug devices, so avoid marking Upstream Ports as * "hotplug bridges". 
*/ if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM) return; list_for_each_entry(func, &slot->funcs, sibling) { if (PCI_FUNC(dev->devfn) == func->function) { dev->is_hotplug_bridge = 1; break; } } } static int acpiphp_rescan_slot(struct acpiphp_slot *slot) { struct acpiphp_func *func; list_for_each_entry(func, &slot->funcs, sibling) { struct acpi_device *adev = func_to_acpi_device(func); acpi_bus_scan(adev->handle); if (acpi_device_enumerated(adev)) acpi_device_set_power(adev, ACPI_STATE_D0); } return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0)); } static void acpiphp_native_scan_bridge(struct pci_dev *bridge) { struct pci_bus *bus = bridge->subordinate; struct pci_dev *dev; int max; if (!bus) return; max = bus->busn_res.start; /* Scan already configured non-hotplug bridges */ for_each_pci_bridge(dev, bus) { if (!hotplug_is_native(dev)) max = pci_scan_bridge(bus, dev, max, 0); } /* Scan non-hotplug bridges that need to be reconfigured */ for_each_pci_bridge(dev, bus) { if (hotplug_is_native(dev)) continue; max = pci_scan_bridge(bus, dev, max, 1); if (dev->subordinate) { pcibios_resource_survey_bus(dev->subordinate); pci_bus_size_bridges(dev->subordinate); pci_bus_assign_resources(dev->subordinate); } } } /** * enable_slot - enable, configure a slot * @slot: slot to be enabled * @bridge: true if enable is for the whole bridge (not a single slot) * * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. */ static void enable_slot(struct acpiphp_slot *slot, bool bridge) { struct pci_dev *dev; struct pci_bus *bus = slot->bus; struct acpiphp_func *func; if (bridge && bus->self && hotplug_is_native(bus->self)) { /* * If native hotplug is used, it will take care of hotplug * slot management and resource allocation for hotplug * bridges. However, ACPI hotplug may still be used for * non-hotplug bridges to bring in additional devices such * as a Thunderbolt host controller. */ for_each_pci_bridge(dev, bus) { if (PCI_SLOT(dev->devfn) == slot->device) acpiphp_native_scan_bridge(dev); } } else { LIST_HEAD(add_list); int max, pass; acpiphp_rescan_slot(slot); max = acpiphp_max_busnr(bus); for (pass = 0; pass < 2; pass++) { for_each_pci_bridge(dev, bus) { if (PCI_SLOT(dev->devfn) != slot->device) continue; max = pci_scan_bridge(bus, dev, max, pass); if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); if (pci_is_root_bus(bus)) __pci_bus_size_bridges(dev->subordinate, &add_list); } } } if (pci_is_root_bus(bus)) __pci_bus_assign_resources(bus, &add_list, NULL); else pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); pcie_bus_configure_settings(bus); acpiphp_set_acpi_region(slot); list_for_each_entry(dev, &bus->devices, bus_list) { /* Assume that newly added devices are powered on already. */ if (!pci_dev_is_added(dev)) dev->current_state = PCI_D0; } pci_bus_add_devices(bus); slot->flags |= SLOT_ENABLED; list_for_each_entry(func, &slot->funcs, sibling) { dev = pci_get_slot(bus, PCI_DEVFN(slot->device, func->function)); if (!dev) { /* Do not set SLOT_ENABLED flag if some funcs are not added. 
*/ slot->flags &= ~SLOT_ENABLED; continue; } pci_dev_put(dev); } } /** * disable_slot - disable a slot * @slot: ACPI PHP slot */ static void disable_slot(struct acpiphp_slot *slot) { struct pci_bus *bus = slot->bus; struct pci_dev *dev, *prev; struct acpiphp_func *func; /* * enable_slot() enumerates all functions in this device via * pci_scan_slot(), whether they have associated ACPI hotplug * methods (_EJ0, etc.) or not. Therefore, we remove all functions * here. */ list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot->device) pci_stop_and_remove_bus_device(dev); list_for_each_entry(func, &slot->funcs, sibling) acpi_bus_trim(func_to_acpi_device(func)); slot->flags &= ~SLOT_ENABLED; } static bool slot_no_hotplug(struct acpiphp_slot *slot) { struct pci_bus *bus = slot->bus; struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (PCI_SLOT(dev->devfn) == slot->device && dev->ignore_hotplug) return true; } return false; } /** * get_slot_status - get ACPI slot status * @slot: ACPI PHP slot * * If a slot has _STA for each function and if any one of them * returned non-zero status, return it. * * If a slot doesn't have _STA and if any one of its functions' * configuration space is configured, return 0x0f as a _STA. * * Otherwise return 0. */ static unsigned int get_slot_status(struct acpiphp_slot *slot) { unsigned long long sta = 0; struct acpiphp_func *func; u32 dvid; list_for_each_entry(func, &slot->funcs, sibling) { if (func->flags & FUNC_HAS_STA) { acpi_status status; status = acpi_evaluate_integer(func_to_handle(func), "_STA", NULL, &sta); if (ACPI_SUCCESS(status) && sta) break; } else { if (pci_bus_read_dev_vendor_id(slot->bus, PCI_DEVFN(slot->device, func->function), &dvid, 0)) { sta = ACPI_STA_ALL; break; } } } if (!sta) { /* * Check for the slot itself since it may be that the * ACPI slot is a device below PCIe upstream port so in * that case it may not even be reachable yet. */ if (pci_bus_read_dev_vendor_id(slot->bus, PCI_DEVFN(slot->device, 0), &dvid, 0)) { sta = ACPI_STA_ALL; } } return (unsigned int)sta; } static inline bool device_status_valid(unsigned int sta) { /* * ACPI spec says that _STA may return bit 0 clear with bit 3 set * if the device is valid but does not require a device driver to be * loaded (Section 6.3.7 of ACPI 5.0A). */ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING; return (sta & mask) == mask; } /** * trim_stale_devices - remove PCI devices that are not responding. * @dev: PCI device to start walking the hierarchy from. */ static void trim_stale_devices(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); struct pci_bus *bus = dev->subordinate; bool alive = dev->ignore_hotplug; if (adev) { acpi_status status; unsigned long long sta; status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); alive = alive || (ACPI_SUCCESS(status) && device_status_valid(sta)); } if (!alive) alive = pci_device_is_present(dev); if (!alive) { pci_dev_set_disconnected(dev, NULL); if (pci_has_subordinate(dev)) pci_walk_bus(dev->subordinate, pci_dev_set_disconnected, NULL); pci_stop_and_remove_bus_device(dev); if (adev) acpi_bus_trim(adev); } else if (bus) { struct pci_dev *child, *tmp; /* The device is a bridge. so check the bus below it. 
*/ pm_runtime_get_sync(&dev->dev); list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list) trim_stale_devices(child); pm_runtime_put(&dev->dev); } } /** * acpiphp_check_bridge - re-enumerate devices * @bridge: where to begin re-enumeration * * Iterate over all slots under this bridge and make sure that if a * card is present they are enabled, and if not they are disabled. */ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) { struct acpiphp_slot *slot; /* Bail out if the bridge is going away. */ if (bridge->is_going_away) return; if (bridge->pci_dev) pm_runtime_get_sync(&bridge->pci_dev->dev); list_for_each_entry(slot, &bridge->slots, node) { struct pci_bus *bus = slot->bus; struct pci_dev *dev, *tmp; if (slot_no_hotplug(slot)) { ; /* do nothing */ } else if (device_status_valid(get_slot_status(slot))) { /* remove stale devices if any */ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot->device) trim_stale_devices(dev); /* configure all functions */ enable_slot(slot, true); } else { disable_slot(slot); } } if (bridge->pci_dev) pm_runtime_put(&bridge->pci_dev->dev); } /* * Remove devices for which we could not assign resources, call * arch specific code to fix-up the bus */ static void acpiphp_sanitize_bus(struct pci_bus *bus) { struct pci_dev *dev, *tmp; int i; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM; list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) { for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { struct resource *res = &dev->resource[i]; if ((res->flags & type_mask) && !res->start && res->end) { /* Could not assign a required resources * for this device, remove it */ pci_stop_and_remove_bus_device(dev); break; } } } } /* * ACPI event handlers */ void acpiphp_check_host_bridge(struct acpi_device *adev) { struct acpiphp_bridge *bridge = NULL; acpi_lock_hp_context(); if (adev->hp) { bridge = to_acpiphp_root_context(adev->hp)->root_bridge; if (bridge) get_bridge(bridge); } acpi_unlock_hp_context(); if (bridge) { pci_lock_rescan_remove(); acpiphp_check_bridge(bridge); pci_unlock_rescan_remove(); put_bridge(bridge); } } static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot); static void hotplug_event(u32 type, struct acpiphp_context *context) { acpi_handle handle = context->hp.self->handle; struct acpiphp_func *func = &context->func; struct acpiphp_slot *slot = func->slot; struct acpiphp_bridge *bridge; acpi_lock_hp_context(); bridge = context->bridge; if (bridge) get_bridge(bridge); acpi_unlock_hp_context(); pci_lock_rescan_remove(); switch (type) { case ACPI_NOTIFY_BUS_CHECK: /* bus re-enumerate */ acpi_handle_debug(handle, "Bus check in %s()\n", __func__); if (bridge) acpiphp_check_bridge(bridge); else if (!(slot->flags & SLOT_IS_GOING_AWAY)) enable_slot(slot, false); break; case ACPI_NOTIFY_DEVICE_CHECK: /* device check */ acpi_handle_debug(handle, "Device check in %s()\n", __func__); if (bridge) { acpiphp_check_bridge(bridge); } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) { /* * Check if anything has changed in the slot and rescan * from the parent if that's the case. 
*/ if (acpiphp_rescan_slot(slot)) acpiphp_check_bridge(func->parent); } break; case ACPI_NOTIFY_EJECT_REQUEST: /* request device eject */ acpi_handle_debug(handle, "Eject request in %s()\n", __func__); acpiphp_disable_and_eject_slot(slot); break; } pci_unlock_rescan_remove(); if (bridge) put_bridge(bridge); } static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type) { struct acpiphp_context *context; context = acpiphp_grab_context(adev); if (!context) return -ENODATA; hotplug_event(type, context); acpiphp_let_context_go(context); return 0; } /** * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus. * @bus: PCI bus to enumerate the slots for. * * A "slot" is an object associated with a PCI device number. All functions * (PCI devices) with the same bus and device number belong to the same slot. */ void acpiphp_enumerate_slots(struct pci_bus *bus) { struct acpiphp_bridge *bridge; struct acpi_device *adev; acpi_handle handle; acpi_status status; if (acpiphp_disabled) return; adev = ACPI_COMPANION(bus->bridge); if (!adev) return; handle = adev->handle; bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); if (!bridge) return; INIT_LIST_HEAD(&bridge->slots); kref_init(&bridge->ref); bridge->pci_dev = pci_dev_get(bus->self); bridge->pci_bus = bus; /* * Grab a ref to the subordinate PCI bus in case the bus is * removed via PCI core logical hotplug. The ref pins the bus * (which we access during module unload). */ get_device(&bus->dev); acpi_lock_hp_context(); if (pci_is_root_bus(bridge->pci_bus)) { struct acpiphp_root_context *root_context; root_context = kzalloc(sizeof(*root_context), GFP_KERNEL); if (!root_context) goto err; root_context->root_bridge = bridge; acpi_set_hp_context(adev, &root_context->hp); } else { struct acpiphp_context *context; /* * This bridge should have been registered as a hotplug function * under its parent, so the context should be there, unless the * parent is going to be handled by pciehp, in which case this * bridge is not interesting to us either. */ context = acpiphp_get_context(adev); if (!context) goto err; bridge->context = context; context->bridge = bridge; /* Get a reference to the parent bridge. */ get_bridge(context->func.parent); } acpi_unlock_hp_context(); /* Must be added to the list prior to calling acpiphp_add_context(). */ mutex_lock(&bridge_mutex); list_add(&bridge->list, &bridge_list); mutex_unlock(&bridge_mutex); /* register all slot objects under this bridge */ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, acpiphp_add_context, NULL, bridge, NULL); if (ACPI_FAILURE(status)) { acpi_handle_err(handle, "failed to register slots\n"); cleanup_bridge(bridge); put_bridge(bridge); } return; err: acpi_unlock_hp_context(); put_device(&bus->dev); pci_dev_put(bridge->pci_dev); kfree(bridge); } static void acpiphp_drop_bridge(struct acpiphp_bridge *bridge) { if (pci_is_root_bus(bridge->pci_bus)) { struct acpiphp_root_context *root_context; struct acpi_device *adev; acpi_lock_hp_context(); adev = ACPI_COMPANION(bridge->pci_bus->bridge); root_context = to_acpiphp_root_context(adev->hp); adev->hp = NULL; acpi_unlock_hp_context(); kfree(root_context); } cleanup_bridge(bridge); put_bridge(bridge); } /** * acpiphp_remove_slots - Remove slot objects associated with a given bus. * @bus: PCI bus to remove the slot objects for. 
*/ void acpiphp_remove_slots(struct pci_bus *bus) { struct acpiphp_bridge *bridge; if (acpiphp_disabled) return; mutex_lock(&bridge_mutex); list_for_each_entry(bridge, &bridge_list, list) if (bridge->pci_bus == bus) { mutex_unlock(&bridge_mutex); acpiphp_drop_bridge(bridge); return; } mutex_unlock(&bridge_mutex); } /** * acpiphp_enable_slot - power on slot * @slot: ACPI PHP slot */ int acpiphp_enable_slot(struct acpiphp_slot *slot) { pci_lock_rescan_remove(); if (slot->flags & SLOT_IS_GOING_AWAY) { pci_unlock_rescan_remove(); return -ENODEV; } /* configure all functions */ if (!(slot->flags & SLOT_ENABLED)) enable_slot(slot, false); pci_unlock_rescan_remove(); return 0; } /** * acpiphp_disable_and_eject_slot - power off and eject slot * @slot: ACPI PHP slot */ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot) { struct acpiphp_func *func; if (slot->flags & SLOT_IS_GOING_AWAY) return -ENODEV; /* unconfigure all functions */ disable_slot(slot); list_for_each_entry(func, &slot->funcs, sibling) if (func->flags & FUNC_HAS_EJ0) { acpi_handle handle = func_to_handle(func); if (ACPI_FAILURE(acpi_evaluate_ej0(handle))) acpi_handle_err(handle, "_EJ0 failed\n"); break; } return 0; } int acpiphp_disable_slot(struct acpiphp_slot *slot) { int ret; /* * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in * acpiphp_disable_and_eject_slot() will be synchronized properly. */ acpi_scan_lock_acquire(); pci_lock_rescan_remove(); ret = acpiphp_disable_and_eject_slot(slot); pci_unlock_rescan_remove(); acpi_scan_lock_release(); return ret; } /* * slot enabled: 1 * slot disabled: 0 */ u8 acpiphp_get_power_status(struct acpiphp_slot *slot) { return (slot->flags & SLOT_ENABLED); } /* * latch open: 1 * latch closed: 0 */ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot) { return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI); } /* * adapter presence : 1 * absence : 0 */ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot) { return !!get_slot_status(slot); }
linux-master
drivers/pci/hotplug/acpiphp_glue.c
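The get_bridge()/put_bridge() helpers and free_bridge() callback in acpiphp_glue.c above are an instance of the standard kref lifetime pattern: kref_init() when the object is created, kref_get()/kref_put() wrappers for references, and a release callback that frees the object when the last reference drops. A minimal sketch of that pattern follows; "example_bridge" and the example_* helpers are hypothetical names, not code from the file above.

/*
 * Sketch: kref-managed object lifetime, mirroring the bridge refcounting
 * used by acpiphp. All "example_*" names are illustrative.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_bridge {
        struct kref ref;
        /* slot list, pci_bus pointer, etc. would live here */
};

static void example_free_bridge(struct kref *kref)
{
        struct example_bridge *bridge =
                container_of(kref, struct example_bridge, ref);

        kfree(bridge);
}

static inline void example_get_bridge(struct example_bridge *bridge)
{
        kref_get(&bridge->ref);
}

static inline void example_put_bridge(struct example_bridge *bridge)
{
        kref_put(&bridge->ref, example_free_bridge);
}

static struct example_bridge *example_alloc_bridge(void)
{
        struct example_bridge *bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);

        if (bridge)
                kref_init(&bridge->ref);        /* refcount starts at 1 */
        return bridge;
}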
// SPDX-License-Identifier: GPL-2.0+ /* * IBM Hot Plug Controller Driver * * Written By: Tong Yu, IBM Corporation * * Copyright (C) 2001,2003 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001-2003 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/init.h> #include "ibmphp.h" /* * POST builds data blocks(in this data block definition, a char-1 * byte, short(or word)-2 byte, long(dword)-4 byte) in the Extended * BIOS Data Area which describe the configuration of the hot-plug * controllers and resources used by the PCI Hot-Plug devices. * * This file walks EBDA, maps data block from physical addr, * reconstruct linked lists about all system resource(MEM, PFM, IO) * already assigned by POST, as well as linked lists about hot plug * controllers (ctlr#, slot#, bus&slot features...) */ /* Global lists */ LIST_HEAD(ibmphp_ebda_pci_rsrc_head); LIST_HEAD(ibmphp_slot_head); /* Local variables */ static struct ebda_hpc_list *hpc_list_ptr; static struct ebda_rsrc_list *rsrc_list_ptr; static struct rio_table_hdr *rio_table_ptr = NULL; static LIST_HEAD(ebda_hpc_head); static LIST_HEAD(bus_info_head); static LIST_HEAD(rio_vg_head); static LIST_HEAD(rio_lo_head); static LIST_HEAD(opt_vg_head); static LIST_HEAD(opt_lo_head); static void __iomem *io_mem; /* Local functions */ static int ebda_rsrc_controller(void); static int ebda_rsrc_rsrc(void); static int ebda_rio_table(void); static struct ebda_hpc_list * __init alloc_ebda_hpc_list(void) { return kzalloc(sizeof(struct ebda_hpc_list), GFP_KERNEL); } static struct controller *alloc_ebda_hpc(u32 slot_count, u32 bus_count) { struct controller *controller; struct ebda_hpc_slot *slots; struct ebda_hpc_bus *buses; controller = kzalloc(sizeof(struct controller), GFP_KERNEL); if (!controller) goto error; slots = kcalloc(slot_count, sizeof(struct ebda_hpc_slot), GFP_KERNEL); if (!slots) goto error_contr; controller->slots = slots; buses = kcalloc(bus_count, sizeof(struct ebda_hpc_bus), GFP_KERNEL); if (!buses) goto error_slots; controller->buses = buses; return controller; error_slots: kfree(controller->slots); error_contr: kfree(controller); error: return NULL; } static void free_ebda_hpc(struct controller *controller) { kfree(controller->slots); kfree(controller->buses); kfree(controller); } static struct ebda_rsrc_list * __init alloc_ebda_rsrc_list(void) { return kzalloc(sizeof(struct ebda_rsrc_list), GFP_KERNEL); } static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc(void) { return kzalloc(sizeof(struct ebda_pci_rsrc), GFP_KERNEL); } static void __init print_bus_info(void) { struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { debug("%s - slot_min = %x\n", __func__, ptr->slot_min); debug("%s - slot_max = %x\n", __func__, ptr->slot_max); debug("%s - slot_count = %x\n", __func__, ptr->slot_count); debug("%s - bus# = %x\n", __func__, ptr->busno); debug("%s - current_speed = %x\n", __func__, ptr->current_speed); debug("%s - controller_id = %x\n", __func__, ptr->controller_id); debug("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv); debug("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv); debug("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix); debug("%s - slots_at_100_pcix = %x\n", __func__, ptr->slots_at_100_pcix); debug("%s - slots_at_133_pcix = %x\n", __func__, ptr->slots_at_133_pcix); } } static 
void print_lo_info(void) { struct rio_detail *ptr; debug("print_lo_info ----\n"); list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { debug("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug("%s - rio_type = %x\n", __func__, ptr->rio_type); debug("%s - owner_id = %x\n", __func__, ptr->owner_id); debug("%s - first_slot_num = %x\n", __func__, ptr->first_slot_num); debug("%s - wpindex = %x\n", __func__, ptr->wpindex); debug("%s - chassis_num = %x\n", __func__, ptr->chassis_num); } } static void print_vg_info(void) { struct rio_detail *ptr; debug("%s ---\n", __func__); list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) { debug("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug("%s - rio_type = %x\n", __func__, ptr->rio_type); debug("%s - owner_id = %x\n", __func__, ptr->owner_id); debug("%s - first_slot_num = %x\n", __func__, ptr->first_slot_num); debug("%s - wpindex = %x\n", __func__, ptr->wpindex); debug("%s - chassis_num = %x\n", __func__, ptr->chassis_num); } } static void __init print_ebda_pci_rsrc(void) { struct ebda_pci_rsrc *ptr; list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { debug("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", __func__, ptr->rsrc_type, ptr->bus_num, ptr->dev_fun, ptr->start_addr, ptr->end_addr); } } static void __init print_ibm_slot(void) { struct slot *ptr; list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) { debug("%s - slot_number: %x\n", __func__, ptr->number); } } static void __init print_opt_vg(void) { struct opt_rio *ptr; debug("%s ---\n", __func__); list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { debug("%s - rio_type %x\n", __func__, ptr->rio_type); debug("%s - chassis_num: %x\n", __func__, ptr->chassis_num); debug("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num); debug("%s - middle_num: %x\n", __func__, ptr->middle_num); } } static void __init print_ebda_hpc(void) { struct controller *hpc_ptr; u16 index; list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) { for (index = 0; index < hpc_ptr->slot_count; index++) { debug("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); debug("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); debug("%s - index into ctlr addr: %x\n", __func__, hpc_ptr->slots[index].ctl_index); debug("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap); } for (index = 0; index < hpc_ptr->bus_count; index++) debug("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num); debug("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type); switch (hpc_ptr->ctlr_type) { case 1: debug("%s - bus: %x\n", __func__, hpc_ptr->u.pci_ctlr.bus); debug("%s - dev_fun: %x\n", __func__, hpc_ptr->u.pci_ctlr.dev_fun); debug("%s - irq: %x\n", __func__, hpc_ptr->irq); break; case 0: debug("%s - io_start: %x\n", __func__, hpc_ptr->u.isa_ctlr.io_start); debug("%s - io_end: %x\n", __func__, hpc_ptr->u.isa_ctlr.io_end); debug("%s - irq: %x\n", __func__, hpc_ptr->irq); break; case 2: case 4: debug("%s - wpegbbar: %lx\n", __func__, hpc_ptr->u.wpeg_ctlr.wpegbbar); debug("%s - i2c_addr: %x\n", __func__, hpc_ptr->u.wpeg_ctlr.i2c_addr); debug("%s - irq: %x\n", __func__, hpc_ptr->irq); break; } } } int __init ibmphp_access_ebda(void) { u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz; u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; int rc = 0; rio_complete = 0; hs_complete = 0; io_mem = ioremap((0x40 << 4) 
+ 0x0e, 2); if (!io_mem) return -ENOMEM; ebda_seg = readw(io_mem); iounmap(io_mem); debug("returned ebda segment: %x\n", ebda_seg); io_mem = ioremap(ebda_seg<<4, 1); if (!io_mem) return -ENOMEM; ebda_sz = readb(io_mem); iounmap(io_mem); debug("ebda size: %d(KiB)\n", ebda_sz); if (ebda_sz == 0) return -ENOMEM; io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024)); if (!io_mem) return -ENOMEM; next_offset = 0x180; for (;;) { offset = next_offset; /* Make sure what we read is still in the mapped section */ if (WARN(offset > (ebda_sz * 1024 - 4), "ibmphp_ebda: next read is beyond ebda_sz\n")) break; next_offset = readw(io_mem + offset); /* offset of next blk */ offset += 2; if (next_offset == 0) /* 0 indicate it's last blk */ break; blk_id = readw(io_mem + offset); /* this blk id */ offset += 2; /* check if it is hot swap block or rio block */ if (blk_id != 0x4853 && blk_id != 0x4752) continue; /* found hs table */ if (blk_id == 0x4853) { debug("now enter hot swap block---\n"); debug("hot blk id: %x\n", blk_id); format = readb(io_mem + offset); offset += 1; if (format != 4) goto error_nodev; debug("hot blk format: %x\n", format); /* hot swap sub blk */ base = offset; sub_addr = base; re = readw(io_mem + sub_addr); /* next sub blk */ sub_addr += 2; rc_id = readw(io_mem + sub_addr); /* sub blk id */ sub_addr += 2; if (rc_id != 0x5243) goto error_nodev; /* rc sub blk signature */ num_ctlrs = readb(io_mem + sub_addr); sub_addr += 1; hpc_list_ptr = alloc_ebda_hpc_list(); if (!hpc_list_ptr) { rc = -ENOMEM; goto out; } hpc_list_ptr->format = format; hpc_list_ptr->num_ctlrs = num_ctlrs; hpc_list_ptr->phys_addr = sub_addr; /* offset of RSRC_CONTROLLER blk */ debug("info about hpc descriptor---\n"); debug("hot blk format: %x\n", format); debug("num of controller: %x\n", num_ctlrs); debug("offset of hpc data structure entries: %x\n ", sub_addr); sub_addr = base + re; /* re sub blk */ /* FIXME: rc is never used/checked */ rc = readw(io_mem + sub_addr); /* next sub blk */ sub_addr += 2; re_id = readw(io_mem + sub_addr); /* sub blk id */ sub_addr += 2; if (re_id != 0x5245) goto error_nodev; /* signature of re */ num_entries = readw(io_mem + sub_addr); sub_addr += 2; /* offset of RSRC_ENTRIES blk */ rsrc_list_ptr = alloc_ebda_rsrc_list(); if (!rsrc_list_ptr) { rc = -ENOMEM; goto out; } rsrc_list_ptr->format = format; rsrc_list_ptr->num_entries = num_entries; rsrc_list_ptr->phys_addr = sub_addr; debug("info about rsrc descriptor---\n"); debug("format: %x\n", format); debug("num of rsrc: %x\n", num_entries); debug("offset of rsrc data structure entries: %x\n ", sub_addr); hs_complete = 1; } else { /* found rio table, blk_id == 0x4752 */ debug("now enter io table ---\n"); debug("rio blk id: %x\n", blk_id); rio_table_ptr = kzalloc(sizeof(struct rio_table_hdr), GFP_KERNEL); if (!rio_table_ptr) { rc = -ENOMEM; goto out; } rio_table_ptr->ver_num = readb(io_mem + offset); rio_table_ptr->scal_count = readb(io_mem + offset + 1); rio_table_ptr->riodev_count = readb(io_mem + offset + 2); rio_table_ptr->offset = offset + 3 ; debug("info about rio table hdr ---\n"); debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ", rio_table_ptr->ver_num, rio_table_ptr->scal_count, rio_table_ptr->riodev_count, rio_table_ptr->offset); rio_complete = 1; } } if (!hs_complete && !rio_complete) goto error_nodev; if (rio_table_ptr) { if (rio_complete && rio_table_ptr->ver_num == 3) { rc = ebda_rio_table(); if (rc) goto out; } } rc = ebda_rsrc_controller(); if (rc) goto out; rc = ebda_rsrc_rsrc(); goto out; 
error_nodev: rc = -ENODEV; out: iounmap(io_mem); return rc; } /* * map info of scalability details and rio details from physical address */ static int __init ebda_rio_table(void) { u16 offset; u8 i; struct rio_detail *rio_detail_ptr; offset = rio_table_ptr->offset; offset += 12 * rio_table_ptr->scal_count; // we do concern about rio details for (i = 0; i < rio_table_ptr->riodev_count; i++) { rio_detail_ptr = kzalloc(sizeof(struct rio_detail), GFP_KERNEL); if (!rio_detail_ptr) return -ENOMEM; rio_detail_ptr->rio_node_id = readb(io_mem + offset); rio_detail_ptr->bbar = readl(io_mem + offset + 1); rio_detail_ptr->rio_type = readb(io_mem + offset + 5); rio_detail_ptr->owner_id = readb(io_mem + offset + 6); rio_detail_ptr->port0_node_connect = readb(io_mem + offset + 7); rio_detail_ptr->port0_port_connect = readb(io_mem + offset + 8); rio_detail_ptr->port1_node_connect = readb(io_mem + offset + 9); rio_detail_ptr->port1_port_connect = readb(io_mem + offset + 10); rio_detail_ptr->first_slot_num = readb(io_mem + offset + 11); rio_detail_ptr->status = readb(io_mem + offset + 12); rio_detail_ptr->wpindex = readb(io_mem + offset + 13); rio_detail_ptr->chassis_num = readb(io_mem + offset + 14); // debug("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status); //create linked list of chassis if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5) list_add(&rio_detail_ptr->rio_detail_list, &rio_vg_head); //create linked list of expansion box else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7) list_add(&rio_detail_ptr->rio_detail_list, &rio_lo_head); else // not in my concern kfree(rio_detail_ptr); offset += 15; } print_lo_info(); print_vg_info(); return 0; } /* * reorganizing linked list of chassis */ static struct opt_rio *search_opt_vg(u8 chassis_num) { struct opt_rio *ptr; list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { if (ptr->chassis_num == chassis_num) return ptr; } return NULL; } static int __init combine_wpg_for_chassis(void) { struct opt_rio *opt_rio_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { opt_rio_ptr = search_opt_vg(rio_detail_ptr->chassis_num); if (!opt_rio_ptr) { opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); if (!opt_rio_ptr) return -ENOMEM; opt_rio_ptr->rio_type = rio_detail_ptr->rio_type; opt_rio_ptr->chassis_num = rio_detail_ptr->chassis_num; opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num; opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num; list_add(&opt_rio_ptr->opt_rio_list, &opt_vg_head); } else { opt_rio_ptr->first_slot_num = min(opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num); opt_rio_ptr->middle_num = max(opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num); } } print_opt_vg(); return 0; } /* * reorganizing linked list of expansion box */ static struct opt_rio_lo *search_opt_lo(u8 chassis_num) { struct opt_rio_lo *ptr; list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { if (ptr->chassis_num == chassis_num) return ptr; } return NULL; } static int combine_wpg_for_expansion(void) { struct opt_rio_lo 
*opt_rio_lo_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { opt_rio_lo_ptr = search_opt_lo(rio_detail_ptr->chassis_num); if (!opt_rio_lo_ptr) { opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); if (!opt_rio_lo_ptr) return -ENOMEM; opt_rio_lo_ptr->rio_type = rio_detail_ptr->rio_type; opt_rio_lo_ptr->chassis_num = rio_detail_ptr->chassis_num; opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num; opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num; opt_rio_lo_ptr->pack_count = 1; list_add(&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head); } else { opt_rio_lo_ptr->first_slot_num = min(opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num); opt_rio_lo_ptr->middle_num = max(opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num); opt_rio_lo_ptr->pack_count = 2; } } return 0; } /* Since we don't know the max slot number per each chassis, hence go * through the list of all chassis to find out the range * Arguments: slot_num, 1st slot number of the chassis we think we are on, * var (0 = chassis, 1 = expansion box) */ static int first_slot_num(u8 slot_num, u8 first_slot, u8 var) { struct opt_rio *opt_vg_ptr = NULL; struct opt_rio_lo *opt_lo_ptr = NULL; int rc = 0; if (!var) { list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { rc = -ENODEV; break; } } } else { list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) { rc = -ENODEV; break; } } } return rc; } static struct opt_rio_lo *find_rxe_num(u8 slot_num) { struct opt_rio_lo *opt_lo_ptr; list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { //check to see if this slot_num belongs to expansion box if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num(slot_num, opt_lo_ptr->first_slot_num, 1))) return opt_lo_ptr; } return NULL; } static struct opt_rio *find_chassis_num(u8 slot_num) { struct opt_rio *opt_vg_ptr; list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { //check to see if this slot_num belongs to chassis if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num(slot_num, opt_vg_ptr->first_slot_num, 0))) return opt_vg_ptr; } return NULL; } /* This routine will find out how many slots are in the chassis, so that * the slot numbers for rxe100 would start from 1, and not from 7, or 6 etc */ static u8 calculate_first_slot(u8 slot_num) { u8 first_slot = 1; struct slot *slot_cur; list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { if (slot_cur->ctrl) { if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) first_slot = slot_cur->ctrl->ending_slot_num; } } return first_slot + 1; } #define SLOT_NAME_SIZE 30 static char *create_file_name(struct slot *slot_cur) { struct opt_rio *opt_vg_ptr = NULL; struct opt_rio_lo *opt_lo_ptr = NULL; static char str[SLOT_NAME_SIZE]; int which = 0; /* rxe = 1, chassis = 0 */ u8 number = 1; /* either chassis or rxe # */ u8 first_slot = 1; u8 slot_num; u8 flag = 0; if (!slot_cur) { err("Structure passed is empty\n"); return NULL; } slot_num = slot_cur->number; memset(str, 0, sizeof(str)); if (rio_table_ptr) { if (rio_table_ptr->ver_num == 3) { opt_vg_ptr = find_chassis_num(slot_num); opt_lo_ptr = find_rxe_num(slot_num); } } if (opt_vg_ptr) { if (opt_lo_ptr) { if ((slot_num 
- opt_vg_ptr->first_slot_num) > (slot_num - opt_lo_ptr->first_slot_num)) { number = opt_lo_ptr->chassis_num; first_slot = opt_lo_ptr->first_slot_num; which = 1; /* it is RXE */ } else { first_slot = opt_vg_ptr->first_slot_num; number = opt_vg_ptr->chassis_num; which = 0; } } else { first_slot = opt_vg_ptr->first_slot_num; number = opt_vg_ptr->chassis_num; which = 0; } ++flag; } else if (opt_lo_ptr) { number = opt_lo_ptr->chassis_num; first_slot = opt_lo_ptr->first_slot_num; which = 1; ++flag; } else if (rio_table_ptr) { if (rio_table_ptr->ver_num == 3) { /* if both NULL and we DO have correct RIO table in BIOS */ return NULL; } } if (!flag) { if (slot_cur->ctrl->ctlr_type == 4) { first_slot = calculate_first_slot(slot_num); which = 1; } else { which = 0; } } sprintf(str, "%s%dslot%d", which == 0 ? "chassis" : "rxe", number, slot_num - first_slot + 1); return str; } static int fillslotinfo(struct hotplug_slot *hotplug_slot) { struct slot *slot; int rc = 0; slot = to_slot(hotplug_slot); rc = ibmphp_hpc_readslot(slot, READ_ALLSTAT, NULL); return rc; } static struct pci_driver ibmphp_driver; /* * map info (ctlr-id, slot count, slot#.. bus count, bus#, ctlr type...) of * each hpc from physical address to a list of hot plug controllers based on * hpc descriptors. */ static int __init ebda_rsrc_controller(void) { u16 addr, addr_slot, addr_bus; u8 ctlr_id, temp, bus_index; u16 ctlr, slot, bus; u16 slot_num, bus_num, index; struct controller *hpc_ptr; struct ebda_hpc_bus *bus_ptr; struct ebda_hpc_slot *slot_ptr; struct bus_info *bus_info_ptr1, *bus_info_ptr2; int rc; struct slot *tmp_slot; char name[SLOT_NAME_SIZE]; addr = hpc_list_ptr->phys_addr; for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { bus_index = 1; ctlr_id = readb(io_mem + addr); addr += 1; slot_num = readb(io_mem + addr); addr += 1; addr_slot = addr; /* offset of slot structure */ addr += (slot_num * 4); bus_num = readb(io_mem + addr); addr += 1; addr_bus = addr; /* offset of bus */ addr += (bus_num * 9); /* offset of ctlr_type */ temp = readb(io_mem + addr); addr += 1; /* init hpc structure */ hpc_ptr = alloc_ebda_hpc(slot_num, bus_num); if (!hpc_ptr) { return -ENOMEM; } hpc_ptr->ctlr_id = ctlr_id; hpc_ptr->ctlr_relative_id = ctlr; hpc_ptr->slot_count = slot_num; hpc_ptr->bus_count = bus_num; debug("now enter ctlr data structure ---\n"); debug("ctlr id: %x\n", ctlr_id); debug("ctlr_relative_id: %x\n", hpc_ptr->ctlr_relative_id); debug("count of slots controlled by this ctlr: %x\n", slot_num); debug("count of buses controlled by this ctlr: %x\n", bus_num); /* init slot structure, fetch slot, bus, cap... 
*/ slot_ptr = hpc_ptr->slots; for (slot = 0; slot < slot_num; slot++) { slot_ptr->slot_num = readb(io_mem + addr_slot); slot_ptr->slot_bus_num = readb(io_mem + addr_slot + slot_num); slot_ptr->ctl_index = readb(io_mem + addr_slot + 2*slot_num); slot_ptr->slot_cap = readb(io_mem + addr_slot + 3*slot_num); // create bus_info lined list --- if only one slot per bus: slot_min = slot_max bus_info_ptr2 = ibmphp_find_same_bus_num(slot_ptr->slot_bus_num); if (!bus_info_ptr2) { bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL); if (!bus_info_ptr1) { rc = -ENOMEM; goto error_no_slot; } bus_info_ptr1->slot_min = slot_ptr->slot_num; bus_info_ptr1->slot_max = slot_ptr->slot_num; bus_info_ptr1->slot_count += 1; bus_info_ptr1->busno = slot_ptr->slot_bus_num; bus_info_ptr1->index = bus_index++; bus_info_ptr1->current_speed = 0xff; bus_info_ptr1->current_bus_mode = 0xff; bus_info_ptr1->controller_id = hpc_ptr->ctlr_id; list_add_tail(&bus_info_ptr1->bus_info_list, &bus_info_head); } else { bus_info_ptr2->slot_min = min(bus_info_ptr2->slot_min, slot_ptr->slot_num); bus_info_ptr2->slot_max = max(bus_info_ptr2->slot_max, slot_ptr->slot_num); bus_info_ptr2->slot_count += 1; } // end of creating the bus_info linked list slot_ptr++; addr_slot += 1; } /* init bus structure */ bus_ptr = hpc_ptr->buses; for (bus = 0; bus < bus_num; bus++) { bus_ptr->bus_num = readb(io_mem + addr_bus + bus); bus_ptr->slots_at_33_conv = readb(io_mem + addr_bus + bus_num + 8 * bus); bus_ptr->slots_at_66_conv = readb(io_mem + addr_bus + bus_num + 8 * bus + 1); bus_ptr->slots_at_66_pcix = readb(io_mem + addr_bus + bus_num + 8 * bus + 2); bus_ptr->slots_at_100_pcix = readb(io_mem + addr_bus + bus_num + 8 * bus + 3); bus_ptr->slots_at_133_pcix = readb(io_mem + addr_bus + bus_num + 8 * bus + 4); bus_info_ptr2 = ibmphp_find_same_bus_num(bus_ptr->bus_num); if (bus_info_ptr2) { bus_info_ptr2->slots_at_33_conv = bus_ptr->slots_at_33_conv; bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv; bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix; bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix; bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix; } bus_ptr++; } hpc_ptr->ctlr_type = temp; switch (hpc_ptr->ctlr_type) { case 1: hpc_ptr->u.pci_ctlr.bus = readb(io_mem + addr); hpc_ptr->u.pci_ctlr.dev_fun = readb(io_mem + addr + 1); hpc_ptr->irq = readb(io_mem + addr + 2); addr += 3; debug("ctrl bus = %x, ctlr devfun = %x, irq = %x\n", hpc_ptr->u.pci_ctlr.bus, hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq); break; case 0: hpc_ptr->u.isa_ctlr.io_start = readw(io_mem + addr); hpc_ptr->u.isa_ctlr.io_end = readw(io_mem + addr + 2); if (!request_region(hpc_ptr->u.isa_ctlr.io_start, (hpc_ptr->u.isa_ctlr.io_end - hpc_ptr->u.isa_ctlr.io_start + 1), "ibmphp")) { rc = -ENODEV; goto error_no_slot; } hpc_ptr->irq = readb(io_mem + addr + 4); addr += 5; break; case 2: case 4: hpc_ptr->u.wpeg_ctlr.wpegbbar = readl(io_mem + addr); hpc_ptr->u.wpeg_ctlr.i2c_addr = readb(io_mem + addr + 4); hpc_ptr->irq = readb(io_mem + addr + 5); addr += 6; break; default: rc = -ENODEV; goto error_no_slot; } //reorganize chassis' linked list combine_wpg_for_chassis(); combine_wpg_for_expansion(); hpc_ptr->revision = 0xff; hpc_ptr->options = 0xff; hpc_ptr->starting_slot_num = hpc_ptr->slots[0].slot_num; hpc_ptr->ending_slot_num = hpc_ptr->slots[slot_num-1].slot_num; // register slots with hpc core as well as create linked list of ibm slot for (index = 0; index < hpc_ptr->slot_count; index++) { tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL); 
if (!tmp_slot) { rc = -ENOMEM; goto error_no_slot; } tmp_slot->flag = 1; tmp_slot->capabilities = hpc_ptr->slots[index].slot_cap; if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_133_MAX) == EBDA_SLOT_133_MAX) tmp_slot->supported_speed = 3; else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_100_MAX) == EBDA_SLOT_100_MAX) tmp_slot->supported_speed = 2; else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX) tmp_slot->supported_speed = 1; if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP) tmp_slot->supported_bus_mode = 1; else tmp_slot->supported_bus_mode = 0; tmp_slot->bus = hpc_ptr->slots[index].slot_bus_num; bus_info_ptr1 = ibmphp_find_same_bus_num(hpc_ptr->slots[index].slot_bus_num); if (!bus_info_ptr1) { rc = -ENODEV; goto error; } tmp_slot->bus_on = bus_info_ptr1; bus_info_ptr1 = NULL; tmp_slot->ctrl = hpc_ptr; tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index; tmp_slot->number = hpc_ptr->slots[index].slot_num; rc = fillslotinfo(&tmp_slot->hotplug_slot); if (rc) goto error; rc = ibmphp_init_devno(&tmp_slot); if (rc) goto error; tmp_slot->hotplug_slot.ops = &ibmphp_hotplug_slot_ops; // end of registering ibm slot with hotplug core list_add(&tmp_slot->ibm_slot_list, &ibmphp_slot_head); } print_bus_info(); list_add(&hpc_ptr->ebda_hpc_list, &ebda_hpc_head); } /* each hpc */ list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) { snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot)); pci_hp_register(&tmp_slot->hotplug_slot, pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name); } print_ebda_hpc(); print_ibm_slot(); return 0; error: kfree(tmp_slot); error_no_slot: free_ebda_hpc(hpc_ptr); return rc; } /* * map info (bus, devfun, start addr, end addr..) of i/o, memory, * pfm from the physical addr to a list of resource. 
*/ static int __init ebda_rsrc_rsrc(void) { u16 addr; short rsrc; u8 type, rsrc_type; struct ebda_pci_rsrc *rsrc_ptr; addr = rsrc_list_ptr->phys_addr; debug("now entering rsrc land\n"); debug("offset of rsrc: %x\n", rsrc_list_ptr->phys_addr); for (rsrc = 0; rsrc < rsrc_list_ptr->num_entries; rsrc++) { type = readb(io_mem + addr); addr += 1; rsrc_type = type & EBDA_RSRC_TYPE_MASK; if (rsrc_type == EBDA_IO_RSRC_TYPE) { rsrc_ptr = alloc_ebda_pci_rsrc(); if (!rsrc_ptr) { iounmap(io_mem); return -ENOMEM; } rsrc_ptr->rsrc_type = type; rsrc_ptr->bus_num = readb(io_mem + addr); rsrc_ptr->dev_fun = readb(io_mem + addr + 1); rsrc_ptr->start_addr = readw(io_mem + addr + 2); rsrc_ptr->end_addr = readw(io_mem + addr + 4); addr += 6; debug("rsrc from io type ----\n"); debug("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr); list_add(&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head); } if (rsrc_type == EBDA_MEM_RSRC_TYPE || rsrc_type == EBDA_PFM_RSRC_TYPE) { rsrc_ptr = alloc_ebda_pci_rsrc(); if (!rsrc_ptr) { iounmap(io_mem); return -ENOMEM; } rsrc_ptr->rsrc_type = type; rsrc_ptr->bus_num = readb(io_mem + addr); rsrc_ptr->dev_fun = readb(io_mem + addr + 1); rsrc_ptr->start_addr = readl(io_mem + addr + 2); rsrc_ptr->end_addr = readl(io_mem + addr + 6); addr += 10; debug("rsrc from mem or pfm ---\n"); debug("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr); list_add(&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head); } } kfree(rsrc_list_ptr); rsrc_list_ptr = NULL; print_ebda_pci_rsrc(); return 0; } u16 ibmphp_get_total_controllers(void) { return hpc_list_ptr->num_ctlrs; } struct slot *ibmphp_get_slot_from_physical_num(u8 physical_num) { struct slot *slot; list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) { if (slot->number == physical_num) return slot; } return NULL; } /* To find: * - the smallest slot number * - the largest slot number * - the total number of the slots based on each bus * (if only one slot per bus slot_min = slot_max ) */ struct bus_info *ibmphp_find_same_bus_num(u32 num) { struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { if (ptr->busno == num) return ptr; } return NULL; } /* Finding relative bus number, in order to map corresponding * bus register */ int ibmphp_get_bus_index(u8 num) { struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { if (ptr->busno == num) return ptr->index; } return -ENODEV; } void ibmphp_free_bus_info_queue(void) { struct bus_info *bus_info, *next; list_for_each_entry_safe(bus_info, next, &bus_info_head, bus_info_list) { kfree (bus_info); } } void ibmphp_free_ebda_hpc_queue(void) { struct controller *controller = NULL, *next; int pci_flag = 0; list_for_each_entry_safe(controller, next, &ebda_hpc_head, ebda_hpc_list) { if (controller->ctlr_type == 0) release_region(controller->u.isa_ctlr.io_start, (controller->u.isa_ctlr.io_end - controller->u.isa_ctlr.io_start + 1)); else if ((controller->ctlr_type == 1) && (!pci_flag)) { ++pci_flag; pci_unregister_driver(&ibmphp_driver); } free_ebda_hpc(controller); } } void ibmphp_free_ebda_pci_rsrc_queue(void) { struct ebda_pci_rsrc *resource, *next; list_for_each_entry_safe(resource, next, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { kfree (resource); resource = NULL; } } static const struct pci_device_id 
id_table[] = { { .vendor = PCI_VENDOR_ID_IBM, .device = HPC_DEVICE_ID, .subvendor = PCI_VENDOR_ID_IBM, .subdevice = HPC_SUBSYSTEM_ID, .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), }, {} }; MODULE_DEVICE_TABLE(pci, id_table); static int ibmphp_probe(struct pci_dev *, const struct pci_device_id *); static struct pci_driver ibmphp_driver = { .name = "ibmphp", .id_table = id_table, .probe = ibmphp_probe, }; int ibmphp_register_pci(void) { struct controller *ctrl; int rc = 0; list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { rc = pci_register_driver(&ibmphp_driver); break; } } return rc; } static int ibmphp_probe(struct pci_dev *dev, const struct pci_device_id *ids) { struct controller *ctrl; debug("inside ibmphp_probe\n"); list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { ctrl->ctrl_dev = dev; debug("found device!!!\n"); debug("dev->device = %x, dev->subsystem_device = %x\n", dev->device, dev->subsystem_device); return 0; } } } return -ENODEV; }
linux-master
drivers/pci/hotplug/ibmphp_ebda.c
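ibmphp_access_ebda() in the record above begins by chasing the EBDA pointer out of the BIOS Data Area: the word at 0x40:0x0E holds the EBDA segment, and the first byte of the EBDA gives its size in KiB, which bounds the subsequent ioremap of the whole area. A distilled sketch of just that discovery step follows; the helper name "example_map_ebda" is an assumption and error reporting is trimmed.

/*
 * Sketch: locate and map the Extended BIOS Data Area.
 * 0x40:0x0E in the BDA -> EBDA segment; first EBDA byte -> size in KiB.
 */
#include <linux/io.h>
#include <linux/kernel.h>

static void __iomem *example_map_ebda(unsigned int *size_kib)
{
        void __iomem *p;
        u16 ebda_seg;
        u8 sz;

        p = ioremap((0x40 << 4) + 0x0e, 2);     /* BDA word holding EBDA segment */
        if (!p)
                return NULL;
        ebda_seg = readw(p);
        iounmap(p);

        p = ioremap((unsigned long)ebda_seg << 4, 1);   /* first byte = size in KiB */
        if (!p)
                return NULL;
        sz = readb(p);
        iounmap(p);
        if (!sz)
                return NULL;

        *size_kib = sz;
        return ioremap((unsigned long)ebda_seg << 4, sz * 1024);
}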
// SPDX-License-Identifier: GPL-2.0+ /* * IBM Hot Plug Controller Driver * * Written By: Irene Zubarev, IBM Corporation * * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001,2002 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/list.h> #include "ibmphp.h" static int configure_device(struct pci_func *); static int configure_bridge(struct pci_func **, u8); static struct res_needed *scan_behind_bridge(struct pci_func *, u8); static int add_new_bus(struct bus_node *, struct resource_node *, struct resource_node *, struct resource_node *, u8); static u8 find_sec_number(u8 primary_busno, u8 slotno); /* * NOTE..... If BIOS doesn't provide default routing, we assign: * 9 for SCSI, 10 for LAN adapters, and 11 for everything else. * If adapter is bridged, then we assign 11 to it and devices behind it. * We also assign the same irq numbers for multi function devices. * These are PIC mode, so shouldn't matter n.e.ways (hopefully) */ static void assign_alt_irq(struct pci_func *cur_func, u8 class_code) { int j; for (j = 0; j < 4; j++) { if (cur_func->irq[j] == 0xff) { switch (class_code) { case PCI_BASE_CLASS_STORAGE: cur_func->irq[j] = SCSI_IRQ; break; case PCI_BASE_CLASS_NETWORK: cur_func->irq[j] = LAN_IRQ; break; default: cur_func->irq[j] = OTHER_IRQ; break; } } } } /* * Configures the device to be added (will allocate needed resources if it * can), the device can be a bridge or a regular pci device, can also be * multi-functional * * Input: function to be added * * TO DO: The error case with Multifunction device or multi function bridge, * if there is an error, will need to go through all previous functions and * unconfigure....or can add some code into unconfigure_card.... */ int ibmphp_configure_card(struct pci_func *func, u8 slotno) { u16 vendor_id; u32 class; u8 class_code; u8 hdr_type, device, sec_number; u8 function; struct pci_func *newfunc; /* for multi devices */ struct pci_func *cur_func, *prev_func; int rc, i, j; int cleanup_count; u8 flag; u8 valid_device = 0x00; /* to see if we are able to read from card any device info at all */ debug("inside configure_card, func->busno = %x\n", func->busno); device = func->device; cur_func = func; /* We only get bus and device from IRQ routing table. So at this point, * func->busno is correct, and func->device contains only device (at the 5 * highest bits) */ /* For every function on the card */ for (function = 0x00; function < 0x08; function++) { unsigned int devfn = PCI_DEVFN(device, function); ibmphp_pci_bus->number = cur_func->busno; cur_func->function = function; debug("inside the loop, cur_func->busno = %x, cur_func->device = %x, cur_func->function = %x\n", cur_func->busno, cur_func->device, cur_func->function); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id); debug("vendor_id is %x\n", vendor_id); if (vendor_id != PCI_VENDOR_ID_NOTVALID) { /* found correct device!!! 
*/ debug("found valid device, vendor_id = %x\n", vendor_id); ++valid_device; /* header: x x x x x x x x * | |___________|=> 1=PPB bridge, 0=normal device, 2=CardBus Bridge * |_=> 0 = single function device, 1 = multi-function device */ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class); class_code = class >> 24; debug("hrd_type = %x, class = %x, class_code %x\n", hdr_type, class, class_code); class >>= 8; /* to take revision out, class = class.subclass.prog i/f */ if (class == PCI_CLASS_NOT_DEFINED_VGA) { err("The device %x is VGA compatible and as is not supported for hot plugging. " "Please choose another device.\n", cur_func->device); return -ENODEV; } else if (class == PCI_CLASS_DISPLAY_VGA) { err("The device %x is not supported for hot plugging. Please choose another device.\n", cur_func->device); return -ENODEV; } switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: debug("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class); assign_alt_irq(cur_func, class_code); rc = configure_device(cur_func); if (rc < 0) { /* We need to do this in case some other BARs were properly inserted */ err("was not able to configure devfunc %x on bus %x.\n", cur_func->device, cur_func->busno); cleanup_count = 6; goto error; } cur_func->next = NULL; function = 0x8; break; case PCI_HEADER_TYPE_MULTIDEVICE: assign_alt_irq(cur_func, class_code); rc = configure_device(cur_func); if (rc < 0) { /* We need to do this in case some other BARs were properly inserted */ err("was not able to configure devfunc %x on bus %x...bailing out\n", cur_func->device, cur_func->busno); cleanup_count = 6; goto error; } newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) return -ENOMEM; newfunc->busno = cur_func->busno; newfunc->device = device; cur_func->next = newfunc; cur_func = newfunc; for (j = 0; j < 4; j++) newfunc->irq[j] = cur_func->irq[j]; break; case PCI_HEADER_TYPE_MULTIBRIDGE: class >>= 8; if (class != PCI_CLASS_BRIDGE_PCI) { err("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. 
Please insert another card.\n", cur_func->device); return -ENODEV; } assign_alt_irq(cur_func, class_code); rc = configure_bridge(&cur_func, slotno); if (rc == -ENODEV) { err("You chose to insert Single Bridge, or nested bridges, this is not supported...\n"); err("Bus %x, devfunc %x\n", cur_func->busno, cur_func->device); return rc; } if (rc) { /* We need to do this in case some other BARs were properly inserted */ err("was not able to hot-add PPB properly.\n"); func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */ cleanup_count = 2; goto error; } pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); flag = 0; for (i = 0; i < 32; i++) { if (func->devices[i]) { newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) return -ENOMEM; newfunc->busno = sec_number; newfunc->device = (u8) i; for (j = 0; j < 4; j++) newfunc->irq[j] = cur_func->irq[j]; if (flag) { for (prev_func = cur_func; prev_func->next; prev_func = prev_func->next) ; prev_func->next = newfunc; } else cur_func->next = newfunc; rc = ibmphp_configure_card(newfunc, slotno); /* This could only happen if kmalloc failed */ if (rc) { /* We need to do this in case bridge itself got configured properly, but devices behind it failed */ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */ cleanup_count = 2; goto error; } flag = 1; } } newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) return -ENOMEM; newfunc->busno = cur_func->busno; newfunc->device = device; for (j = 0; j < 4; j++) newfunc->irq[j] = cur_func->irq[j]; for (prev_func = cur_func; prev_func->next; prev_func = prev_func->next); prev_func->next = newfunc; cur_func = newfunc; break; case PCI_HEADER_TYPE_BRIDGE: class >>= 8; debug("class now is %x\n", class); if (class != PCI_CLASS_BRIDGE_PCI) { err("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n", cur_func->device); return -ENODEV; } assign_alt_irq(cur_func, class_code); debug("cur_func->busno b4 configure_bridge is %x\n", cur_func->busno); rc = configure_bridge(&cur_func, slotno); if (rc == -ENODEV) { err("You chose to insert Single Bridge, or nested bridges, this is not supported...\n"); err("Bus %x, devfunc %x\n", cur_func->busno, cur_func->device); return rc; } if (rc) { /* We need to do this in case some other BARs were properly inserted */ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */ err("was not able to hot-add PPB properly.\n"); cleanup_count = 2; goto error; } debug("cur_func->busno = %x, device = %x, function = %x\n", cur_func->busno, device, function); pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); debug("after configuring bridge..., sec_number = %x\n", sec_number); flag = 0; for (i = 0; i < 32; i++) { if (func->devices[i]) { debug("inside for loop, device is %x\n", i); newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) return -ENOMEM; newfunc->busno = sec_number; newfunc->device = (u8) i; for (j = 0; j < 4; j++) newfunc->irq[j] = cur_func->irq[j]; if (flag) { for (prev_func = cur_func; prev_func->next; prev_func = prev_func->next); prev_func->next = newfunc; } else cur_func->next = newfunc; rc = ibmphp_configure_card(newfunc, slotno); /* Again, this case should not happen... 
For complete paranoia, will need to call remove_bus */ if (rc) { /* We need to do this in case some other BARs were properly inserted */ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */ cleanup_count = 2; goto error; } flag = 1; } } function = 0x8; break; default: err("MAJOR PROBLEM!!!!, header type not supported? %x\n", hdr_type); return -ENXIO; } /* end of switch */ } /* end of valid device */ } /* end of for */ if (!valid_device) { err("Cannot find any valid devices on the card. Or unable to read from card.\n"); return -ENODEV; } return 0; error: for (i = 0; i < cleanup_count; i++) { if (cur_func->io[i]) { ibmphp_remove_resource(cur_func->io[i]); cur_func->io[i] = NULL; } else if (cur_func->pfmem[i]) { ibmphp_remove_resource(cur_func->pfmem[i]); cur_func->pfmem[i] = NULL; } else if (cur_func->mem[i]) { ibmphp_remove_resource(cur_func->mem[i]); cur_func->mem[i] = NULL; } } return rc; } /* * This function configures the pci BARs of a single device. * Input: pointer to the pci_func * Output: configured PCI, 0, or error */ static int configure_device(struct pci_func *func) { u32 bar[6]; static const u32 address[] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, PCI_BASE_ADDRESS_2, PCI_BASE_ADDRESS_3, PCI_BASE_ADDRESS_4, PCI_BASE_ADDRESS_5, 0 }; u8 irq; int count; int len[6]; struct resource_node *io[6]; struct resource_node *mem[6]; struct resource_node *mem_tmp; struct resource_node *pfmem[6]; unsigned int devfn; debug("%s - inside\n", __func__); devfn = PCI_DEVFN(func->device, func->function); ibmphp_pci_bus->number = func->busno; for (count = 0; address[count]; count++) { /* for 6 BARs */ /* not sure if i need this. per scott, said maybe need * something like this if devices don't adhere 100% to the spec, so don't want to write to the reserved bits pcibios_read_config_byte(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, &tmp); if (tmp & 0x01) // IO pcibios_write_config_dword(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD); else // Memory pcibios_write_config_dword(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF); */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &bar[count]); if (!bar[count]) /* This BAR is not implemented */ continue; debug("Device %x BAR %d wants %x\n", func->device, count, bar[count]); if (bar[count] & PCI_BASE_ADDRESS_SPACE_IO) { /* This is IO */ debug("inside IO SPACE\n"); len[count] = bar[count] & 0xFFFFFFFC; len[count] = ~len[count] + 1; debug("len[count] in IO %x, count %d\n", len[count], count); io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!io[count]) return -ENOMEM; io[count]->type = IO; io[count]->busno = func->busno; io[count]->devfunc = PCI_DEVFN(func->device, func->function); io[count]->len = len[count]; if (ibmphp_check_resource(io[count], 0) == 0) { ibmphp_add_resource(io[count]); func->io[count] = io[count]; } else { err("cannot allocate requested io for bus %x device %x function %x len %x\n", func->busno, func->device, func->function, len[count]); kfree(io[count]); return -EIO; } pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->io[count]->start); /* _______________This is for debugging purposes only_____________________ */ debug("b4 writing, the IO address is %x\n", func->io[count]->start); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &bar[count]); debug("after writing.... 
the start address is %x\n", bar[count]); /* _________________________________________________________________________*/ } else { /* This is Memory */ if (bar[count] & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ debug("PFMEM SPACE\n"); len[count] = bar[count] & 0xFFFFFFF0; len[count] = ~len[count] + 1; debug("len[count] in PFMEM %x, count %d\n", len[count], count); pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!pfmem[count]) return -ENOMEM; pfmem[count]->type = PFMEM; pfmem[count]->busno = func->busno; pfmem[count]->devfunc = PCI_DEVFN(func->device, func->function); pfmem[count]->len = len[count]; pfmem[count]->fromMem = 0; if (ibmphp_check_resource(pfmem[count], 0) == 0) { ibmphp_add_resource(pfmem[count]); func->pfmem[count] = pfmem[count]; } else { mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); if (!mem_tmp) { kfree(pfmem[count]); return -ENOMEM; } mem_tmp->type = MEM; mem_tmp->busno = pfmem[count]->busno; mem_tmp->devfunc = pfmem[count]->devfunc; mem_tmp->len = pfmem[count]->len; debug("there's no pfmem... going into mem.\n"); if (ibmphp_check_resource(mem_tmp, 0) == 0) { ibmphp_add_resource(mem_tmp); pfmem[count]->fromMem = 1; pfmem[count]->rangeno = mem_tmp->rangeno; pfmem[count]->start = mem_tmp->start; pfmem[count]->end = mem_tmp->end; ibmphp_add_pfmem_from_mem(pfmem[count]); func->pfmem[count] = pfmem[count]; } else { err("cannot allocate requested pfmem for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree(mem_tmp); kfree(pfmem[count]); return -EIO; } } pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); /*_______________This is for debugging purposes only______________________________*/ debug("b4 writing, start address is %x\n", func->pfmem[count]->start); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &bar[count]); debug("after writing, start address is %x\n", bar[count]); /*_________________________________________________________________________________*/ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */ debug("inside the mem 64 case, count %d\n", count); count += 1; /* on the 2nd dword, write all 0s, since we can't handle them n.e.ways */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0x00000000); } } else { /* regular memory */ debug("REGULAR MEM SPACE\n"); len[count] = bar[count] & 0xFFFFFFF0; len[count] = ~len[count] + 1; debug("len[count] in Mem %x, count %d\n", len[count], count); mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem[count]) return -ENOMEM; mem[count]->type = MEM; mem[count]->busno = func->busno; mem[count]->devfunc = PCI_DEVFN(func->device, func->function); mem[count]->len = len[count]; if (ibmphp_check_resource(mem[count], 0) == 0) { ibmphp_add_resource(mem[count]); func->mem[count] = mem[count]; } else { err("cannot allocate requested mem for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree(mem[count]); return -EIO; } pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->mem[count]->start); /* _______________________This is for debugging purposes only _______________________*/ debug("b4 writing, start address is %x\n", func->mem[count]->start); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &bar[count]); debug("after writing, the address is %x\n", bar[count]); /* __________________________________________________________________________________*/ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another 
dword */ debug("inside mem 64 case, reg. mem, count %d\n", count); count += 1; /* on the 2nd dword, write all 0s, since we can't handle them n.e.ways */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0x00000000); } } } /* end of mem */ } /* end of for */ func->bus = 0; /* To indicate that this is not a PPB */ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq); if ((irq > 0x00) && (irq < 0x05)) pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY); pci_bus_write_config_dword(ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE); return 0; } /****************************************************************************** * This routine configures a PCI-2-PCI bridge and the functions behind it * Parameters: pci_func * Returns: ******************************************************************************/ static int configure_bridge(struct pci_func **func_passed, u8 slotno) { int count; int i; int rc; u8 sec_number; u8 io_base; u16 pfmem_base; u32 bar[2]; u32 len[2]; u8 flag_io = 0; u8 flag_mem = 0; u8 flag_pfmem = 0; u8 need_io_upper = 0; u8 need_pfmem_upper = 0; struct res_needed *amount_needed = NULL; struct resource_node *io = NULL; struct resource_node *bus_io[2] = {NULL, NULL}; struct resource_node *mem = NULL; struct resource_node *bus_mem[2] = {NULL, NULL}; struct resource_node *mem_tmp = NULL; struct resource_node *pfmem = NULL; struct resource_node *bus_pfmem[2] = {NULL, NULL}; struct bus_node *bus; static const u32 address[] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, 0 }; struct pci_func *func = *func_passed; unsigned int devfn; u8 irq; int retval; debug("%s - enter\n", __func__); devfn = PCI_DEVFN(func->function, func->device); ibmphp_pci_bus->number = func->busno; /* Configuring necessary info for the bridge so that we could see the devices * behind it */ pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_PRIMARY_BUS, func->busno); /* _____________________For debugging purposes only __________________________ pci_bus_config_byte(ibmphp_pci_bus, devfn, PCI_PRIMARY_BUS, &pri_number); debug("primary # written into the bridge is %x\n", pri_number); ___________________________________________________________________________*/ /* in EBDA, only get allocated 1 additional bus # per slot */ sec_number = find_sec_number(func->busno, slotno); if (sec_number == 0xff) { err("cannot allocate secondary bus number for the bridged device\n"); return -EINVAL; } debug("after find_sec_number, the number we got is %x\n", sec_number); debug("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number); /* __________________For debugging purposes only __________________________________ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); debug("sec_number after write/read is %x\n", sec_number); ________________________________________________________________________________*/ pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_SUBORDINATE_BUS, sec_number); /* __________________For debugging purposes only ____________________________________ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SUBORDINATE_BUS, &sec_number); debug("subordinate number after write/read is %x\n", sec_number); 
__________________________________________________________________________________*/ pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_SEC_LATENCY_TIMER, LATENCY); debug("func->busno is %x\n", func->busno); debug("sec_number after writing is %x\n", sec_number); /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/ /* First we need to allocate mem/io for the bridge itself in case it needs it */ for (count = 0; address[count]; count++) { /* for 2 BARs */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &bar[count]); if (!bar[count]) { /* This BAR is not implemented */ debug("so we come here then, eh?, count = %d\n", count); continue; } // tmp_bar = bar[count]; debug("Bar %d wants %x\n", count, bar[count]); if (bar[count] & PCI_BASE_ADDRESS_SPACE_IO) { /* This is IO */ len[count] = bar[count] & 0xFFFFFFFC; len[count] = ~len[count] + 1; debug("len[count] in IO = %x\n", len[count]); bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!bus_io[count]) { retval = -ENOMEM; goto error; } bus_io[count]->type = IO; bus_io[count]->busno = func->busno; bus_io[count]->devfunc = PCI_DEVFN(func->device, func->function); bus_io[count]->len = len[count]; if (ibmphp_check_resource(bus_io[count], 0) == 0) { ibmphp_add_resource(bus_io[count]); func->io[count] = bus_io[count]; } else { err("cannot allocate requested io for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree(bus_io[count]); return -EIO; } pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->io[count]->start); } else { /* This is Memory */ if (bar[count] & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ len[count] = bar[count] & 0xFFFFFFF0; len[count] = ~len[count] + 1; debug("len[count] in PFMEM = %x\n", len[count]); bus_pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!bus_pfmem[count]) { retval = -ENOMEM; goto error; } bus_pfmem[count]->type = PFMEM; bus_pfmem[count]->busno = func->busno; bus_pfmem[count]->devfunc = PCI_DEVFN(func->device, func->function); bus_pfmem[count]->len = len[count]; bus_pfmem[count]->fromMem = 0; if (ibmphp_check_resource(bus_pfmem[count], 0) == 0) { ibmphp_add_resource(bus_pfmem[count]); func->pfmem[count] = bus_pfmem[count]; } else { mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); if (!mem_tmp) { retval = -ENOMEM; goto error; } mem_tmp->type = MEM; mem_tmp->busno = bus_pfmem[count]->busno; mem_tmp->devfunc = bus_pfmem[count]->devfunc; mem_tmp->len = bus_pfmem[count]->len; if (ibmphp_check_resource(mem_tmp, 0) == 0) { ibmphp_add_resource(mem_tmp); bus_pfmem[count]->fromMem = 1; bus_pfmem[count]->rangeno = mem_tmp->rangeno; ibmphp_add_pfmem_from_mem(bus_pfmem[count]); func->pfmem[count] = bus_pfmem[count]; } else { err("cannot allocate requested pfmem for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree(mem_tmp); kfree(bus_pfmem[count]); return -EIO; } } pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */ count += 1; /* on the 2nd dword, write all 0s, since we can't 
handle them n.e.ways */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0x00000000); } } else { /* regular memory */ len[count] = bar[count] & 0xFFFFFFF0; len[count] = ~len[count] + 1; debug("len[count] in Memory is %x\n", len[count]); bus_mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!bus_mem[count]) { retval = -ENOMEM; goto error; } bus_mem[count]->type = MEM; bus_mem[count]->busno = func->busno; bus_mem[count]->devfunc = PCI_DEVFN(func->device, func->function); bus_mem[count]->len = len[count]; if (ibmphp_check_resource(bus_mem[count], 0) == 0) { ibmphp_add_resource(bus_mem[count]); func->mem[count] = bus_mem[count]; } else { err("cannot allocate requested mem for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree(bus_mem[count]); return -EIO; } pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], func->mem[count]->start); if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */ count += 1; /* on the 2nd dword, write all 0s, since we can't handle them n.e.ways */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0x00000000); } } } /* end of mem */ } /* end of for */ /* Now need to see how much space the devices behind the bridge needed */ amount_needed = scan_behind_bridge(func, sec_number); if (amount_needed == NULL) return -ENOMEM; ibmphp_pci_bus->number = func->busno; debug("after coming back from scan_behind_bridge\n"); debug("amount_needed->not_correct = %x\n", amount_needed->not_correct); debug("amount_needed->io = %x\n", amount_needed->io); debug("amount_needed->mem = %x\n", amount_needed->mem); debug("amount_needed->pfmem = %x\n", amount_needed->pfmem); if (amount_needed->not_correct) { debug("amount_needed is not correct\n"); for (count = 0; address[count]; count++) { /* for 2 BARs */ if (bus_io[count]) { ibmphp_remove_resource(bus_io[count]); func->io[count] = NULL; } else if (bus_pfmem[count]) { ibmphp_remove_resource(bus_pfmem[count]); func->pfmem[count] = NULL; } else if (bus_mem[count]) { ibmphp_remove_resource(bus_mem[count]); func->mem[count] = NULL; } } kfree(amount_needed); return -ENODEV; } if (!amount_needed->io) { debug("it doesn't want IO?\n"); flag_io = 1; } else { debug("it wants %x IO behind the bridge\n", amount_needed->io); io = kzalloc(sizeof(*io), GFP_KERNEL); if (!io) { retval = -ENOMEM; goto error; } io->type = IO; io->busno = func->busno; io->devfunc = PCI_DEVFN(func->device, func->function); io->len = amount_needed->io; if (ibmphp_check_resource(io, 1) == 0) { debug("were we able to add io\n"); ibmphp_add_resource(io); flag_io = 1; } } if (!amount_needed->mem) { debug("it doesn't want n.e.memory?\n"); flag_mem = 1; } else { debug("it wants %x memory behind the bridge\n", amount_needed->mem); mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) { retval = -ENOMEM; goto error; } mem->type = MEM; mem->busno = func->busno; mem->devfunc = PCI_DEVFN(func->device, func->function); mem->len = amount_needed->mem; if (ibmphp_check_resource(mem, 1) == 0) { ibmphp_add_resource(mem); flag_mem = 1; debug("were we able to add mem\n"); } } if (!amount_needed->pfmem) { debug("it doesn't want n.e.pfmem mem?\n"); flag_pfmem = 1; } else { debug("it wants %x pfmemory behind the bridge\n", amount_needed->pfmem); pfmem = kzalloc(sizeof(*pfmem), GFP_KERNEL); if (!pfmem) { retval = -ENOMEM; goto error; } pfmem->type = PFMEM; pfmem->busno = func->busno; pfmem->devfunc = PCI_DEVFN(func->device, func->function); pfmem->len = amount_needed->pfmem; pfmem->fromMem = 0; if 
(ibmphp_check_resource(pfmem, 1) == 0) { ibmphp_add_resource(pfmem); flag_pfmem = 1; } else { mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); if (!mem_tmp) { retval = -ENOMEM; goto error; } mem_tmp->type = MEM; mem_tmp->busno = pfmem->busno; mem_tmp->devfunc = pfmem->devfunc; mem_tmp->len = pfmem->len; if (ibmphp_check_resource(mem_tmp, 1) == 0) { ibmphp_add_resource(mem_tmp); pfmem->fromMem = 1; pfmem->rangeno = mem_tmp->rangeno; ibmphp_add_pfmem_from_mem(pfmem); flag_pfmem = 1; } } } debug("b4 if (flag_io && flag_mem && flag_pfmem)\n"); debug("flag_io = %x, flag_mem = %x, flag_pfmem = %x\n", flag_io, flag_mem, flag_pfmem); if (flag_io && flag_mem && flag_pfmem) { /* If on bootup, there was a bridged card in this slot, * then card was removed and ibmphp got unloaded and loaded * back again, there's no way for us to remove the bus * struct, so no need to kmalloc, can use existing node */ bus = ibmphp_find_res_bus(sec_number); if (!bus) { bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { retval = -ENOMEM; goto error; } bus->busno = sec_number; debug("b4 adding new bus\n"); rc = add_new_bus(bus, io, mem, pfmem, func->busno); } else if (!(bus->rangeIO) && !(bus->rangeMem) && !(bus->rangePFMem)) rc = add_new_bus(bus, io, mem, pfmem, 0xFF); else { err("expected bus structure not empty?\n"); retval = -EIO; goto error; } if (rc) { if (rc == -ENOMEM) { ibmphp_remove_bus(bus, func->busno); kfree(amount_needed); return rc; } retval = rc; goto error; } pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_IO_BASE, &io_base); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &pfmem_base); if ((io_base & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { debug("io 32\n"); need_io_upper = 1; } if ((pfmem_base & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { debug("pfmem 64\n"); need_pfmem_upper = 1; } if (bus->noIORanges) { pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8); /* _______________This is for debugging purposes only ____________________ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp); debug("io_base = %x\n", (temp & PCI_IO_RANGE_TYPE_MASK) << 8); pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_IO_LIMIT, &temp); debug("io_limit = %x\n", (temp & PCI_IO_RANGE_TYPE_MASK) << 8); ________________________________________________________________________*/ if (need_io_upper) { /* since can't support n.e.ways */ pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_IO_BASE_UPPER16, 0x0000); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_IO_LIMIT_UPPER16, 0x0000); } } else { pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00); } if (bus->noMemRanges) { pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16); /* ____________________This is for debugging purposes only ________________________ pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp); debug("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &temp); debug("mem_limit = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); __________________________________________________________________________________*/ } else { 
pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0xffff); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000); } if (bus->noPFMemRanges) { pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, 0x0000 | bus->rangePFMem->start >> 16); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, 0x0000 | bus->rangePFMem->end >> 16); /* __________________________This is for debugging purposes only _______________________ pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &temp); debug("pfmem_base = %x", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &temp); debug("pfmem_limit = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); ______________________________________________________________________________________*/ if (need_pfmem_upper) { /* since can't support n.e.ways */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, PCI_PREF_BASE_UPPER32, 0x00000000); pci_bus_write_config_dword(ibmphp_pci_bus, devfn, PCI_PREF_LIMIT_UPPER32, 0x00000000); } } else { pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, 0xffff); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, 0x0000); } debug("b4 writing control information\n"); pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq); if ((irq > 0x00) && (irq < 0x05)) pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]); /* pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY); pci_bus_write_config_byte(ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR); */ pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE); pci_bus_write_config_word(ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, 0x07); for (i = 0; i < 32; i++) { if (amount_needed->devices[i]) { debug("device where devices[i] is 1 = %x\n", i); func->devices[i] = 1; } } func->bus = 1; /* For unconfiguring, to indicate it's PPB */ func_passed = &func; debug("func->busno b4 returning is %x\n", func->busno); debug("func->busno b4 returning in the other structure is %x\n", (*func_passed)->busno); kfree(amount_needed); return 0; } else { err("Configuring bridge was unsuccessful...\n"); mem_tmp = NULL; retval = -EIO; goto error; } error: kfree(amount_needed); if (pfmem) ibmphp_remove_resource(pfmem); if (io) ibmphp_remove_resource(io); if (mem) ibmphp_remove_resource(mem); for (i = 0; i < 2; i++) { /* for 2 BARs */ if (bus_io[i]) { ibmphp_remove_resource(bus_io[i]); func->io[i] = NULL; } else if (bus_pfmem[i]) { ibmphp_remove_resource(bus_pfmem[i]); func->pfmem[i] = NULL; } else if (bus_mem[i]) { ibmphp_remove_resource(bus_mem[i]); func->mem[i] = NULL; } } return retval; } /***************************************************************************** * This function adds up the amount of resources needed behind the PPB bridge * and passes it to the configure_bridge function * Input: bridge function * Output: amount of resources needed *****************************************************************************/ static struct res_needed *scan_behind_bridge(struct pci_func *func, u8 busno) { int count, len[6]; u16 vendor_id; u8 hdr_type; u8 device, function; unsigned int devfn; int howmany = 0; /*this is to see if there are any devices behind the bridge */ u32 bar[6], class; static const u32 address[] = { PCI_BASE_ADDRESS_0, 
PCI_BASE_ADDRESS_1, PCI_BASE_ADDRESS_2, PCI_BASE_ADDRESS_3, PCI_BASE_ADDRESS_4, PCI_BASE_ADDRESS_5, 0 }; struct res_needed *amount; amount = kzalloc(sizeof(*amount), GFP_KERNEL); if (amount == NULL) return NULL; ibmphp_pci_bus->number = busno; debug("the bus_no behind the bridge is %x\n", busno); debug("scanning devices behind the bridge...\n"); for (device = 0; device < 32; device++) { amount->devices[device] = 0; for (function = 0; function < 8; function++) { devfn = PCI_DEVFN(device, function); pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id); if (vendor_id != PCI_VENDOR_ID_NOTVALID) { /* found correct device!!! */ howmany++; pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class); debug("hdr_type behind the bridge is %x\n", hdr_type); if ((hdr_type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) { err("embedded bridges not supported for hot-plugging.\n"); amount->not_correct = 1; return amount; } class >>= 8; /* to take revision out, class = class.subclass.prog i/f */ if (class == PCI_CLASS_NOT_DEFINED_VGA) { err("The device %x is VGA compatible and as is not supported for hot plugging. Please choose another device.\n", device); amount->not_correct = 1; return amount; } else if (class == PCI_CLASS_DISPLAY_VGA) { err("The device %x is not supported for hot plugging. Please choose another device.\n", device); amount->not_correct = 1; return amount; } amount->devices[device] = 1; for (count = 0; address[count]; count++) { /* for 6 BARs */ /* pci_bus_read_config_byte(ibmphp_pci_bus, devfn, address[count], &tmp); if (tmp & 0x01) // IO pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFD); else // MEMORY pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &bar[count]); debug("what is bar[count]? 
%x, count = %d\n", bar[count], count); if (!bar[count]) /* This BAR is not implemented */ continue; //tmp_bar = bar[count]; debug("count %d device %x function %x wants %x resources\n", count, device, function, bar[count]); if (bar[count] & PCI_BASE_ADDRESS_SPACE_IO) { /* This is IO */ len[count] = bar[count] & 0xFFFFFFFC; len[count] = ~len[count] + 1; amount->io += len[count]; } else { /* This is Memory */ if (bar[count] & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ len[count] = bar[count] & 0xFFFFFFF0; len[count] = ~len[count] + 1; amount->pfmem += len[count]; if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) /* takes up another dword */ count += 1; } else { /* regular memory */ len[count] = bar[count] & 0xFFFFFFF0; len[count] = ~len[count] + 1; amount->mem += len[count]; if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */ count += 1; } } } } /* end for */ } /* end if (valid) */ } /* end for */ } /* end for */ if (!howmany) amount->not_correct = 1; else amount->not_correct = 0; if ((amount->io) && (amount->io < IOBRIDGE)) amount->io = IOBRIDGE; if ((amount->mem) && (amount->mem < MEMBRIDGE)) amount->mem = MEMBRIDGE; if ((amount->pfmem) && (amount->pfmem < MEMBRIDGE)) amount->pfmem = MEMBRIDGE; return amount; } /* The following 3 unconfigure_boot_ routines deal with the case when we had the card * upon bootup in the system, since we don't allocate func to such case, we need to read * the start addresses from pci config space and then find the corresponding entries in * our resource lists. The functions return either 0, -ENODEV, or -1 (general failure) * Change: we also call these functions even if we configured the card ourselves (i.e., not * the bootup case), since it should work same way */ static int unconfigure_boot_device(u8 busno, u8 device, u8 function) { u32 start_address; static const u32 address[] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, PCI_BASE_ADDRESS_2, PCI_BASE_ADDRESS_3, PCI_BASE_ADDRESS_4, PCI_BASE_ADDRESS_5, 0 }; int count; struct resource_node *io; struct resource_node *mem; struct resource_node *pfmem; struct bus_node *bus; u32 end_address; u32 temp_end; u32 size; u32 tmp_address; unsigned int devfn; debug("%s - enter\n", __func__); bus = ibmphp_find_res_bus(busno); if (!bus) { debug("cannot find corresponding bus.\n"); return -EINVAL; } devfn = PCI_DEVFN(device, function); ibmphp_pci_bus->number = busno; for (count = 0; address[count]; count++) { /* for 6 BARs */ pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &start_address); /* We can do this here, b/c by that time the device driver of the card has been stopped */ pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &size); pci_bus_write_config_dword(ibmphp_pci_bus, devfn, address[count], start_address); debug("start_address is %x\n", start_address); debug("busno, device, function %x %x %x\n", busno, device, function); if (!size) { /* This BAR is not implemented */ debug("is this bar no implemented?, count = %d\n", count); continue; } tmp_address = start_address; if (start_address & PCI_BASE_ADDRESS_SPACE_IO) { /* This is IO */ start_address &= PCI_BASE_ADDRESS_IO_MASK; size = size & 0xFFFFFFFC; size = ~size + 1; end_address = start_address + size - 1; if (ibmphp_find_resource(bus, start_address, &io, IO)) goto report_search_failure; debug("io->start = %x\n", io->start); temp_end = io->end; start_address = io->end + 1; ibmphp_remove_resource(io); /* This is needed b/c of the old I/O 
restrictions in the BIOS */ while (temp_end < end_address) { if (ibmphp_find_resource(bus, start_address, &io, IO)) goto report_search_failure; debug("io->start = %x\n", io->start); temp_end = io->end; start_address = io->end + 1; ibmphp_remove_resource(io); } /* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? */ } else { /* This is Memory */ if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ debug("start address of pfmem is %x\n", start_address); start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource(bus, start_address, &pfmem, PFMEM) < 0) { err("cannot find corresponding PFMEM resource to remove\n"); return -EIO; } if (pfmem) { debug("pfmem->start = %x\n", pfmem->start); ibmphp_remove_resource(pfmem); } } else { /* regular memory */ debug("start address of mem is %x\n", start_address); start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource(bus, start_address, &mem, MEM) < 0) { err("cannot find corresponding MEM resource to remove\n"); return -EIO; } if (mem) { debug("mem->start = %x\n", mem->start); ibmphp_remove_resource(mem); } } if (tmp_address & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */ count += 1; } } /* end of mem */ } /* end of for */ return 0; report_search_failure: err("cannot find corresponding IO resource to remove\n"); return -EIO; } static int unconfigure_boot_bridge(u8 busno, u8 device, u8 function) { int count; int bus_no, pri_no, sub_no, sec_no = 0; u32 start_address, tmp_address; u8 sec_number, sub_number, pri_number; struct resource_node *io = NULL; struct resource_node *mem = NULL; struct resource_node *pfmem = NULL; struct bus_node *bus; static const u32 address[] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, 0 }; unsigned int devfn; devfn = PCI_DEVFN(device, function); ibmphp_pci_bus->number = busno; bus_no = (int) busno; debug("busno is %x\n", busno); pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_PRIMARY_BUS, &pri_number); debug("%s - busno = %x, primary_number = %x\n", __func__, busno, pri_number); pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); debug("sec_number is %x\n", sec_number); sec_no = (int) sec_number; pri_no = (int) pri_number; if (pri_no != bus_no) { err("primary numbers in our structures and pci config space don't match.\n"); return -EINVAL; } pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_SUBORDINATE_BUS, &sub_number); sub_no = (int) sub_number; debug("sub_no is %d, sec_no is %d\n", sub_no, sec_no); if (sec_no != sub_number) { err("there're more buses behind this bridge. Hot removal is not supported. Please choose another card\n"); return -ENODEV; } bus = ibmphp_find_res_bus(sec_number); if (!bus) { err("cannot find Bus structure for the bridged device\n"); return -EINVAL; } debug("bus->busno is %x\n", bus->busno); debug("sec_number is %x\n", sec_number); ibmphp_remove_bus(bus, busno); for (count = 0; address[count]; count++) { /* for 2 BARs */ pci_bus_read_config_dword(ibmphp_pci_bus, devfn, address[count], &start_address); if (!start_address) { /* This BAR is not implemented */ continue; } tmp_address = start_address; if (start_address & PCI_BASE_ADDRESS_SPACE_IO) { /* This is IO */ start_address &= PCI_BASE_ADDRESS_IO_MASK; if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) { err("cannot find corresponding IO resource to remove\n"); return -EIO; } if (io) debug("io->start = %x\n", io->start); ibmphp_remove_resource(io); /* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? 
*/ } else { /* This is Memory */ if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource(bus, start_address, &pfmem, PFMEM) < 0) { err("cannot find corresponding PFMEM resource to remove\n"); return -EINVAL; } if (pfmem) { debug("pfmem->start = %x\n", pfmem->start); ibmphp_remove_resource(pfmem); } } else { /* regular memory */ start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource(bus, start_address, &mem, MEM) < 0) { err("cannot find corresponding MEM resource to remove\n"); return -EINVAL; } if (mem) { debug("mem->start = %x\n", mem->start); ibmphp_remove_resource(mem); } } if (tmp_address & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */ count += 1; } } /* end of mem */ } /* end of for */ debug("%s - exiting, returning success\n", __func__); return 0; } static int unconfigure_boot_card(struct slot *slot_cur) { u16 vendor_id; u32 class; u8 hdr_type; u8 device; u8 busno; u8 function; int rc; unsigned int devfn; u8 valid_device = 0x00; /* To see if we are ever able to find valid device and read it */ debug("%s - enter\n", __func__); device = slot_cur->device; busno = slot_cur->bus; debug("b4 for loop, device is %x\n", device); /* For every function on the card */ for (function = 0x0; function < 0x08; function++) { devfn = PCI_DEVFN(device, function); ibmphp_pci_bus->number = busno; pci_bus_read_config_word(ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id); if (vendor_id != PCI_VENDOR_ID_NOTVALID) { /* found correct device!!! */ ++valid_device; debug("%s - found correct device\n", __func__); /* header: x x x x x x x x * | |___________|=> 1=PPB bridge, 0=normal device, 2=CardBus Bridge * |_=> 0 = single function device, 1 = multi-function device */ pci_bus_read_config_byte(ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type); pci_bus_read_config_dword(ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class); debug("hdr_type %x, class %x\n", hdr_type, class); class >>= 8; /* to take revision out, class = class.subclass.prog i/f */ if (class == PCI_CLASS_NOT_DEFINED_VGA) { err("The device %x function %x is VGA compatible and is not supported for hot removing. Please choose another device.\n", device, function); return -ENODEV; } else if (class == PCI_CLASS_DISPLAY_VGA) { err("The device %x function %x is not supported for hot removing. Please choose another device.\n", device, function); return -ENODEV; } switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: rc = unconfigure_boot_device(busno, device, function); if (rc) { err("was not able to unconfigure device %x func %x on bus %x. bailing out...\n", device, function, busno); return rc; } function = 0x8; break; case PCI_HEADER_TYPE_MULTIDEVICE: rc = unconfigure_boot_device(busno, device, function); if (rc) { err("was not able to unconfigure device %x func %x on bus %x. bailing out...\n", device, function, busno); return rc; } break; case PCI_HEADER_TYPE_BRIDGE: class >>= 8; if (class != PCI_CLASS_BRIDGE_PCI) { err("This device %x function %x is not PCI-to-PCI bridge, and is not supported for hot-removing. Please try another card.\n", device, function); return -ENODEV; } rc = unconfigure_boot_bridge(busno, device, function); if (rc != 0) { err("was not able to hot-remove PPB properly.\n"); return rc; } function = 0x8; break; case PCI_HEADER_TYPE_MULTIBRIDGE: class >>= 8; if (class != PCI_CLASS_BRIDGE_PCI) { err("This device %x function %x is not PCI-to-PCI bridge, and is not supported for hot-removing. 
Please try another card.\n", device, function); return -ENODEV; } rc = unconfigure_boot_bridge(busno, device, function); if (rc != 0) { err("was not able to hot-remove PPB properly.\n"); return rc; } break; default: err("MAJOR PROBLEM!!!! Cannot read device's header\n"); return -1; } /* end of switch */ } /* end of valid device */ } /* end of for */ if (!valid_device) { err("Could not find device to unconfigure. Or could not read the card.\n"); return -1; } return 0; } /* * free the resources of the card (multi, single, or bridged) * Parameters: slot, flag to say if this is for removing entire module or just * unconfiguring the device * TO DO: will probably need to add some code in case there was some resource, * to remove it... this is from when we have errors in the configure_card... * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!! * Returns: 0, -1, -ENODEV */ int ibmphp_unconfigure_card(struct slot **slot_cur, int the_end) { int i; int count; int rc; struct slot *sl = *slot_cur; struct pci_func *cur_func = NULL; struct pci_func *temp_func; debug("%s - enter\n", __func__); if (!the_end) { /* Need to unconfigure the card */ rc = unconfigure_boot_card(sl); if ((rc == -ENODEV) || (rc == -EIO) || (rc == -EINVAL)) { /* In all other cases, will still need to get rid of func structure if it exists */ return rc; } } if (sl->func) { cur_func = sl->func; while (cur_func) { /* TO DO: WILL MOST LIKELY NEED TO GET RID OF THE BUS STRUCTURE FROM RESOURCES AS WELL */ if (cur_func->bus) { /* in other words, it's a PPB */ count = 2; } else { count = 6; } for (i = 0; i < count; i++) { if (cur_func->io[i]) { debug("io[%d] exists\n", i); if (the_end > 0) ibmphp_remove_resource(cur_func->io[i]); cur_func->io[i] = NULL; } if (cur_func->mem[i]) { debug("mem[%d] exists\n", i); if (the_end > 0) ibmphp_remove_resource(cur_func->mem[i]); cur_func->mem[i] = NULL; } if (cur_func->pfmem[i]) { debug("pfmem[%d] exists\n", i); if (the_end > 0) ibmphp_remove_resource(cur_func->pfmem[i]); cur_func->pfmem[i] = NULL; } } temp_func = cur_func->next; kfree(cur_func); cur_func = temp_func; } } sl->func = NULL; *slot_cur = sl; debug("%s - exit\n", __func__); return 0; } /* * add a new bus resulting from hot-plugging a PPB bridge with devices * * Input: bus and the amount of resources needed (we know we can assign those, * since they've been checked already * Output: bus added to the correct spot * 0, -1, error */ static int add_new_bus(struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno) { struct range_node *io_range = NULL; struct range_node *mem_range = NULL; struct range_node *pfmem_range = NULL; struct bus_node *cur_bus = NULL; /* Trying to find the parent bus number */ if (parent_busno != 0xFF) { cur_bus = ibmphp_find_res_bus(parent_busno); if (!cur_bus) { err("strange, cannot find bus which is supposed to be at the system... 
something is terribly wrong...\n"); return -ENODEV; } list_add(&bus->bus_list, &cur_bus->bus_list); } if (io) { io_range = kzalloc(sizeof(*io_range), GFP_KERNEL); if (!io_range) return -ENOMEM; io_range->start = io->start; io_range->end = io->end; io_range->rangeno = 1; bus->noIORanges = 1; bus->rangeIO = io_range; } if (mem) { mem_range = kzalloc(sizeof(*mem_range), GFP_KERNEL); if (!mem_range) return -ENOMEM; mem_range->start = mem->start; mem_range->end = mem->end; mem_range->rangeno = 1; bus->noMemRanges = 1; bus->rangeMem = mem_range; } if (pfmem) { pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL); if (!pfmem_range) return -ENOMEM; pfmem_range->start = pfmem->start; pfmem_range->end = pfmem->end; pfmem_range->rangeno = 1; bus->noPFMemRanges = 1; bus->rangePFMem = pfmem_range; } return 0; } /* * find the 1st available bus number for PPB to set as its secondary bus * Parameters: bus_number of the primary bus * Returns: bus_number of the secondary bus or 0xff in case of failure */ static u8 find_sec_number(u8 primary_busno, u8 slotno) { int min, max; u8 busno; struct bus_info *bus; struct bus_node *bus_cur; bus = ibmphp_find_same_bus_num(primary_busno); if (!bus) { err("cannot get slot range of the bus from the BIOS\n"); return 0xff; } max = bus->slot_max; min = bus->slot_min; if ((slotno > max) || (slotno < min)) { err("got the wrong range\n"); return 0xff; } busno = (u8) (slotno - (u8) min); busno += primary_busno + 0x01; bus_cur = ibmphp_find_res_bus(busno); /* either there is no such bus number, or there are no ranges, which * can only happen if we removed the bridged device in previous load * of the driver, and now only have the skeleton bus struct */ if ((!bus_cur) || (!(bus_cur->rangeIO) && !(bus_cur->rangeMem) && !(bus_cur->rangePFMem))) return busno; return 0xff; }
linux-master
drivers/pci/hotplug/ibmphp_pci.c
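ibmphp_pci.c above sizes every BAR the standard way: configure_device() and configure_bridge() write 0xFFFFFFFF into the register, read it back, mask off the attribute bits (0xFFFFFFFC for I/O BARs, 0xFFFFFFF0 for memory BARs) and take the two's complement of the result to get the requested length. The fragment below is a minimal stand-alone user-space sketch of just that arithmetic, not kernel code; the sample read-back value 0xFFFFF000 is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define BAR_SPACE_IO   0x00000001u  /* bit 0: 1 = I/O BAR, 0 = memory BAR */
#define BAR_IO_MASK    0xFFFFFFFCu  /* address bits of an I/O BAR */
#define BAR_MEM_MASK   0xFFFFFFF0u  /* address bits of a memory BAR */

/* Decode the size a BAR requests from the value read back after
 * 0xFFFFFFFF was written into it; 0 means the BAR is not implemented. */
static uint32_t bar_size(uint32_t readback)
{
	uint32_t mask;

	if (!readback)
		return 0;
	mask = (readback & BAR_SPACE_IO) ? BAR_IO_MASK : BAR_MEM_MASK;
	/* same arithmetic as len[count] = ~(bar[count] & mask) + 1 above */
	return ~(readback & mask) + 1;
}

int main(void)
{
	/* hypothetical read-back value: a memory BAR asking for 4 KiB */
	printf("size = %u bytes\n", (unsigned int)bar_size(0xFFFFF000u));
	return 0;
}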
// SPDX-License-Identifier: GPL-2.0+ /* * Interface for Dynamic Logical Partitioning of I/O Slots on * RPA-compliant PPC64 platform. * * John Rose <[email protected]> * Linda Xie <[email protected]> * * October 2003 * * Copyright (C) 2003 IBM. */ #undef DEBUG #include <linux/init.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <asm/pci-bridge.h> #include <linux/mutex.h> #include <asm/rtas.h> #include <asm/vio.h> #include <linux/firmware.h> #include "../pci.h" #include "rpaphp.h" #include "rpadlpar.h" static DEFINE_MUTEX(rpadlpar_mutex); #define DLPAR_MODULE_NAME "rpadlpar_io" #define NODE_TYPE_VIO 1 #define NODE_TYPE_SLOT 2 #define NODE_TYPE_PHB 3 static struct device_node *find_vio_slot_node(char *drc_name) { struct device_node *parent = of_find_node_by_name(NULL, "vdevice"); struct device_node *dn; int rc; if (!parent) return NULL; for_each_child_of_node(parent, dn) { rc = rpaphp_check_drc_props(dn, drc_name, NULL); if (rc == 0) break; } of_node_put(parent); return dn; } /* Find dlpar-capable pci node that contains the specified name and type */ static struct device_node *find_php_slot_pci_node(char *drc_name, char *drc_type) { struct device_node *np; int rc; for_each_node_by_name(np, "pci") { rc = rpaphp_check_drc_props(np, drc_name, drc_type); if (rc == 0) break; } return np; } /* Returns a device_node with its reference count incremented */ static struct device_node *find_dlpar_node(char *drc_name, int *node_type) { struct device_node *dn; dn = find_php_slot_pci_node(drc_name, "SLOT"); if (dn) { *node_type = NODE_TYPE_SLOT; return dn; } dn = find_php_slot_pci_node(drc_name, "PHB"); if (dn) { *node_type = NODE_TYPE_PHB; return dn; } dn = find_vio_slot_node(drc_name); if (dn) { *node_type = NODE_TYPE_VIO; return dn; } return NULL; } /** * find_php_slot - return hotplug slot structure for device node * @dn: target &device_node * * This routine will return the hotplug slot structure * for a given device node. Note that built-in PCI slots * may be dlpar-able, but not hot-pluggable, so this routine * will return NULL for built-in PCI slots. */ static struct slot *find_php_slot(struct device_node *dn) { struct slot *slot, *next; list_for_each_entry_safe(slot, next, &rpaphp_slot_head, rpaphp_slot_list) { if (slot->dn == dn) return slot; } return NULL; } static struct pci_dev *dlpar_find_new_dev(struct pci_bus *parent, struct device_node *dev_dn) { struct pci_dev *tmp = NULL; struct device_node *child_dn; list_for_each_entry(tmp, &parent->devices, bus_list) { child_dn = pci_device_to_OF_node(tmp); if (child_dn == dev_dn) return tmp; } return NULL; } static void dlpar_pci_add_bus(struct device_node *dn) { struct pci_dn *pdn = PCI_DN(dn); struct pci_controller *phb = pdn->phb; struct pci_dev *dev = NULL; pseries_eeh_init_edev_recursive(pdn); /* Add EADS device to PHB bus, adding new entry to bus->devices */ dev = of_create_pci_dev(dn, phb->bus, pdn->devfn); if (!dev) { printk(KERN_ERR "%s: failed to create pci dev for %pOF\n", __func__, dn); return; } /* Scan below the new bridge */ if (pci_is_bridge(dev)) of_scan_pci_bridge(dev); /* Map IO space for child bus, which may or may not succeed */ pcibios_map_io_space(dev->subordinate); /* Finish adding it : resource allocation, adding devices, etc... 
* Note that we need to perform the finish pass on the -parent- * bus of the EADS bridge so the bridge device itself gets * properly added */ pcibios_finish_adding_to_bus(phb->bus); } static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) { struct pci_dev *dev; struct pci_controller *phb; if (pci_find_bus_by_node(dn)) return -EINVAL; /* Add pci bus */ dlpar_pci_add_bus(dn); /* Confirm new bridge dev was created */ phb = PCI_DN(dn)->phb; dev = dlpar_find_new_dev(phb->bus, dn); if (!dev) { printk(KERN_ERR "%s: unable to add bus %s\n", __func__, drc_name); return -EIO; } if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { printk(KERN_ERR "%s: unexpected header type %d, unable to add bus %s\n", __func__, dev->hdr_type, drc_name); return -EIO; } /* Add hotplug slot */ if (rpaphp_add_slot(dn)) { printk(KERN_ERR "%s: unable to add hotplug slot %s\n", __func__, drc_name); return -EIO; } return 0; } static int dlpar_remove_phb(char *drc_name, struct device_node *dn) { struct slot *slot; struct pci_dn *pdn; int rc = 0; if (!pci_find_bus_by_node(dn)) return -EINVAL; /* If pci slot is hotpluggable, use hotplug to remove it */ slot = find_php_slot(dn); if (slot && rpaphp_deregister_slot(slot)) { printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", __func__, drc_name); return -EIO; } pdn = dn->data; BUG_ON(!pdn || !pdn->phb); rc = remove_phb_dynamic(pdn->phb); if (rc < 0) return rc; pdn->phb = NULL; return 0; } static int dlpar_add_phb(char *drc_name, struct device_node *dn) { struct pci_controller *phb; if (PCI_DN(dn) && PCI_DN(dn)->phb) { /* PHB already exists */ return -EINVAL; } phb = init_phb_dynamic(dn); if (!phb) return -EIO; if (rpaphp_add_slot(dn)) { printk(KERN_ERR "%s: unable to add hotplug slot %s\n", __func__, drc_name); return -EIO; } return 0; } static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) { struct vio_dev *vio_dev; vio_dev = vio_find_node(dn); if (vio_dev) { put_device(&vio_dev->dev); return -EINVAL; } if (!vio_register_device_node(dn)) { printk(KERN_ERR "%s: failed to register vio node %s\n", __func__, drc_name); return -EIO; } return 0; } /** * dlpar_add_slot - DLPAR add an I/O Slot * @drc_name: drc-name of newly added slot * * Make the hotplug module and the kernel aware of a newly added I/O Slot. * Return Codes: * 0 Success * -ENODEV Not a valid drc_name * -EINVAL Slot already added * -ERESTARTSYS Signalled before obtaining lock * -EIO Internal PCI Error */ int dlpar_add_slot(char *drc_name) { struct device_node *dn = NULL; int node_type; int rc = -EIO; if (mutex_lock_interruptible(&rpadlpar_mutex)) return -ERESTARTSYS; /* Find newly added node */ dn = find_dlpar_node(drc_name, &node_type); if (!dn) { rc = -ENODEV; goto exit; } switch (node_type) { case NODE_TYPE_VIO: rc = dlpar_add_vio_slot(drc_name, dn); break; case NODE_TYPE_SLOT: rc = dlpar_add_pci_slot(drc_name, dn); break; case NODE_TYPE_PHB: rc = dlpar_add_phb(drc_name, dn); break; } of_node_put(dn); printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); exit: mutex_unlock(&rpadlpar_mutex); return rc; } /** * dlpar_remove_vio_slot - DLPAR remove a virtual I/O Slot * @drc_name: drc-name of newly added slot * @dn: &device_node * * Remove the kernel and hotplug representations of an I/O Slot. 
* Return Codes: * 0 Success * -EINVAL Vio dev doesn't exist */ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) { struct vio_dev *vio_dev; vio_dev = vio_find_node(dn); if (!vio_dev) return -EINVAL; vio_unregister_device(vio_dev); put_device(&vio_dev->dev); return 0; } /** * dlpar_remove_pci_slot - DLPAR remove a PCI I/O Slot * @drc_name: drc-name of newly added slot * @dn: &device_node * * Remove the kernel and hotplug representations of a PCI I/O Slot. * Return Codes: * 0 Success * -ENODEV Not a valid drc_name * -EIO Internal PCI Error */ static int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) { struct pci_bus *bus; struct slot *slot; int ret = 0; pci_lock_rescan_remove(); bus = pci_find_bus_by_node(dn); if (!bus) { ret = -EINVAL; goto out; } pr_debug("PCI: Removing PCI slot below EADS bridge %s\n", bus->self ? pci_name(bus->self) : "<!PHB!>"); slot = find_php_slot(dn); if (slot) { pr_debug("PCI: Removing hotplug slot for %04x:%02x...\n", pci_domain_nr(bus), bus->number); if (rpaphp_deregister_slot(slot)) { printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", __func__, drc_name); ret = -EIO; goto out; } } /* Remove all devices below slot */ pci_hp_remove_devices(bus); /* Unmap PCI IO space */ if (pcibios_unmap_io_space(bus)) { printk(KERN_ERR "%s: failed to unmap bus range\n", __func__); ret = -ERANGE; goto out; } /* Remove the EADS bridge device itself */ BUG_ON(!bus->self); pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); pci_stop_and_remove_bus_device(bus->self); out: pci_unlock_rescan_remove(); return ret; } /** * dlpar_remove_slot - DLPAR remove an I/O Slot * @drc_name: drc-name of newly added slot * * Remove the kernel and hotplug representations of an I/O Slot. * Return Codes: * 0 Success * -ENODEV Not a valid drc_name * -EINVAL Slot already removed * -ERESTARTSYS Signalled before obtaining lock * -EIO Internal Error */ int dlpar_remove_slot(char *drc_name) { struct device_node *dn; int node_type; int rc = 0; if (mutex_lock_interruptible(&rpadlpar_mutex)) return -ERESTARTSYS; dn = find_dlpar_node(drc_name, &node_type); if (!dn) { rc = -ENODEV; goto exit; } switch (node_type) { case NODE_TYPE_VIO: rc = dlpar_remove_vio_slot(drc_name, dn); break; case NODE_TYPE_PHB: rc = dlpar_remove_phb(drc_name, dn); break; case NODE_TYPE_SLOT: rc = dlpar_remove_pci_slot(drc_name, dn); break; } of_node_put(dn); vm_unmap_aliases(); printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); exit: mutex_unlock(&rpadlpar_mutex); return rc; } static inline int is_dlpar_capable(void) { int rc = rtas_token("ibm,configure-connector"); return (int) (rc != RTAS_UNKNOWN_SERVICE); } static int __init rpadlpar_io_init(void) { if (!is_dlpar_capable()) { printk(KERN_WARNING "%s: partition not DLPAR capable\n", __func__); return -EPERM; } return dlpar_sysfs_init(); } static void __exit rpadlpar_io_exit(void) { dlpar_sysfs_exit(); } module_init(rpadlpar_io_init); module_exit(rpadlpar_io_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RPA Dynamic Logical Partitioning driver for I/O slots");
linux-master
drivers/pci/hotplug/rpadlpar_core.c
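rpadlpar_core.c above funnels both dlpar_add_slot() and dlpar_remove_slot() through a single mutex, resolves the drc-name to a VIO, SLOT or PHB node, dispatches on that type, and leaves through one exit label that releases the lock. The sketch below mirrors that shape as ordinary user-space C with a pthread mutex; the helper names and the "slot0" argument are placeholders, not the driver's API.

#include <pthread.h>
#include <stdio.h>

/* NODE_TYPE_* mirror the driver's node types; everything else here
 * (resolve_node, the add_* stubs, "slot0") is a made-up stand-in. */
enum node_type { NODE_TYPE_VIO = 1, NODE_TYPE_SLOT, NODE_TYPE_PHB };

static pthread_mutex_t dlpar_lock = PTHREAD_MUTEX_INITIALIZER;

static int resolve_node(const char *name, enum node_type *type)
{
	*type = NODE_TYPE_SLOT;          /* stand-in for find_dlpar_node() */
	return name != NULL;
}

static int add_vio(const char *n) { printf("vio slot %s\n", n); return 0; }
static int add_pci(const char *n) { printf("pci slot %s\n", n); return 0; }
static int add_phb(const char *n) { printf("phb %s\n", n);      return 0; }

static int dlpar_add(const char *drc_name)
{
	enum node_type type;
	int rc = -1;

	pthread_mutex_lock(&dlpar_lock);     /* one lock around the whole op */
	if (!resolve_node(drc_name, &type))
		goto exit;                   /* unknown name: fail, still unlock */
	switch (type) {
	case NODE_TYPE_VIO:  rc = add_vio(drc_name); break;
	case NODE_TYPE_SLOT: rc = add_pci(drc_name); break;
	case NODE_TYPE_PHB:  rc = add_phb(drc_name); break;
	}
exit:
	pthread_mutex_unlock(&dlpar_lock);   /* single exit path drops the lock */
	return rc;
}

int main(void)
{
	return dlpar_add("slot0");
}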
// SPDX-License-Identifier: GPL-2.0+ /* * Standard Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>, <[email protected]> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/pci.h> #include "shpchp.h" /* Global variables */ bool shpchp_debug; bool shpchp_poll_mode; int shpchp_poll_time; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <[email protected]>, Greg Kroah-Hartman <[email protected]>, Dely Sy <[email protected]>" #define DRIVER_DESC "Standard Hot Plug PCI Controller Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); module_param(shpchp_debug, bool, 0644); module_param(shpchp_poll_mode, bool, 0644); module_param(shpchp_poll_time, int, 0644); MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); #define SHPC_MODULE_NAME "shpchp" static int set_attention_status(struct hotplug_slot *slot, u8 value); static int enable_slot(struct hotplug_slot *slot); static int disable_slot(struct hotplug_slot *slot); static int get_power_status(struct hotplug_slot *slot, u8 *value); static int get_attention_status(struct hotplug_slot *slot, u8 *value); static int get_latch_status(struct hotplug_slot *slot, u8 *value); static int get_adapter_status(struct hotplug_slot *slot, u8 *value); static const struct hotplug_slot_ops shpchp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = enable_slot, .disable_slot = disable_slot, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, }; static int init_slots(struct controller *ctrl) { struct slot *slot; struct hotplug_slot *hotplug_slot; char name[SLOT_NAME_SIZE]; int retval; int i; for (i = 0; i < ctrl->num_slots; i++) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { retval = -ENOMEM; goto error; } hotplug_slot = &slot->hotplug_slot; slot->hp_slot = i; slot->ctrl = ctrl; slot->bus = ctrl->pci_dev->subordinate->number; slot->device = ctrl->slot_device_offset + i; slot->hpc_ops = ctrl->hpc_ops; slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); if (!slot->wq) { retval = -ENOMEM; goto error_slot; } mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); /* register this slot with the hotplug pci core */ snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); hotplug_slot->ops = &shpchp_hotplug_slot_ops; ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x hp_slot=%x sun=%x slot_device_offset=%x\n", pci_domain_nr(ctrl->pci_dev->subordinate), slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); retval = pci_hp_register(hotplug_slot, ctrl->pci_dev->subordinate, slot->device, name); if (retval) { ctrl_err(ctrl, "pci_hp_register failed with error %d\n", retval); goto error_slotwq; } get_power_status(hotplug_slot, &slot->pwr_save); get_attention_status(hotplug_slot, &slot->attention_save); get_latch_status(hotplug_slot, &slot->latch_save); 
get_adapter_status(hotplug_slot, &slot->presence_save); list_add(&slot->slot_list, &ctrl->slot_list); } return 0; error_slotwq: destroy_workqueue(slot->wq); error_slot: kfree(slot); error: return retval; } void cleanup_slots(struct controller *ctrl) { struct slot *slot, *next; list_for_each_entry_safe(slot, next, &ctrl->slot_list, slot_list) { list_del(&slot->slot_list); cancel_delayed_work(&slot->work); destroy_workqueue(slot->wq); pci_hp_deregister(&slot->hotplug_slot); kfree(slot); } } /* * set_attention_status - Turns the Amber LED for a slot on, off or blink */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); slot->attention_save = status; slot->hpc_ops->set_attention_status(slot, status); return 0; } static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return shpchp_sysfs_enable_slot(slot); } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return shpchp_sysfs_disable_slot(slot); } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_power_status(slot, value); if (retval < 0) *value = slot->pwr_save; return 0; } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_attention_status(slot, value); if (retval < 0) *value = slot->attention_save; return 0; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_latch_status(slot, value); if (retval < 0) *value = slot->latch_save; return 0; } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_adapter_status(slot, value); if (retval < 0) *value = slot->presence_save; return 0; } static bool shpc_capable(struct pci_dev *bridge) { /* * It is assumed that AMD GOLAM chips support SHPC but they do not * have SHPC capability. 
*/ if (bridge->vendor == PCI_VENDOR_ID_AMD && bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450) return true; if (pci_find_capability(bridge, PCI_CAP_ID_SHPC)) return true; return false; } static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; struct controller *ctrl; if (!shpc_capable(pdev)) return -ENODEV; if (acpi_get_hp_hw_control_from_firmware(pdev)) return -ENODEV; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) goto err_out_none; INIT_LIST_HEAD(&ctrl->slot_list); rc = shpc_init(ctrl, pdev); if (rc) { ctrl_dbg(ctrl, "Controller initialization failed\n"); goto err_out_free_ctrl; } pci_set_drvdata(pdev, ctrl); /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { ctrl_err(ctrl, "Slot initialization failed\n"); goto err_out_release_ctlr; } rc = shpchp_create_ctrl_files(ctrl); if (rc) goto err_cleanup_slots; pdev->shpc_managed = 1; return 0; err_cleanup_slots: cleanup_slots(ctrl); err_out_release_ctlr: ctrl->hpc_ops->release_ctlr(ctrl); err_out_free_ctrl: kfree(ctrl); err_out_none: return -ENODEV; } static void shpc_remove(struct pci_dev *dev) { struct controller *ctrl = pci_get_drvdata(dev); dev->shpc_managed = 0; shpchp_remove_ctrl_files(ctrl); ctrl->hpc_ops->release_ctlr(ctrl); kfree(ctrl); } static const struct pci_device_id shpcd_pci_tbl[] = { {PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0)}, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl); static struct pci_driver shpc_driver = { .name = SHPC_MODULE_NAME, .id_table = shpcd_pci_tbl, .probe = shpc_probe, .remove = shpc_remove, }; static int __init shpcd_init(void) { int retval; retval = pci_register_driver(&shpc_driver); dbg("%s: pci_register_driver = %d\n", __func__, retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return retval; } static void __exit shpcd_cleanup(void) { dbg("unload_shpchpd()\n"); pci_unregister_driver(&shpc_driver); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } module_init(shpcd_init); module_exit(shpcd_cleanup);
linux-master
drivers/pci/hotplug/shpchp_core.c
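The shpchp record above follows the standard slot-registration pattern of the PCI hotplug core: fill in a hotplug_slot_ops table, embed a struct hotplug_slot in the per-slot structure, and hand it to pci_hp_register() against the bridge's subordinate bus (see init_slots()). The sketch below distills only that pattern; it is not shpchp itself. The demo_* names, the single hard-coded slot and the trivial power bookkeeping are hypothetical, and a real driver would program its controller inside the ops callbacks and pair the registration with pci_hp_deregister() on teardown.

/*
 * Minimal sketch of the hotplug-core registration pattern, assuming the
 * caller already owns a struct pci_bus (e.g. from a bridge probe path).
 * All demo_* identifiers are illustrative, not part of any in-tree driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

struct demo_slot {
        struct hotplug_slot hotplug_slot;       /* embedded, not a pointer */
        u8 power;
};

static struct demo_slot *to_demo_slot(struct hotplug_slot *hs)
{
        return container_of(hs, struct demo_slot, hotplug_slot);
}

static int demo_enable_slot(struct hotplug_slot *hs)
{
        to_demo_slot(hs)->power = 1;            /* real drivers poke the controller here */
        return 0;
}

static int demo_disable_slot(struct hotplug_slot *hs)
{
        to_demo_slot(hs)->power = 0;
        return 0;
}

static int demo_get_power_status(struct hotplug_slot *hs, u8 *value)
{
        *value = to_demo_slot(hs)->power;
        return 0;
}

static const struct hotplug_slot_ops demo_slot_ops = {
        .enable_slot      = demo_enable_slot,
        .disable_slot     = demo_disable_slot,
        .get_power_status = demo_get_power_status,
};

/* Hypothetical helper, called once per slot from a probe path that owns @bus. */
static int demo_register_one_slot(struct pci_bus *bus, int device)
{
        struct demo_slot *slot;
        char name[8];
        int ret;

        slot = kzalloc(sizeof(*slot), GFP_KERNEL);
        if (!slot)
                return -ENOMEM;

        slot->hotplug_slot.ops = &demo_slot_ops;
        snprintf(name, sizeof(name), "%d", device);

        /* Creates /sys/bus/pci/slots/<name>/ backed by demo_slot_ops. */
        ret = pci_hp_register(&slot->hotplug_slot, bus, device, name);
        if (ret)
                kfree(slot);
        return ret;
}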
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>, <[email protected]> * */ #define dev_fmt(fmt) "pciehp: " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/pm_runtime.h> #include <linux/pci.h> #include "pciehp.h" /* The following routines constitute the bulk of the hotplug controller logic */ #define SAFE_REMOVAL true #define SURPRISE_REMOVAL false static void set_slot_off(struct controller *ctrl) { /* * Turn off slot, turn on attention indicator, turn off power * indicator */ if (POWER_CTRL(ctrl)) { pciehp_power_off_slot(ctrl); /* * After turning power off, we must wait for at least 1 second * before taking any action that relies on power having been * removed from the slot/adapter. */ msleep(1000); } pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_ON); } /** * board_added - Called after a board has been added to the system. * @ctrl: PCIe hotplug controller where board is added * * Turns power on for the board. * Configures board. */ static int board_added(struct controller *ctrl) { int retval = 0; struct pci_bus *parent = ctrl->pcie->port->subordinate; if (POWER_CTRL(ctrl)) { /* Power on slot */ retval = pciehp_power_on_slot(ctrl); if (retval) return retval; } pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, INDICATOR_NOOP); /* Check link training status */ retval = pciehp_check_link_status(ctrl); if (retval) goto err_exit; /* Check for a power fault */ if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) { ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); retval = -EIO; goto err_exit; } retval = pciehp_configure_device(ctrl); if (retval) { if (retval != -EEXIST) { ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n", pci_domain_nr(parent), parent->number); goto err_exit; } } pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_ATTN_IND_OFF); return 0; err_exit: set_slot_off(ctrl); return retval; } /** * remove_board - Turn off slot and Power Indicator * @ctrl: PCIe hotplug controller where board is being removed * @safe_removal: whether the board is safely removed (versus surprise removed) */ static void remove_board(struct controller *ctrl, bool safe_removal) { pciehp_unconfigure_device(ctrl, safe_removal); if (POWER_CTRL(ctrl)) { pciehp_power_off_slot(ctrl); /* * After turning power off, we must wait for at least 1 second * before taking any action that relies on power having been * removed from the slot/adapter. 
*/ msleep(1000); /* Ignore link or presence changes caused by power off */ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), &ctrl->pending_events); } pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, INDICATOR_NOOP); } static int pciehp_enable_slot(struct controller *ctrl); static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal); void pciehp_request(struct controller *ctrl, int action) { atomic_or(action, &ctrl->pending_events); if (!pciehp_poll_mode) irq_wake_thread(ctrl->pcie->irq, ctrl); } void pciehp_queue_pushbutton_work(struct work_struct *work) { struct controller *ctrl = container_of(work, struct controller, button_work.work); mutex_lock(&ctrl->state_lock); switch (ctrl->state) { case BLINKINGOFF_STATE: pciehp_request(ctrl, DISABLE_SLOT); break; case BLINKINGON_STATE: pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); break; default: break; } mutex_unlock(&ctrl->state_lock); } void pciehp_handle_button_press(struct controller *ctrl) { mutex_lock(&ctrl->state_lock); switch (ctrl->state) { case OFF_STATE: case ON_STATE: if (ctrl->state == ON_STATE) { ctrl->state = BLINKINGOFF_STATE; ctrl_info(ctrl, "Slot(%s): Button press: will power off in 5 sec\n", slot_name(ctrl)); } else { ctrl->state = BLINKINGON_STATE; ctrl_info(ctrl, "Slot(%s): Button press: will power on in 5 sec\n", slot_name(ctrl)); } /* blink power indicator and turn off attention */ pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_ATTN_IND_OFF); schedule_delayed_work(&ctrl->button_work, 5 * HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: /* * Cancel if we are still blinking; this means that we * press the attention again before the 5 sec. limit * expires to cancel hot-add or hot-remove */ cancel_delayed_work(&ctrl->button_work); if (ctrl->state == BLINKINGOFF_STATE) { ctrl->state = ON_STATE; pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_ATTN_IND_OFF); ctrl_info(ctrl, "Slot(%s): Button press: canceling request to power off\n", slot_name(ctrl)); } else { ctrl->state = OFF_STATE; pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_OFF); ctrl_info(ctrl, "Slot(%s): Button press: canceling request to power on\n", slot_name(ctrl)); } break; default: ctrl_err(ctrl, "Slot(%s): Button press: ignoring invalid state %#x\n", slot_name(ctrl), ctrl->state); break; } mutex_unlock(&ctrl->state_lock); } void pciehp_handle_disable_request(struct controller *ctrl) { mutex_lock(&ctrl->state_lock); switch (ctrl->state) { case BLINKINGON_STATE: case BLINKINGOFF_STATE: cancel_delayed_work(&ctrl->button_work); break; } ctrl->state = POWEROFF_STATE; mutex_unlock(&ctrl->state_lock); ctrl->request_result = pciehp_disable_slot(ctrl, SAFE_REMOVAL); } void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) { int present, link_active; /* * If the slot is on and presence or link has changed, turn it off. * Even if it's occupied again, we cannot assume the card is the same. 
*/ mutex_lock(&ctrl->state_lock); switch (ctrl->state) { case BLINKINGOFF_STATE: cancel_delayed_work(&ctrl->button_work); fallthrough; case ON_STATE: ctrl->state = POWEROFF_STATE; mutex_unlock(&ctrl->state_lock); if (events & PCI_EXP_SLTSTA_DLLSC) ctrl_info(ctrl, "Slot(%s): Link Down\n", slot_name(ctrl)); if (events & PCI_EXP_SLTSTA_PDC) ctrl_info(ctrl, "Slot(%s): Card not present\n", slot_name(ctrl)); pciehp_disable_slot(ctrl, SURPRISE_REMOVAL); break; default: mutex_unlock(&ctrl->state_lock); break; } /* Turn the slot on if it's occupied or link is up */ mutex_lock(&ctrl->state_lock); present = pciehp_card_present(ctrl); link_active = pciehp_check_link_active(ctrl); if (present <= 0 && link_active <= 0) { if (ctrl->state == BLINKINGON_STATE) { ctrl->state = OFF_STATE; cancel_delayed_work(&ctrl->button_work); pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, INDICATOR_NOOP); ctrl_info(ctrl, "Slot(%s): Card not present\n", slot_name(ctrl)); } mutex_unlock(&ctrl->state_lock); return; } switch (ctrl->state) { case BLINKINGON_STATE: cancel_delayed_work(&ctrl->button_work); fallthrough; case OFF_STATE: ctrl->state = POWERON_STATE; mutex_unlock(&ctrl->state_lock); if (present) ctrl_info(ctrl, "Slot(%s): Card present\n", slot_name(ctrl)); if (link_active) ctrl_info(ctrl, "Slot(%s): Link Up\n", slot_name(ctrl)); ctrl->request_result = pciehp_enable_slot(ctrl); break; default: mutex_unlock(&ctrl->state_lock); break; } } static int __pciehp_enable_slot(struct controller *ctrl) { u8 getstatus = 0; if (MRL_SENS(ctrl)) { pciehp_get_latch_status(ctrl, &getstatus); if (getstatus) { ctrl_info(ctrl, "Slot(%s): Latch open\n", slot_name(ctrl)); return -ENODEV; } } if (POWER_CTRL(ctrl)) { pciehp_get_power_status(ctrl, &getstatus); if (getstatus) { ctrl_info(ctrl, "Slot(%s): Already enabled\n", slot_name(ctrl)); return 0; } } return board_added(ctrl); } static int pciehp_enable_slot(struct controller *ctrl) { int ret; pm_runtime_get_sync(&ctrl->pcie->port->dev); ret = __pciehp_enable_slot(ctrl); if (ret && ATTN_BUTTN(ctrl)) /* may be blinking */ pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, INDICATOR_NOOP); pm_runtime_put(&ctrl->pcie->port->dev); mutex_lock(&ctrl->state_lock); ctrl->state = ret ? OFF_STATE : ON_STATE; mutex_unlock(&ctrl->state_lock); return ret; } static int __pciehp_disable_slot(struct controller *ctrl, bool safe_removal) { u8 getstatus = 0; if (POWER_CTRL(ctrl)) { pciehp_get_power_status(ctrl, &getstatus); if (!getstatus) { ctrl_info(ctrl, "Slot(%s): Already disabled\n", slot_name(ctrl)); return -EINVAL; } } remove_board(ctrl, safe_removal); return 0; } static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal) { int ret; pm_runtime_get_sync(&ctrl->pcie->port->dev); ret = __pciehp_disable_slot(ctrl, safe_removal); pm_runtime_put(&ctrl->pcie->port->dev); mutex_lock(&ctrl->state_lock); ctrl->state = OFF_STATE; mutex_unlock(&ctrl->state_lock); return ret; } int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot) { struct controller *ctrl = to_ctrl(hotplug_slot); mutex_lock(&ctrl->state_lock); switch (ctrl->state) { case BLINKINGON_STATE: case OFF_STATE: mutex_unlock(&ctrl->state_lock); /* * The IRQ thread becomes a no-op if the user pulls out the * card before the thread wakes up, so initialize to -ENODEV. 
*/ ctrl->request_result = -ENODEV; pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); wait_event(ctrl->requester, !atomic_read(&ctrl->pending_events) && !ctrl->ist_running); return ctrl->request_result; case POWERON_STATE: ctrl_info(ctrl, "Slot(%s): Already in powering on state\n", slot_name(ctrl)); break; case BLINKINGOFF_STATE: case ON_STATE: case POWEROFF_STATE: ctrl_info(ctrl, "Slot(%s): Already enabled\n", slot_name(ctrl)); break; default: ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n", slot_name(ctrl), ctrl->state); break; } mutex_unlock(&ctrl->state_lock); return -ENODEV; } int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot) { struct controller *ctrl = to_ctrl(hotplug_slot); mutex_lock(&ctrl->state_lock); switch (ctrl->state) { case BLINKINGOFF_STATE: case ON_STATE: mutex_unlock(&ctrl->state_lock); pciehp_request(ctrl, DISABLE_SLOT); wait_event(ctrl->requester, !atomic_read(&ctrl->pending_events) && !ctrl->ist_running); return ctrl->request_result; case POWEROFF_STATE: ctrl_info(ctrl, "Slot(%s): Already in powering off state\n", slot_name(ctrl)); break; case BLINKINGON_STATE: case OFF_STATE: case POWERON_STATE: ctrl_info(ctrl, "Slot(%s): Already disabled\n", slot_name(ctrl)); break; default: ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n", slot_name(ctrl), ctrl->state); break; } mutex_unlock(&ctrl->state_lock); return -ENODEV; }
linux-master
drivers/pci/hotplug/pciehp_ctrl.c
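The pciehp record above centres on a small per-slot state machine: the first attention-button press arms a five-second blinking window (BLINKINGON_STATE or BLINKINGOFF_STATE), a second press inside that window cancels, and expiry of the window is what actually issues the enable or disable request to the IRQ thread. The following userspace model is a minimal sketch of that window-and-cancel logic only, under the assumption of a simple integer tick clock; the state names mirror the driver, while main(), the tick counter and the printf output are illustrative and deliberately omit the locking, indicator updates and power-fault handling the real driver performs.

/*
 * Simplified model of the attention-button flow in
 * pciehp_handle_button_press()/pciehp_queue_pushbutton_work().
 * Not kernel code: a plain C program with an integer tick clock.
 */
#include <stdio.h>

enum slot_state { OFF_STATE, ON_STATE, BLINKINGON_STATE, BLINKINGOFF_STATE };

struct slot_model {
        enum slot_state state;
        int deadline;           /* tick at which the 5-tick blinking window expires */
};

static void button_press(struct slot_model *s, int now)
{
        switch (s->state) {
        case ON_STATE:                          /* "will power off in 5 sec" */
                s->state = BLINKINGOFF_STATE;
                s->deadline = now + 5;
                break;
        case OFF_STATE:                         /* "will power on in 5 sec" */
                s->state = BLINKINGON_STATE;
                s->deadline = now + 5;
                break;
        case BLINKINGOFF_STATE:                 /* second press cancels the request */
                s->state = ON_STATE;
                break;
        case BLINKINGON_STATE:
                s->state = OFF_STATE;
                break;
        }
}

static void tick(struct slot_model *s, int now)
{
        if (now < s->deadline)
                return;
        /* Window expired while still blinking: issue the real request. */
        if (s->state == BLINKINGOFF_STATE) {
                s->state = OFF_STATE;           /* driver: pciehp_request(ctrl, DISABLE_SLOT) */
                printf("tick %d: slot powered off\n", now);
        } else if (s->state == BLINKINGON_STATE) {
                s->state = ON_STATE;            /* driver: pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC) */
                printf("tick %d: slot powered on\n", now);
        }
}

int main(void)
{
        struct slot_model s = { .state = ON_STATE };
        int now;

        button_press(&s, 0);                    /* arm power-off */
        button_press(&s, 2);                    /* cancel inside the window */
        button_press(&s, 3);                    /* arm power-off again */
        for (now = 4; now <= 9; now++)
                tick(&s, now);                  /* fires once the window expires */
        return s.state == OFF_STATE ? 0 : 1;
}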
// SPDX-License-Identifier: GPL-2.0+ /* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/kthread.h> #include "cpqphp.h" static u32 configure_new_device(struct controller *ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static int configure_new_function(struct controller *ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static void interrupt_event_handler(struct controller *ctrl); static struct task_struct *cpqhp_event_thread; static struct timer_list *pushbutton_pending; /* = NULL */ /* delay is in jiffies to wait for */ static void long_delay(int delay) { /* * XXX(hch): if someone is bored please convert all callers * to call msleep_interruptible directly. They really want * to specify timeouts in natural units and spend a lot of * effort converting them to jiffies.. */ msleep_interruptible(jiffies_to_msecs(delay)); } /* FIXME: The following line needs to be somewhere else... */ #define WRONG_BUS_FREQUENCY 0x07 static u8 handle_switch_change(u8 change, struct controller *ctrl) { int hp_slot; u8 rc = 0; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* Switch Change */ dbg("cpqsbd: Switch interrupt received.\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x1L << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); /* this is the structure that tells the worker thread * what to do */ taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { /* * Switch opened */ func->switch_save = 0; taskInfo->event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ func->switch_save = 0x10; taskInfo->event_type = INT_SWITCH_CLOSE; } } } return rc; } /** * cpqhp_find_slot - find the struct slot of given device * @ctrl: scan lots of this controller * @device: the device id to find */ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device) { struct slot *slot = ctrl->slot; while (slot && (slot->device != device)) slot = slot->next; return slot; } static u8 handle_presence_change(u16 change, struct controller *ctrl) { int hp_slot; u8 rc = 0; u8 temp_byte; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; struct slot *p_slot; if (!change) return 0; /* * Presence Change */ dbg("cpqsbd: Presence/Notify input change.\n"); dbg(" Changed bits are 0x%4.4x\n", change); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x0101 << hp_slot)) { /* * this one changed. 
*/ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; p_slot = cpqhp_find_slot(ctrl, hp_slot + (readb(ctrl->hpc_reg + SLOT_MASK) >> 4)); if (!p_slot) return 0; /* If the switch closed, must be a button * If not in button mode, nevermind */ if (func->switch_save && (ctrl->push_button == 1)) { temp_word = ctrl->ctrl_int_comp >> 16; temp_byte = (temp_word >> hp_slot) & 0x01; temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; if (temp_byte != func->presence_save) { /* * button Pressed (doesn't do anything) */ dbg("hp_slot %d button pressed\n", hp_slot); taskInfo->event_type = INT_BUTTON_PRESS; } else { /* * button Released - TAKE ACTION!!!! */ dbg("hp_slot %d button released\n", hp_slot); taskInfo->event_type = INT_BUTTON_RELEASE; /* Cancel if we are still blinking */ if ((p_slot->state == BLINKINGON_STATE) || (p_slot->state == BLINKINGOFF_STATE)) { taskInfo->event_type = INT_BUTTON_CANCEL; dbg("hp_slot %d button cancel\n", hp_slot); } else if ((p_slot->state == POWERON_STATE) || (p_slot->state == POWEROFF_STATE)) { /* info(msg_button_ignore, p_slot->number); */ taskInfo->event_type = INT_BUTTON_IGNORE; dbg("hp_slot %d button ignore\n", hp_slot); } } } else { /* Switch is open, assume a presence change * Save the presence state */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if ((!(ctrl->ctrl_int_comp & (0x010000 << hp_slot))) || (!(ctrl->ctrl_int_comp & (0x01000000 << hp_slot)))) { /* Present */ taskInfo->event_type = INT_PRESENCE_ON; } else { /* Not Present */ taskInfo->event_type = INT_PRESENCE_OFF; } } } } return rc; } static u8 handle_power_fault(u8 change, struct controller *ctrl) { int hp_slot; u8 rc = 0; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* * power fault */ info("power fault interrupt\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x01 << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { /* * power fault Cleared */ func->status = 0x00; taskInfo->event_type = INT_POWER_FAULT_CLEAR; } else { /* * power fault */ taskInfo->event_type = INT_POWER_FAULT; if (ctrl->rev < 4) { amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); set_SOGO(ctrl); /* this is a fatal condition, we want * to crash the machine to protect from * data corruption. simulated_NMI * shouldn't ever return */ /* FIXME simulated_NMI(hp_slot, ctrl); */ /* The following code causes a software * crash just in case simulated_NMI did * return */ /*FIXME panic(msg_power_fault); */ } else { /* set power fault status for this board */ func->status = 0xFF; info("power fault bit %x set\n", hp_slot); } } } } return rc; } /** * sort_by_size - sort nodes on the list by their length, smallest first. 
* @head: list to sort */ static int sort_by_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length > (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length > current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * sort_by_max_size - sort nodes on the list by their length, largest first. * @head: list to sort */ static int sort_by_max_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length < (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length < current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * do_pre_bridge_resource_split - find node of resources that are unused * @head: new list head * @orig_head: original list head * @alignment: max node size (?) */ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head, struct pci_resource **orig_head, u32 alignment) { struct pci_resource *prevnode = NULL; struct pci_resource *node; struct pci_resource *split_node; u32 rc; u32 temp_dword; dbg("do_pre_bridge_resource_split\n"); if (!(*head) || !(*orig_head)) return NULL; rc = cpqhp_resource_sort_and_combine(head); if (rc) return NULL; if ((*head)->base != (*orig_head)->base) return NULL; if ((*head)->length == (*orig_head)->length) return NULL; /* If we got here, there the bridge requires some of the resource, but * we may be able to split some off of the front */ node = *head; if (node->length & (alignment - 1)) { /* this one isn't an aligned length, so we'll make a new entry * and split it up. 
*/ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = (node->length | (alignment-1)) + 1 - alignment; split_node->base = node->base; split_node->length = temp_dword; node->length -= temp_dword; node->base += split_node->length; /* Put it in the list */ *head = split_node; split_node->next = node; } if (node->length < alignment) return NULL; /* Now unlink it */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; return node; } /** * do_bridge_resource_split - find one node of resources that aren't in use * @head: list head * @alignment: max node size (?) */ static struct pci_resource *do_bridge_resource_split(struct pci_resource **head, u32 alignment) { struct pci_resource *prevnode = NULL; struct pci_resource *node; u32 rc; u32 temp_dword; rc = cpqhp_resource_sort_and_combine(head); if (rc) return NULL; node = *head; while (node->next) { prevnode = node; node = node->next; kfree(prevnode); } if (node->length < alignment) goto error; if (node->base & (alignment - 1)) { /* Short circuit if adjusted size is too small */ temp_dword = (node->base | (alignment-1)) + 1; if ((node->length - (temp_dword - node->base)) < alignment) goto error; node->length -= (temp_dword - node->base); node->base = temp_dword; } if (node->length & (alignment - 1)) /* There's stuff in use after this node */ goto error; return node; error: kfree(node); return NULL; } /** * get_io_resource - find first node of given size not in ISA aliasing window. * @head: list to search * @size: size of node to find, must be a power of two. * * Description: This function sorts the resource list by size and then * returns the first node of "size" length that is not in the ISA aliasing * window. If it finds a node larger than "size" it will split it up. 
*/ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (!(*head)) return NULL; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { if (node->length < size) continue; if (node->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end */ /* For IO make sure it's not in the ISA aliasing space */ if (node->base & 0x300L) continue; /* If we got here, then it is the right size * Now take it out of the list and break */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * get_max_resource - get largest node which has at least the given size. * @head: the list to search the node in * @size: the minimum size of the node to find * * Description: Gets the largest node that is at least "size" big from the * list pointed to by head. It aligns the node on top and bottom * to "size" alignment before returning it. */ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size) { struct pci_resource *max; struct pci_resource *temp; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_max_size(head)) return NULL; for (max = *head; max; max = max->next) { /* If not big enough we could probably just bail, * instead we'll continue to the next. 
*/ if (max->length < size) continue; if (max->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (max->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((max->length - (temp_dword - max->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = max->base; split_node->length = temp_dword - max->base; max->base = temp_dword; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } if ((max->base + max->length) & (size - 1)) { /* this one isn't end aligned properly at the top * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = ((max->base + max->length) & ~(size - 1)); split_node->base = temp_dword; split_node->length = max->length + max->base - split_node->base; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } /* Make sure it didn't shrink too much when we aligned it */ if (max->length < size) continue; /* Now take it out of the list */ temp = *head; if (temp == max) { *head = max->next; } else { while (temp && temp->next != max) temp = temp->next; if (temp) temp->next = max->next; } max->next = NULL; break; } return max; } /** * get_resource - find resource of given size and split up larger ones. * @head: the list to search for resources * @size: the size limit to use * * Description: This function sorts the resource list by size and then * returns the first node of "size" length. If it finds a node * larger than "size" it will split it up. * * size must be a power of two. */ static struct pci_resource *get_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { dbg("%s: req_size =%x node=%p, base=%x, length=%x\n", __func__, size, node, node->base, node->length); if (node->length < size) continue; if (node->base & (size - 1)) { dbg("%s: not aligned\n", __func__); /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { dbg("%s: too big\n", __func__); /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end */ dbg("%s: got one!!!\n", __func__); /* If we got here, then it is the right size * Now take it out of the list */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = 
prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * cpqhp_resource_sort_and_combine - sort nodes by base addresses and clean up * @head: the list to sort and clean up * * Description: Sorts all of the nodes in the list in ascending order by * their base addresses. Also does garbage collection by * combining adjacent nodes. * * Returns %0 if success. */ int cpqhp_resource_sort_and_combine(struct pci_resource **head) { struct pci_resource *node1; struct pci_resource *node2; int out_of_order = 1; dbg("%s: head = %p, *head = %p\n", __func__, head, *head); if (!(*head)) return 1; dbg("*head->next = %p\n", (*head)->next); if (!(*head)->next) return 0; /* only one item on the list, already sorted! */ dbg("*head->base = 0x%x\n", (*head)->base); dbg("*head->next->base = 0x%x\n", (*head)->next->base); while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->base > (*head)->next->base)) { node1 = *head; (*head) = (*head)->next; node1->next = (*head)->next; (*head)->next = node1; out_of_order++; } node1 = (*head); while (node1->next && node1->next->next) { if (node1->next->base > node1->next->next->base) { out_of_order++; node2 = node1->next; node1->next = node1->next->next; node1 = node1->next; node2->next = node1->next; node1->next = node2; } else node1 = node1->next; } } /* End of out_of_order loop */ node1 = *head; while (node1 && node1->next) { if ((node1->base + node1->length) == node1->next->base) { /* Combine */ dbg("8..\n"); node1->length += node1->next->length; node2 = node1->next; node1->next = node1->next->next; kfree(node2); } else node1 = node1->next; } return 0; } irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data) { struct controller *ctrl = data; u8 schedule_flag = 0; u8 reset; u16 misc; u32 Diff; misc = readw(ctrl->hpc_reg + MISC); /* * Check to see if it was our interrupt */ if (!(misc & 0x000C)) return IRQ_NONE; if (misc & 0x0004) { /* * Serial Output interrupt Pending */ /* Clear the interrupt */ misc |= 0x0004; writew(misc, ctrl->hpc_reg + MISC); /* Read to clear posted writes */ misc = readw(ctrl->hpc_reg + MISC); dbg("%s - waking up\n", __func__); wake_up_interruptible(&ctrl->queue); } if (misc & 0x0008) { /* General-interrupt-input interrupt Pending */ Diff = readl(ctrl->hpc_reg + INT_INPUT_CLEAR) ^ ctrl->ctrl_int_comp; ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); /* Clear the interrupt */ writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR); /* Read it back to clear any posted writes */ readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (!Diff) /* Clear all interrupts */ writel(0xFFFFFFFF, ctrl->hpc_reg + INT_INPUT_CLEAR); schedule_flag += handle_switch_change((u8)(Diff & 0xFFL), ctrl); schedule_flag += handle_presence_change((u16)((Diff & 0xFFFF0000L) >> 16), ctrl); schedule_flag += handle_power_fault((u8)((Diff & 0xFF00L) >> 8), ctrl); } reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); if (reset & 0x40) { /* Bus reset has completed */ reset &= 0xCF; writeb(reset, ctrl->hpc_reg + RESET_FREQ_MODE); reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); wake_up_interruptible(&ctrl->queue); } if (schedule_flag) { wake_up_process(cpqhp_event_thread); dbg("Waking even thread"); } return IRQ_HANDLED; } /** * cpqhp_slot_create - Creates a node and adds it to the proper bus. * @busnumber: bus where new node is to be located * * Returns pointer to the new node or %NULL if unsuccessful. 
*/ struct pci_func *cpqhp_slot_create(u8 busnumber) { struct pci_func *new_slot; struct pci_func *next; new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); if (new_slot == NULL) return new_slot; new_slot->next = NULL; new_slot->configured = 1; if (cpqhp_slot_list[busnumber] == NULL) { cpqhp_slot_list[busnumber] = new_slot; } else { next = cpqhp_slot_list[busnumber]; while (next->next != NULL) next = next->next; next->next = new_slot; } return new_slot; } /** * slot_remove - Removes a node from the linked list of slots. * @old_slot: slot to remove * * Returns %0 if successful, !0 otherwise. */ static int slot_remove(struct pci_func *old_slot) { struct pci_func *next; if (old_slot == NULL) return 1; next = cpqhp_slot_list[old_slot->bus]; if (next == NULL) return 1; if (next == old_slot) { cpqhp_slot_list[old_slot->bus] = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } while ((next->next != old_slot) && (next->next != NULL)) next = next->next; if (next->next == old_slot) { next->next = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } else return 2; } /** * bridge_slot_remove - Removes a node from the linked list of slots. * @bridge: bridge to remove * * Returns %0 if successful, !0 otherwise. */ static int bridge_slot_remove(struct pci_func *bridge) { u8 subordinateBus, secondaryBus; u8 tempBus; struct pci_func *next; secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF; subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF; for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { next = cpqhp_slot_list[tempBus]; while (!slot_remove(next)) next = cpqhp_slot_list[tempBus]; } next = cpqhp_slot_list[bridge->bus]; if (next == NULL) return 1; if (next == bridge) { cpqhp_slot_list[bridge->bus] = bridge->next; goto out; } while ((next->next != bridge) && (next->next != NULL)) next = next->next; if (next->next != bridge) return 2; next->next = bridge->next; out: kfree(bridge); return 0; } /** * cpqhp_slot_find - Looks for a node by bus, and device, multiple functions accessed * @bus: bus to find * @device: device to find * @index: is %0 for first function found, %1 for the second... * * Returns pointer to the node if successful, %NULL otherwise. */ struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index) { int found = -1; struct pci_func *func; func = cpqhp_slot_list[bus]; if ((func == NULL) || ((func->device == device) && (index == 0))) return func; if (func->device == device) found++; while (func->next != NULL) { func = func->next; if (func->device == device) found++; if (found == index) return func; } return NULL; } /* DJZ: I don't think is_bridge will work as is. * FIXME */ static int is_bridge(struct pci_func *func) { /* Check the header type */ if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01) return 1; else return 0; } /** * set_controller_speed - set the frequency and/or mode of a specific controller segment. * @ctrl: controller to change frequency/mode for. * @adapter_speed: the speed of the adapter we want to match. * @hp_slot: the slot number where the adapter is installed. * * Returns %0 if we successfully change frequency and/or mode to match the * adapter speed. 
*/ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) { struct slot *slot; struct pci_bus *bus = ctrl->pci_bus; u8 reg; u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); u16 reg16; u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); if (bus->cur_bus_speed == adapter_speed) return 0; /* We don't allow freq/mode changes if we find another adapter running * in another slot on this controller */ for (slot = ctrl->slot; slot; slot = slot->next) { if (slot->device == (hp_slot + ctrl->slot_device_offset)) continue; if (get_presence_status(ctrl, slot) == 0) continue; /* If another adapter is running on the same segment but at a * lower speed/mode, we allow the new adapter to function at * this rate if supported */ if (bus->cur_bus_speed < adapter_speed) return 0; return 1; } /* If the controller doesn't support freq/mode changes and the * controller is running at a higher mode, we bail */ if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) return 1; /* But we allow the adapter to run at a lower rate if possible */ if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) return 0; /* We try to set the max speed supported by both the adapter and * controller */ if (bus->max_bus_speed < adapter_speed) { if (bus->cur_bus_speed == bus->max_bus_speed) return 0; adapter_speed = bus->max_bus_speed; } writel(0x0L, ctrl->hpc_reg + LED_CONTROL); writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); if (adapter_speed != PCI_SPEED_133MHz_PCIX) reg = 0xF5; else reg = 0xF4; pci_write_config_byte(ctrl->pci_dev, 0x41, reg); reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); reg16 &= ~0x000F; switch (adapter_speed) { case(PCI_SPEED_133MHz_PCIX): reg = 0x75; reg16 |= 0xB; break; case(PCI_SPEED_100MHz_PCIX): reg = 0x74; reg16 |= 0xA; break; case(PCI_SPEED_66MHz_PCIX): reg = 0x73; reg16 |= 0x9; break; case(PCI_SPEED_66MHz): reg = 0x73; reg16 |= 0x1; break; default: /* 33MHz PCI 2.2 */ reg = 0x71; break; } reg16 |= 0xB << 12; writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ); mdelay(5); /* Re-enable interrupts */ writel(0, ctrl->hpc_reg + INT_MASK); pci_write_config_byte(ctrl->pci_dev, 0x41, reg); /* Restart state machine */ reg = ~0xF; pci_read_config_byte(ctrl->pci_dev, 0x43, &reg); pci_write_config_byte(ctrl->pci_dev, 0x43, reg); /* Only if mode change...*/ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); mdelay(1100); /* Restore LED/Slot state */ writel(leds, ctrl->hpc_reg + LED_CONTROL); writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); bus->cur_bus_speed = adapter_speed; slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); info("Successfully changed frequency/mode for adapter in slot %d\n", slot->number); return 0; } /* the following routines constitute the bulk of the * hotplug controller logic */ /** * board_replaced - Called after a board has been replaced in the system. * @func: PCI device/function information * @ctrl: hotplug controller * * This is only used if we don't have resources for hot add. * Turns power on for the board. * Checks to see if board is the same. * If board is same, reconfigures it. * If board isn't same, turns it back off. 
*/ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) { struct pci_bus *bus = ctrl->pci_bus; u8 hp_slot; u8 temp_byte; u8 adapter_speed; u32 rc = 0; hp_slot = func->device - ctrl->slot_device_offset; /* * The switch is open. */ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) rc = INTERLOCK_OPEN; /* * The board is already on */ else if (is_slot_enabled(ctrl, hp_slot)) rc = CARD_FUNCTIONING; else { mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); /* Change bits in slot power register to force another shift out * NOTE: this is to work around the timer bug */ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); writeb(0x00, ctrl->hpc_reg + SLOT_POWER); writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); adapter_speed = get_adapter_speed(ctrl, hp_slot); if (bus->cur_bus_speed != adapter_speed) if (set_controller_speed(ctrl, adapter_speed, hp_slot)) rc = WRONG_BUS_FREQUENCY; /* turn off board without attaching to the bus */ disable_slot_power(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; mutex_lock(&ctrl->crit_sect); slot_enable(ctrl, hp_slot); green_LED_blink(ctrl, hp_slot); amber_LED_off(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); /* Wait for ~1 second because of hot plug spec */ long_delay(1*HZ); /* Check for a power fault */ if (func->status == 0xFF) { /* power fault occurred, but it was benign */ rc = POWER_FAILURE; func->status = 0; } else rc = cpqhp_valid_replace(ctrl, func); if (!rc) { /* It must be the same board */ rc = cpqhp_configure_board(ctrl, func); /* If configuration fails, turn it off * Get slot won't work for devices behind * bridges, but in this case it will always be * called for the "base" bus/dev/func of an * adapter. */ mutex_lock(&ctrl->crit_sect); amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); slot_disable(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; else return 1; } else { /* Something is wrong * Get slot won't work for devices behind bridges, but * in this case it will always be called for the "base" * bus/dev/func of an adapter. */ mutex_lock(&ctrl->crit_sect); amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); slot_disable(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); } } return rc; } /** * board_added - Called after a board has been added to the system. * @func: PCI device/function info * @ctrl: hotplug controller * * Turns power on for the board. * Configures board. 
*/ static u32 board_added(struct pci_func *func, struct controller *ctrl) { u8 hp_slot; u8 temp_byte; u8 adapter_speed; int index; u32 temp_register = 0xFFFFFFFF; u32 rc = 0; struct pci_func *new_slot = NULL; struct pci_bus *bus = ctrl->pci_bus; struct resource_lists res_lists; hp_slot = func->device - ctrl->slot_device_offset; dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n", __func__, func->device, ctrl->slot_device_offset, hp_slot); mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); /* Change bits in slot power register to force another shift out * NOTE: this is to work around the timer bug */ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER); writeb(0x00, ctrl->hpc_reg + SLOT_POWER); writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); adapter_speed = get_adapter_speed(ctrl, hp_slot); if (bus->cur_bus_speed != adapter_speed) if (set_controller_speed(ctrl, adapter_speed, hp_slot)) rc = WRONG_BUS_FREQUENCY; /* turn off board without attaching to the bus */ disable_slot_power(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); if (rc) return rc; cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); /* turn on board and blink green LED */ dbg("%s: before down\n", __func__); mutex_lock(&ctrl->crit_sect); dbg("%s: after down\n", __func__); dbg("%s: before slot_enable\n", __func__); slot_enable(ctrl, hp_slot); dbg("%s: before green_LED_blink\n", __func__); green_LED_blink(ctrl, hp_slot); dbg("%s: before amber_LED_blink\n", __func__); amber_LED_off(ctrl, hp_slot); dbg("%s: before set_SOGO\n", __func__); set_SOGO(ctrl); /* Wait for SOBS to be unset */ dbg("%s: before wait_for_ctrl_irq\n", __func__); wait_for_ctrl_irq(ctrl); dbg("%s: after wait_for_ctrl_irq\n", __func__); dbg("%s: before up\n", __func__); mutex_unlock(&ctrl->crit_sect); dbg("%s: after up\n", __func__); /* Wait for ~1 second because of hot plug spec */ dbg("%s: before long_delay\n", __func__); long_delay(1*HZ); dbg("%s: after long_delay\n", __func__); dbg("%s: func status = %x\n", __func__, func->status); /* Check for a power fault */ if (func->status == 0xFF) { /* power fault occurred, but it was benign */ temp_register = 0xFFFFFFFF; dbg("%s: temp register set to %x by power fault\n", __func__, temp_register); rc = POWER_FAILURE; func->status = 0; } else { /* Get vendor/device ID u32 */ ctrl->pci_bus->number = func->bus; rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, func->function), PCI_VENDOR_ID, &temp_register); dbg("%s: pci_read_config_dword returns %d\n", __func__, rc); dbg("%s: temp_register is %x\n", __func__, temp_register); if (rc != 0) { /* Something's wrong here */ temp_register = 0xFFFFFFFF; dbg("%s: temp register set to %x by error\n", __func__, temp_register); } /* Preset return code. It will be changed later if things go okay. 
*/ rc = NO_ADAPTER_PRESENT; } /* All F's is an empty slot or an invalid board */ if (temp_register != 0xFFFFFFFF) { res_lists.io_head = ctrl->io_head; res_lists.mem_head = ctrl->mem_head; res_lists.p_mem_head = ctrl->p_mem_head; res_lists.bus_head = ctrl->bus_head; res_lists.irqs = NULL; rc = configure_new_device(ctrl, func, 0, &res_lists); dbg("%s: back from configure_new_device\n", __func__); ctrl->io_head = res_lists.io_head; ctrl->mem_head = res_lists.mem_head; ctrl->p_mem_head = res_lists.p_mem_head; ctrl->bus_head = res_lists.bus_head; cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->io_head)); cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (rc) { mutex_lock(&ctrl->crit_sect); amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); slot_disable(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); return rc; } else { cpqhp_save_slot_config(ctrl, func); } func->status = 0; func->switch_save = 0x10; func->is_a_board = 0x01; /* next, we will instantiate the linux pci_dev structures (with * appropriate driver notification, if already present) */ dbg("%s: configure linux pci_dev structure\n", __func__); index = 0; do { new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++); if (new_slot && !new_slot->pci_dev) cpqhp_configure_device(ctrl, new_slot); } while (new_slot); mutex_lock(&ctrl->crit_sect); green_LED_on(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); } else { mutex_lock(&ctrl->crit_sect); amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); slot_disable(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); return rc; } return 0; } /** * remove_board - Turns off slot and LEDs * @func: PCI device/function info * @replace_flag: whether replacing or adding a new device * @ctrl: target controller */ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controller *ctrl) { int index; u8 skip = 0; u8 device; u8 hp_slot; u8 temp_byte; struct resource_lists res_lists; struct pci_func *temp_func; if (cpqhp_unconfigure_device(func)) return 1; device = func->device; hp_slot = func->device - ctrl->slot_device_offset; dbg("In %s, hp_slot = %d\n", __func__, hp_slot); /* When we get here, it is safe to change base address registers. * We will attempt to save the base address register lengths */ if (replace_flag || !ctrl->add_support) cpqhp_save_base_addr_length(ctrl, func); else if (!func->bus_head && !func->mem_head && !func->p_mem_head && !func->io_head) { /* Here we check to see if we've saved any of the board's * resources already. If so, we'll skip the attempt to * determine what's being used. 
*/ index = 0; temp_func = cpqhp_slot_find(func->bus, func->device, index++); while (temp_func) { if (temp_func->bus_head || temp_func->mem_head || temp_func->p_mem_head || temp_func->io_head) { skip = 1; break; } temp_func = cpqhp_slot_find(temp_func->bus, temp_func->device, index++); } if (!skip) cpqhp_save_used_resources(ctrl, func); } /* Change status to shutdown */ if (func->is_a_board) func->status = 0x01; func->configured = 0; mutex_lock(&ctrl->crit_sect); green_LED_off(ctrl, hp_slot); slot_disable(ctrl, hp_slot); set_SOGO(ctrl); /* turn off SERR for slot */ temp_byte = readb(ctrl->hpc_reg + SLOT_SERR); temp_byte &= ~(0x01 << hp_slot); writeb(temp_byte, ctrl->hpc_reg + SLOT_SERR); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); if (!replace_flag && ctrl->add_support) { while (func) { res_lists.io_head = ctrl->io_head; res_lists.mem_head = ctrl->mem_head; res_lists.p_mem_head = ctrl->p_mem_head; res_lists.bus_head = ctrl->bus_head; cpqhp_return_board_resources(func, &res_lists); ctrl->io_head = res_lists.io_head; ctrl->mem_head = res_lists.mem_head; ctrl->p_mem_head = res_lists.p_mem_head; ctrl->bus_head = res_lists.bus_head; cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); cpqhp_resource_sort_and_combine(&(ctrl->io_head)); cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (is_bridge(func)) { bridge_slot_remove(func); } else slot_remove(func); func = cpqhp_slot_find(ctrl->bus, device, 0); } /* Setup slot structure with entry for empty slot */ func = cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->switch_save = 0x10; func->is_a_board = 0; func->p_task_event = NULL; } return 0; } static void pushbutton_helper_thread(struct timer_list *t) { pushbutton_pending = t; wake_up_process(cpqhp_event_thread); } /* this is the main worker thread */ static int event_thread(void *data) { struct controller *ctrl; while (1) { dbg("!!!!event_thread sleeping\n"); set_current_state(TASK_INTERRUPTIBLE); schedule(); if (kthread_should_stop()) break; /* Do stuff here */ if (pushbutton_pending) cpqhp_pushbutton_thread(pushbutton_pending); else for (ctrl = cpqhp_ctrl_list; ctrl; ctrl = ctrl->next) interrupt_event_handler(ctrl); } dbg("event_thread signals exit\n"); return 0; } int cpqhp_event_start_thread(void) { cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event"); if (IS_ERR(cpqhp_event_thread)) { err("Can't start up our event thread\n"); return PTR_ERR(cpqhp_event_thread); } return 0; } void cpqhp_event_stop_thread(void) { kthread_stop(cpqhp_event_thread); } static void interrupt_event_handler(struct controller *ctrl) { int loop; int change = 1; struct pci_func *func; u8 hp_slot; struct slot *p_slot; while (change) { change = 0; for (loop = 0; loop < 10; loop++) { /* dbg("loop %d\n", loop); */ if (ctrl->event_queue[loop].event_type != 0) { hp_slot = ctrl->event_queue[loop].hp_slot; func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); if (!func) return; p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); if (!p_slot) return; dbg("hp_slot %d, func %p, p_slot %p\n", hp_slot, func, p_slot); if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) { dbg("button pressed\n"); } else if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) { dbg("button cancel\n"); del_timer(&p_slot->task_event); mutex_lock(&ctrl->crit_sect); if (p_slot->state == 
BLINKINGOFF_STATE) { /* slot is on */ dbg("turn on green LED\n"); green_LED_on(ctrl, hp_slot); } else if (p_slot->state == BLINKINGON_STATE) { /* slot is off */ dbg("turn off green LED\n"); green_LED_off(ctrl, hp_slot); } info(msg_button_cancel, p_slot->number); p_slot->state = STATIC_STATE; amber_LED_off(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); } /*** button Released (No action on press...) */ else if (ctrl->event_queue[loop].event_type == INT_BUTTON_RELEASE) { dbg("button release\n"); if (is_slot_enabled(ctrl, hp_slot)) { dbg("slot is on\n"); p_slot->state = BLINKINGOFF_STATE; info(msg_button_off, p_slot->number); } else { dbg("slot is off\n"); p_slot->state = BLINKINGON_STATE; info(msg_button_on, p_slot->number); } mutex_lock(&ctrl->crit_sect); dbg("blink green LED and turn off amber\n"); amber_LED_off(ctrl, hp_slot); green_LED_blink(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); mutex_unlock(&ctrl->crit_sect); timer_setup(&p_slot->task_event, pushbutton_helper_thread, 0); p_slot->hp_slot = hp_slot; p_slot->ctrl = ctrl; /* p_slot->physical_slot = physical_slot; */ p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */ dbg("add_timer p_slot = %p\n", p_slot); add_timer(&p_slot->task_event); } /***********POWER FAULT */ else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) { dbg("power fault\n"); } ctrl->event_queue[loop].event_type = 0; change = 1; } } /* End of FOR loop */ } } /** * cpqhp_pushbutton_thread - handle pushbutton events * @t: pointer to struct timer_list which holds all timer-related callbacks * * Scheduled procedure to handle blocking stuff for the pushbuttons. * Handles all pending events and exits. */ void cpqhp_pushbutton_thread(struct timer_list *t) { u8 hp_slot; struct pci_func *func; struct slot *p_slot = from_timer(p_slot, t, task_event); struct controller *ctrl = (struct controller *) p_slot->ctrl; pushbutton_pending = NULL; hp_slot = p_slot->hp_slot; if (is_slot_enabled(ctrl, hp_slot)) { p_slot->state = POWEROFF_STATE; /* power Down board */ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In power_down_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { dbg("Error! func NULL in %s\n", __func__); return; } if (cpqhp_process_SS(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_on(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); } p_slot->state = STATIC_STATE; } else { p_slot->state = POWERON_STATE; /* slot is off */ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0); dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl); if (!func) { dbg("Error! 
func NULL in %s\n", __func__); return; } if (ctrl != NULL) { if (cpqhp_process_SI(ctrl, func) != 0) { amber_LED_on(ctrl, hp_slot); green_LED_off(ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); } } p_slot->state = STATIC_STATE; } } int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func) { u8 device, hp_slot; u16 temp_word; u32 tempdword; int rc; struct slot *p_slot; tempdword = 0; device = func->device; hp_slot = device - ctrl->slot_device_offset; p_slot = cpqhp_find_slot(ctrl, device); /* Check to see if the interlock is closed */ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (tempdword & (0x01 << hp_slot)) return 1; if (func->is_a_board) { rc = board_replaced(func, ctrl); } else { /* add board */ slot_remove(func); func = cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->is_a_board = 1; /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { func->switch_save = 0; } else { func->switch_save = 0x10; } rc = board_added(func, ctrl); if (rc) { if (is_bridge(func)) { bridge_slot_remove(func); } else slot_remove(func); /* Setup slot structure with entry for empty slot */ func = cpqhp_slot_create(ctrl->bus); if (func == NULL) return 1; func->bus = ctrl->bus; func->device = device; func->function = 0; func->configured = 0; func->is_a_board = 0; /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { func->switch_save = 0; } else { func->switch_save = 0x10; } } } if (rc) dbg("%s: rc = %d\n", __func__, rc); return rc; } int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func) { u8 device, class_code, header_type, BCR; u8 index = 0; u8 replace_flag; u32 rc = 0; unsigned int devfn; struct slot *p_slot; struct pci_bus *pci_bus = ctrl->pci_bus; device = func->device; func = cpqhp_slot_find(ctrl->bus, device, index++); p_slot = cpqhp_find_slot(ctrl, device); /* Make sure there are no video controllers here */ while (func && !rc) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check the Class Code */ rc = pci_bus_read_config_byte(pci_bus, devfn, 0x0B, &class_code); if (rc) return rc; if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display/Video adapter (not supported) */ rc = REMOVE_NOT_SUPPORTED; } else { /* See if it's a bridge */ rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if (rc) return rc; /* If it's a bridge, check the VGA Enable bit */ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR); if (rc) return rc; /* If the VGA Enable bit is set, remove isn't * supported */ if (BCR & PCI_BRIDGE_CTL_VGA) rc = REMOVE_NOT_SUPPORTED; } } func = cpqhp_slot_find(ctrl->bus, device, index++); } func = cpqhp_slot_find(ctrl->bus, device, 0); if ((func != NULL) && !rc) { /* FIXME: Replace flag should be passed into process_SS */ replace_flag = !(ctrl->add_support); rc = remove_board(func, replace_flag, ctrl); } else if (!rc) { rc = 1; } return rc; } /** * switch_leds - switch the leds, go from one site to the 
other. * @ctrl: controller to use * @num_of_slots: number of slots to use * @work_LED: LED control value * @direction: 1 to start from the left side, 0 to start right. */ static void switch_leds(struct controller *ctrl, const int num_of_slots, u32 *work_LED, const int direction) { int loop; for (loop = 0; loop < num_of_slots; loop++) { if (direction) *work_LED = *work_LED >> 1; else *work_LED = *work_LED << 1; writel(*work_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq(ctrl); /* Get ready for next iteration */ long_delay((2*HZ)/10); } } /** * cpqhp_hardware_test - runs hardware tests * @ctrl: target controller * @test_num: the number written to the "test" file in sysfs. * * For hot plug ctrl folks to play with. */ int cpqhp_hardware_test(struct controller *ctrl, int test_num) { u32 save_LED; u32 work_LED; int loop; int num_of_slots; num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f; switch (test_num) { case 1: /* Do stuff here! */ /* Do that funky LED thing */ /* so we can restore them later */ save_LED = readl(ctrl->hpc_reg + LED_CONTROL); work_LED = 0x01010101; switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x01010000; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x00000101; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); switch_leds(ctrl, num_of_slots, &work_LED, 0); switch_leds(ctrl, num_of_slots, &work_LED, 1); work_LED = 0x01010000; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); for (loop = 0; loop < num_of_slots; loop++) { set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq(ctrl); /* Get ready for next iteration */ long_delay((3*HZ)/10); work_LED = work_LED >> 16; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOGO interrupt */ wait_for_ctrl_irq(ctrl); /* Get ready for next iteration */ long_delay((3*HZ)/10); work_LED = work_LED << 16; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); work_LED = work_LED << 1; writel(work_LED, ctrl->hpc_reg + LED_CONTROL); } /* put it back the way it was */ writel(save_LED, ctrl->hpc_reg + LED_CONTROL); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); break; case 2: /* Do other stuff here! */ break; case 3: /* and more... */ break; } return 0; } /** * configure_new_device - Configures the PCI header information of one board. * @ctrl: pointer to controller structure * @func: pointer to function structure * @behind_bridge: 1 if this is a recursive call, 0 if not * @resources: pointer to set of resource lists * * Returns 0 if success. 
*/ static u32 configure_new_device(struct controller *ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources) { u8 temp_byte, function, max_functions, stop_it; int rc; u32 ID; struct pci_func *new_slot; int index; new_slot = func; dbg("%s\n", __func__); /* Check for Multi-function device */ ctrl->pci_bus->number = func->bus; rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte); if (rc) { dbg("%s: rc = %d\n", __func__, rc); return rc; } if (temp_byte & 0x80) /* Multi-function device */ max_functions = 8; else max_functions = 1; function = 0; do { rc = configure_new_function(ctrl, new_slot, behind_bridge, resources); if (rc) { dbg("configure_new_function failed %d\n", rc); index = 0; while (new_slot) { new_slot = cpqhp_slot_find(new_slot->bus, new_slot->device, index++); if (new_slot) cpqhp_return_board_resources(new_slot, resources); } return rc; } function++; stop_it = 0; /* The following loop skips to the next present function * and creates a board structure */ while ((function < max_functions) && (!stop_it)) { pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID); if (PCI_POSSIBLE_ERROR(ID)) { function++; } else { /* Setup slot structure. */ new_slot = cpqhp_slot_create(func->bus); if (new_slot == NULL) return 1; new_slot->bus = func->bus; new_slot->device = func->device; new_slot->function = function; new_slot->is_a_board = 1; new_slot->status = 0; stop_it++; } } } while (function < max_functions); dbg("returning from configure_new_device\n"); return 0; } /* * Configuration logic that involves the hotplug data structures and * their bookkeeping */ /** * configure_new_function - Configures the PCI header information of one device * @ctrl: pointer to controller structure * @func: pointer to function structure * @behind_bridge: 1 if this is a recursive call, 0 if not * @resources: pointer to set of resource lists * * Calls itself recursively for bridged devices. * Returns 0 if success. 
*/ static int configure_new_function(struct controller *ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources) { int cloop; u8 IRQ = 0; u8 temp_byte; u8 device; u8 class_code; u16 command; u16 temp_word; u32 temp_dword; u32 rc; u32 temp_register; u32 base; u32 ID; unsigned int devfn; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; struct pci_resource *hold_mem_node; struct pci_resource *hold_p_mem_node; struct pci_resource *hold_IO_node; struct pci_resource *hold_bus_node; struct irq_mapping irqs; struct pci_func *new_slot; struct pci_bus *pci_bus; struct resource_lists temp_resources; pci_bus = ctrl->pci_bus; pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check for Bridge */ rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte); if (rc) return rc; if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* set Primary bus */ dbg("set Primary bus = %d\n", func->bus); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus); if (rc) return rc; /* find range of buses to use */ dbg("find ranges of buses to use\n"); bus_node = get_max_resource(&(resources->bus_head), 1); /* If we don't have any buses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; /* set Secondary bus */ temp_byte = bus_node->base; dbg("set Secondary bus = %d\n", bus_node->base); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte); if (rc) return rc; /* set subordinate bus */ temp_byte = bus_node->base + bus_node->length - 1; dbg("set subordinate bus = %d\n", bus_node->base + bus_node->length - 1); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (rc) return rc; /* set subordinate Latency Timer and base Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte); if (rc) return rc; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); if (rc) return rc; /* set Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); if (rc) return rc; /* Setup the IO, memory, and prefetchable windows */ io_node = get_max_resource(&(resources->io_head), 0x1000); if (!io_node) return -ENOMEM; mem_node = get_max_resource(&(resources->mem_head), 0x100000); if (!mem_node) return -ENOMEM; p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000); if (!p_mem_node) return -ENOMEM; dbg("Setup the IO, memory, and prefetchable windows\n"); dbg("io_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", io_node->base, io_node->length, io_node->next); dbg("mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", mem_node->base, mem_node->length, mem_node->next); dbg("p_mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", p_mem_node->base, p_mem_node->length, p_mem_node->next); /* set up the IRQ info */ if (!resources->irqs) { irqs.barber_pole = 0; irqs.interrupt[0] = 0; irqs.interrupt[1] = 0; irqs.interrupt[2] = 0; irqs.interrupt[3] = 0; irqs.valid_INT = 0; } else { irqs.barber_pole = resources->irqs->barber_pole; irqs.interrupt[0] = resources->irqs->interrupt[0]; irqs.interrupt[1] = resources->irqs->interrupt[1]; irqs.interrupt[2] = resources->irqs->interrupt[2]; irqs.interrupt[3] = resources->irqs->interrupt[3]; irqs.valid_INT = resources->irqs->valid_INT; } /* set up resource lists that are now aligned on top and bottom * for anything behind the bridge. 
*/ temp_resources.bus_head = bus_node; temp_resources.io_head = io_node; temp_resources.mem_head = mem_node; temp_resources.p_mem_head = p_mem_node; temp_resources.irqs = &irqs; /* Make copies of the nodes we are going to pass down so that * if there is a problem,we can just use these to free resources */ hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); hold_p_mem_node = kmalloc(sizeof(*hold_p_mem_node), GFP_KERNEL); if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) { kfree(hold_bus_node); kfree(hold_IO_node); kfree(hold_mem_node); kfree(hold_p_mem_node); return 1; } memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource)); bus_node->base += 1; bus_node->length -= 1; bus_node->next = NULL; /* If we have IO resources copy them and fill in the bridge's * IO range registers */ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource)); io_node->next = NULL; /* set IO base and Limit registers */ temp_byte = io_node->base >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte); temp_byte = (io_node->base + io_node->length - 1) >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte); /* Copy the memory resources and fill in the bridge's memory * range registers. */ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource)); mem_node->next = NULL; /* set Mem base and Limit registers */ temp_word = mem_node->base >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = (mem_node->base + mem_node->length - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource)); p_mem_node->next = NULL; /* set Pre Mem base and Limit registers */ temp_word = p_mem_node->base >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); /* Adjust this to compensate for extra adjustment in first loop */ irqs.barber_pole--; rc = 0; /* Here we actually find the devices and configure them */ for (device = 0; (device <= 0x1F) && !rc; device++) { irqs.barber_pole = (irqs.barber_pole + 1) & 0x03; ID = 0xFFFFFFFF; pci_bus->number = hold_bus_node->base; pci_bus_read_config_dword(pci_bus, PCI_DEVFN(device, 0), 0x00, &ID); pci_bus->number = func->bus; if (!PCI_POSSIBLE_ERROR(ID)) { /* device present */ /* Setup slot structure. */ new_slot = cpqhp_slot_create(hold_bus_node->base); if (new_slot == NULL) { rc = -ENOMEM; continue; } new_slot->bus = hold_bus_node->base; new_slot->device = device; new_slot->function = 0; new_slot->is_a_board = 1; new_slot->status = 0; rc = configure_new_device(ctrl, new_slot, 1, &temp_resources); dbg("configure_new_device rc=0x%x\n", rc); } /* End of IF (device in slot?) 
*/ } /* End of FOR loop */ if (rc) goto free_and_out; /* save the interrupt routing information */ if (resources->irqs) { resources->irqs->interrupt[0] = irqs.interrupt[0]; resources->irqs->interrupt[1] = irqs.interrupt[1]; resources->irqs->interrupt[2] = irqs.interrupt[2]; resources->irqs->interrupt[3] = irqs.interrupt[3]; resources->irqs->valid_INT = irqs.valid_INT; } else if (!behind_bridge) { /* We need to hook up the interrupts here */ for (cloop = 0; cloop < 4; cloop++) { if (irqs.valid_INT & (0x01 << cloop)) { rc = cpqhp_set_irq(func->bus, func->device, cloop + 1, irqs.interrupt[cloop]); if (rc) goto free_and_out; } } /* end of for loop */ } /* Return unused bus resources * First use the temporary node to store information for * the board */ if (bus_node && temp_resources.bus_head) { hold_bus_node->length = bus_node->base - hold_bus_node->base; hold_bus_node->next = func->bus_head; func->bus_head = hold_bus_node; temp_byte = temp_resources.bus_head->base - 1; /* set subordinate bus */ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (temp_resources.bus_head->length == 0) { kfree(temp_resources.bus_head); temp_resources.bus_head = NULL; } else { return_resource(&(resources->bus_head), temp_resources.bus_head); } } /* If we have IO space available and there is some left, * return the unused portion */ if (hold_IO_node && temp_resources.io_head) { io_node = do_pre_bridge_resource_split(&(temp_resources.io_head), &hold_IO_node, 0x1000); /* Check if we were able to split something off */ if (io_node) { hold_IO_node->base = io_node->base + io_node->length; temp_byte = (hold_IO_node->base) >> 8; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_IO_BASE, temp_byte); return_resource(&(resources->io_head), io_node); } io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000); /* Check if we were able to split something off */ if (io_node) { /* First use the temporary node to store * information for the board */ hold_IO_node->length = io_node->base - hold_IO_node->base; /* If we used any, add it to the board's list */ if (hold_IO_node->length) { hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; temp_byte = (io_node->base - 1) >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte); return_resource(&(resources->io_head), io_node); } else { /* it doesn't need any IO */ temp_word = 0x0000; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_IO_LIMIT, temp_word); return_resource(&(resources->io_head), io_node); kfree(hold_IO_node); } } else { /* it used most of the range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } } else if (hold_IO_node) { /* it used the whole range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } /* If we have memory space available and there is some left, * return the unused portion */ if (hold_mem_node && temp_resources.mem_head) { mem_node = do_pre_bridge_resource_split(&(temp_resources. 
mem_head), &hold_mem_node, 0x100000); /* Check if we were able to split something off */ if (mem_node) { hold_mem_node->base = mem_node->base + mem_node->length; temp_word = (hold_mem_node->base) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); return_resource(&(resources->mem_head), mem_node); } mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000); /* Check if we were able to split something off */ if (mem_node) { /* First use the temporary node to store * information for the board */ hold_mem_node->length = mem_node->base - hold_mem_node->base; if (hold_mem_node->length) { hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; /* configure end address */ temp_word = (mem_node->base - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); /* Return unused resources to the pool */ return_resource(&(resources->mem_head), mem_node); } else { /* it doesn't need any Mem */ temp_word = 0x0000; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); return_resource(&(resources->mem_head), mem_node); kfree(hold_mem_node); } } else { /* it used most of the range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } } else if (hold_mem_node) { /* it used the whole range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } /* If we have prefetchable memory space available and there * is some left at the end, return the unused portion */ if (temp_resources.p_mem_head) { p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head), &hold_p_mem_node, 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { hold_p_mem_node->base = p_mem_node->base + p_mem_node->length; temp_word = (hold_p_mem_node->base) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } p_mem_node = do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { /* First use the temporary node to store * information for the board */ hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base; /* If we used any, add it to the board's list */ if (hold_p_mem_node->length) { hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; temp_word = (p_mem_node->base - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } else { /* it doesn't need any PMem */ temp_word = 0x0000; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); kfree(hold_p_mem_node); } } else { /* it used the most of the range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } } else if (hold_p_mem_node) { /* it used the whole range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } /* We should be configuring an IRQ and the bridge's base address * registers if it needs them. 
Although we have never seen such * a device */ /* enable card */ command = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command); /* set Bridge Control Register */ command = 0x07; /* = PCI_BRIDGE_CTL_PARITY | * PCI_BRIDGE_CTL_SERR | * PCI_BRIDGE_CTL_NO_ISA */ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command); } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) { /* Standard device */ rc = pci_bus_read_config_byte(pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display (video) adapter (not supported) */ return DEVICE_TYPE_NOT_SUPPORTED; } /* Figure out IO and memory needs */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; dbg("CND: bus=%d, devfn=%d, offset=%d\n", pci_bus->number, devfn, cloop); rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register); rc = pci_bus_read_config_dword(pci_bus, devfn, cloop, &temp_register); dbg("CND: base = 0x%x\n", temp_register); if (temp_register) { /* If this register is implemented */ if ((temp_register & 0x03L) == 0x01) { /* Map IO */ /* set base = amount of IO space */ base = temp_register & 0xFFFFFFFC; base = ~base + 1; dbg("CND: length = 0x%x\n", base); io_node = get_io_resource(&(resources->io_head), base); if (!io_node) return -ENOMEM; dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n", io_node->base, io_node->length, io_node->next); dbg("func (%p) io_head (%p)\n", func, func->io_head); /* allocate the resource to the board */ base = io_node->base; io_node->next = func->io_head; func->io_head = io_node; } else if ((temp_register & 0x0BL) == 0x08) { /* Map prefetchable memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); p_mem_node = get_resource(&(resources->p_mem_head), base); /* allocate the resource to the board */ if (p_mem_node) { base = p_mem_node->base; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x00) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else { /* Reserved bits or requesting space below 1M */ return NOT_ENOUGH_RESOURCES; } rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); /* Check for 64-bit base */ if ((temp_register & 0x07L) == 0x04) { cloop += 4; /* Upper 32 bits of address always zero * on today's systems */ /* FIXME this is probably not true on * Alpha and ia64??? 
*/ base = 0; rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); } } } /* End of base register loop */ if (cpqhp_legacy_mode) { /* Figure out which interrupt pin this function uses */ rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_INTERRUPT_PIN, &temp_byte); /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's * already mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { /* We have to share with something already set up */ IRQ = resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03]; } else { /* Program IRQ based on card type */ rc = pci_bus_read_config_byte(pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_STORAGE) IRQ = cpqhp_disk_irq; else IRQ = cpqhp_nic_irq; } /* IRQ Line */ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_INTERRUPT_LINE, IRQ); } if (!behind_bridge) { rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); if (rc) return 1; } else { /* TBD - this code may also belong in the other clause * of this If statement */ resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03] = IRQ; resources->irqs->valid_INT |= 0x01 << (temp_byte + resources->irqs->barber_pole - 1) & 0x03; } /* Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); /* Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); /* disable ROM base Address */ temp_dword = 0x00L; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_ROM_ADDRESS, temp_dword); /* enable card */ temp_word = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, temp_word); } else { /* End of Not-A-Bridge else */ /* It's some strange type of PCI adapter (Cardbus?) */ return DEVICE_TYPE_NOT_SUPPORTED; } func->configured = 1; return 0; free_and_out: cpqhp_destroy_resource_list(&temp_resources); return_resource(&(resources->bus_head), hold_bus_node); return_resource(&(resources->io_head), hold_IO_node); return_resource(&(resources->mem_head), hold_mem_node); return_resource(&(resources->p_mem_head), hold_p_mem_node); return rc; }
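/*
 * Illustrative sketch, not part of the driver: configure_new_function()
 * above sizes each BAR by writing all ones to it, reading the register
 * back, masking off the type bits and taking the two's complement of the
 * result.  The helper below shows that arithmetic in isolation; the name
 * decode_bar_size() is invented for this example.
 */
static u32 decode_bar_size(u32 readback)
{
	u32 base;

	if (readback & 0x01)		/* I/O BAR: bits 1:0 are type bits */
		base = readback & 0xFFFFFFFC;
	else				/* Memory BAR: bits 3:0 are type bits */
		base = readback & 0xFFFFFFF0;

	return ~base + 1;		/* e.g. 0xFFFFF000 -> 0x1000 (4 KiB) */
}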
linux-master
drivers/pci/hotplug/cpqphp_ctrl.c
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Hot Plug Controller Driver for RPA-compliant PPC64 platform. * Copyright (C) 2003 Linda Xie <[email protected]> * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/of.h> #include <linux/pci.h> #include <linux/string.h> #include <asm/pci-bridge.h> #include <asm/rtas.h> #include <asm/machdep.h> #include "../pci.h" /* for pci_add_new_bus */ #include "rpaphp.h" /* * RTAS call get-sensor-state(DR_ENTITY_SENSE) return values as per PAPR: * -- generic return codes --- * -1: Hardware Error * -2: RTAS_BUSY * -3: Invalid sensor. RTAS Parameter Error. * -- rtas_get_sensor function specific return codes --- * -9000: Need DR entity to be powered up and unisolated before RTAS call * -9001: Need DR entity to be powered up, but not unisolated, before RTAS call * -9002: DR entity unusable * 990x: Extended delay - where x is a number in the range of 0-5 */ #define RTAS_SLOT_UNISOLATED -9000 #define RTAS_SLOT_NOT_UNISOLATED -9001 #define RTAS_SLOT_NOT_USABLE -9002 static int rtas_get_sensor_errno(int rtas_rc) { switch (rtas_rc) { case 0: /* Success case */ return 0; case RTAS_SLOT_UNISOLATED: case RTAS_SLOT_NOT_UNISOLATED: return -EFAULT; case RTAS_SLOT_NOT_USABLE: return -ENODEV; case RTAS_BUSY: case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX: return -EBUSY; default: return rtas_error_rc(rtas_rc); } } /* * get_adapter_status() can be called by the EEH handler during EEH recovery. * On certain PHB failures, the RTAS call rtas_call(get-sensor-state) returns * extended busy error (9902) until PHB is recovered by pHyp. The RTAS call * interface rtas_get_sensor() loops over the RTAS call on extended delay * return code (9902) until the return value is either success (0) or error * (-1). This causes the EEH handler to get stuck for ~6 seconds before it * could notify that the PCI error has been detected and stop any active * operations. This sometimes causes EEH recovery to fail. To avoid this issue, * invoke rtas_call(get-sensor-state) directly if the respective PE is in EEH * recovery state and return -EBUSY error based on RTAS return status. This * will help the EEH handler to notify the driver about the PCI error * immediately and successfully proceed with EEH recovery steps. */ static int __rpaphp_get_sensor_state(struct slot *slot, int *state) { int rc; int token = rtas_token("get-sensor-state"); struct pci_dn *pdn; struct eeh_pe *pe; struct pci_controller *phb = PCI_DN(slot->dn)->phb; if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT; /* * Fallback to existing method for empty slot or PE isn't in EEH * recovery. */ pdn = list_first_entry_or_null(&PCI_DN(phb->dn)->child_list, struct pci_dn, list); if (!pdn) goto fallback; pe = eeh_dev_to_pe(pdn->edev); if (pe && (pe->state & EEH_PE_RECOVERING)) { rc = rtas_call(token, 2, 2, state, DR_ENTITY_SENSE, slot->index); return rtas_get_sensor_errno(rc); } fallback: return rtas_get_sensor(DR_ENTITY_SENSE, slot->index, state); } int rpaphp_get_sensor_state(struct slot *slot, int *state) { int rc; int setlevel; rc = __rpaphp_get_sensor_state(slot, state); if (rc < 0) { if (rc == -EFAULT || rc == -EEXIST) { dbg("%s: slot must be power up to get sensor-state\n", __func__); /* some slots have to be powered up * before get-sensor will succeed. 
*/ rc = rtas_set_power_level(slot->power_domain, POWER_ON, &setlevel); if (rc < 0) { dbg("%s: power on slot[%s] failed rc=%d.\n", __func__, slot->name, rc); } else { rc = __rpaphp_get_sensor_state(slot, state); } } else if (rc == -ENODEV) info("%s: slot is unusable\n", __func__); else err("%s failed to get sensor state\n", __func__); } return rc; } /** * rpaphp_enable_slot - record slot state, config pci device * @slot: target &slot * * Initialize values in the slot structure to indicate if there is a pci card * plugged into the slot. If the slot is not empty, run the pcibios routine * to get pcibios stuff correctly set up. */ int rpaphp_enable_slot(struct slot *slot) { int rc, level, state; struct pci_bus *bus; slot->state = EMPTY; /* Find out if the power is turned on for the slot */ rc = rtas_get_power_level(slot->power_domain, &level); if (rc) return rc; /* Figure out if there is an adapter in the slot */ rc = rpaphp_get_sensor_state(slot, &state); if (rc) return rc; bus = pci_find_bus_by_node(slot->dn); if (!bus) { err("%s: no pci_bus for dn %pOF\n", __func__, slot->dn); return -EINVAL; } slot->bus = bus; slot->pci_devs = &bus->devices; /* if there's an adapter in the slot, go add the pci devices */ if (state == PRESENT) { slot->state = NOT_CONFIGURED; /* non-empty slot has to have child */ if (!slot->dn->child) { err("%s: slot[%s]'s device_node doesn't have child for adapter\n", __func__, slot->name); return -EINVAL; } if (list_empty(&bus->devices)) { pseries_eeh_init_edev_recursive(PCI_DN(slot->dn)); pci_hp_add_devices(bus); } if (!list_empty(&bus->devices)) { slot->state = CONFIGURED; } if (rpaphp_debug) { struct pci_dev *dev; dbg("%s: pci_devs of slot[%pOF]\n", __func__, slot->dn); list_for_each_entry(dev, &bus->devices, bus_list) dbg("\t%s\n", pci_name(dev)); } } return 0; }
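/*
 * Illustrative sketch, not part of the driver: a caller such as a hotplug
 * enable_slot() op would typically consult rpaphp_get_sensor_state() before
 * touching the bus, roughly as below.  PRESENT comes from rpaphp.h; the
 * helper name slot_has_adapter() is invented for this example.
 */
static int slot_has_adapter(struct slot *slot)
{
	int state, rc;

	rc = rpaphp_get_sensor_state(slot, &state);
	if (rc)
		return rc;		/* -EBUSY, -ENODEV, ... from RTAS */

	return state == PRESENT;	/* 1: adapter plugged in, 0: empty */
}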
linux-master
drivers/pci/hotplug/rpaphp_pci.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2014 Hauke Mehrtens <[email protected]> * Copyright (C) 2015 Broadcom Corporation */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/msi.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/mbus.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqchip/arm-gic-v3.h> #include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include "pcie-iproc.h" #define EP_PERST_SOURCE_SELECT_SHIFT 2 #define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) #define EP_MODE_SURVIVE_PERST_SHIFT 1 #define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) #define RC_PCIE_RST_OUTPUT_SHIFT 0 #define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) #define PAXC_RESET_MASK 0x7f #define GIC_V3_CFG_SHIFT 0 #define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) #define MSI_ENABLE_CFG_SHIFT 0 #define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) #define CFG_IND_ADDR_MASK 0x00001ffc #define CFG_ADDR_REG_NUM_MASK 0x00000ffc #define CFG_ADDR_CFG_TYPE_1 1 #define SYS_RC_INTX_MASK 0xf #define PCIE_PHYLINKUP_SHIFT 3 #define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) #define PCIE_DL_ACTIVE_SHIFT 2 #define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) #define APB_ERR_EN_SHIFT 0 #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) #define CFG_RD_SUCCESS 0 #define CFG_RD_UR 1 #define CFG_RD_CRS 2 #define CFG_RD_CA 3 #define CFG_RETRY_STATUS 0xffff0001 #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ /* derive the enum index of the outbound/inbound mapping registers */ #define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) /* * Maximum number of outbound mapping window sizes that can be supported by any * OARR/OMAP mapping pair */ #define MAX_NUM_OB_WINDOW_SIZES 4 #define OARR_VALID_SHIFT 0 #define OARR_VALID BIT(OARR_VALID_SHIFT) #define OARR_SIZE_CFG_SHIFT 1 /* * Maximum number of inbound mapping region sizes that can be supported by an * IARR */ #define MAX_NUM_IB_REGION_SIZES 9 #define IMAP_VALID_SHIFT 0 #define IMAP_VALID BIT(IMAP_VALID_SHIFT) #define IPROC_PCI_PM_CAP 0x48 #define IPROC_PCI_PM_CAP_MASK 0xffff #define IPROC_PCI_EXP_CAP 0xac #define IPROC_PCIE_REG_INVALID 0xffff /** * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific * parameters * @window_sizes: list of supported outbound mapping window sizes in MB * @nr_sizes: number of supported outbound mapping window sizes */ struct iproc_pcie_ob_map { resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; unsigned int nr_sizes; }; static const struct iproc_pcie_ob_map paxb_ob_map[] = { { /* OARR0/OMAP0 */ .window_sizes = { 128, 256 }, .nr_sizes = 2, }, { /* OARR1/OMAP1 */ .window_sizes = { 128, 256 }, .nr_sizes = 2, }, }; static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { { /* OARR0/OMAP0 */ .window_sizes = { 128, 256 }, .nr_sizes = 2, }, { /* OARR1/OMAP1 */ .window_sizes = { 128, 256 }, .nr_sizes = 2, }, { /* OARR2/OMAP2 */ .window_sizes = { 128, 256, 512, 1024 }, .nr_sizes = 4, }, { /* OARR3/OMAP3 */ .window_sizes = { 128, 256, 512, 1024 }, .nr_sizes = 4, }, }; /** * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type * @IPROC_PCIE_IB_MAP_MEM: DDR memory * @IPROC_PCIE_IB_MAP_IO: device I/O memory * @IPROC_PCIE_IB_MAP_INVALID: invalid or unused */ enum iproc_pcie_ib_map_type { IPROC_PCIE_IB_MAP_MEM = 0, IPROC_PCIE_IB_MAP_IO, IPROC_PCIE_IB_MAP_INVALID }; /** * struct 
iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific * parameters * @type: inbound mapping region type * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or * SZ_1G * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or * GB, depending on the size unit * @nr_sizes: number of supported inbound mapping region sizes * @nr_windows: number of supported inbound mapping windows for the region * @imap_addr_offset: register offset between the upper and lower 32-bit * IMAP address registers * @imap_window_offset: register offset between each IMAP window */ struct iproc_pcie_ib_map { enum iproc_pcie_ib_map_type type; unsigned int size_unit; resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; unsigned int nr_sizes; unsigned int nr_windows; u16 imap_addr_offset; u16 imap_window_offset; }; static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { { /* IARR0/IMAP0 */ .type = IPROC_PCIE_IB_MAP_IO, .size_unit = SZ_1K, .region_sizes = { 32 }, .nr_sizes = 1, .nr_windows = 8, .imap_addr_offset = 0x40, .imap_window_offset = 0x4, }, { /* IARR1/IMAP1 */ .type = IPROC_PCIE_IB_MAP_MEM, .size_unit = SZ_1M, .region_sizes = { 8 }, .nr_sizes = 1, .nr_windows = 8, .imap_addr_offset = 0x4, .imap_window_offset = 0x8, }, { /* IARR2/IMAP2 */ .type = IPROC_PCIE_IB_MAP_MEM, .size_unit = SZ_1M, .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384 }, .nr_sizes = 9, .nr_windows = 1, .imap_addr_offset = 0x4, .imap_window_offset = 0x8, }, { /* IARR3/IMAP3 */ .type = IPROC_PCIE_IB_MAP_MEM, .size_unit = SZ_1G, .region_sizes = { 1, 2, 4, 8, 16, 32 }, .nr_sizes = 6, .nr_windows = 8, .imap_addr_offset = 0x4, .imap_window_offset = 0x8, }, { /* IARR4/IMAP4 */ .type = IPROC_PCIE_IB_MAP_MEM, .size_unit = SZ_1G, .region_sizes = { 32, 64, 128, 256, 512 }, .nr_sizes = 5, .nr_windows = 8, .imap_addr_offset = 0x4, .imap_window_offset = 0x8, }, }; /* * iProc PCIe host registers */ enum iproc_pcie_reg { /* clock/reset signal control */ IPROC_PCIE_CLK_CTRL = 0, /* * To allow MSI to be steered to an external MSI controller (e.g., ARM * GICv3 ITS) */ IPROC_PCIE_MSI_GIC_MODE, /* * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the * window where the MSI posted writes are written, for the writes to be * interpreted as MSI writes. */ IPROC_PCIE_MSI_BASE_ADDR, IPROC_PCIE_MSI_WINDOW_SIZE, /* * To hold the address of the register where the MSI writes are * programmed. When ARM GICv3 ITS is used, this should be programmed * with the address of the GITS_TRANSLATER register. 
*/ IPROC_PCIE_MSI_ADDR_LO, IPROC_PCIE_MSI_ADDR_HI, /* enable MSI */ IPROC_PCIE_MSI_EN_CFG, /* allow access to root complex configuration space */ IPROC_PCIE_CFG_IND_ADDR, IPROC_PCIE_CFG_IND_DATA, /* allow access to device configuration space */ IPROC_PCIE_CFG_ADDR, IPROC_PCIE_CFG_DATA, /* enable INTx */ IPROC_PCIE_INTX_EN, /* outbound address mapping */ IPROC_PCIE_OARR0, IPROC_PCIE_OMAP0, IPROC_PCIE_OARR1, IPROC_PCIE_OMAP1, IPROC_PCIE_OARR2, IPROC_PCIE_OMAP2, IPROC_PCIE_OARR3, IPROC_PCIE_OMAP3, /* inbound address mapping */ IPROC_PCIE_IARR0, IPROC_PCIE_IMAP0, IPROC_PCIE_IARR1, IPROC_PCIE_IMAP1, IPROC_PCIE_IARR2, IPROC_PCIE_IMAP2, IPROC_PCIE_IARR3, IPROC_PCIE_IMAP3, IPROC_PCIE_IARR4, IPROC_PCIE_IMAP4, /* config read status */ IPROC_PCIE_CFG_RD_STATUS, /* link status */ IPROC_PCIE_LINK_STATUS, /* enable APB error for unsupported requests */ IPROC_PCIE_APB_ERR_EN, /* total number of core registers */ IPROC_PCIE_MAX_NUM_REG, }; /* iProc PCIe PAXB BCMA registers */ static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, [IPROC_PCIE_CFG_ADDR] = 0x1f8, [IPROC_PCIE_CFG_DATA] = 0x1fc, [IPROC_PCIE_INTX_EN] = 0x330, [IPROC_PCIE_LINK_STATUS] = 0xf0c, }; /* iProc PCIe PAXB registers */ static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, [IPROC_PCIE_CFG_ADDR] = 0x1f8, [IPROC_PCIE_CFG_DATA] = 0x1fc, [IPROC_PCIE_INTX_EN] = 0x330, [IPROC_PCIE_OARR0] = 0xd20, [IPROC_PCIE_OMAP0] = 0xd40, [IPROC_PCIE_OARR1] = 0xd28, [IPROC_PCIE_OMAP1] = 0xd48, [IPROC_PCIE_LINK_STATUS] = 0xf0c, [IPROC_PCIE_APB_ERR_EN] = 0xf40, }; /* iProc PCIe PAXB v2 registers */ static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, [IPROC_PCIE_CFG_ADDR] = 0x1f8, [IPROC_PCIE_CFG_DATA] = 0x1fc, [IPROC_PCIE_INTX_EN] = 0x330, [IPROC_PCIE_OARR0] = 0xd20, [IPROC_PCIE_OMAP0] = 0xd40, [IPROC_PCIE_OARR1] = 0xd28, [IPROC_PCIE_OMAP1] = 0xd48, [IPROC_PCIE_OARR2] = 0xd60, [IPROC_PCIE_OMAP2] = 0xd68, [IPROC_PCIE_OARR3] = 0xdf0, [IPROC_PCIE_OMAP3] = 0xdf8, [IPROC_PCIE_IARR0] = 0xd00, [IPROC_PCIE_IMAP0] = 0xc00, [IPROC_PCIE_IARR1] = 0xd08, [IPROC_PCIE_IMAP1] = 0xd70, [IPROC_PCIE_IARR2] = 0xd10, [IPROC_PCIE_IMAP2] = 0xcc0, [IPROC_PCIE_IARR3] = 0xe00, [IPROC_PCIE_IMAP3] = 0xe08, [IPROC_PCIE_IARR4] = 0xe68, [IPROC_PCIE_IMAP4] = 0xe70, [IPROC_PCIE_CFG_RD_STATUS] = 0xee0, [IPROC_PCIE_LINK_STATUS] = 0xf0c, [IPROC_PCIE_APB_ERR_EN] = 0xf40, }; /* iProc PCIe PAXC v1 registers */ static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, [IPROC_PCIE_CFG_ADDR] = 0x1f8, [IPROC_PCIE_CFG_DATA] = 0x1fc, }; /* iProc PCIe PAXC v2 registers */ static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_MSI_GIC_MODE] = 0x050, [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, [IPROC_PCIE_MSI_ADDR_HI] = 0x080, [IPROC_PCIE_MSI_EN_CFG] = 0x09c, [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, [IPROC_PCIE_CFG_ADDR] = 0x1f8, [IPROC_PCIE_CFG_DATA] = 0x1fc, }; /* * List of device IDs of controllers that have corrupted capability list that * require SW fixup */ static const u16 iproc_pcie_corrupt_cap_did[] = { 0x16cd, 0x16f0, 0xd802, 0xd804 }; 
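/*
 * Illustrative note, not part of the driver: MAP_REG() above relies on the
 * OARR/OMAP (and IARR/IMAP) enum entries being interleaved, so stepping the
 * window index by one advances the enum value by two.  The function below is
 * only a sketch of how that assumption could be checked at build time; the
 * name iproc_pcie_map_reg_selftest() is invented for this example.
 */
static inline void iproc_pcie_map_reg_selftest(void)
{
	BUILD_BUG_ON(MAP_REG(IPROC_PCIE_OARR0, 1) != IPROC_PCIE_OARR1);
	BUILD_BUG_ON(MAP_REG(IPROC_PCIE_OMAP0, 3) != IPROC_PCIE_OMAP3);
	BUILD_BUG_ON(MAP_REG(IPROC_PCIE_IARR0, 4) != IPROC_PCIE_IARR4);
}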
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) { struct iproc_pcie *pcie = bus->sysdata; return pcie; } static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset) { return !!(reg_offset == IPROC_PCIE_REG_INVALID); } static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie, enum iproc_pcie_reg reg) { return pcie->reg_offsets[reg]; } static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, enum iproc_pcie_reg reg) { u16 offset = iproc_pcie_reg_offset(pcie, reg); if (iproc_pcie_reg_is_invalid(offset)) return 0; return readl(pcie->base + offset); } static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, enum iproc_pcie_reg reg, u32 val) { u16 offset = iproc_pcie_reg_offset(pcie, reg); if (iproc_pcie_reg_is_invalid(offset)) return; writel(val, pcie->base + offset); } /* * APB error forwarding can be disabled during access of configuration * registers of the endpoint device, to prevent unsupported requests * (typically seen during enumeration with multi-function devices) from * triggering a system exception. */ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, bool disable) { struct iproc_pcie *pcie = iproc_data(bus); u32 val; if (bus->number && pcie->has_apb_err_disable) { val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN); if (disable) val &= ~APB_ERR_EN; else val |= APB_ERR_EN; iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val); } } static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, unsigned int busno, unsigned int devfn, int where) { u16 offset; u32 val; /* EP device access */ val = ALIGN_DOWN(PCIE_ECAM_OFFSET(busno, devfn, where), 4) | CFG_ADDR_CFG_TYPE_1; iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); if (iproc_pcie_reg_is_invalid(offset)) return NULL; return (pcie->base + offset); } static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie, void __iomem *cfg_data_p) { int timeout = CFG_RETRY_STATUS_TIMEOUT_US; unsigned int data; u32 status; /* * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only * affects config reads of the Vendor ID. For config writes or any * other config reads, the Root may automatically reissue the * configuration request again as a new request. * * For config reads, this hardware returns CFG_RETRY_STATUS data * when it receives a CRS completion, regardless of the address of * the read or the CRS Software Visibility Enable bit. As a * partial workaround for this, we retry in software any read that * returns CFG_RETRY_STATUS. * * Note that a non-Vendor ID config register may have a value of * CFG_RETRY_STATUS. If we read that, we can't distinguish it from * a CRS completion, so we will incorrectly retry the read and * eventually return the wrong data (0xffffffff). */ data = readl(cfg_data_p); while (data == CFG_RETRY_STATUS && timeout--) { /* * CRS state is set in CFG_RD status register * This will handle the case where CFG_RETRY_STATUS is * valid config data. 
*/ status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS); if (status != CFG_RD_CRS) return data; udelay(1); data = readl(cfg_data_p); } if (data == CFG_RETRY_STATUS) data = 0xffffffff; return data; } static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val) { u32 i, dev_id; switch (where & ~0x3) { case PCI_VENDOR_ID: dev_id = *val >> 16; /* * Activate fixup for those controllers that have corrupted * capability list registers */ for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++) if (dev_id == iproc_pcie_corrupt_cap_did[i]) pcie->fix_paxc_cap = true; break; case IPROC_PCI_PM_CAP: if (pcie->fix_paxc_cap) { /* advertise PM, force next capability to PCIe */ *val &= ~IPROC_PCI_PM_CAP_MASK; *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM; } break; case IPROC_PCI_EXP_CAP: if (pcie->fix_paxc_cap) { /* advertise root port, version 2, terminate here */ *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 | PCI_CAP_ID_EXP; } break; case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL: /* Don't advertise CRS SV support */ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); break; default: break; } } static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct iproc_pcie *pcie = iproc_data(bus); unsigned int busno = bus->number; void __iomem *cfg_data_p; unsigned int data; int ret; /* root complex access */ if (busno == 0) { ret = pci_generic_config_read32(bus, devfn, where, size, val); if (ret == PCIBIOS_SUCCESSFUL) iproc_pcie_fix_cap(pcie, where, val); return ret; } cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where); if (!cfg_data_p) return PCIBIOS_DEVICE_NOT_FOUND; data = iproc_pcie_cfg_retry(pcie, cfg_data_p); *val = data; if (size <= 2) *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); /* * For PAXC and PAXCv2, the total number of PFs that one can enumerate * depends on the firmware configuration. Unfortunately, due to an ASIC * bug, unconfigured PFs cannot be properly hidden from the root * complex. 
As a result, write access to these PFs will cause bus lock * up on the embedded processor * * Since all unconfigured PFs are left with an incorrect, staled device * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those access * early here and reject them all */ #define DEVICE_ID_MASK 0xffff0000 #define DEVICE_ID_SHIFT 16 if (pcie->rej_unconfig_pf && (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID) if ((*val & DEVICE_ID_MASK) == (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT)) return PCIBIOS_FUNC_NOT_SUPPORTED; return PCIBIOS_SUCCESSFUL; } /* * Note access to the configuration registers are protected at the higher layer * by 'pci_lock' in drivers/pci/access.c */ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, int busno, unsigned int devfn, int where) { u16 offset; /* root complex access */ if (busno == 0) { if (PCIE_ECAM_DEVFN(devfn) > 0) return NULL; iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, where & CFG_IND_ADDR_MASK); offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); if (iproc_pcie_reg_is_invalid(offset)) return NULL; else return (pcie->base + offset); } return iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where); } static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, unsigned int devfn, int where) { return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn, where); } static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, unsigned int devfn, int where, int size, u32 *val) { void __iomem *addr; addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; *val = readl(addr); if (size <= 2) *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); return PCIBIOS_SUCCESSFUL; } static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, unsigned int devfn, int where, int size, u32 val) { void __iomem *addr; u32 mask, tmp; addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; if (size == 4) { writel(val, addr); return PCIBIOS_SUCCESSFUL; } mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { int ret; struct iproc_pcie *pcie = iproc_data(bus); iproc_pcie_apb_err_disable(bus, true); if (pcie->iproc_cfg_read) ret = iproc_pcie_config_read(bus, devfn, where, size, val); else ret = pci_generic_config_read32(bus, devfn, where, size, val); iproc_pcie_apb_err_disable(bus, false); return ret; } static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { int ret; iproc_pcie_apb_err_disable(bus, true); ret = pci_generic_config_write32(bus, devfn, where, size, val); iproc_pcie_apb_err_disable(bus, false); return ret; } static struct pci_ops iproc_pcie_ops = { .map_bus = iproc_pcie_bus_map_cfg_bus, .read = iproc_pcie_config_read32, .write = iproc_pcie_config_write32, }; static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert) { u32 val; /* * PAXC and the internal emulated endpoint device downstream should not * be reset. If firmware has been loaded on the endpoint device at an * earlier boot stage, reset here causes issues. 
*/ if (pcie->ep_is_internal) return; if (assert) { val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & ~RC_PCIE_RST_OUTPUT; iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); udelay(250); } else { val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); val |= RC_PCIE_RST_OUTPUT; iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); msleep(100); } } int iproc_pcie_shutdown(struct iproc_pcie *pcie) { iproc_pcie_perst_ctrl(pcie, true); msleep(500); return 0; } EXPORT_SYMBOL_GPL(iproc_pcie_shutdown); static int iproc_pcie_check_link(struct iproc_pcie *pcie) { struct device *dev = pcie->dev; u32 hdr_type, link_ctrl, link_status, class, val; bool link_is_active = false; /* * PAXC connects to emulated endpoint devices directly and does not * have a Serdes. Therefore skip the link detection logic here. */ if (pcie->ep_is_internal) return 0; val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { dev_err(dev, "PHY or data link is INACTIVE!\n"); return -ENODEV; } /* make sure we are not in EP mode */ iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); return -EFAULT; } /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */ #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c #define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, &class); class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK; class |= PCI_CLASS_BRIDGE_PCI_NORMAL; iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, class); /* check link status to see if link is active */ iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, 2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) link_is_active = true; if (!link_is_active) { /* try GEN 1 link speed */ #define PCI_TARGET_LINK_SPEED_MASK 0xf #define PCI_TARGET_LINK_SPEED_GEN2 0x2 #define PCI_TARGET_LINK_SPEED_GEN1 0x1 iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, 4, &link_ctrl); if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == PCI_TARGET_LINK_SPEED_GEN2) { link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; iproc_pci_raw_config_write32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, 4, link_ctrl); msleep(100); iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, 2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) link_is_active = true; } } dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN"); return link_is_active ? 0 : -ENODEV; } static void iproc_pcie_enable(struct iproc_pcie *pcie) { iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); } static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, int window_idx) { u32 val; val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); return !!(val & OARR_VALID); } static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, int size_idx, u64 axi_addr, u64 pci_addr) { struct device *dev = pcie->dev; u16 oarr_offset, omap_offset; /* * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based * on window index. 
*/ oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, window_idx)); if (iproc_pcie_reg_is_invalid(oarr_offset) || iproc_pcie_reg_is_invalid(omap_offset)) return -EINVAL; /* * Program the OARR registers. The upper 32-bit OARR register is * always right after the lower 32-bit OARR register. */ writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | OARR_VALID, pcie->base + oarr_offset); writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4); /* now program the OMAP registers */ writel(lower_32_bits(pci_addr), pcie->base + omap_offset); writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", window_idx, oarr_offset, &axi_addr, &pci_addr); dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n", readl(pcie->base + oarr_offset), readl(pcie->base + oarr_offset + 4)); dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n", readl(pcie->base + omap_offset), readl(pcie->base + omap_offset + 4)); return 0; } /* * Some iProc SoCs require the SW to configure the outbound address mapping * * Outbound address translation: * * iproc_pcie_address = axi_address - axi_offset * OARR = iproc_pcie_address * OMAP = pci_addr * * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address */ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, u64 pci_addr, resource_size_t size) { struct iproc_pcie_ob *ob = &pcie->ob; struct device *dev = pcie->dev; int ret = -EINVAL, window_idx, size_idx; if (axi_addr < ob->axi_offset) { dev_err(dev, "axi address %pap less than offset %pap\n", &axi_addr, &ob->axi_offset); return -EINVAL; } /* * Translate the AXI address to the internal address used by the iProc * PCIe core before programming the OARR */ axi_addr -= ob->axi_offset; /* iterate through all OARR/OMAP mapping windows */ for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { const struct iproc_pcie_ob_map *ob_map = &pcie->ob_map[window_idx]; /* * If current outbound window is already in use, move on to the * next one. */ if (iproc_pcie_ob_is_valid(pcie, window_idx)) continue; /* * Iterate through all supported window sizes within the * OARR/OMAP pair to find a match. Go through the window sizes * in a descending order. */ for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0; size_idx--) { resource_size_t window_size = ob_map->window_sizes[size_idx] * SZ_1M; /* * Keep iterating until we reach the last window and * with the minimal window size at index zero. In this * case, we take a compromise by mapping it using the * minimum window size that can be supported */ if (size < window_size) { if (size_idx > 0 || window_idx > 0) continue; /* * For the corner case of reaching the minimal * window size that can be supported on the * last window */ axi_addr = ALIGN_DOWN(axi_addr, window_size); pci_addr = ALIGN_DOWN(pci_addr, window_size); size = window_size; } if (!IS_ALIGNED(axi_addr, window_size) || !IS_ALIGNED(pci_addr, window_size)) { dev_err(dev, "axi %pap or pci %pap not aligned\n", &axi_addr, &pci_addr); return -EINVAL; } /* * Match found! Program both OARR and OMAP and mark * them as a valid entry. */ ret = iproc_pcie_ob_write(pcie, window_idx, size_idx, axi_addr, pci_addr); if (ret) goto err_ob; size -= window_size; if (size == 0) return 0; /* * If we are here, we are done with the current window, * but not yet finished all mappings. Need to move on * to the next window. 
*/ axi_addr += window_size; pci_addr += window_size; break; } } err_ob: dev_err(dev, "unable to configure outbound mapping\n"); dev_err(dev, "axi %pap, axi offset %pap, pci %pap, res size %pap\n", &axi_addr, &ob->axi_offset, &pci_addr, &size); return ret; } static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, struct list_head *resources) { struct device *dev = pcie->dev; struct resource_entry *window; int ret; resource_list_for_each_entry(window, resources) { struct resource *res = window->res; u64 res_type = resource_type(res); switch (res_type) { case IORESOURCE_IO: case IORESOURCE_BUS: break; case IORESOURCE_MEM: ret = iproc_pcie_setup_ob(pcie, res->start, res->start - window->offset, resource_size(res)); if (ret) return ret; break; default: dev_err(dev, "invalid resource %pR\n", res); return -EINVAL; } } return 0; } static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, int region_idx) { const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; u32 val; val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); return !!(val & (BIT(ib_map->nr_sizes) - 1)); } static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, enum iproc_pcie_ib_map_type type) { return !!(ib_map->type == type); } static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, int size_idx, int nr_windows, u64 axi_addr, u64 pci_addr, resource_size_t size) { struct device *dev = pcie->dev; const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; u16 iarr_offset, imap_offset; u32 val; int window_idx; iarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); imap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_IMAP0, region_idx)); if (iproc_pcie_reg_is_invalid(iarr_offset) || iproc_pcie_reg_is_invalid(imap_offset)) return -EINVAL; dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", region_idx, iarr_offset, &axi_addr, &pci_addr); /* * Program the IARR registers. The upper 32-bit IARR register is * always right after the lower 32-bit IARR register. */ writel(lower_32_bits(pci_addr) | BIT(size_idx), pcie->base + iarr_offset); writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n", readl(pcie->base + iarr_offset), readl(pcie->base + iarr_offset + 4)); /* * Now program the IMAP registers. Each IARR region may have one or * more IMAP windows. 
*/ size >>= ilog2(nr_windows); for (window_idx = 0; window_idx < nr_windows; window_idx++) { val = readl(pcie->base + imap_offset); val |= lower_32_bits(axi_addr) | IMAP_VALID; writel(val, pcie->base + imap_offset); writel(upper_32_bits(axi_addr), pcie->base + imap_offset + ib_map->imap_addr_offset); dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n", window_idx, readl(pcie->base + imap_offset), readl(pcie->base + imap_offset + ib_map->imap_addr_offset)); imap_offset += ib_map->imap_window_offset; axi_addr += size; } return 0; } static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, struct resource_entry *entry, enum iproc_pcie_ib_map_type type) { struct device *dev = pcie->dev; struct iproc_pcie_ib *ib = &pcie->ib; int ret; unsigned int region_idx, size_idx; u64 axi_addr = entry->res->start; u64 pci_addr = entry->res->start - entry->offset; resource_size_t size = resource_size(entry->res); /* iterate through all IARR mapping regions */ for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; /* * If current inbound region is already in use or not a * compatible type, move on to the next. */ if (iproc_pcie_ib_is_in_use(pcie, region_idx) || !iproc_pcie_ib_check_type(ib_map, type)) continue; /* iterate through all supported region sizes to find a match */ for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { resource_size_t region_size = ib_map->region_sizes[size_idx] * ib_map->size_unit; if (size != region_size) continue; if (!IS_ALIGNED(axi_addr, region_size) || !IS_ALIGNED(pci_addr, region_size)) { dev_err(dev, "axi %pap or pci %pap not aligned\n", &axi_addr, &pci_addr); return -EINVAL; } /* Match found! Program IARR and all IMAP windows. */ ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, ib_map->nr_windows, axi_addr, pci_addr, size); if (ret) goto err_ib; else return 0; } } ret = -EINVAL; err_ib: dev_err(dev, "unable to configure inbound mapping\n"); dev_err(dev, "axi %pap, pci %pap, res size %pap\n", &axi_addr, &pci_addr, &size); return ret; } static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) { struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct resource_entry *entry; int ret = 0; resource_list_for_each_entry(entry, &host->dma_ranges) { /* Each range entry corresponds to an inbound mapping region */ ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM); if (ret) break; } return ret; } static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie) { struct iproc_pcie_ib *ib = &pcie->ib; struct iproc_pcie_ob *ob = &pcie->ob; int idx; if (pcie->ep_is_internal) return; if (pcie->need_ob_cfg) { /* iterate through all OARR mapping regions */ for (idx = ob->nr_windows - 1; idx >= 0; idx--) { iproc_pcie_write_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, idx), 0); } } if (pcie->need_ib_cfg) { /* iterate through all IARR mapping regions */ for (idx = 0; idx < ib->nr_regions; idx++) { iproc_pcie_write_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, idx), 0); } } } static int iproce_pcie_get_msi(struct iproc_pcie *pcie, struct device_node *msi_node, u64 *msi_addr) { struct device *dev = pcie->dev; int ret; struct resource res; /* * Check if 'msi-map' points to ARM GICv3 ITS, which is the only * supported external MSI controller that requires steering. 
*/ if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) { dev_err(dev, "unable to find compatible MSI controller\n"); return -ENODEV; } /* derive GITS_TRANSLATER address from GICv3 */ ret = of_address_to_resource(msi_node, 0, &res); if (ret < 0) { dev_err(dev, "unable to obtain MSI controller resources\n"); return ret; } *msi_addr = res.start + GITS_TRANSLATER; return 0; } static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) { int ret; struct resource_entry entry; memset(&entry, 0, sizeof(entry)); entry.res = &entry.__res; msi_addr &= ~(SZ_32K - 1); entry.res->start = msi_addr; entry.res->end = msi_addr + SZ_32K - 1; ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO); return ret; } static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr, bool enable) { u32 val; if (!enable) { /* * Disable PAXC MSI steering. All write transfers will be * treated as non-MSI transfers */ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); val &= ~MSI_ENABLE_CFG; iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); return; } /* * Program bits [43:13] of address of GITS_TRANSLATER register into * bits [30:0] of the MSI base address register. In fact, in all iProc * based SoCs, all I/O register bases are well below the 32-bit * boundary, so we can safely assume bits [43:32] are always zeros. */ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR, (u32)(msi_addr >> 13)); /* use a default 8K window size */ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0); /* steering MSI to GICv3 ITS */ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE); val |= GIC_V3_CFG; iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val); /* * Program bits [43:2] of address of GITS_TRANSLATER register into the * iProc MSI address registers. */ msi_addr >>= 2; iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI, upper_32_bits(msi_addr)); iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO, lower_32_bits(msi_addr)); /* enable MSI */ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); val |= MSI_ENABLE_CFG; iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); } static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, struct device_node *msi_node) { struct device *dev = pcie->dev; int ret; u64 msi_addr; ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr); if (ret < 0) { dev_err(dev, "msi steering failed\n"); return ret; } switch (pcie->type) { case IPROC_PCIE_PAXB_V2: ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr); if (ret) return ret; break; case IPROC_PCIE_PAXC_V2: iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true); break; default: return -EINVAL; } return 0; } static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) { struct device_node *msi_node; int ret; /* * Either the "msi-parent" or the "msi-map" phandle needs to exist * for us to obtain the MSI node. */ msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); if (!msi_node) { const __be32 *msi_map = NULL; int len; u32 phandle; msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len); if (!msi_map) return -ENODEV; phandle = be32_to_cpup(msi_map + 1); msi_node = of_find_node_by_phandle(phandle); if (!msi_node) return -ENODEV; } /* * Certain revisions of the iProc PCIe controller require additional * configurations to steer the MSI writes towards an external MSI * controller. 
*/ if (pcie->need_msi_steer) { ret = iproc_pcie_msi_steer(pcie, msi_node); if (ret) goto out_put_node; } /* * If another MSI controller is being used, the call below should fail * but that is okay */ ret = iproc_msi_init(pcie, msi_node); out_put_node: of_node_put(msi_node); return ret; } static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) { iproc_msi_exit(pcie); } static int iproc_pcie_rev_init(struct iproc_pcie *pcie) { struct device *dev = pcie->dev; unsigned int reg_idx; const u16 *regs; switch (pcie->type) { case IPROC_PCIE_PAXB_BCMA: regs = iproc_pcie_reg_paxb_bcma; break; case IPROC_PCIE_PAXB: regs = iproc_pcie_reg_paxb; pcie->has_apb_err_disable = true; if (pcie->need_ob_cfg) { pcie->ob_map = paxb_ob_map; pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map); } break; case IPROC_PCIE_PAXB_V2: regs = iproc_pcie_reg_paxb_v2; pcie->iproc_cfg_read = true; pcie->has_apb_err_disable = true; if (pcie->need_ob_cfg) { pcie->ob_map = paxb_v2_ob_map; pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); } pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); pcie->ib_map = paxb_v2_ib_map; pcie->need_msi_steer = true; dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n", CFG_RETRY_STATUS); break; case IPROC_PCIE_PAXC: regs = iproc_pcie_reg_paxc; pcie->ep_is_internal = true; pcie->iproc_cfg_read = true; pcie->rej_unconfig_pf = true; break; case IPROC_PCIE_PAXC_V2: regs = iproc_pcie_reg_paxc_v2; pcie->ep_is_internal = true; pcie->iproc_cfg_read = true; pcie->rej_unconfig_pf = true; pcie->need_msi_steer = true; break; default: dev_err(dev, "incompatible iProc PCIe interface\n"); return -EINVAL; } pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG, sizeof(*pcie->reg_offsets), GFP_KERNEL); if (!pcie->reg_offsets) return -ENOMEM; /* go through the register table and populate all valid registers */ pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? IPROC_PCIE_REG_INVALID : regs[0]; for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++) pcie->reg_offsets[reg_idx] = regs[reg_idx] ? 
regs[reg_idx] : IPROC_PCIE_REG_INVALID; return 0; } int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { struct device *dev; int ret; struct pci_dev *pdev; struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); dev = pcie->dev; ret = iproc_pcie_rev_init(pcie); if (ret) { dev_err(dev, "unable to initialize controller parameters\n"); return ret; } ret = phy_init(pcie->phy); if (ret) { dev_err(dev, "unable to initialize PCIe PHY\n"); return ret; } ret = phy_power_on(pcie->phy); if (ret) { dev_err(dev, "unable to power on PCIe PHY\n"); goto err_exit_phy; } iproc_pcie_perst_ctrl(pcie, true); iproc_pcie_perst_ctrl(pcie, false); iproc_pcie_invalidate_mapping(pcie); if (pcie->need_ob_cfg) { ret = iproc_pcie_map_ranges(pcie, res); if (ret) { dev_err(dev, "map failed\n"); goto err_power_off_phy; } } if (pcie->need_ib_cfg) { ret = iproc_pcie_map_dma_ranges(pcie); if (ret && ret != -ENOENT) goto err_power_off_phy; } ret = iproc_pcie_check_link(pcie); if (ret) { dev_err(dev, "no PCIe EP device detected\n"); goto err_power_off_phy; } iproc_pcie_enable(pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) if (iproc_pcie_msi_enable(pcie)) dev_info(dev, "not using iProc MSI\n"); host->ops = &iproc_pcie_ops; host->sysdata = pcie; host->map_irq = pcie->map_irq; ret = pci_host_probe(host); if (ret < 0) { dev_err(dev, "failed to scan host: %d\n", ret); goto err_power_off_phy; } for_each_pci_bridge(pdev, host->bus) { if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) pcie_print_link_status(pdev); } return 0; err_power_off_phy: phy_power_off(pcie->phy); err_exit_phy: phy_exit(pcie->phy); return ret; } EXPORT_SYMBOL(iproc_pcie_setup); void iproc_pcie_remove(struct iproc_pcie *pcie) { struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); pci_stop_root_bus(host->bus); pci_remove_root_bus(host->bus); iproc_pcie_msi_disable(pcie); phy_power_off(pcie->phy); phy_exit(pcie->phy); } EXPORT_SYMBOL(iproc_pcie_remove); /* * The MSI parsing logic in certain revisions of Broadcom PAXC based root * complex does not work and needs to be disabled */ static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev) { struct iproc_pcie *pcie = iproc_data(pdev->bus); if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) iproc_pcie_paxc_v2_msi_steer(pcie, 0, false); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_disable_msi_parsing); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_disable_msi_parsing); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_disable_msi_parsing); static void quirk_paxc_bridge(struct pci_dev *pdev) { /* * The PCI config space is shared with the PAXC root port and the first * Ethernet device. So, we need to workaround this by telling the PCI * code that the bridge is not an Ethernet device. */ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; /* * MPSS is not being set properly (as it is currently 0). This is * because that area of the PCI config space is hard coded to zero, and * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS) * so that the MPS can be set to the real max value. 
*/ pdev->pcie_mpss = 2; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge); MODULE_AUTHOR("Ray Jui <[email protected]>"); MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-iproc.c
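/*
 * Editor's illustrative sketch (not part of the kernel sources above):
 * iproc_pcie_setup_ob() first subtracts ob->axi_offset from the AXI
 * address, then walks the supported window sizes in descending order and
 * requires both the resulting internal address and the PCI address to be
 * aligned to the window size it picks. The standalone user-space snippet
 * below replays that size/alignment check with hypothetical window sizes;
 * it omits the driver's last-window fallback, and all names here are
 * illustrative, not driver API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* hypothetical table of supported window sizes, in bytes, ascending */
static const uint64_t window_sizes[] = { 128ULL << 20, 256ULL << 20, 1ULL << 30 };

static bool pick_ob_window(uint64_t axi_addr, uint64_t axi_offset,
			   uint64_t pci_addr, uint64_t size, uint64_t *chosen)
{
	int i;

	if (axi_addr < axi_offset)
		return false;		/* mirrors the dev_err() path in the driver */
	axi_addr -= axi_offset;		/* AXI address -> internal iProc address */

	/* walk sizes in descending order; the largest size that fits wins */
	for (i = (int)(sizeof(window_sizes) / sizeof(window_sizes[0])) - 1; i >= 0; i--) {
		uint64_t ws = window_sizes[i];

		if (size < ws)
			continue;
		if ((axi_addr & (ws - 1)) || (pci_addr & (ws - 1)))
			return false;	/* both addresses must be window-aligned */
		*chosen = ws;
		return true;
	}
	return false;
}

int main(void)
{
	uint64_t ws;

	/* 0x60000000 - 0x20000000 = 0x40000000, 256 MB request -> 256 MB window */
	if (pick_ob_window(0x60000000, 0x20000000, 0x0, 256ULL << 20, &ws))
		printf("window size: 0x%llx\n", (unsigned long long)ws);
	return 0;
}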
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel IXP4xx PCI host controller * * Copyright (C) 2017 Linus Walleij <[email protected]> * * Based on the IXP4xx arch/arm/mach-ixp4xx/common-pci.c driver * Copyright (C) 2002 Intel Corporation * Copyright (C) 2003 Greg Ungerer <[email protected]> * Copyright (C) 2003-2004 MontaVista Software, Inc. * Copyright (C) 2005 Deepak Saxena <[email protected]> * Copyright (C) 2005 Alessandro Zummo <[email protected]> * * TODO: * - Test IO-space access * - DMA support */ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/bits.h> #include "../pci.h" /* Register offsets */ #define IXP4XX_PCI_NP_AD 0x00 #define IXP4XX_PCI_NP_CBE 0x04 #define IXP4XX_PCI_NP_WDATA 0x08 #define IXP4XX_PCI_NP_RDATA 0x0c #define IXP4XX_PCI_CRP_AD_CBE 0x10 #define IXP4XX_PCI_CRP_WDATA 0x14 #define IXP4XX_PCI_CRP_RDATA 0x18 #define IXP4XX_PCI_CSR 0x1c #define IXP4XX_PCI_ISR 0x20 #define IXP4XX_PCI_INTEN 0x24 #define IXP4XX_PCI_DMACTRL 0x28 #define IXP4XX_PCI_AHBMEMBASE 0x2c #define IXP4XX_PCI_AHBIOBASE 0x30 #define IXP4XX_PCI_PCIMEMBASE 0x34 #define IXP4XX_PCI_AHBDOORBELL 0x38 #define IXP4XX_PCI_PCIDOORBELL 0x3c #define IXP4XX_PCI_ATPDMA0_AHBADDR 0x40 #define IXP4XX_PCI_ATPDMA0_PCIADDR 0x44 #define IXP4XX_PCI_ATPDMA0_LENADDR 0x48 #define IXP4XX_PCI_ATPDMA1_AHBADDR 0x4c #define IXP4XX_PCI_ATPDMA1_PCIADDR 0x50 #define IXP4XX_PCI_ATPDMA1_LENADDR 0x54 /* CSR bit definitions */ #define IXP4XX_PCI_CSR_HOST BIT(0) #define IXP4XX_PCI_CSR_ARBEN BIT(1) #define IXP4XX_PCI_CSR_ADS BIT(2) #define IXP4XX_PCI_CSR_PDS BIT(3) #define IXP4XX_PCI_CSR_ABE BIT(4) #define IXP4XX_PCI_CSR_DBT BIT(5) #define IXP4XX_PCI_CSR_ASE BIT(8) #define IXP4XX_PCI_CSR_IC BIT(15) #define IXP4XX_PCI_CSR_PRST BIT(16) /* ISR (Interrupt status) Register bit definitions */ #define IXP4XX_PCI_ISR_PSE BIT(0) #define IXP4XX_PCI_ISR_PFE BIT(1) #define IXP4XX_PCI_ISR_PPE BIT(2) #define IXP4XX_PCI_ISR_AHBE BIT(3) #define IXP4XX_PCI_ISR_APDC BIT(4) #define IXP4XX_PCI_ISR_PADC BIT(5) #define IXP4XX_PCI_ISR_ADB BIT(6) #define IXP4XX_PCI_ISR_PDB BIT(7) /* INTEN (Interrupt Enable) Register bit definitions */ #define IXP4XX_PCI_INTEN_PSE BIT(0) #define IXP4XX_PCI_INTEN_PFE BIT(1) #define IXP4XX_PCI_INTEN_PPE BIT(2) #define IXP4XX_PCI_INTEN_AHBE BIT(3) #define IXP4XX_PCI_INTEN_APDC BIT(4) #define IXP4XX_PCI_INTEN_PADC BIT(5) #define IXP4XX_PCI_INTEN_ADB BIT(6) #define IXP4XX_PCI_INTEN_PDB BIT(7) /* Shift value for byte enable on NP cmd/byte enable register */ #define IXP4XX_PCI_NP_CBE_BESL 4 /* PCI commands supported by NP access unit */ #define NP_CMD_IOREAD 0x2 #define NP_CMD_IOWRITE 0x3 #define NP_CMD_CONFIGREAD 0xa #define NP_CMD_CONFIGWRITE 0xb #define NP_CMD_MEMREAD 0x6 #define NP_CMD_MEMWRITE 0x7 /* Constants for CRP access into local config space */ #define CRP_AD_CBE_BESL 20 #define CRP_AD_CBE_WRITE 0x00010000 /* Special PCI configuration space registers for this controller */ #define IXP4XX_PCI_RTOTTO 0x40 struct ixp4xx_pci { struct device *dev; void __iomem *base; bool errata_hammer; bool host_mode; }; /* * The IXP4xx has a peculiar address bus that will change the * byte order on SoC peripherals depending on whether the device * operates in big-endian or little-endian mode. That means that * readl() and writel() that always use little-endian access * will not work for SoC peripherals such as the PCI controller * when used in big-endian mode. 
The accesses to the individual * PCI devices on the other hand, are always little-endian and * can use readl() and writel(). * * For local AHB bus access we need to use __raw_[readl|writel]() * to make sure that we access the SoC devices in the CPU native * endianness. */ static inline u32 ixp4xx_readl(struct ixp4xx_pci *p, u32 reg) { return __raw_readl(p->base + reg); } static inline void ixp4xx_writel(struct ixp4xx_pci *p, u32 reg, u32 val) { __raw_writel(val, p->base + reg); } static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p) { u32 isr = ixp4xx_readl(p, IXP4XX_PCI_ISR); if (isr & IXP4XX_PCI_ISR_PFE) { /* Make sure the master abort bit is reset */ ixp4xx_writel(p, IXP4XX_PCI_ISR, IXP4XX_PCI_ISR_PFE); dev_dbg(p->dev, "master abort detected\n"); return -EINVAL; } return 0; } static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); if (p->errata_hammer) { int i; /* * PCI workaround - only works if NP PCI space reads have * no side effects. Hammer the register and read twice 8 * times. last one will be good. */ for (i = 0; i < 8; i++) { ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd); *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA); *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA); } } else { ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd); *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA); } return ixp4xx_pci_check_master_abort(p); } static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); /* Set up the write */ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd); /* Execute the write by writing to NP_WDATA */ ixp4xx_writel(p, IXP4XX_PCI_NP_WDATA, data); return ixp4xx_pci_check_master_abort(p); } static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where) { /* Root bus is always 0 in this hardware */ if (bus_num == 0) { /* type 0 */ return (PCI_CONF1_ADDRESS(0, 0, PCI_FUNC(devfn), where) & ~PCI_CONF1_ENABLE) | BIT(32-PCI_SLOT(devfn)); } else { /* type 1 */ return (PCI_CONF1_ADDRESS(bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), where) & ~PCI_CONF1_ENABLE) | 1; } } /* * CRP functions are "Controller Configuration Port" accesses * initiated from within this driver itself to read/write PCI * control information in the config space. 
*/ static u32 ixp4xx_crp_byte_lane_enable_bits(u32 n, int size) { if (size == 1) return (0xf & ~BIT(n)) << CRP_AD_CBE_BESL; if (size == 2) return (0xf & ~(BIT(n) | BIT(n+1))) << CRP_AD_CBE_BESL; if (size == 4) return 0; return 0xffffffff; } static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size, u32 *value) { u32 n, cmd, val; n = where % 4; cmd = where & ~3; dev_dbg(p->dev, "%s from %d size %d cmd %08x\n", __func__, where, size, cmd); ixp4xx_writel(p, IXP4XX_PCI_CRP_AD_CBE, cmd); val = ixp4xx_readl(p, IXP4XX_PCI_CRP_RDATA); val >>= (8*n); switch (size) { case 1: val &= U8_MAX; dev_dbg(p->dev, "%s read byte %02x\n", __func__, val); break; case 2: val &= U16_MAX; dev_dbg(p->dev, "%s read word %04x\n", __func__, val); break; case 4: val &= U32_MAX; dev_dbg(p->dev, "%s read long %08x\n", __func__, val); break; default: /* Should not happen */ dev_err(p->dev, "%s illegal size\n", __func__); return PCIBIOS_DEVICE_NOT_FOUND; } *value = val; return PCIBIOS_SUCCESSFUL; } static int ixp4xx_crp_write_config(struct ixp4xx_pci *p, int where, int size, u32 value) { u32 n, cmd, val; n = where % 4; cmd = ixp4xx_crp_byte_lane_enable_bits(n, size); if (cmd == 0xffffffff) return PCIBIOS_BAD_REGISTER_NUMBER; cmd |= where & ~3; cmd |= CRP_AD_CBE_WRITE; val = value << (8*n); dev_dbg(p->dev, "%s to %d size %d cmd %08x val %08x\n", __func__, where, size, cmd, val); ixp4xx_writel(p, IXP4XX_PCI_CRP_AD_CBE, cmd); ixp4xx_writel(p, IXP4XX_PCI_CRP_WDATA, val); return PCIBIOS_SUCCESSFUL; } /* * Then follows the functions that read and write from the common PCI * configuration space. */ static u32 ixp4xx_byte_lane_enable_bits(u32 n, int size) { if (size == 1) return (0xf & ~BIT(n)) << 4; if (size == 2) return (0xf & ~(BIT(n) | BIT(n+1))) << 4; if (size == 4) return 0; return 0xffffffff; } static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { struct ixp4xx_pci *p = bus->sysdata; u32 n, addr, val, cmd; u8 bus_num = bus->number; int ret; *value = 0xffffffff; n = where % 4; cmd = ixp4xx_byte_lane_enable_bits(n, size); if (cmd == 0xffffffff) return PCIBIOS_BAD_REGISTER_NUMBER; addr = ixp4xx_config_addr(bus_num, devfn, where); cmd |= NP_CMD_CONFIGREAD; dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n", where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; val >>= (8*n); switch (size) { case 1: val &= U8_MAX; dev_dbg(p->dev, "%s read byte %02x\n", __func__, val); break; case 2: val &= U16_MAX; dev_dbg(p->dev, "%s read word %04x\n", __func__, val); break; case 4: val &= U32_MAX; dev_dbg(p->dev, "%s read long %08x\n", __func__, val); break; default: /* Should not happen */ dev_err(p->dev, "%s illegal size\n", __func__); return PCIBIOS_DEVICE_NOT_FOUND; } *value = val; return PCIBIOS_SUCCESSFUL; } static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct ixp4xx_pci *p = bus->sysdata; u32 n, addr, val, cmd; u8 bus_num = bus->number; int ret; n = where % 4; cmd = ixp4xx_byte_lane_enable_bits(n, size); if (cmd == 0xffffffff) return PCIBIOS_BAD_REGISTER_NUMBER; addr = ixp4xx_config_addr(bus_num, devfn, where); cmd |= NP_CMD_CONFIGWRITE; val = value << (8*n); dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n", value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); ret = ixp4xx_pci_write_indirect(p, addr, 
cmd, val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; return PCIBIOS_SUCCESSFUL; } static struct pci_ops ixp4xx_pci_ops = { .read = ixp4xx_pci_read_config, .write = ixp4xx_pci_write_config, }; static u32 ixp4xx_pci_addr_to_64mconf(phys_addr_t addr) { u8 base; base = ((addr & 0xff000000) >> 24); return (base << 24) | ((base + 1) << 16) | ((base + 2) << 8) | (base + 3); } static int ixp4xx_pci_parse_map_ranges(struct ixp4xx_pci *p) { struct device *dev = p->dev; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p); struct resource_entry *win; struct resource *res; phys_addr_t addr; win = resource_list_first_type(&bridge->windows, IORESOURCE_MEM); if (win) { u32 pcimembase; res = win->res; addr = res->start - win->offset; if (res->flags & IORESOURCE_PREFETCH) res->name = "IXP4xx PCI PRE-MEM"; else res->name = "IXP4xx PCI NON-PRE-MEM"; dev_dbg(dev, "%s window %pR, bus addr %pa\n", res->name, res, &addr); if (resource_size(res) != SZ_64M) { dev_err(dev, "memory range is not 64MB\n"); return -EINVAL; } pcimembase = ixp4xx_pci_addr_to_64mconf(addr); /* Commit configuration */ ixp4xx_writel(p, IXP4XX_PCI_PCIMEMBASE, pcimembase); } else { dev_err(dev, "no AHB memory mapping defined\n"); } win = resource_list_first_type(&bridge->windows, IORESOURCE_IO); if (win) { res = win->res; addr = pci_pio_to_address(res->start); if (addr & 0xff) { dev_err(dev, "IO mem at uneven address: %pa\n", &addr); return -EINVAL; } res->name = "IXP4xx PCI IO MEM"; /* * Setup I/O space location for PCI->AHB access, the * upper 24 bits of the address goes into the lower * 24 bits of this register. */ ixp4xx_writel(p, IXP4XX_PCI_AHBIOBASE, (addr >> 8)); } else { dev_info(dev, "no IO space AHB memory mapping defined\n"); } return 0; } static int ixp4xx_pci_parse_map_dma_ranges(struct ixp4xx_pci *p) { struct device *dev = p->dev; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p); struct resource_entry *win; struct resource *res; phys_addr_t addr; u32 ahbmembase; win = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM); if (win) { res = win->res; addr = res->start - win->offset; if (resource_size(res) != SZ_64M) { dev_err(dev, "DMA memory range is not 64MB\n"); return -EINVAL; } dev_dbg(dev, "DMA MEM BASE: %pa\n", &addr); /* * 4 PCI-to-AHB windows of 16 MB each, write the 8 high bits * into each byte of the PCI_AHBMEMBASE register. */ ahbmembase = ixp4xx_pci_addr_to_64mconf(addr); /* Commit AHB membase */ ixp4xx_writel(p, IXP4XX_PCI_AHBMEMBASE, ahbmembase); } else { dev_err(dev, "no DMA memory range defined\n"); } return 0; } /* Only used to get context for abort handling */ static struct ixp4xx_pci *ixp4xx_pci_abort_singleton; static int ixp4xx_pci_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { struct ixp4xx_pci *p = ixp4xx_pci_abort_singleton; u32 isr, status; int ret; isr = ixp4xx_readl(p, IXP4XX_PCI_ISR); ret = ixp4xx_crp_read_config(p, PCI_STATUS, 2, &status); if (ret) { dev_err(p->dev, "unable to read abort status\n"); return -EINVAL; } dev_err(p->dev, "PCI: abort_handler addr = %#lx, isr = %#x, status = %#x\n", addr, isr, status); /* Make sure the Master Abort bit is reset */ ixp4xx_writel(p, IXP4XX_PCI_ISR, IXP4XX_PCI_ISR_PFE); status |= PCI_STATUS_REC_MASTER_ABORT; ret = ixp4xx_crp_write_config(p, PCI_STATUS, 2, status); if (ret) dev_err(p->dev, "unable to clear abort status bit\n"); /* * If it was an imprecise abort, then we need to correct the * return address to be _after_ the instruction. 
*/ if (fsr & (1 << 10)) { dev_err(p->dev, "imprecise abort\n"); regs->ARM_pc += 4; } return 0; } static int __init ixp4xx_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct ixp4xx_pci *p; struct pci_host_bridge *host; int ret; u32 val; phys_addr_t addr; u32 basereg[4] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, PCI_BASE_ADDRESS_2, PCI_BASE_ADDRESS_3, }; int i; host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); if (!host) return -ENOMEM; host->ops = &ixp4xx_pci_ops; p = pci_host_bridge_priv(host); host->sysdata = p; p->dev = dev; dev_set_drvdata(dev, p); /* * Set up quirk for erratic behaviour in the 42x variant * when accessing config space. */ if (of_device_is_compatible(np, "intel,ixp42x-pci")) { p->errata_hammer = true; dev_info(dev, "activate hammering errata\n"); } p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); val = ixp4xx_readl(p, IXP4XX_PCI_CSR); p->host_mode = !!(val & IXP4XX_PCI_CSR_HOST); dev_info(dev, "controller is in %s mode\n", p->host_mode ? "host" : "option"); /* Hook in our fault handler for PCI errors */ ixp4xx_pci_abort_singleton = p; hook_fault_code(16+6, ixp4xx_pci_abort_handler, SIGBUS, 0, "imprecise external abort"); ret = ixp4xx_pci_parse_map_ranges(p); if (ret) return ret; ret = ixp4xx_pci_parse_map_dma_ranges(p); if (ret) return ret; /* This is only configured in host mode */ if (p->host_mode) { addr = __pa(PAGE_OFFSET); /* This is a noop (0x00) but explains what is going on */ addr |= PCI_BASE_ADDRESS_SPACE_MEMORY; for (i = 0; i < 4; i++) { /* Write this directly into the config space */ ret = ixp4xx_crp_write_config(p, basereg[i], 4, addr); if (ret) dev_err(dev, "failed to set up PCI_BASE_ADDRESS_%d\n", i); else dev_info(dev, "set PCI_BASE_ADDR_%d to %pa\n", i, &addr); addr += SZ_16M; } /* * Enable CSR window at 64 MiB to allow PCI masters to continue * prefetching past the 64 MiB boundary, if all AHB to PCI * windows are consecutive. */ ret = ixp4xx_crp_write_config(p, PCI_BASE_ADDRESS_4, 4, addr); if (ret) dev_err(dev, "failed to set up PCI_BASE_ADDRESS_4\n"); else dev_info(dev, "set PCI_BASE_ADDR_4 to %pa\n", &addr); /* * Put the IO memory window at the very end of physical memory * at 0xfffffc00. This is when the system is trying to access IO * memory over AHB. */ addr = 0xfffffc00; addr |= PCI_BASE_ADDRESS_SPACE_IO; ret = ixp4xx_crp_write_config(p, PCI_BASE_ADDRESS_5, 4, addr); if (ret) dev_err(dev, "failed to set up PCI_BASE_ADDRESS_5\n"); else dev_info(dev, "set PCI_BASE_ADDR_5 to %pa\n", &addr); /* * Retry timeout to 0x80 * Transfer ready timeout to 0xff */ ret = ixp4xx_crp_write_config(p, IXP4XX_PCI_RTOTTO, 4, 0x000080ff); if (ret) dev_err(dev, "failed to set up TRDY limit\n"); else dev_info(dev, "set TRDY limit to 0x80ff\n"); } /* Clear interrupts */ val = IXP4XX_PCI_ISR_PSE | IXP4XX_PCI_ISR_PFE | IXP4XX_PCI_ISR_PPE | IXP4XX_PCI_ISR_AHBE; ixp4xx_writel(p, IXP4XX_PCI_ISR, val); /* * Set Initialize Complete in PCI Control Register: allow IXP4XX to * generate PCI configuration cycles. Specify that the AHB bus is * operating in big-endian mode. Set up byte lane swapping between * little-endian PCI and the big-endian AHB bus. 
*/ val = IXP4XX_PCI_CSR_IC | IXP4XX_PCI_CSR_ABE; if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) val |= (IXP4XX_PCI_CSR_PDS | IXP4XX_PCI_CSR_ADS); ixp4xx_writel(p, IXP4XX_PCI_CSR, val); ret = ixp4xx_crp_write_config(p, PCI_COMMAND, 2, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); if (ret) dev_err(dev, "unable to initialize master and command memory\n"); else dev_info(dev, "initialized as master\n"); pci_host_probe(host); return 0; } static const struct of_device_id ixp4xx_pci_of_match[] = { { .compatible = "intel,ixp42x-pci", }, { .compatible = "intel,ixp43x-pci", }, {}, }; /* * This driver needs to be a builtin module with suppressed bind * attributes since the probe() is initializing a hard exception * handler and this can only be done from __init-tagged code * sections. This module cannot be removed and inserted at all. */ static struct platform_driver ixp4xx_pci_driver = { .driver = { .name = "ixp4xx-pci", .suppress_bind_attrs = true, .of_match_table = ixp4xx_pci_of_match, }, }; builtin_platform_driver_probe(ixp4xx_pci_driver, ixp4xx_pci_probe);
linux-master
drivers/pci/controller/pci-ixp4xx.c
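/*
 * Editor's illustrative sketch (not part of the kernel sources above):
 * ixp4xx_pci_addr_to_64mconf() splits a 64 MB window into four 16 MB
 * sub-windows and packs the top byte of each sub-window's base address
 * into one 32-bit register value (written to AHBMEMBASE or PCIMEMBASE).
 * The standalone snippet below reproduces that packing; it is a sketch,
 * not driver API.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_64m_window(uint32_t addr)
{
	uint8_t base = (addr & 0xff000000u) >> 24;	/* top byte of the window base */

	/* bytes 3..0 hold the bases of the four consecutive 16 MB chunks */
	return ((uint32_t)base << 24) |
	       ((uint32_t)(base + 1) << 16) |
	       ((uint32_t)(base + 2) << 8) |
	       (uint32_t)(base + 3);
}

int main(void)
{
	/* e.g. a window starting at 0x48000000 packs to 0x48494a4b */
	printf("0x%08x\n", pack_64m_window(0x48000000u));
	return 0;
}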
// SPDX-License-Identifier: GPL-2.0 /* * PCIe endpoint driver for Renesas R-Car SoCs * Copyright (c) 2020 Renesas Electronics Europe GmbH * * Author: Lad Prabhakar <[email protected]> */ #include <linux/delay.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "pcie-rcar.h" #define RCAR_EPC_MAX_FUNCTIONS 1 /* Structure representing the PCIe interface */ struct rcar_pcie_endpoint { struct rcar_pcie pcie; phys_addr_t *ob_mapped_addr; struct pci_epc_mem_window *ob_window; u8 max_functions; unsigned int bar_to_atu[MAX_NR_INBOUND_MAPS]; unsigned long *ib_window_map; u32 num_ib_windows; u32 num_ob_windows; }; static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie) { u32 val; rcar_pci_write_reg(pcie, 0, PCIETCTLR); /* Set endpoint mode */ rcar_pci_write_reg(pcie, 0, PCIEMSR); /* Initialize default capabilities. */ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4); rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, PCI_HEADER_TYPE_NORMAL); /* Write out the physical slot number = 0 */ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); val = rcar_pci_read_reg(pcie, EXPCAP(1)); /* device supports fixed 128 bytes MPSS */ val &= ~GENMASK(2, 0); rcar_pci_write_reg(pcie, val, EXPCAP(1)); val = rcar_pci_read_reg(pcie, EXPCAP(2)); /* read requests size 128 bytes */ val &= ~GENMASK(14, 12); /* payload size 128 bytes */ val &= ~GENMASK(7, 5); rcar_pci_write_reg(pcie, val, EXPCAP(2)); /* Set target link speed to 5.0 GT/s */ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, PCI_EXP_LNKSTA_CLS_5_0GB); /* Set the completion timer timeout to the maximum 50ms. 
*/ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); /* Terminate list of capabilities (Next Capability Offset=0) */ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); /* flush modifications */ wmb(); } static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep, phys_addr_t addr) { int i; for (i = 0; i < ep->num_ob_windows; i++) if (ep->ob_window[i].phys_base == addr) return i; return -EINVAL; } static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep, struct platform_device *pdev) { struct rcar_pcie *pcie = &ep->pcie; char outbound_name[10]; struct resource *res; unsigned int i = 0; ep->num_ob_windows = 0; for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) { sprintf(outbound_name, "memory%u", i); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, outbound_name); if (!res) { dev_err(pcie->dev, "missing outbound window %u\n", i); return -EINVAL; } if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), outbound_name)) { dev_err(pcie->dev, "Cannot request memory region %s.\n", outbound_name); return -EIO; } ep->ob_window[i].phys_base = res->start; ep->ob_window[i].size = resource_size(res); /* controller doesn't support multiple allocation * from same window, so set page_size to window size */ ep->ob_window[i].page_size = resource_size(res); } ep->num_ob_windows = i; return 0; } static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep, struct platform_device *pdev) { struct rcar_pcie *pcie = &ep->pcie; struct pci_epc_mem_window *window; struct device *dev = pcie->dev; struct resource res; int err; err = of_address_to_resource(dev->of_node, 0, &res); if (err) return err; pcie->base = devm_ioremap_resource(dev, &res); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES, sizeof(*window), GFP_KERNEL); if (!ep->ob_window) return -ENOMEM; rcar_pcie_parse_outbound_ranges(ep, pdev); err = of_property_read_u8(dev->of_node, "max-functions", &ep->max_functions); if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS) ep->max_functions = RCAR_EPC_MAX_FUNCTIONS; return 0; } static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_header *hdr) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); struct rcar_pcie *pcie = &ep->pcie; u32 val; if (!fn) val = hdr->vendorid; else val = rcar_pci_read_reg(pcie, IDSETR0); val |= hdr->deviceid << 16; rcar_pci_write_reg(pcie, val, IDSETR0); val = hdr->revid; val |= hdr->progif_code << 8; val |= hdr->subclass_code << 16; val |= hdr->baseclass_code << 24; rcar_pci_write_reg(pcie, val, IDSETR1); if (!fn) val = hdr->subsys_vendor_id; else val = rcar_pci_read_reg(pcie, SUBIDSETR); val |= hdr->subsys_id << 16; rcar_pci_write_reg(pcie, val, SUBIDSETR); if (hdr->interrupt_pin > PCI_INTERRUPT_INTA) return -EINVAL; val = rcar_pci_read_reg(pcie, PCICONF(15)); val |= (hdr->interrupt_pin << 8); rcar_pci_write_reg(pcie, val, PCICONF(15)); return 0; } static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT; struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); u64 size = 1ULL << fls64(epf_bar->size - 1); dma_addr_t cpu_addr = epf_bar->phys_addr; enum pci_barno bar = epf_bar->barno; struct rcar_pcie *pcie = &ep->pcie; u32 mask; int idx; int err; idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows); if (idx >= ep->num_ib_windows) { dev_err(pcie->dev, "no free inbound window\n"); return -EINVAL; } if ((flags & PCI_BASE_ADDRESS_SPACE) == 
PCI_BASE_ADDRESS_SPACE_IO) flags |= IO_SPACE; ep->bar_to_atu[bar] = idx; /* use 64-bit BARs */ set_bit(idx, ep->ib_window_map); set_bit(idx + 1, ep->ib_window_map); if (cpu_addr > 0) { unsigned long nr_zeros = __ffs64(cpu_addr); u64 alignment = 1ULL << nr_zeros; size = min(size, alignment); } size = min(size, 1ULL << 32); mask = roundup_pow_of_two(size) - 1; mask &= ~0xf; rcar_pcie_set_inbound(pcie, cpu_addr, 0x0, mask | flags, idx, false); err = rcar_pcie_wait_for_phyrdy(pcie); if (err) { dev_err(pcie->dev, "phy not ready\n"); return -EINVAL; } return 0; } static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_bar *epf_bar) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); enum pci_barno bar = epf_bar->barno; u32 atu_index = ep->bar_to_atu[bar]; rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false); clear_bit(atu_index, ep->ib_window_map); clear_bit(atu_index + 1, ep->ib_window_map); } static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 interrupts) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); struct rcar_pcie *pcie = &ep->pcie; u32 flags; flags = rcar_pci_read_reg(pcie, MSICAP(fn)); flags |= interrupts << MSICAP0_MMESCAP_OFFSET; rcar_pci_write_reg(pcie, flags, MSICAP(fn)); return 0; } static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); struct rcar_pcie *pcie = &ep->pcie; u32 flags; flags = rcar_pci_read_reg(pcie, MSICAP(fn)); if (!(flags & MSICAP0_MSIE)) return -EINVAL; return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET); } static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr, u64 pci_addr, size_t size) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); struct rcar_pcie *pcie = &ep->pcie; struct resource_entry win; struct resource res; int window; int err; /* check if we have a link. 
*/ err = rcar_pcie_wait_for_dl(pcie); if (err) { dev_err(pcie->dev, "link not up\n"); return err; } window = rcar_pcie_ep_get_window(ep, addr); if (window < 0) { dev_err(pcie->dev, "failed to get corresponding window\n"); return -EINVAL; } memset(&win, 0x0, sizeof(win)); memset(&res, 0x0, sizeof(res)); res.start = pci_addr; res.end = pci_addr + size - 1; res.flags = IORESOURCE_MEM; win.res = &res; rcar_pcie_set_outbound(pcie, window, &win); ep->ob_mapped_addr[window] = addr; return 0; } static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); struct resource_entry win; struct resource res; int idx; for (idx = 0; idx < ep->num_ob_windows; idx++) if (ep->ob_mapped_addr[idx] == addr) break; if (idx >= ep->num_ob_windows) return; memset(&win, 0x0, sizeof(win)); memset(&res, 0x0, sizeof(res)); win.res = &res; rcar_pcie_set_outbound(&ep->pcie, idx, &win); ep->ob_mapped_addr[idx] = 0; } static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep, u8 fn, u8 intx) { struct rcar_pcie *pcie = &ep->pcie; u32 val; val = rcar_pci_read_reg(pcie, PCIEMSITXR); if ((val & PCI_MSI_FLAGS_ENABLE)) { dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n"); return -EINVAL; } val = rcar_pci_read_reg(pcie, PCICONF(1)); if ((val & INTDIS)) { dev_err(pcie->dev, "INTx message transmission is disabled\n"); return -EINVAL; } val = rcar_pci_read_reg(pcie, PCIEINTXR); if ((val & ASTINTX)) { dev_err(pcie->dev, "INTx is already asserted\n"); return -EINVAL; } val |= ASTINTX; rcar_pci_write_reg(pcie, val, PCIEINTXR); usleep_range(1000, 1001); val = rcar_pci_read_reg(pcie, PCIEINTXR); val &= ~ASTINTX; rcar_pci_write_reg(pcie, val, PCIEINTXR); return 0; } static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie, u8 fn, u8 interrupt_num) { u16 msi_count; u32 val; /* Check MSI enable bit */ val = rcar_pci_read_reg(pcie, MSICAP(fn)); if (!(val & MSICAP0_MSIE)) return -EINVAL; /* Get MSI numbers from MME */ msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET); msi_count = 1 << msi_count; if (!interrupt_num || interrupt_num > msi_count) return -EINVAL; val = rcar_pci_read_reg(pcie, PCIEMSITXR); rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR); return 0; } static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, enum pci_epc_irq_type type, u16 interrupt_num) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); switch (type) { case PCI_EPC_IRQ_LEGACY: return rcar_pcie_ep_assert_intx(ep, fn, 0); case PCI_EPC_IRQ_MSI: return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num); default: return -EINVAL; } } static int rcar_pcie_ep_start(struct pci_epc *epc) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR); rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR); return 0; } static void rcar_pcie_ep_stop(struct pci_epc *epc) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR); } static const struct pci_epc_features rcar_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, /* use 64-bit BARs so mark BAR[1,3,5] as reserved */ .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5, .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4, .bar_fixed_size[0] = 128, .bar_fixed_size[2] = 256, .bar_fixed_size[4] = 256, }; static const struct pci_epc_features* rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { return &rcar_pcie_epc_features; } 
static const struct pci_epc_ops rcar_pcie_epc_ops = { .write_header = rcar_pcie_ep_write_header, .set_bar = rcar_pcie_ep_set_bar, .clear_bar = rcar_pcie_ep_clear_bar, .set_msi = rcar_pcie_ep_set_msi, .get_msi = rcar_pcie_ep_get_msi, .map_addr = rcar_pcie_ep_map_addr, .unmap_addr = rcar_pcie_ep_unmap_addr, .raise_irq = rcar_pcie_ep_raise_irq, .start = rcar_pcie_ep_start, .stop = rcar_pcie_ep_stop, .get_features = rcar_pcie_ep_get_features, }; static const struct of_device_id rcar_pcie_ep_of_match[] = { { .compatible = "renesas,r8a774c0-pcie-ep", }, { .compatible = "renesas,rcar-gen3-pcie-ep" }, { }, }; static int rcar_pcie_ep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rcar_pcie_endpoint *ep; struct rcar_pcie *pcie; struct pci_epc *epc; int err; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; pcie = &ep->pcie; pcie->dev = dev; pm_runtime_enable(dev); err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "pm_runtime_resume_and_get failed\n"); goto err_pm_disable; } err = rcar_pcie_ep_get_pdata(ep, pdev); if (err < 0) { dev_err(dev, "failed to request resources: %d\n", err); goto err_pm_put; } ep->num_ib_windows = MAX_NR_INBOUND_MAPS; ep->ib_window_map = devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows), sizeof(long), GFP_KERNEL); if (!ep->ib_window_map) { err = -ENOMEM; dev_err(dev, "failed to allocate memory for inbound map\n"); goto err_pm_put; } ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(*ep->ob_mapped_addr), GFP_KERNEL); if (!ep->ob_mapped_addr) { err = -ENOMEM; dev_err(dev, "failed to allocate memory for outbound memory pointers\n"); goto err_pm_put; } epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops); if (IS_ERR(epc)) { dev_err(dev, "failed to create epc device\n"); err = PTR_ERR(epc); goto err_pm_put; } epc->max_functions = ep->max_functions; epc_set_drvdata(epc, ep); rcar_pcie_ep_hw_init(pcie); err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows); if (err < 0) { dev_err(dev, "failed to initialize the epc memory space\n"); goto err_pm_put; } return 0; err_pm_put: pm_runtime_put(dev); err_pm_disable: pm_runtime_disable(dev); return err; } static struct platform_driver rcar_pcie_ep_driver = { .driver = { .name = "rcar-pcie-ep", .of_match_table = rcar_pcie_ep_of_match, .suppress_bind_attrs = true, }, .probe = rcar_pcie_ep_probe, }; builtin_platform_driver(rcar_pcie_ep_driver);
linux-master
drivers/pci/controller/pcie-rcar-ep.c
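/*
 * Editor's illustrative sketch (not part of the kernel sources above):
 * rcar_pcie_ep_set_bar() rounds the requested BAR size up to a power of
 * two, clamps it to the alignment of the CPU address and to 4 GiB, and
 * then derives the inbound-window mask with the low four bits cleared.
 * The standalone snippet below mirrors that arithmetic; names are
 * illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rcar_like_bar_mask(uint64_t bar_size, uint64_t cpu_addr)
{
	/* round the BAR size up to the next power of two */
	uint64_t size = 1;

	while (size < bar_size)
		size <<= 1;

	/* the window cannot be larger than the CPU address alignment */
	if (cpu_addr > 0) {
		uint64_t alignment = cpu_addr & ~(cpu_addr - 1); /* lowest set bit */

		if (alignment < size)
			size = alignment;
	}

	/* clamp to 4 GiB, then build the mask with bits [3:0] cleared */
	if (size > (1ULL << 32))
		size = 1ULL << 32;

	return (size - 1) & ~0xfULL;
}

int main(void)
{
	/* e.g. a 300-byte BAR at a 1 MiB-aligned address gives mask 0x1f0 */
	printf("0x%llx\n", (unsigned long long)rcar_like_bar_mask(300, 0x40100000));
	return 0;
}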
// SPDX-License-Identifier: GPL-2.0 /* * Microchip AXI PCIe Bridge host controller driver * * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved. * * Author: Daire McNamara <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> #include "../pci.h" /* Number of MSI IRQs */ #define MC_MAX_NUM_MSI_IRQS 32 /* PCIe Bridge Phy and Controller Phy offsets */ #define MC_PCIE1_BRIDGE_ADDR 0x00008000u #define MC_PCIE1_CTRL_ADDR 0x0000a000u #define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR) #define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR) /* PCIe Bridge Phy Regs */ #define PCIE_PCI_IRQ_DW0 0xa8 #define MSIX_CAP_MASK BIT(31) #define NUM_MSI_MSGS_MASK GENMASK(6, 4) #define NUM_MSI_MSGS_SHIFT 4 #define IMASK_LOCAL 0x180 #define DMA_END_ENGINE_0_MASK 0x00000000u #define DMA_END_ENGINE_0_SHIFT 0 #define DMA_END_ENGINE_1_MASK 0x00000000u #define DMA_END_ENGINE_1_SHIFT 1 #define DMA_ERROR_ENGINE_0_MASK 0x00000100u #define DMA_ERROR_ENGINE_0_SHIFT 8 #define DMA_ERROR_ENGINE_1_MASK 0x00000200u #define DMA_ERROR_ENGINE_1_SHIFT 9 #define A_ATR_EVT_POST_ERR_MASK 0x00010000u #define A_ATR_EVT_POST_ERR_SHIFT 16 #define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u #define A_ATR_EVT_FETCH_ERR_SHIFT 17 #define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u #define A_ATR_EVT_DISCARD_ERR_SHIFT 18 #define A_ATR_EVT_DOORBELL_MASK 0x00000000u #define A_ATR_EVT_DOORBELL_SHIFT 19 #define P_ATR_EVT_POST_ERR_MASK 0x00100000u #define P_ATR_EVT_POST_ERR_SHIFT 20 #define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u #define P_ATR_EVT_FETCH_ERR_SHIFT 21 #define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u #define P_ATR_EVT_DISCARD_ERR_SHIFT 22 #define P_ATR_EVT_DOORBELL_MASK 0x00000000u #define P_ATR_EVT_DOORBELL_SHIFT 23 #define PM_MSI_INT_INTA_MASK 0x01000000u #define PM_MSI_INT_INTA_SHIFT 24 #define PM_MSI_INT_INTB_MASK 0x02000000u #define PM_MSI_INT_INTB_SHIFT 25 #define PM_MSI_INT_INTC_MASK 0x04000000u #define PM_MSI_INT_INTC_SHIFT 26 #define PM_MSI_INT_INTD_MASK 0x08000000u #define PM_MSI_INT_INTD_SHIFT 27 #define PM_MSI_INT_INTX_MASK 0x0f000000u #define PM_MSI_INT_INTX_SHIFT 24 #define PM_MSI_INT_MSI_MASK 0x10000000u #define PM_MSI_INT_MSI_SHIFT 28 #define PM_MSI_INT_AER_EVT_MASK 0x20000000u #define PM_MSI_INT_AER_EVT_SHIFT 29 #define PM_MSI_INT_EVENTS_MASK 0x40000000u #define PM_MSI_INT_EVENTS_SHIFT 30 #define PM_MSI_INT_SYS_ERR_MASK 0x80000000u #define PM_MSI_INT_SYS_ERR_SHIFT 31 #define NUM_LOCAL_EVENTS 15 #define ISTATUS_LOCAL 0x184 #define IMASK_HOST 0x188 #define ISTATUS_HOST 0x18c #define IMSI_ADDR 0x190 #define ISTATUS_MSI 0x194 /* PCIe Master table init defines */ #define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u #define ATR0_PCIE_ATR_SIZE 0x25 #define ATR0_PCIE_ATR_SIZE_SHIFT 1 #define ATR0_PCIE_WIN0_SRC_ADDR 0x604u #define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u #define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu #define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u /* PCIe AXI slave table init defines */ #define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u #define ATR_SIZE_SHIFT 1 #define ATR_IMPL_ENABLE 1 #define ATR0_AXI4_SLV0_SRC_ADDR 0x804u #define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u #define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu #define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u #define PCIE_TX_RX_INTERFACE 0x00000000u #define PCIE_CONFIG_INTERFACE 0x00000001u #define ATR_ENTRY_SIZE 32 /* PCIe Controller Phy Regs */ #define 
SEC_ERROR_EVENT_CNT 0x20 #define DED_ERROR_EVENT_CNT 0x24 #define SEC_ERROR_INT 0x28 #define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0) #define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4) #define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8) #define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12) #define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0) #define NUM_SEC_ERROR_INTS (4) #define SEC_ERROR_INT_MASK 0x2c #define DED_ERROR_INT 0x30 #define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0) #define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4) #define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8) #define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12) #define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0) #define NUM_DED_ERROR_INTS (4) #define DED_ERROR_INT_MASK 0x34 #define ECC_CONTROL 0x38 #define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0) #define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1) #define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2) #define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3) #define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4) #define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5) #define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6) #define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7) #define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8) #define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9) #define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10) #define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11) #define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12) #define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13) #define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14) #define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15) #define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24) #define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25) #define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26) #define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27) #define PCIE_EVENT_INT 0x14c #define PCIE_EVENT_INT_L2_EXIT_INT BIT(0) #define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1) #define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2) #define PCIE_EVENT_INT_MASK GENMASK(2, 0) #define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16) #define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17) #define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18) #define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16) #define PCIE_EVENT_INT_ENB_SHIFT 16 #define NUM_PCIE_EVENTS (3) /* PCIe Config space MSI capability structure */ #define MC_MSI_CAP_CTRL_OFFSET 0xe0u /* Events */ #define EVENT_PCIE_L2_EXIT 0 #define EVENT_PCIE_HOTRST_EXIT 1 #define EVENT_PCIE_DLUP_EXIT 2 #define EVENT_SEC_TX_RAM_SEC_ERR 3 #define EVENT_SEC_RX_RAM_SEC_ERR 4 #define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5 #define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6 #define EVENT_DED_TX_RAM_DED_ERR 7 #define EVENT_DED_RX_RAM_DED_ERR 8 #define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9 #define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10 #define EVENT_LOCAL_DMA_END_ENGINE_0 11 #define EVENT_LOCAL_DMA_END_ENGINE_1 12 #define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13 #define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14 #define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15 #define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16 #define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17 #define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18 #define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19 #define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20 #define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21 #define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22 #define EVENT_LOCAL_PM_MSI_INT_INTX 23 #define EVENT_LOCAL_PM_MSI_INT_MSI 24 #define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25 #define EVENT_LOCAL_PM_MSI_INT_EVENTS 26 #define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27 #define NUM_EVENTS 28 #define PCIE_EVENT_CAUSE(x, 
s) \ [EVENT_PCIE_ ## x] = { __stringify(x), s } #define SEC_ERROR_CAUSE(x, s) \ [EVENT_SEC_ ## x] = { __stringify(x), s } #define DED_ERROR_CAUSE(x, s) \ [EVENT_DED_ ## x] = { __stringify(x), s } #define LOCAL_EVENT_CAUSE(x, s) \ [EVENT_LOCAL_ ## x] = { __stringify(x), s } #define PCIE_EVENT(x) \ .base = MC_PCIE_CTRL_ADDR, \ .offset = PCIE_EVENT_INT, \ .mask_offset = PCIE_EVENT_INT, \ .mask_high = 1, \ .mask = PCIE_EVENT_INT_ ## x ## _INT, \ .enb_mask = PCIE_EVENT_INT_ENB_MASK #define SEC_EVENT(x) \ .base = MC_PCIE_CTRL_ADDR, \ .offset = SEC_ERROR_INT, \ .mask_offset = SEC_ERROR_INT_MASK, \ .mask = SEC_ERROR_INT_ ## x ## _INT, \ .mask_high = 1, \ .enb_mask = 0 #define DED_EVENT(x) \ .base = MC_PCIE_CTRL_ADDR, \ .offset = DED_ERROR_INT, \ .mask_offset = DED_ERROR_INT_MASK, \ .mask_high = 1, \ .mask = DED_ERROR_INT_ ## x ## _INT, \ .enb_mask = 0 #define LOCAL_EVENT(x) \ .base = MC_PCIE_BRIDGE_ADDR, \ .offset = ISTATUS_LOCAL, \ .mask_offset = IMASK_LOCAL, \ .mask_high = 0, \ .mask = x ## _MASK, \ .enb_mask = 0 #define PCIE_EVENT_TO_EVENT_MAP(x) \ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x } #define SEC_ERROR_TO_EVENT_MAP(x) \ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x } #define DED_ERROR_TO_EVENT_MAP(x) \ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x } #define LOCAL_STATUS_TO_EVENT_MAP(x) \ { x ## _MASK, EVENT_LOCAL_ ## x } struct event_map { u32 reg_mask; u32 event_bit; }; struct mc_msi { struct mutex lock; /* Protect used bitmap */ struct irq_domain *msi_domain; struct irq_domain *dev_domain; u32 num_vectors; u64 vector_phy; DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS); }; struct mc_pcie { void __iomem *axi_base_addr; struct device *dev; struct irq_domain *intx_domain; struct irq_domain *event_domain; raw_spinlock_t lock; struct mc_msi msi; }; struct cause { const char *sym; const char *str; }; static const struct cause event_cause[NUM_EVENTS] = { PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"), PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"), PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"), SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"), SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"), SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"), SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"), DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"), DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"), DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"), DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"), LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"), LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"), LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"), LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"), LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"), LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"), LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"), LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"), LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"), LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"), LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"), }; static struct event_map pcie_event_to_event[] = { PCIE_EVENT_TO_EVENT_MAP(L2_EXIT), PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT), PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT), }; static struct event_map sec_error_to_event[] = { SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR), SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR), 
SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR), SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR), }; static struct event_map ded_error_to_event[] = { DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR), DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR), DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR), DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR), }; static struct event_map local_status_to_event[] = { LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0), LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1), LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0), LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1), LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR), LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR), LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR), LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL), LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR), LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR), LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR), LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL), LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX), LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI), LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT), LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS), LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR), }; static struct { u32 base; u32 offset; u32 mask; u32 shift; u32 enb_mask; u32 mask_high; u32 mask_offset; } event_descs[] = { { PCIE_EVENT(L2_EXIT) }, { PCIE_EVENT(HOTRST_EXIT) }, { PCIE_EVENT(DLUP_EXIT) }, { SEC_EVENT(TX_RAM_SEC_ERR) }, { SEC_EVENT(RX_RAM_SEC_ERR) }, { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) }, { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) }, { DED_EVENT(TX_RAM_DED_ERR) }, { DED_EVENT(RX_RAM_DED_ERR) }, { DED_EVENT(PCIE2AXI_RAM_DED_ERR) }, { DED_EVENT(AXI2PCIE_RAM_DED_ERR) }, { LOCAL_EVENT(DMA_END_ENGINE_0) }, { LOCAL_EVENT(DMA_END_ENGINE_1) }, { LOCAL_EVENT(DMA_ERROR_ENGINE_0) }, { LOCAL_EVENT(DMA_ERROR_ENGINE_1) }, { LOCAL_EVENT(A_ATR_EVT_POST_ERR) }, { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) }, { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) }, { LOCAL_EVENT(A_ATR_EVT_DOORBELL) }, { LOCAL_EVENT(P_ATR_EVT_POST_ERR) }, { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) }, { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) }, { LOCAL_EVENT(P_ATR_EVT_DOORBELL) }, { LOCAL_EVENT(PM_MSI_INT_INTX) }, { LOCAL_EVENT(PM_MSI_INT_MSI) }, { LOCAL_EVENT(PM_MSI_INT_AER_EVT) }, { LOCAL_EVENT(PM_MSI_INT_EVENTS) }, { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) }, }; static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" }; static struct mc_pcie *port; static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam) { struct mc_msi *msi = &port->msi; u16 reg; u8 queue_size; /* Fixup MSI enable flag */ reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS); reg |= PCI_MSI_FLAGS_ENABLE; writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS); /* Fixup PCI MSI queue flags */ queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg); reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size); writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS); /* Fixup MSI addr fields */ writel_relaxed(lower_32_bits(msi->vector_phy), ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO); writel_relaxed(upper_32_bits(msi->vector_phy), ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI); } static void mc_handle_msi(struct irq_desc *desc) { struct mc_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct device *dev = port->dev; struct mc_msi *msi = &port->msi; void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; unsigned long status; u32 bit; int ret; chained_irq_enter(chip, desc); status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); if 
(status & PM_MSI_INT_MSI_MASK) { writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL); status = readl_relaxed(bridge_base_addr + ISTATUS_MSI); for_each_set_bit(bit, &status, msi->num_vectors) { ret = generic_handle_domain_irq(msi->dev_domain, bit); if (ret) dev_err_ratelimited(dev, "bad MSI IRQ %d\n", bit); } } chained_irq_exit(chip, desc); } static void mc_msi_bottom_irq_ack(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; u32 bitpos = data->hwirq; writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI); } static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); phys_addr_t addr = port->msi.vector_phy; msg->address_lo = lower_32_bits(addr); msg->address_hi = upper_32_bits(addr); msg->data = data->hwirq; dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n", (int)data->hwirq, msg->address_hi, msg->address_lo); } static int mc_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static struct irq_chip mc_msi_bottom_irq_chip = { .name = "Microchip MSI", .irq_ack = mc_msi_bottom_irq_ack, .irq_compose_msi_msg = mc_compose_msi_msg, .irq_set_affinity = mc_msi_set_affinity, }; static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct mc_pcie *port = domain->host_data; struct mc_msi *msi = &port->msi; unsigned long bit; mutex_lock(&msi->lock); bit = find_first_zero_bit(msi->used, msi->num_vectors); if (bit >= msi->num_vectors) { mutex_unlock(&msi->lock); return -ENOSPC; } set_bit(bit, msi->used); irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip, domain->host_data, handle_edge_irq, NULL, NULL); mutex_unlock(&msi->lock); return 0; } static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct mc_pcie *port = irq_data_get_irq_chip_data(d); struct mc_msi *msi = &port->msi; mutex_lock(&msi->lock); if (test_bit(d->hwirq, msi->used)) __clear_bit(d->hwirq, msi->used); else dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq); mutex_unlock(&msi->lock); } static const struct irq_domain_ops msi_domain_ops = { .alloc = mc_irq_msi_domain_alloc, .free = mc_irq_msi_domain_free, }; static struct irq_chip mc_msi_irq_chip = { .name = "Microchip PCIe MSI", .irq_ack = irq_chip_ack_parent, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info mc_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX), .chip = &mc_msi_irq_chip, }; static int mc_allocate_msi_domains(struct mc_pcie *port) { struct device *dev = port->dev; struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct mc_msi *msi = &port->msi; mutex_init(&port->msi.lock); msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors, &msi_domain_ops, port); if (!msi->dev_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info, msi->dev_domain); if (!msi->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); irq_domain_remove(msi->dev_domain); return -ENOMEM; } return 0; } static void mc_handle_intx(struct irq_desc *desc) { struct mc_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip 
*chip = irq_desc_get_chip(desc); struct device *dev = port->dev; void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; unsigned long status; u32 bit; int ret; chained_irq_enter(chip, desc); status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); if (status & PM_MSI_INT_INTX_MASK) { status &= PM_MSI_INT_INTX_MASK; status >>= PM_MSI_INT_INTX_SHIFT; for_each_set_bit(bit, &status, PCI_NUM_INTX) { ret = generic_handle_domain_irq(port->intx_domain, bit); if (ret) dev_err_ratelimited(dev, "bad INTx IRQ %d\n", bit); } } chained_irq_exit(chip, desc); } static void mc_ack_intx_irq(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL); } static void mc_mask_intx_irq(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; unsigned long flags; u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); u32 val; raw_spin_lock_irqsave(&port->lock, flags); val = readl_relaxed(bridge_base_addr + IMASK_LOCAL); val &= ~mask; writel_relaxed(val, bridge_base_addr + IMASK_LOCAL); raw_spin_unlock_irqrestore(&port->lock, flags); } static void mc_unmask_intx_irq(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; unsigned long flags; u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); u32 val; raw_spin_lock_irqsave(&port->lock, flags); val = readl_relaxed(bridge_base_addr + IMASK_LOCAL); val |= mask; writel_relaxed(val, bridge_base_addr + IMASK_LOCAL); raw_spin_unlock_irqrestore(&port->lock, flags); } static struct irq_chip mc_intx_irq_chip = { .name = "Microchip PCIe INTx", .irq_ack = mc_ack_intx_irq, .irq_mask = mc_mask_intx_irq, .irq_unmask = mc_unmask_intx_irq, }; static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = mc_pcie_intx_map, }; static inline u32 reg_to_event(u32 reg, struct event_map field) { return (reg & field.reg_mask) ? 
BIT(field.event_bit) : 0; } static u32 pcie_events(struct mc_pcie *port) { void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT); u32 val = 0; int i; for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++) val |= reg_to_event(reg, pcie_event_to_event[i]); return val; } static u32 sec_errors(struct mc_pcie *port) { void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT); u32 val = 0; int i; for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++) val |= reg_to_event(reg, sec_error_to_event[i]); return val; } static u32 ded_errors(struct mc_pcie *port) { void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT); u32 val = 0; int i; for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++) val |= reg_to_event(reg, ded_error_to_event[i]); return val; } static u32 local_events(struct mc_pcie *port) { void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); u32 val = 0; int i; for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++) val |= reg_to_event(reg, local_status_to_event[i]); return val; } static u32 get_events(struct mc_pcie *port) { u32 events = 0; events |= pcie_events(port); events |= sec_errors(port); events |= ded_errors(port); events |= local_events(port); return events; } static irqreturn_t mc_event_handler(int irq, void *dev_id) { struct mc_pcie *port = dev_id; struct device *dev = port->dev; struct irq_data *data; data = irq_domain_get_irq_data(port->event_domain, irq); if (event_cause[data->hwirq].str) dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str); else dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq); return IRQ_HANDLED; } static void mc_handle_event(struct irq_desc *desc) { struct mc_pcie *port = irq_desc_get_handler_data(desc); unsigned long events; u32 bit; struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); events = get_events(port); for_each_set_bit(bit, &events, NUM_EVENTS) generic_handle_domain_irq(port->event_domain, bit); chained_irq_exit(chip, desc); } static void mc_ack_event_irq(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); u32 event = data->hwirq; void __iomem *addr; u32 mask; addr = port->axi_base_addr + event_descs[event].base + event_descs[event].offset; mask = event_descs[event].mask; mask |= event_descs[event].enb_mask; writel_relaxed(mask, addr); } static void mc_mask_event_irq(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); u32 event = data->hwirq; void __iomem *addr; u32 mask; u32 val; addr = port->axi_base_addr + event_descs[event].base + event_descs[event].mask_offset; mask = event_descs[event].mask; if (event_descs[event].enb_mask) { mask <<= PCIE_EVENT_INT_ENB_SHIFT; mask &= PCIE_EVENT_INT_ENB_MASK; } if (!event_descs[event].mask_high) mask = ~mask; raw_spin_lock(&port->lock); val = readl_relaxed(addr); if (event_descs[event].mask_high) val |= mask; else val &= mask; writel_relaxed(val, addr); raw_spin_unlock(&port->lock); } static void mc_unmask_event_irq(struct irq_data *data) { struct mc_pcie *port = irq_data_get_irq_chip_data(data); u32 event = data->hwirq; void __iomem *addr; u32 mask; u32 val; addr = port->axi_base_addr + event_descs[event].base + event_descs[event].mask_offset; mask = event_descs[event].mask; if (event_descs[event].enb_mask) 
mask <<= PCIE_EVENT_INT_ENB_SHIFT; if (event_descs[event].mask_high) mask = ~mask; if (event_descs[event].enb_mask) mask &= PCIE_EVENT_INT_ENB_MASK; raw_spin_lock(&port->lock); val = readl_relaxed(addr); if (event_descs[event].mask_high) val &= mask; else val |= mask; writel_relaxed(val, addr); raw_spin_unlock(&port->lock); } static struct irq_chip mc_event_irq_chip = { .name = "Microchip PCIe EVENT", .irq_ack = mc_ack_event_irq, .irq_mask = mc_mask_event_irq, .irq_unmask = mc_unmask_event_irq, }; static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops event_domain_ops = { .map = mc_pcie_event_map, }; static inline void mc_pcie_deinit_clk(void *data) { struct clk *clk = data; clk_disable_unprepare(clk); } static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id) { struct clk *clk; int ret; clk = devm_clk_get_optional(dev, id); if (IS_ERR(clk)) return clk; if (!clk) return clk; ret = clk_prepare_enable(clk); if (ret) return ERR_PTR(ret); devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk); return clk; } static int mc_pcie_init_clks(struct device *dev) { int i; struct clk *fic; /* * PCIe may be clocked via Fabric Interface using between 1 and 4 * clocks. Scan DT for clocks and enable them if present */ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) { fic = mc_pcie_init_clk(dev, poss_clks[i]); if (IS_ERR(fic)) return PTR_ERR(fic); } return 0; } static int mc_pcie_init_irq_domains(struct mc_pcie *port) { struct device *dev = port->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "failed to find PCIe Intc node\n"); return -EINVAL; } port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS, &event_domain_ops, port); if (!port->event_domain) { dev_err(dev, "failed to get event domain\n"); of_node_put(pcie_intc_node); return -ENOMEM; } irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS); port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "failed to get an INTx IRQ domain\n"); of_node_put(pcie_intc_node); return -ENOMEM; } irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); of_node_put(pcie_intc_node); raw_spin_lock_init(&port->lock); return mc_allocate_msi_domains(port); } static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, phys_addr_t axi_addr, phys_addr_t pci_addr, size_t size) { u32 atr_sz = ilog2(size) - 1; u32 val; if (index == 0) val = PCIE_CONFIG_INTERFACE; else val = PCIE_TX_RX_INTERFACE; writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_TRSL_PARAM); val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) | ATR_IMPL_ENABLE; writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_SRCADDR_PARAM); val = upper_32_bits(axi_addr); writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_SRC_ADDR); val = lower_32_bits(pci_addr); writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_TRSL_ADDR_LSB); val = upper_32_bits(pci_addr); writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_TRSL_ADDR_UDW); val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); val |= (ATR0_PCIE_ATR_SIZE << 
ATR0_PCIE_ATR_SIZE_SHIFT); writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR); } static int mc_pcie_setup_windows(struct platform_device *pdev, struct mc_pcie *port) { void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; struct pci_host_bridge *bridge = platform_get_drvdata(pdev); struct resource_entry *entry; u64 pci_addr; u32 index = 1; resource_list_for_each_entry(entry, &bridge->windows) { if (resource_type(entry->res) == IORESOURCE_MEM) { pci_addr = entry->res->start - entry->offset; mc_pcie_setup_window(bridge_base_addr, index, entry->res->start, pci_addr, resource_size(entry->res)); index++; } } return 0; } static inline void mc_clear_secs(struct mc_pcie *port) { void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr + SEC_ERROR_INT); writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT); } static inline void mc_clear_deds(struct mc_pcie *port) { void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr + DED_ERROR_INT); writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT); } static void mc_disable_interrupts(struct mc_pcie *port) { void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR; u32 val; /* Ensure ECC bypass is enabled */ val = ECC_CONTROL_TX_RAM_ECC_BYPASS | ECC_CONTROL_RX_RAM_ECC_BYPASS | ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS | ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS; writel_relaxed(val, ctrl_base_addr + ECC_CONTROL); /* Disable SEC errors and clear any outstanding */ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr + SEC_ERROR_INT_MASK); mc_clear_secs(port); /* Disable DED errors and clear any outstanding */ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr + DED_ERROR_INT_MASK); mc_clear_deds(port); /* Disable local interrupts and clear any outstanding */ writel_relaxed(0, bridge_base_addr + IMASK_LOCAL); writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL); writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI); /* Disable PCIe events and clear any outstanding */ val = PCIE_EVENT_INT_L2_EXIT_INT | PCIE_EVENT_INT_HOTRST_EXIT_INT | PCIE_EVENT_INT_DLUP_EXIT_INT | PCIE_EVENT_INT_L2_EXIT_INT_MASK | PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK | PCIE_EVENT_INT_DLUP_EXIT_INT_MASK; writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT); /* Disable host interrupts and clear any outstanding */ writel_relaxed(0, bridge_base_addr + IMASK_HOST); writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST); } static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port) { struct device *dev = &pdev->dev; int irq; int i, intx_irq, msi_irq, event_irq; int ret; ret = mc_pcie_init_irq_domains(port); if (ret) { dev_err(dev, "failed creating IRQ domains\n"); return ret; } irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENODEV; for (i = 0; i < NUM_EVENTS; i++) { event_irq = irq_create_mapping(port->event_domain, i); if (!event_irq) { dev_err(dev, "failed to map hwirq %d\n", i); return -ENXIO; } ret = devm_request_irq(dev, event_irq, mc_event_handler, 0, event_cause[i].sym, port); if (ret) { dev_err(dev, "failed to request IRQ %d\n", event_irq); return ret; } } intx_irq = irq_create_mapping(port->event_domain, EVENT_LOCAL_PM_MSI_INT_INTX); if (!intx_irq) { dev_err(dev, "failed to map INTx 
interrupt\n"); return -ENXIO; } /* Plug the INTx chained handler */ irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port); msi_irq = irq_create_mapping(port->event_domain, EVENT_LOCAL_PM_MSI_INT_MSI); if (!msi_irq) return -ENXIO; /* Plug the MSI chained handler */ irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port); /* Plug the main event chained handler */ irq_set_chained_handler_and_data(irq, mc_handle_event, port); return 0; } static int mc_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct platform_device *pdev = to_platform_device(dev); void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; int ret; /* Configure address translation table 0 for PCIe config space */ mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start, cfg->res.start, resource_size(&cfg->res)); /* Need some fixups in config space */ mc_pcie_enable_msi(port, cfg->win); /* Configure non-config space outbound ranges */ ret = mc_pcie_setup_windows(pdev, port); if (ret) return ret; /* Address translation is up; safe to enable interrupts */ ret = mc_init_interrupts(pdev, port); if (ret) return ret; return 0; } static int mc_host_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; void __iomem *bridge_base_addr; int ret; u32 val; port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; port->dev = dev; port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(port->axi_base_addr)) return PTR_ERR(port->axi_base_addr); mc_disable_interrupts(port); bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; /* Allow enabling MSI by disabling MSI-X */ val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0); val &= ~MSIX_CAP_MASK; writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0); /* Pick num vectors from bitfile programmed onto FPGA fabric */ val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0); val &= NUM_MSI_MSGS_MASK; val >>= NUM_MSI_MSGS_SHIFT; port->msi.num_vectors = 1 << val; /* Pick vector address from design */ port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR); ret = mc_pcie_init_clks(dev); if (ret) { dev_err(dev, "failed to get clock resources, error %d\n", ret); return -ENODEV; } return pci_host_common_probe(pdev); } static const struct pci_ecam_ops mc_ecam_ops = { .init = mc_platform_init, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; static const struct of_device_id mc_pcie_of_match[] = { { .compatible = "microchip,pcie-host-1.0", .data = &mc_ecam_ops, }, {}, }; MODULE_DEVICE_TABLE(of, mc_pcie_of_match); static struct platform_driver mc_pcie_driver = { .probe = mc_host_probe, .driver = { .name = "microchip-pcie", .of_match_table = mc_pcie_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(mc_pcie_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Microchip PCIe host controller driver"); MODULE_AUTHOR("Daire McNamara <[email protected]>");
linux-master
drivers/pci/controller/pcie-microchip-host.c
// SPDX-License-Identifier: GPL-2.0 /* * PCIe driver for Marvell Armada 370 and Armada XP SoCs * * Author: Thomas Petazzoni <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mbus.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include "../pci.h" #include "../pci-bridge-emul.h" /* * PCIe unit register offsets. */ #define PCIE_DEV_ID_OFF 0x0000 #define PCIE_CMD_OFF 0x0004 #define PCIE_DEV_REV_OFF 0x0008 #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) #define PCIE_SSDEV_ID_OFF 0x002c #define PCIE_CAP_PCIEXP 0x0060 #define PCIE_CAP_PCIERR_OFF 0x0100 #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) #define PCIE_WIN5_CTRL_OFF 0x1880 #define PCIE_WIN5_BASE_OFF 0x1884 #define PCIE_WIN5_REMAP_OFF 0x188c #define PCIE_CONF_ADDR_OFF 0x18f8 #define PCIE_CONF_ADDR_EN 0x80000000 #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc)) #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16) #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11) #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8) #define PCIE_CONF_ADDR(bus, devfn, where) \ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ PCIE_CONF_ADDR_EN) #define PCIE_CONF_DATA_OFF 0x18fc #define PCIE_INT_CAUSE_OFF 0x1900 #define PCIE_INT_UNMASK_OFF 0x1910 #define PCIE_INT_INTX(i) BIT(24+i) #define PCIE_INT_PM_PME BIT(28) #define PCIE_INT_ALL_MASK GENMASK(31, 0) #define PCIE_CTRL_OFF 0x1a00 #define PCIE_CTRL_X1_MODE 0x0001 #define PCIE_CTRL_RC_MODE BIT(1) #define PCIE_CTRL_MASTER_HOT_RESET BIT(24) #define PCIE_STAT_OFF 0x1a04 #define PCIE_STAT_BUS 0xff00 #define PCIE_STAT_DEV 0x1f0000 #define PCIE_STAT_LINK_DOWN BIT(0) #define PCIE_SSPL_OFF 0x1a0c #define PCIE_SSPL_VALUE_SHIFT 0 #define PCIE_SSPL_VALUE_MASK GENMASK(7, 0) #define PCIE_SSPL_SCALE_SHIFT 8 #define PCIE_SSPL_SCALE_MASK GENMASK(9, 8) #define PCIE_SSPL_ENABLE BIT(16) #define PCIE_RC_RTSTA 0x1a14 #define PCIE_DEBUG_CTRL 0x1a60 #define PCIE_DEBUG_SOFT_RESET BIT(20) struct mvebu_pcie_port; /* Structure representing all PCIe interfaces */ struct mvebu_pcie { struct platform_device *pdev; struct mvebu_pcie_port *ports; struct resource io; struct resource realio; struct resource mem; int nports; }; struct mvebu_pcie_window { phys_addr_t base; phys_addr_t remap; size_t size; }; /* Structure representing one PCIe interface */ struct mvebu_pcie_port { char *name; void __iomem *base; u32 port; u32 lane; bool is_x4; int devfn; unsigned int mem_target; unsigned int mem_attr; unsigned int io_target; unsigned int io_attr; struct clk *clk; struct gpio_desc *reset_gpio; char *reset_name; struct pci_bridge_emul bridge; struct device_node *dn; struct mvebu_pcie *pcie; struct mvebu_pcie_window memwin; struct mvebu_pcie_window iowin; u32 saved_pcie_stat; struct resource regs; u8 slot_power_limit_value; u8 slot_power_limit_scale; struct irq_domain *intx_irq_domain; raw_spinlock_t irq_lock; int intx_irq; }; static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 
val, u32 reg) { writel(val, port->base + reg); } static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg) { return readl(port->base + reg); } static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port) { return port->io_target != -1 && port->io_attr != -1; } static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) { return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); } static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port) { return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8; } static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) { u32 stat; stat = mvebu_readl(port, PCIE_STAT_OFF); stat &= ~PCIE_STAT_BUS; stat |= nr << 8; mvebu_writel(port, stat, PCIE_STAT_OFF); } static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) { u32 stat; stat = mvebu_readl(port, PCIE_STAT_OFF); stat &= ~PCIE_STAT_DEV; stat |= nr << 16; mvebu_writel(port, stat, PCIE_STAT_OFF); } static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port) { int i; mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0)); mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0)); for (i = 1; i < 3; i++) { mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i)); } for (i = 0; i < 5; i++) { mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i)); mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i)); mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); } mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); } /* * Setup PCIE BARs and Address Decode Wins: * BAR[0] -> internal registers (needed for MSI) * BAR[1] -> covers all DRAM banks * BAR[2] -> Disabled * WIN[0-3] -> DRAM bank[0-3] */ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) { const struct mbus_dram_target_info *dram; u32 size; int i; dram = mv_mbus_dram_info(); /* First, disable and clear BARs and windows. */ mvebu_pcie_disable_wins(port); /* Setup windows for DDR banks. Count total DDR size on the fly. */ size = 0; for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; mvebu_writel(port, cs->base & 0xffff0000, PCIE_WIN04_BASE_OFF(i)); mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); mvebu_writel(port, ((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, PCIE_WIN04_CTRL_OFF(i)); size += cs->size; } /* Round up 'size' to the nearest power of two. */ if ((size & (size - 1)) != 0) size = 1 << fls(size); /* Setup BAR[1] to all DRAM banks. */ mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, PCIE_BAR_CTRL_OFF(1)); /* * Point BAR[0] to the device's internal registers. */ mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0)); mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0)); } static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) { u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl; /* Setup PCIe controller to Root Complex mode. */ ctrl = mvebu_readl(port, PCIE_CTRL_OFF); ctrl |= PCIE_CTRL_RC_MODE; mvebu_writel(port, ctrl, PCIE_CTRL_OFF); /* * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link * Capability register. This register is defined by PCIe specification * as read-only but this mvebu controller has it as read-write and must * be set to number of SerDes PCIe lanes (1 or 4). 
If this register is * not set correctly then link with endpoint card is not established. */ lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP); lnkcap &= ~PCI_EXP_LNKCAP_MLW; lnkcap |= (port->is_x4 ? 4 : 1) << 4; mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP); /* Disable Root Bridge I/O space, memory space and bus mastering. */ cmd = mvebu_readl(port, PCIE_CMD_OFF); cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); mvebu_writel(port, cmd, PCIE_CMD_OFF); /* * Change Class Code of PCI Bridge device to PCI Bridge (0x6004) * because default value is Memory controller (0x5080). * * Note that this mvebu PCI Bridge does not have compliant Type 1 * Configuration Space. Header Type is reported as Type 0 and it * has format of Type 0 config space. * * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34) * have the same format in Marvell's specification as in PCIe * specification, but their meaning is totally different and they do * different things: they are aliased into internal mvebu registers * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or * reconfigured by pci device drivers. * * Therefore driver uses emulation of PCI Bridge which emulates * access to configuration space via internal mvebu registers or * emulated configuration buffer. Driver access these PCI Bridge * directly for simplification, but these registers can be accessed * also via standard mvebu way for accessing PCI config space. */ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); dev_rev &= ~0xffffff00; dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF); /* Point PCIe unit MBUS decode windows to DRAM space. */ mvebu_pcie_setup_wins(port); /* * Program Root Port to automatically send Set_Slot_Power_Limit * PCIe Message when changing status from Dl_Down to Dl_Up and valid * slot power limit was specified. */ sspl = mvebu_readl(port, PCIE_SSPL_OFF); sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE); if (port->slot_power_limit_value) { sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT; sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT; sspl |= PCIE_SSPL_ENABLE; } mvebu_writel(port, sspl, PCIE_SSPL_OFF); /* Mask all interrupt sources. */ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF); /* Clear all interrupt causes. */ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF); /* Check if "intx" interrupt was specified in DT. */ if (port->intx_irq > 0) return; /* * Fallback code when "intx" interrupt was not specified in DT: * Unmask all legacy INTx interrupts as driver does not provide a way * for masking and unmasking of individual legacy INTx interrupts. * Legacy INTx are reported via one shared GIC source and therefore * kernel cannot distinguish which individual legacy INTx was triggered. * These interrupts are shared, so it should not cause any issue. Just * performance penalty as every PCIe interrupt handler needs to be * called when some interrupt is triggered. 
*/ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) | PCIE_INT_INTX(2) | PCIE_INT_INTX(3); mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); } static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, struct pci_bus *bus, int devfn); static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; void __iomem *conf_data; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; if (!mvebu_pcie_link_up(port)) return PCIBIOS_DEVICE_NOT_FOUND; conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); switch (size) { case 1: *val = readb_relaxed(conf_data + (where & 3)); break; case 2: *val = readw_relaxed(conf_data + (where & 2)); break; case 4: *val = readl_relaxed(conf_data); break; default: return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; void __iomem *conf_data; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; if (!mvebu_pcie_link_up(port)) return PCIBIOS_DEVICE_NOT_FOUND; conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); switch (size) { case 1: writeb(val, conf_data + (where & 3)); break; case 2: writew(val, conf_data + (where & 2)); break; case 4: writel(val, conf_data); break; default: return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops mvebu_pcie_child_ops = { .read = mvebu_pcie_child_rd_conf, .write = mvebu_pcie_child_wr_conf, }; /* * Remove windows, starting from the largest ones to the smallest * ones. */ static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, phys_addr_t base, size_t size) { while (size) { size_t sz = 1 << (fls(size) - 1); mvebu_mbus_del_window(base, sz); base += sz; size -= sz; } } /* * MBus windows can only have a power of two size, but PCI BARs do not * have this constraint. Therefore, we have to split the PCI BAR into * areas each having a power of two size. We start from the largest * one (i.e highest order bit set in the size). 
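 * For instance, a 9 MB (0x900000) PCI BAR is emitted as an 8 MB MBus
 * window followed by a 1 MB MBus window, largest first.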
*/ static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port, unsigned int target, unsigned int attribute, phys_addr_t base, size_t size, phys_addr_t remap) { size_t size_mapped = 0; while (size) { size_t sz = 1 << (fls(size) - 1); int ret; ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, sz, remap); if (ret) { phys_addr_t end = base + sz - 1; dev_err(&port->pcie->pdev->dev, "Could not create MBus window at [mem %pa-%pa]: %d\n", &base, &end, ret); mvebu_pcie_del_windows(port, base - size_mapped, size_mapped); return ret; } size -= sz; size_mapped += sz; base += sz; if (remap != MVEBU_MBUS_NO_REMAP) remap += sz; } return 0; } static int mvebu_pcie_set_window(struct mvebu_pcie_port *port, unsigned int target, unsigned int attribute, const struct mvebu_pcie_window *desired, struct mvebu_pcie_window *cur) { int ret; if (desired->base == cur->base && desired->remap == cur->remap && desired->size == cur->size) return 0; if (cur->size != 0) { mvebu_pcie_del_windows(port, cur->base, cur->size); cur->size = 0; cur->base = 0; /* * If something tries to change the window while it is enabled * the change will not be done atomically. That would be * difficult to do in the general case. */ } if (desired->size == 0) return 0; ret = mvebu_pcie_add_windows(port, target, attribute, desired->base, desired->size, desired->remap); if (ret) { cur->size = 0; cur->base = 0; return ret; } *cur = *desired; return 0; } static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { struct mvebu_pcie_window desired = {}; struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new iobase/iolimit values invalid? */ if (conf->iolimit < conf->iobase || le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper)) return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, &port->iowin); /* * We read the PCI-to-PCI bridge emulated registers, and * calculate the base address and size of the address decoding * window to setup, according to the PCI-to-PCI bridge * specifications. iobase is the bus address, port->iowin_base * is the CPU address. */ desired.remap = ((conf->iobase & 0xF0) << 8) | (le16_to_cpu(conf->iobaseupper) << 16); desired.base = port->pcie->io.start + desired.remap; desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) | (le16_to_cpu(conf->iolimitupper) << 16)) - desired.remap) + 1; return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, &port->iowin); } static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) { struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new membase/memlimit values invalid? */ if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase)) return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, &port->memwin); /* * We read the PCI-to-PCI bridge emulated registers, and * calculate the base address and size of the address decoding * window to setup, according to the PCI-to-PCI bridge * specifications. 
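 * For instance, membase 0x1230 and memlimit 0x12f0 decode to the range
 * 0x12300000..0x12ffffff, i.e. a window with base 0x12300000 and size
 * 0xd00000 (13 MB).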
*/ desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16); desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) - desired.base + 1; return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, &port->memwin); } static pci_bridge_emul_read_status_t mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { struct mvebu_pcie_port *port = bridge->data; switch (reg) { case PCI_COMMAND: *value = mvebu_readl(port, PCIE_CMD_OFF); break; case PCI_PRIMARY_BUS: { /* * From the whole 32bit register we support reading from HW only * secondary bus number which is mvebu local bus number. * Other bits are retrieved only from emulated config buffer. */ __le32 *cfgspace = (__le32 *)&bridge->conf; u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]); val &= ~0xff00; val |= mvebu_pcie_get_local_bus_nr(port) << 8; *value = val; break; } case PCI_INTERRUPT_LINE: { /* * From the whole 32bit register we support reading from HW only * one bit: PCI_BRIDGE_CTL_BUS_RESET. * Other bits are retrieved only from emulated config buffer. */ __le32 *cfgspace = (__le32 *)&bridge->conf; u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET) val |= PCI_BRIDGE_CTL_BUS_RESET << 16; else val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16); *value = val; break; } default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } return PCI_BRIDGE_EMUL_HANDLED; } static pci_bridge_emul_read_status_t mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { struct mvebu_pcie_port *port = bridge->data; switch (reg) { case PCI_EXP_DEVCAP: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP); break; case PCI_EXP_DEVCTL: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; case PCI_EXP_LNKCAP: /* * PCIe requires that the Clock Power Management capability bit * is hard-wired to zero for downstream ports but HW returns 1. * Additionally enable Data Link Layer Link Active Reporting * Capable bit as DL_Active indication is provided too. */ *value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC; break; case PCI_EXP_LNKCTL: /* DL_Active indication is provided via PCIE_STAT_OFF */ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) | (mvebu_pcie_link_up(port) ? (PCI_EXP_LNKSTA_DLLLA << 16) : 0); break; case PCI_EXP_SLTCTL: { u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl); u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta); u32 val = 0; /* * When slot power limit was not specified in DT then * ASPL_DISABLE bit is stored only in emulated config space. * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW. */ if (!port->slot_power_limit_value) val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE; else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE)) val |= PCI_EXP_SLTCTL_ASPL_DISABLE; /* This callback is 32-bit and in high bits is slot status. 
*/ val |= slotsta << 16; *value = val; break; } case PCI_EXP_RTSTA: *value = mvebu_readl(port, PCIE_RC_RTSTA); break; case PCI_EXP_DEVCAP2: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2); break; case PCI_EXP_DEVCTL2: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2); break; case PCI_EXP_LNKCTL2: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2); break; default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } return PCI_BRIDGE_EMUL_HANDLED; } static pci_bridge_emul_read_status_t mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { struct mvebu_pcie_port *port = bridge->data; switch (reg) { case 0: case PCI_ERR_UNCOR_STATUS: case PCI_ERR_UNCOR_MASK: case PCI_ERR_UNCOR_SEVER: case PCI_ERR_COR_STATUS: case PCI_ERR_COR_MASK: case PCI_ERR_CAP: case PCI_ERR_HEADER_LOG+0: case PCI_ERR_HEADER_LOG+4: case PCI_ERR_HEADER_LOG+8: case PCI_ERR_HEADER_LOG+12: case PCI_ERR_ROOT_COMMAND: case PCI_ERR_ROOT_STATUS: case PCI_ERR_ROOT_ERR_SRC: *value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg); break; default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } return PCI_BRIDGE_EMUL_HANDLED; } static void mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) { struct mvebu_pcie_port *port = bridge->data; struct pci_bridge_emul_conf *conf = &bridge->conf; switch (reg) { case PCI_COMMAND: mvebu_writel(port, new, PCIE_CMD_OFF); break; case PCI_IO_BASE: if ((mask & 0xffff) && mvebu_has_ioport(port) && mvebu_pcie_handle_iobase_change(port)) { /* On error disable IO range */ conf->iobase &= ~0xf0; conf->iolimit &= ~0xf0; conf->iobase |= 0xf0; conf->iobaseupper = cpu_to_le16(0x0000); conf->iolimitupper = cpu_to_le16(0x0000); } break; case PCI_MEMORY_BASE: if (mvebu_pcie_handle_membase_change(port)) { /* On error disable mem range */ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0); conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0); conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0); } break; case PCI_IO_BASE_UPPER16: if (mvebu_has_ioport(port) && mvebu_pcie_handle_iobase_change(port)) { /* On error disable IO range */ conf->iobase &= ~0xf0; conf->iolimit &= ~0xf0; conf->iobase |= 0xf0; conf->iobaseupper = cpu_to_le16(0x0000); conf->iolimitupper = cpu_to_le16(0x0000); } break; case PCI_PRIMARY_BUS: if (mask & 0xff00) mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus); break; case PCI_INTERRUPT_LINE: if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF); if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) ctrl |= PCIE_CTRL_MASTER_HOT_RESET; else ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET; mvebu_writel(port, ctrl, PCIE_CTRL_OFF); } break; default: break; } } static void mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) { struct mvebu_pcie_port *port = bridge->data; switch (reg) { case PCI_EXP_DEVCTL: mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; case PCI_EXP_LNKCTL: /* * PCIe requires that the Enable Clock Power Management bit * is hard-wired to zero for downstream ports but HW allows * to change it. */ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN; mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); break; case PCI_EXP_SLTCTL: /* * Allow to change PCIE_SSPL_ENABLE bit only when slot power * limit was specified in DT and configured into HW. 
*/ if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) && port->slot_power_limit_value) { u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF); if (new & PCI_EXP_SLTCTL_ASPL_DISABLE) sspl &= ~PCIE_SSPL_ENABLE; else sspl |= PCIE_SSPL_ENABLE; mvebu_writel(port, sspl, PCIE_SSPL_OFF); } break; case PCI_EXP_RTSTA: /* * PME Status bit in Root Status Register (PCIE_RC_RTSTA) * is read-only and can be cleared only by writing 0b to the * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So * clear PME via Interrupt Cause. */ if (new & PCI_EXP_RTSTA_PME) mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF); break; case PCI_EXP_DEVCTL2: mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2); break; case PCI_EXP_LNKCTL2: mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2); break; default: break; } } static void mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) { struct mvebu_pcie_port *port = bridge->data; switch (reg) { /* These are W1C registers, so clear other bits */ case PCI_ERR_UNCOR_STATUS: case PCI_ERR_COR_STATUS: case PCI_ERR_ROOT_STATUS: new &= mask; fallthrough; case PCI_ERR_UNCOR_MASK: case PCI_ERR_UNCOR_SEVER: case PCI_ERR_COR_MASK: case PCI_ERR_CAP: case PCI_ERR_HEADER_LOG+0: case PCI_ERR_HEADER_LOG+4: case PCI_ERR_HEADER_LOG+8: case PCI_ERR_HEADER_LOG+12: case PCI_ERR_ROOT_COMMAND: case PCI_ERR_ROOT_ERR_SRC: mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg); break; default: break; } } static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { .read_base = mvebu_pci_bridge_emul_base_conf_read, .write_base = mvebu_pci_bridge_emul_base_conf_write, .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read, .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write, .read_ext = mvebu_pci_bridge_emul_ext_conf_read, .write_ext = mvebu_pci_bridge_emul_ext_conf_write, }; /* * Initialize the configuration space of the PCI-to-PCI bridge * associated with the given PCIe interface. */ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD; struct pci_bridge_emul *bridge = &port->bridge; u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF); u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF); u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS); bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff); bridge->conf.device = cpu_to_le16(dev_id >> 16); bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff); if (mvebu_has_ioport(port)) { /* We support 32 bits I/O addressing */ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32; bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; } else { bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD; } /* * Older mvebu hardware provides PCIe Capability structure only in * version 1. New hardware provides it in version 2. * Enable slot support which is emulated. */ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT); /* * Set Presence Detect State bit permanently as there is no support for * unplugging PCIe card from the slot. Assume that PCIe card is always * connected in slot. * * Set physical slot number to port+1 as mvebu ports are indexed from * zero and zero value is reserved for ports within the same silicon * as Root Port which is not mvebu case. * * Also set correct slot power limit. 
*/ bridge->pcie_conf.slotcap = cpu_to_le32( FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) | FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) | FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1)); bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS); bridge->subsystem_vendor_id = ssdev_id & 0xffff; bridge->subsystem_id = ssdev_id >> 16; bridge->has_pcie = true; bridge->pcie_start = PCIE_CAP_PCIEXP; bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; return pci_bridge_emul_init(bridge, bridge_flags); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) { return sys->private_data; } static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, struct pci_bus *bus, int devfn) { int i; for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; if (!port->base) continue; if (bus->number == 0 && port->devfn == devfn) return port; if (bus->number != 0 && bus->number >= port->bridge.conf.secondary_bus && bus->number <= port->bridge.conf.subordinate_bus) return port; } return NULL; } /* PCI configuration space write function */ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; return pci_bridge_emul_conf_write(&port->bridge, where, size, val); } /* PCI configuration space read function */ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; return pci_bridge_emul_conf_read(&port->bridge, where, size, val); } static struct pci_ops mvebu_pcie_ops = { .read = mvebu_pcie_rd_conf, .write = mvebu_pcie_wr_conf, }; static void mvebu_pcie_intx_irq_mask(struct irq_data *d) { struct mvebu_pcie_port *port = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 unmask; raw_spin_lock_irqsave(&port->irq_lock, flags); unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); unmask &= ~PCIE_INT_INTX(hwirq); mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); raw_spin_unlock_irqrestore(&port->irq_lock, flags); } static void mvebu_pcie_intx_irq_unmask(struct irq_data *d) { struct mvebu_pcie_port *port = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 unmask; raw_spin_lock_irqsave(&port->irq_lock, flags); unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); unmask |= PCIE_INT_INTX(hwirq); mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); raw_spin_unlock_irqrestore(&port->irq_lock, flags); } static struct irq_chip intx_irq_chip = { .name = "mvebu-INTx", .irq_mask = mvebu_pcie_intx_irq_mask, .irq_unmask = mvebu_pcie_intx_irq_unmask, }; static int mvebu_pcie_intx_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { struct mvebu_pcie_port *port = h->host_data; irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq); irq_set_chip_data(virq, port); return 0; } static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = { .map = mvebu_pcie_intx_irq_map, .xlate = irq_domain_xlate_onecell, }; static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port) { struct device *dev = &port->pcie->pdev->dev; struct device_node *pcie_intc_node; raw_spin_lock_init(&port->irq_lock); pcie_intc_node = 
of_get_next_child(port->dn, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found for %s\n", port->name); return -ENODEV; } port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &mvebu_pcie_intx_irq_domain_ops, port); of_node_put(pcie_intc_node); if (!port->intx_irq_domain) { dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name); return -ENOMEM; } return 0; } static void mvebu_pcie_irq_handler(struct irq_desc *desc) { struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct device *dev = &port->pcie->pdev->dev; u32 cause, unmask, status; int i; chained_irq_enter(chip, desc); cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF); unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); status = cause & unmask; /* Process legacy INTx interrupts */ for (i = 0; i < PCI_NUM_INTX; i++) { if (!(status & PCIE_INT_INTX(i))) continue; if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL) dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A'); } chained_irq_exit(chip, desc); } static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* Interrupt support on mvebu emulated bridges is not implemented yet */ if (dev->bus->number == 0) return 0; /* Proper return code 0 == NO_IRQ */ return of_irq_parse_and_map_pci(dev, slot, pin); } static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, const struct resource *res, resource_size_t start, resource_size_t size, resource_size_t align) { if (dev->bus->number != 0) return start; /* * On the PCI-to-PCI bridge side, the I/O windows must have at * least a 64 KB size and the memory windows must have at * least a 1 MB size. Moreover, MBus windows need to have a * base address aligned on their size, and their size must be * a power of two. This means that if the BAR doesn't have a * power of two size, several MBus windows will actually be * created. We need to ensure that the biggest MBus window * (which will be the first one) is aligned on its size, which * explains the rounddown_pow_of_two() being done here. 
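 * For instance, a 1.5 MB memory BAR is split into a 1 MB window followed
 * by a 512 KB window, so its start only needs to be aligned to 1 MB, the
 * size of the largest (first) window.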
*/ if (res->flags & IORESOURCE_IO) return round_up(start, max_t(resource_size_t, SZ_64K, rounddown_pow_of_two(size))); else if (res->flags & IORESOURCE_MEM) return round_up(start, max_t(resource_size_t, SZ_1M, rounddown_pow_of_two(size))); else return start; } static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, struct device_node *np, struct mvebu_pcie_port *port) { int ret = 0; ret = of_address_to_resource(np, 0, &port->regs); if (ret) return (void __iomem *)ERR_PTR(ret); return devm_ioremap_resource(&pdev->dev, &port->regs); } #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) #define DT_TYPE_IO 0x1 #define DT_TYPE_MEM32 0x2 #define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF) #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) static int mvebu_get_tgt_attr(struct device_node *np, int devfn, unsigned long type, unsigned int *tgt, unsigned int *attr) { const int na = 3, ns = 2; const __be32 *range; int rlen, nranges, rangesz, pna, i; *tgt = -1; *attr = -1; range = of_get_property(np, "ranges", &rlen); if (!range) return -EINVAL; pna = of_n_addr_cells(np); rangesz = pna + na + ns; nranges = rlen / sizeof(__be32) / rangesz; for (i = 0; i < nranges; i++, range += rangesz) { u32 flags = of_read_number(range, 1); u32 slot = of_read_number(range + 1, 1); u64 cpuaddr = of_read_number(range + na, pna); unsigned long rtype; if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO) rtype = IORESOURCE_IO; else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) rtype = IORESOURCE_MEM; else continue; if (slot == PCI_SLOT(devfn) && type == rtype) { *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); *attr = DT_CPUADDR_TO_ATTR(cpuaddr); return 0; } } return -ENOENT; } static int mvebu_pcie_suspend(struct device *dev) { struct mvebu_pcie *pcie; int i; pcie = dev_get_drvdata(dev); for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = pcie->ports + i; if (!port->base) continue; port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF); } return 0; } static int mvebu_pcie_resume(struct device *dev) { struct mvebu_pcie *pcie; int i; pcie = dev_get_drvdata(dev); for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = pcie->ports + i; if (!port->base) continue; mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF); mvebu_pcie_setup_hw(port); } return 0; } static void mvebu_pcie_port_clk_put(void *data) { struct mvebu_pcie_port *port = data; clk_put(port->clk); } static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, struct mvebu_pcie_port *port, struct device_node *child) { struct device *dev = &pcie->pdev->dev; u32 slot_power_limit; int ret; u32 num_lanes; port->pcie = pcie; if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) { dev_warn(dev, "ignoring %pOF, missing pcie-port property\n", child); goto skip; } if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) port->lane = 0; if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4) port->is_x4 = true; port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, port->lane); if (!port->name) { ret = -ENOMEM; goto err; } port->devfn = of_pci_get_devfn(child); if (port->devfn < 0) goto skip; if (PCI_FUNC(port->devfn) != 0) { dev_err(dev, "%s: invalid function number, must be zero\n", port->name); goto skip; } ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM, &port->mem_target, &port->mem_attr); if (ret < 0) { dev_err(dev, "%s: cannot get tgt/attr for mem window\n", port->name); goto skip; } if (resource_size(&pcie->io) != 0) { 
mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO, &port->io_target, &port->io_attr); } else { port->io_target = -1; port->io_attr = -1; } /* * Old DT bindings do not contain "intx" interrupt * so do not fail probing driver when interrupt does not exist. */ port->intx_irq = of_irq_get_byname(child, "intx"); if (port->intx_irq == -EPROBE_DEFER) { ret = port->intx_irq; goto err; } if (port->intx_irq <= 0) { dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, " "%pOF does not contain intx interrupt\n", port->name, child); } port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", port->name); if (!port->reset_name) { ret = -ENOMEM; goto err; } port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child), "reset", GPIOD_OUT_HIGH, port->name); ret = PTR_ERR_OR_ZERO(port->reset_gpio); if (ret) { if (ret != -ENOENT) goto err; /* reset gpio is optional */ port->reset_gpio = NULL; devm_kfree(dev, port->reset_name); port->reset_name = NULL; } slot_power_limit = of_pci_get_slot_power_limit(child, &port->slot_power_limit_value, &port->slot_power_limit_scale); if (slot_power_limit) dev_info(dev, "%s: Slot power limit %u.%uW\n", port->name, slot_power_limit / 1000, (slot_power_limit / 100) % 10); port->clk = of_clk_get_by_name(child, NULL); if (IS_ERR(port->clk)) { dev_err(dev, "%s: cannot get clock\n", port->name); goto skip; } ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); if (ret < 0) { clk_put(port->clk); goto err; } return 1; skip: ret = 0; /* In the case of skipping, we need to free these */ devm_kfree(dev, port->reset_name); port->reset_name = NULL; devm_kfree(dev, port->name); port->name = NULL; err: return ret; } /* * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications * of the PCI Express Card Electromechanical Specification, 1.1. */ static int mvebu_pcie_powerup(struct mvebu_pcie_port *port) { int ret; ret = clk_prepare_enable(port->clk); if (ret < 0) return ret; if (port->reset_gpio) { u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000; of_property_read_u32(port->dn, "reset-delay-us", &reset_udelay); udelay(100); gpiod_set_value_cansleep(port->reset_gpio, 0); msleep(reset_udelay / 1000); } return 0; } /* * Power down a PCIe port. Strictly, PCIe requires us to place the card * in D3hot state before asserting PERST#. */ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) { gpiod_set_value_cansleep(port->reset_gpio, 1); clk_disable_unprepare(port->clk); } /* * devm_of_pci_get_host_bridge_resources() only sets up translateable resources, * so we need extra resource setup parsing our special DT properties encoding * the MEM and IO apertures. 
*/ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); int ret; /* Get the PCIe memory aperture */ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); if (resource_size(&pcie->mem) == 0) { dev_err(dev, "invalid memory aperture size\n"); return -EINVAL; } pcie->mem.name = "PCI MEM"; pci_add_resource(&bridge->windows, &pcie->mem); ret = devm_request_resource(dev, &iomem_resource, &pcie->mem); if (ret) return ret; /* Get the PCIe IO aperture */ mvebu_mbus_get_pcie_io_aperture(&pcie->io); if (resource_size(&pcie->io) != 0) { pcie->realio.flags = pcie->io.flags; pcie->realio.start = PCIBIOS_MIN_IO; pcie->realio.end = min_t(resource_size_t, IO_SPACE_LIMIT - SZ_64K, resource_size(&pcie->io) - 1); pcie->realio.name = "PCI I/O"; ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start); if (ret) return ret; pci_add_resource(&bridge->windows, &pcie->realio); ret = devm_request_resource(dev, &ioport_resource, &pcie->realio); if (ret) return ret; } return 0; } static int mvebu_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mvebu_pcie *pcie; struct pci_host_bridge *bridge; struct device_node *np = dev->of_node; struct device_node *child; int num, i, ret; bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; platform_set_drvdata(pdev, pcie); ret = mvebu_pcie_parse_request_resources(pcie); if (ret) return ret; num = of_get_available_child_count(np); pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL); if (!pcie->ports) return -ENOMEM; i = 0; for_each_available_child_of_node(np, child) { struct mvebu_pcie_port *port = &pcie->ports[i]; ret = mvebu_pcie_parse_port(pcie, port, child); if (ret < 0) { of_node_put(child); return ret; } else if (ret == 0) { continue; } port->dn = child; i++; } pcie->nports = i; for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; int irq = port->intx_irq; child = port->dn; if (!child) continue; ret = mvebu_pcie_powerup(port); if (ret < 0) continue; port->base = mvebu_pcie_map_registers(pdev, child, port); if (IS_ERR(port->base)) { dev_err(dev, "%s: cannot map registers\n", port->name); port->base = NULL; mvebu_pcie_powerdown(port); continue; } ret = mvebu_pci_bridge_emul_init(port); if (ret < 0) { dev_err(dev, "%s: cannot init emulated bridge\n", port->name); devm_iounmap(dev, port->base); port->base = NULL; mvebu_pcie_powerdown(port); continue; } if (irq > 0) { ret = mvebu_pcie_init_irq_domain(port); if (ret) { dev_err(dev, "%s: cannot init irq domain\n", port->name); pci_bridge_emul_cleanup(&port->bridge); devm_iounmap(dev, port->base); port->base = NULL; mvebu_pcie_powerdown(port); continue; } irq_set_chained_handler_and_data(irq, mvebu_pcie_irq_handler, port); } /* * PCIe topology exported by mvebu hw is quite complicated. In * reality has something like N fully independent host bridges * where each host bridge has one PCIe Root Port (which acts as * PCI Bridge device). Each host bridge has its own independent * internal registers, independent access to PCI config space, * independent interrupt lines, independent window and memory * access configuration. 
But additionally there is some kind of * peer-to-peer support between PCIe devices behind different * host bridges, limited to forwarding of memory and I/O * transactions (forwarding of error messages and config cycles * is not supported). So we could say there are N independent * PCIe Root Complexes. * * For this kind of setup the DT should have been structured into * N independent PCIe controllers / host bridges. But instead the * structure was defined in the past to put the PCIe Root Ports of all * host bridges onto one bus zero, like in a classic multi-port * Root Complex setup with just one host bridge. * * This means that the pci-mvebu.c driver provides a "virtual" bus 0 * on which it registers all PCIe Root Ports (PCI Bridge devices) * specified in DT by their BDF addresses and virtually routes * PCI config accesses of each PCI Bridge device to the specific PCIe * host bridge. * * Normally a PCI Bridge should choose between Type 0 and Type 1 * config requests based on the primary and secondary bus numbers * configured on the bridge itself. But because the mvebu PCI Bridge * does not have registers for primary and secondary bus numbers * in its config space, it determines the type of config requests * in its own custom way. * * There are two ways in which mvebu determines the type of a config * request. * * 1. If the Secondary Bus Number Enable bit is not set or is not * available (applies to pre-XP PCIe controllers) then Type 0 * is used if the target bus number equals the Local Bus Number (bits * [15:8] in register 0x1a04) and the target device number differs * from the Local Device Number (bits [20:16] in register 0x1a04). * Type 1 is used if the target bus number differs from the Local Bus * Number. And when the target bus number equals the Local Bus Number * and the target device equals the Local Device Number, the request is * routed to the Local PCI Bridge (PCIe Root Port). * * 2. If the Secondary Bus Number Enable bit is set (bit 7 in * register 0x1a2c) then the mvebu hw determines the type of a config * request like a compliant PCI Bridge, based on the primary bus number, * which is configured via the Local Bus Number (bits [15:8] in * register 0x1a04), and the secondary bus number, which is configured * via the Secondary Bus Number (bits [7:0] in register 0x1a2c). * The Local PCI Bridge (PCIe Root Port) is available on the primary bus * as the device with the Local Device Number (bits [20:16] in register * 0x1a04). * * The Secondary Bus Number Enable bit is disabled by default and * option 2 is not available on pre-XP PCIe controllers. Hence * this driver always uses option 1. * * Basically this means that the primary and secondary buses share * one virtual number configured via the Local Bus Number bits, and the * Local Device Number bits determine whether the primary or the * secondary bus is accessed. Set the Local Device Number to 1 and redirect * all writes of the PCI Bridge Secondary Bus Number register to the * Local Bus Number (bits [15:8] in register 0x1a04). * * With this, accesses to devices on buses behind the secondary bus * number work correctly, and config space accesses to device 0 on the * secondary bus number are correctly routed to the secondary bus. * Due to issues described * in mvebu_pcie_setup_hw(), PCI Bridges at the primary bus (zero) * are not accessed directly via PCI config space but rather * indirectly via the kernel emulated PCI bridge driver.
*/ mvebu_pcie_setup_hw(port); mvebu_pcie_set_local_dev_nr(port, 1); mvebu_pcie_set_local_bus_nr(port, 0); } bridge->sysdata = pcie; bridge->ops = &mvebu_pcie_ops; bridge->child_ops = &mvebu_pcie_child_ops; bridge->align_resource = mvebu_pcie_align_resource; bridge->map_irq = mvebu_pcie_map_irq; return pci_host_probe(bridge); } static void mvebu_pcie_remove(struct platform_device *pdev) { struct mvebu_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); u32 cmd, sspl; int i; /* Remove PCI bus with all devices. */ pci_lock_rescan_remove(); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; int irq = port->intx_irq; if (!port->base) continue; /* Disable Root Bridge I/O space, memory space and bus mastering. */ cmd = mvebu_readl(port, PCIE_CMD_OFF); cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); mvebu_writel(port, cmd, PCIE_CMD_OFF); /* Mask all interrupt sources. */ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF); /* Clear all interrupt causes. */ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF); if (irq > 0) irq_set_chained_handler_and_data(irq, NULL, NULL); /* Remove IRQ domains. */ if (port->intx_irq_domain) irq_domain_remove(port->intx_irq_domain); /* Free config space for emulated root bridge. */ pci_bridge_emul_cleanup(&port->bridge); /* Disable sending Set_Slot_Power_Limit PCIe Message. */ sspl = mvebu_readl(port, PCIE_SSPL_OFF); sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE); mvebu_writel(port, sspl, PCIE_SSPL_OFF); /* Disable and clear BARs and windows. */ mvebu_pcie_disable_wins(port); /* Delete PCIe IO and MEM windows. */ if (port->iowin.size) mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size); if (port->memwin.size) mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size); /* Power down card and disable clocks. Must be the last step. */ mvebu_pcie_powerdown(port); } } static const struct of_device_id mvebu_pcie_of_match_table[] = { { .compatible = "marvell,armada-xp-pcie", }, { .compatible = "marvell,armada-370-pcie", }, { .compatible = "marvell,dove-pcie", }, { .compatible = "marvell,kirkwood-pcie", }, {}, }; static const struct dev_pm_ops mvebu_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) }; static struct platform_driver mvebu_pcie_driver = { .driver = { .name = "mvebu-pcie", .of_match_table = mvebu_pcie_of_match_table, .pm = &mvebu_pcie_pm_ops, }, .probe = mvebu_pcie_probe, .remove_new = mvebu_pcie_remove, }; module_platform_driver(mvebu_pcie_driver); MODULE_AUTHOR("Thomas Petazzoni <[email protected]>"); MODULE_AUTHOR("Pali Rohár <[email protected]>"); MODULE_DESCRIPTION("Marvell EBU PCIe controller"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pci-mvebu.c
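The long probe-time comment in pci-mvebu.c above describes how, with the Secondary Bus Number Enable bit clear (option 1), the controller picks between Type 0 and Type 1 config requests and routing to the local Root Port. The following is only a hedged illustration of that decision: a standalone userspace sketch, not kernel code, with hypothetical names and example values.

#include <stdint.h>
#include <stdio.h>

/* Request routing as described in option 1 of the comment above. */
enum cfg_route { ROUTE_LOCAL_BRIDGE, ROUTE_TYPE0, ROUTE_TYPE1 };

/*
 * local_bus models the Local Bus Number    (bits [15:8] of register 0x1a04),
 * local_dev models the Local Device Number (bits [20:16] of register 0x1a04).
 */
static enum cfg_route mvebu_cfg_route(uint8_t local_bus, uint8_t local_dev,
				      uint8_t bus, uint8_t dev)
{
	if (bus != local_bus)
		return ROUTE_TYPE1;		/* target is behind the secondary bus */
	if (dev == local_dev)
		return ROUTE_LOCAL_BRIDGE;	/* the PCIe Root Port itself */
	return ROUTE_TYPE0;			/* device on the secondary bus */
}

int main(void)
{
	/* The driver sets Local Bus Number to 0 and Local Device Number to 1. */
	const uint8_t local_bus = 0, local_dev = 1;

	printf("%d\n", mvebu_cfg_route(local_bus, local_dev, 0, 1)); /* 0: local bridge */
	printf("%d\n", mvebu_cfg_route(local_bus, local_dev, 0, 0)); /* 1: Type 0 */
	printf("%d\n", mvebu_cfg_route(local_bus, local_dev, 1, 0)); /* 2: Type 1 */
	return 0;
}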
// SPDX-License-Identifier: GPL-2.0 /* * Driver for handling the PCIe controller errors on * HiSilicon HIP SoCs. * * Copyright (c) 2020 HiSilicon Limited. */ #include <linux/acpi.h> #include <acpi/ghes.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/kfifo.h> #include <linux/spinlock.h> /* HISI PCIe controller error definitions */ #define HISI_PCIE_ERR_MISC_REGS 33 #define HISI_PCIE_LOCAL_VALID_VERSION BIT(0) #define HISI_PCIE_LOCAL_VALID_SOC_ID BIT(1) #define HISI_PCIE_LOCAL_VALID_SOCKET_ID BIT(2) #define HISI_PCIE_LOCAL_VALID_NIMBUS_ID BIT(3) #define HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID BIT(4) #define HISI_PCIE_LOCAL_VALID_CORE_ID BIT(5) #define HISI_PCIE_LOCAL_VALID_PORT_ID BIT(6) #define HISI_PCIE_LOCAL_VALID_ERR_TYPE BIT(7) #define HISI_PCIE_LOCAL_VALID_ERR_SEVERITY BIT(8) #define HISI_PCIE_LOCAL_VALID_ERR_MISC 9 static guid_t hisi_pcie_sec_guid = GUID_INIT(0xB2889FC9, 0xE7D7, 0x4F9D, 0xA8, 0x67, 0xAF, 0x42, 0xE9, 0x8B, 0xE7, 0x72); /* * Firmware reports the socket port ID where the error occurred. These * macros convert that to the core ID and core port ID required by the * ACPI reset method. */ #define HISI_PCIE_PORT_ID(core, v) (((v) >> 1) + ((core) << 3)) #define HISI_PCIE_CORE_ID(v) ((v) >> 3) #define HISI_PCIE_CORE_PORT_ID(v) (((v) & 7) << 1) struct hisi_pcie_error_data { u64 val_bits; u8 version; u8 soc_id; u8 socket_id; u8 nimbus_id; u8 sub_module_id; u8 core_id; u8 port_id; u8 err_severity; u16 err_type; u8 reserv[2]; u32 err_misc[HISI_PCIE_ERR_MISC_REGS]; }; struct hisi_pcie_error_private { struct notifier_block nb; struct device *dev; }; enum hisi_pcie_submodule_id { HISI_PCIE_SUB_MODULE_ID_AP, HISI_PCIE_SUB_MODULE_ID_TL, HISI_PCIE_SUB_MODULE_ID_MAC, HISI_PCIE_SUB_MODULE_ID_DL, HISI_PCIE_SUB_MODULE_ID_SDI, }; static const char * const hisi_pcie_sub_module[] = { [HISI_PCIE_SUB_MODULE_ID_AP] = "AP Layer", [HISI_PCIE_SUB_MODULE_ID_TL] = "TL Layer", [HISI_PCIE_SUB_MODULE_ID_MAC] = "MAC Layer", [HISI_PCIE_SUB_MODULE_ID_DL] = "DL Layer", [HISI_PCIE_SUB_MODULE_ID_SDI] = "SDI Layer", }; enum hisi_pcie_err_severity { HISI_PCIE_ERR_SEV_RECOVERABLE, HISI_PCIE_ERR_SEV_FATAL, HISI_PCIE_ERR_SEV_CORRECTED, HISI_PCIE_ERR_SEV_NONE, }; static const char * const hisi_pcie_error_sev[] = { [HISI_PCIE_ERR_SEV_RECOVERABLE] = "recoverable", [HISI_PCIE_ERR_SEV_FATAL] = "fatal", [HISI_PCIE_ERR_SEV_CORRECTED] = "corrected", [HISI_PCIE_ERR_SEV_NONE] = "none", }; static const char *hisi_pcie_get_string(const char * const *array, size_t n, u32 id) { u32 index; for (index = 0; index < n; index++) { if (index == id && array[index]) return array[index]; } return "unknown"; } static int hisi_pcie_port_reset(struct platform_device *pdev, u32 chip_id, u32 port_id) { struct device *dev = &pdev->dev; acpi_handle handle = ACPI_HANDLE(dev); union acpi_object arg[3]; struct acpi_object_list arg_list; acpi_status s; unsigned long long data = 0; arg[0].type = ACPI_TYPE_INTEGER; arg[0].integer.value = chip_id; arg[1].type = ACPI_TYPE_INTEGER; arg[1].integer.value = HISI_PCIE_CORE_ID(port_id); arg[2].type = ACPI_TYPE_INTEGER; arg[2].integer.value = HISI_PCIE_CORE_PORT_ID(port_id); arg_list.count = 3; arg_list.pointer = arg; s = acpi_evaluate_integer(handle, "RST", &arg_list, &data); if (ACPI_FAILURE(s)) { dev_err(dev, "No RST method\n"); return -EIO; } if (data) { dev_err(dev, "Failed to Reset\n"); return -EIO; } return 0; } static int hisi_pcie_port_do_recovery(struct platform_device *dev, u32 chip_id, u32 port_id) { acpi_status s; struct 
device *device = &dev->dev; acpi_handle root_handle = ACPI_HANDLE(device); struct acpi_pci_root *pci_root; struct pci_bus *root_bus; struct pci_dev *pdev; u32 domain, busnr, devfn; s = acpi_get_parent(root_handle, &root_handle); if (ACPI_FAILURE(s)) return -ENODEV; pci_root = acpi_pci_find_root(root_handle); if (!pci_root) return -ENODEV; root_bus = pci_root->bus; domain = pci_root->segment; busnr = root_bus->number; devfn = PCI_DEVFN(port_id, 0); pdev = pci_get_domain_bus_and_slot(domain, busnr, devfn); if (!pdev) { dev_info(device, "Fail to get root port %04x:%02x:%02x.%d device\n", domain, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); return -ENODEV; } pci_stop_and_remove_bus_device_locked(pdev); pci_dev_put(pdev); if (hisi_pcie_port_reset(dev, chip_id, port_id)) return -EIO; /* * The initialization time of subordinate devices after * hot reset is no more than 1s, which is required by * the PCI spec v5.0 sec 6.6.1. The time will shorten * if Readiness Notifications mechanisms are used. But * wait 1s here to adapt any conditions. */ ssleep(1UL); /* add root port and downstream devices */ pci_lock_rescan_remove(); pci_rescan_bus(root_bus); pci_unlock_rescan_remove(); return 0; } static void hisi_pcie_handle_error(struct platform_device *pdev, const struct hisi_pcie_error_data *edata) { struct device *dev = &pdev->dev; int idx, rc; const unsigned long valid_bits[] = {BITMAP_FROM_U64(edata->val_bits)}; if (edata->val_bits == 0) { dev_warn(dev, "%s: no valid error information\n", __func__); return; } dev_info(dev, "\nHISI : HIP : PCIe controller error\n"); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOC_ID) dev_info(dev, "Table version = %d\n", edata->version); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOCKET_ID) dev_info(dev, "Socket ID = %d\n", edata->socket_id); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_NIMBUS_ID) dev_info(dev, "Nimbus ID = %d\n", edata->nimbus_id); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID) dev_info(dev, "Sub Module = %s\n", hisi_pcie_get_string(hisi_pcie_sub_module, ARRAY_SIZE(hisi_pcie_sub_module), edata->sub_module_id)); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_CORE_ID) dev_info(dev, "Core ID = core%d\n", edata->core_id); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_PORT_ID) dev_info(dev, "Port ID = port%d\n", edata->port_id); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_SEVERITY) dev_info(dev, "Error severity = %s\n", hisi_pcie_get_string(hisi_pcie_error_sev, ARRAY_SIZE(hisi_pcie_error_sev), edata->err_severity)); if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_TYPE) dev_info(dev, "Error type = 0x%x\n", edata->err_type); dev_info(dev, "Reg Dump:\n"); idx = HISI_PCIE_LOCAL_VALID_ERR_MISC; for_each_set_bit_from(idx, valid_bits, HISI_PCIE_LOCAL_VALID_ERR_MISC + HISI_PCIE_ERR_MISC_REGS) dev_info(dev, "ERR_MISC_%d = 0x%x\n", idx - HISI_PCIE_LOCAL_VALID_ERR_MISC, edata->err_misc[idx - HISI_PCIE_LOCAL_VALID_ERR_MISC]); if (edata->err_severity != HISI_PCIE_ERR_SEV_RECOVERABLE) return; /* Recovery for the PCIe controller errors, try reset * PCI port for the error recovery */ rc = hisi_pcie_port_do_recovery(pdev, edata->socket_id, HISI_PCIE_PORT_ID(edata->core_id, edata->port_id)); if (rc) dev_info(dev, "fail to do hisi pcie port reset\n"); } static int hisi_pcie_notify_error(struct notifier_block *nb, unsigned long event, void *data) { struct acpi_hest_generic_data *gdata = data; const struct hisi_pcie_error_data *error_data = acpi_hest_get_payload(gdata); struct hisi_pcie_error_private *priv; struct device *dev; struct platform_device *pdev; guid_t 
err_sec_guid; u8 socket; import_guid(&err_sec_guid, gdata->section_type); if (!guid_equal(&err_sec_guid, &hisi_pcie_sec_guid)) return NOTIFY_DONE; priv = container_of(nb, struct hisi_pcie_error_private, nb); dev = priv->dev; if (device_property_read_u8(dev, "socket", &socket)) return NOTIFY_DONE; if (error_data->socket_id != socket) return NOTIFY_DONE; pdev = container_of(dev, struct platform_device, dev); hisi_pcie_handle_error(pdev, error_data); return NOTIFY_OK; } static int hisi_pcie_error_handler_probe(struct platform_device *pdev) { struct hisi_pcie_error_private *priv; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->nb.notifier_call = hisi_pcie_notify_error; priv->dev = &pdev->dev; ret = ghes_register_vendor_record_notifier(&priv->nb); if (ret) { dev_err(&pdev->dev, "Failed to register hisi pcie controller error handler with apei\n"); return ret; } platform_set_drvdata(pdev, priv); return 0; } static void hisi_pcie_error_handler_remove(struct platform_device *pdev) { struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev); ghes_unregister_vendor_record_notifier(&priv->nb); } static const struct acpi_device_id hisi_pcie_acpi_match[] = { { "HISI0361", 0 }, { } }; static struct platform_driver hisi_pcie_error_handler_driver = { .driver = { .name = "hisi-pcie-error-handler", .acpi_match_table = hisi_pcie_acpi_match, }, .probe = hisi_pcie_error_handler_probe, .remove_new = hisi_pcie_error_handler_remove, }; module_platform_driver(hisi_pcie_error_handler_driver); MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
linux-master
drivers/pci/controller/pcie-hisi-error.c
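pcie-hisi-error.c above converts the firmware-reported socket port ID into the core ID and core port ID expected by the ACPI "RST" method using the HISI_PCIE_PORT_ID(), HISI_PCIE_CORE_ID() and HISI_PCIE_CORE_PORT_ID() macros. A small standalone userspace example (illustrative only; the input values are hypothetical) shows the round trip:

#include <stdio.h>

/* Same conversions as the macros in pcie-hisi-error.c. */
#define HISI_PCIE_PORT_ID(core, v)	(((v) >> 1) + ((core) << 3))
#define HISI_PCIE_CORE_ID(v)		((v) >> 3)
#define HISI_PCIE_CORE_PORT_ID(v)	(((v) & 7) << 1)

int main(void)
{
	/* Example values as they might be reported by firmware (hypothetical). */
	unsigned int core_id = 1, port_id = 4;

	unsigned int id = HISI_PCIE_PORT_ID(core_id, port_id);

	printf("combined port id     = %u\n", id);				/* (4 >> 1) + (1 << 3) = 10 */
	printf("core id for RST      = %u\n", HISI_PCIE_CORE_ID(id));		/* 10 >> 3 = 1 */
	printf("core port id for RST = %u\n", HISI_PCIE_CORE_PORT_ID(id));	/* (10 & 7) << 1 = 4 */
	return 0;
}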
// SPDX-License-Identifier: GPL-2.0+ /* Copyright (C) 2009 - 2019 Broadcom */ #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/ioport.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/printk.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include "../pci.h" /* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */ #define BRCM_PCIE_CAP_REGS 0x00ac /* Broadcom STB PCIe Register Offsets */ #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188 #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc #define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0 #define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 #define PCIE_RC_DL_MDIO_ADDR 0x1100 #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 #define PCIE_MISC_MISC_CTRL 0x4008 #define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80 #define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400 #define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000 #define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000 #define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000 #define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK 0xf8000000 #define PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK 0x07c00000 #define PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK 0x0000001f #define SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c #define PCIE_MEM_WIN0_LO(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010 #define PCIE_MEM_WIN0_HI(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8) #define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c #define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f #define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034 #define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f #define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038 #define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c #define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f #define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044 #define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048 #define PCIE_MISC_MSI_DATA_CONFIG 0x404c #define PCIE_MISC_MSI_DATA_CONFIG_VAL_32 0xffe06540 #define PCIE_MISC_MSI_DATA_CONFIG_VAL_8 0xfff86540 #define PCIE_MISC_PCIE_CTRL 0x4064 #define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK 0x1 #define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK 0x4 #define PCIE_MISC_PCIE_STATUS 0x4068 #define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK 0x80 #define PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK 0x20 #define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK 0x10 #define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK 0x40 #define PCIE_MISC_REVISION 0x406c #define BRCM_PCIE_HW_REV_33 0x0303 #define BRCM_PCIE_HW_REV_3_20 0x0320 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000 #define 
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0 #define PCIE_MEM_WIN0_BASE_LIMIT(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI 0x4080 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK 0xff #define PCIE_MEM_WIN0_BASE_HI(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8) #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI 0x4084 #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK 0xff #define PCIE_MEM_WIN0_LIMIT_HI(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8) #define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000 #define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000 #define PCIE_INTR2_CPU_BASE 0x4300 #define PCIE_MSI_INTR2_BASE 0x4500 /* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */ #define MSI_INT_STATUS 0x0 #define MSI_INT_CLR 0x8 #define MSI_INT_MASK_SET 0x10 #define MSI_INT_MASK_CLR 0x14 #define PCIE_EXT_CFG_DATA 0x8000 #define PCIE_EXT_CFG_INDEX 0x9000 #define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1 #define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0 #define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2 #define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1 #define RGR1_SW_INIT_1_INIT_7278_MASK 0x1 #define RGR1_SW_INIT_1_INIT_7278_SHIFT 0x0 /* PCIe parameters */ #define BRCM_NUM_PCIE_OUT_WINS 0x4 #define BRCM_INT_PCI_MSI_NR 32 #define BRCM_INT_PCI_MSI_LEGACY_NR 8 #define BRCM_INT_PCI_MSI_SHIFT 0 #define BRCM_INT_PCI_MSI_MASK GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0) #define BRCM_INT_PCI_MSI_LEGACY_MASK GENMASK(31, \ 32 - BRCM_INT_PCI_MSI_LEGACY_NR) /* MSI target addresses */ #define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL #define BRCM_MSI_TARGET_ADDR_GT_4GB 0xffffffffcULL /* MDIO registers */ #define MDIO_PORT0 0x0 #define MDIO_DATA_MASK 0x7fffffff #define MDIO_PORT_MASK 0xf0000 #define MDIO_REGAD_MASK 0xffff #define MDIO_CMD_MASK 0xfff00000 #define MDIO_CMD_READ 0x1 #define MDIO_CMD_WRITE 0x0 #define MDIO_DATA_DONE_MASK 0x80000000 #define MDIO_RD_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 1 : 0) #define MDIO_WT_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 
0 : 1) #define SSC_REGS_ADDR 0x1100 #define SET_ADDR_OFFSET 0x1f #define SSC_CNTL_OFFSET 0x2 #define SSC_CNTL_OVRD_EN_MASK 0x8000 #define SSC_CNTL_OVRD_VAL_MASK 0x4000 #define SSC_STATUS_OFFSET 0x1 #define SSC_STATUS_SSC_MASK 0x400 #define SSC_STATUS_PLL_LOCK_MASK 0x800 #define PCIE_BRCM_MAX_MEMC 3 #define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX]) #define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA]) #define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1]) /* Rescal registers */ #define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS 0x3 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK 0x4 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT 0x2 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK 0x2 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT 0x1 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK 0x1 #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT 0x0 /* Forward declarations */ struct brcm_pcie; enum { RGR1_SW_INIT_1, EXT_CFG_INDEX, EXT_CFG_DATA, }; enum { RGR1_SW_INIT_1_INIT_MASK, RGR1_SW_INIT_1_INIT_SHIFT, }; enum pcie_type { GENERIC, BCM7425, BCM7435, BCM4908, BCM7278, BCM2711, }; struct pcie_cfg_data { const int *offsets; const enum pcie_type type; void (*perst_set)(struct brcm_pcie *pcie, u32 val); void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); }; struct subdev_regulators { unsigned int num_supplies; struct regulator_bulk_data supplies[]; }; struct brcm_msi { struct device *dev; void __iomem *base; struct device_node *np; struct irq_domain *msi_domain; struct irq_domain *inner_domain; struct mutex lock; /* guards the alloc/free operations */ u64 target_addr; int irq; DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR); bool legacy; /* Some chips have MSIs in bits [31..24] of a shared register. */ int legacy_shift; int nr; /* No. 
of MSI available, depends on chip */ /* This is the base pointer for interrupt status/set/clr regs */ void __iomem *intr_base; }; /* Internal PCIe Host Controller Information.*/ struct brcm_pcie { struct device *dev; void __iomem *base; struct clk *clk; struct device_node *np; bool ssc; int gen; u64 msi_target_addr; struct brcm_msi *msi; const int *reg_offsets; enum pcie_type type; struct reset_control *rescal; struct reset_control *perst_reset; int num_memc; u64 memc_size[PCIE_BRCM_MAX_MEMC]; u32 hw_rev; void (*perst_set)(struct brcm_pcie *pcie, u32 val); void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); struct subdev_regulators *sr; bool ep_wakeup_capable; }; static inline bool is_bmips(const struct brcm_pcie *pcie) { return pcie->type == BCM7435 || pcie->type == BCM7425; } /* * This is to convert the size of the inbound "BAR" region to the * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE */ static int brcm_pcie_encode_ibar_size(u64 size) { int log2_in = ilog2(size); if (log2_in >= 12 && log2_in <= 15) /* Covers 4KB to 32KB (inclusive) */ return (log2_in - 12) + 0x1c; else if (log2_in >= 16 && log2_in <= 35) /* Covers 64KB to 32GB, (inclusive) */ return log2_in - 15; /* Something is awry so disable */ return 0; } static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd) { u32 pkt = 0; pkt |= FIELD_PREP(MDIO_PORT_MASK, port); pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad); pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd); return pkt; } /* negative return value indicates error */ static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val) { u32 data; int err; writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ), base + PCIE_RC_DL_MDIO_ADDR); readl(base + PCIE_RC_DL_MDIO_ADDR); err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_RD_DATA, data, MDIO_RD_DONE(data), 10, 100); *val = FIELD_GET(MDIO_DATA_MASK, data); return err; } /* negative return value indicates error */ static int brcm_pcie_mdio_write(void __iomem *base, u8 port, u8 regad, u16 wrdata) { u32 data; int err; writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE), base + PCIE_RC_DL_MDIO_ADDR); readl(base + PCIE_RC_DL_MDIO_ADDR); writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA); err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data, MDIO_WT_DONE(data), 10, 100); return err; } /* * Configures device for Spread Spectrum Clocking (SSC) mode; a negative * return value indicates error. */ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) { int pll, ssc; int ret; u32 tmp; ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, SSC_REGS_ADDR); if (ret < 0) return ret; ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0, SSC_CNTL_OFFSET, &tmp); if (ret < 0) return ret; u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK); u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK); ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SSC_CNTL_OFFSET, tmp); if (ret < 0) return ret; usleep_range(1000, 2000); ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0, SSC_STATUS_OFFSET, &tmp); if (ret < 0) return ret; ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp); pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp); return ssc && pll ? 
0 : -EIO; } /* Limits operation to a specific generation (1, 2, or 3) */ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); lnkctl2 = (lnkctl2 & ~0xf) | gen; writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); } static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie, unsigned int win, u64 cpu_addr, u64 pcie_addr, u64 size) { u32 cpu_addr_mb_high, limit_addr_mb_high; phys_addr_t cpu_addr_mb, limit_addr_mb; int high_addr_shift; u32 tmp; /* Set the base of the pcie_addr window */ writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win)); writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win)); /* Write the addr base & limit lower bits (in MBs) */ cpu_addr_mb = cpu_addr / SZ_1M; limit_addr_mb = (cpu_addr + size - 1) / SZ_1M; tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win)); u32p_replace_bits(&tmp, cpu_addr_mb, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK); u32p_replace_bits(&tmp, limit_addr_mb, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK); writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win)); if (is_bmips(pcie)) return; /* Write the cpu & limit addr upper bits */ high_addr_shift = HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK); cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift; tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win)); u32p_replace_bits(&tmp, cpu_addr_mb_high, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK); writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win)); limit_addr_mb_high = limit_addr_mb >> high_addr_shift; tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win)); u32p_replace_bits(&tmp, limit_addr_mb_high, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK); writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win)); } static struct irq_chip brcm_msi_irq_chip = { .name = "BRCM STB PCIe MSI", .irq_ack = irq_chip_ack_parent, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info brcm_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI), .chip = &brcm_msi_irq_chip, }; static void brcm_pcie_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long status; struct brcm_msi *msi; struct device *dev; u32 bit; chained_irq_enter(chip, desc); msi = irq_desc_get_handler_data(desc); dev = msi->dev; status = readl(msi->intr_base + MSI_INT_STATUS); status >>= msi->legacy_shift; for_each_set_bit(bit, &status, msi->nr) { int ret; ret = generic_handle_domain_irq(msi->inner_domain, bit); if (ret) dev_dbg(dev, "unexpected MSI\n"); } chained_irq_exit(chip, desc); } static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct brcm_msi *msi = irq_data_get_irq_chip_data(data); msg->address_lo = lower_32_bits(msi->target_addr); msg->address_hi = upper_32_bits(msi->target_addr); msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq; } static int brcm_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static void brcm_msi_ack_irq(struct irq_data *data) { struct brcm_msi *msi = irq_data_get_irq_chip_data(data); const int shift_amt = data->hwirq + msi->legacy_shift; writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR); } static 
struct irq_chip brcm_msi_bottom_irq_chip = { .name = "BRCM STB MSI", .irq_compose_msi_msg = brcm_msi_compose_msi_msg, .irq_set_affinity = brcm_msi_set_affinity, .irq_ack = brcm_msi_ack_irq, }; static int brcm_msi_alloc(struct brcm_msi *msi, unsigned int nr_irqs) { int hwirq; mutex_lock(&msi->lock); hwirq = bitmap_find_free_region(msi->used, msi->nr, order_base_2(nr_irqs)); mutex_unlock(&msi->lock); return hwirq; } static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq, unsigned int nr_irqs) { mutex_lock(&msi->lock); bitmap_release_region(msi->used, hwirq, order_base_2(nr_irqs)); mutex_unlock(&msi->lock); } static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct brcm_msi *msi = domain->host_data; int hwirq, i; hwirq = brcm_msi_alloc(msi, nr_irqs); if (hwirq < 0) return hwirq; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, &brcm_msi_bottom_irq_chip, domain->host_data, handle_edge_irq, NULL, NULL); return 0; } static void brcm_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct brcm_msi *msi = irq_data_get_irq_chip_data(d); brcm_msi_free(msi, d->hwirq, nr_irqs); } static const struct irq_domain_ops msi_domain_ops = { .alloc = brcm_irq_domain_alloc, .free = brcm_irq_domain_free, }; static int brcm_allocate_domains(struct brcm_msi *msi) { struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np); struct device *dev = msi->dev; msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi); if (!msi->inner_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &brcm_msi_domain_info, msi->inner_domain); if (!msi->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); irq_domain_remove(msi->inner_domain); return -ENOMEM; } return 0; } static void brcm_free_domains(struct brcm_msi *msi) { irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } static void brcm_msi_remove(struct brcm_pcie *pcie) { struct brcm_msi *msi = pcie->msi; if (!msi) return; irq_set_chained_handler_and_data(msi->irq, NULL, NULL); brcm_free_domains(msi); } static void brcm_msi_set_regs(struct brcm_msi *msi) { u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK : BRCM_INT_PCI_MSI_MASK; writel(val, msi->intr_base + MSI_INT_MASK_CLR); writel(val, msi->intr_base + MSI_INT_CLR); /* * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI * enable, which we set to 1. */ writel(lower_32_bits(msi->target_addr) | 0x1, msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO); writel(upper_32_bits(msi->target_addr), msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI); val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32; writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG); } static int brcm_pcie_enable_msi(struct brcm_pcie *pcie) { struct brcm_msi *msi; int irq, ret; struct device *dev = pcie->dev; irq = irq_of_parse_and_map(dev->of_node, 1); if (irq <= 0) { dev_err(dev, "cannot map MSI interrupt\n"); return -ENODEV; } msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL); if (!msi) return -ENOMEM; mutex_init(&msi->lock); msi->dev = dev; msi->base = pcie->base; msi->np = pcie->np; msi->target_addr = pcie->msi_target_addr; msi->irq = irq; msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33; /* * Sanity check to make sure that the 'used' bitmap in struct brcm_msi * is large enough. 
*/ BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR); if (msi->legacy) { msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE; msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR; msi->legacy_shift = 24; } else { msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE; msi->nr = BRCM_INT_PCI_MSI_NR; msi->legacy_shift = 0; } ret = brcm_allocate_domains(msi); if (ret) return ret; irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi); brcm_msi_set_regs(msi); pcie->msi = msi; return 0; } /* The controller is capable of serving in both RC and EP roles */ static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie) { void __iomem *base = pcie->base; u32 val = readl(base + PCIE_MISC_PCIE_STATUS); return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val); } static bool brcm_pcie_link_up(struct brcm_pcie *pcie) { u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS); u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val); u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val); return dla && plu; } static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct brcm_pcie *pcie = bus->sysdata; void __iomem *base = pcie->base; int idx; /* Accesses to the RC go right to the RC registers if !devfn */ if (pci_is_root_bus(bus)) return devfn ? NULL : base + PCIE_ECAM_REG(where); /* An access to our HW w/o link-up will cause a CPU Abort */ if (!brcm_pcie_link_up(pcie)) return NULL; /* For devices, write to the config space index register */ idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0); writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where); } static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct brcm_pcie *pcie = bus->sysdata; void __iomem *base = pcie->base; int idx; /* Accesses to the RC go right to the RC registers if !devfn */ if (pci_is_root_bus(bus)) return devfn ? 
NULL : base + PCIE_ECAM_REG(where); /* An access to our HW w/o link-up will cause a CPU Abort */ if (!brcm_pcie_link_up(pcie)) return NULL; /* For devices, write to the config space index register */ idx = PCIE_ECAM_OFFSET(bus->number, devfn, where); writel(idx, base + IDX_ADDR(pcie)); return base + DATA_ADDR(pcie); } static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val) { u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK; u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT; tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); tmp = (tmp & ~mask) | ((val << shift) & mask); writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); } static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val) { u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK; u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT; tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); tmp = (tmp & ~mask) | ((val << shift) & mask); writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); } static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val) { if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n")) return; if (val) reset_control_assert(pcie->perst_reset); else reset_control_deassert(pcie->perst_reset); } static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) { u32 tmp; /* Perst bit has moved and assert value is 0 */ tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL); u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK); writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL); } static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) { u32 tmp; tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK); writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); } static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, u64 *rc_bar2_size, u64 *rc_bar2_offset) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); struct resource_entry *entry; struct device *dev = pcie->dev; u64 lowest_pcie_addr = ~(u64)0; int ret, i = 0; u64 size = 0; resource_list_for_each_entry(entry, &bridge->dma_ranges) { u64 pcie_beg = entry->res->start - entry->offset; size += entry->res->end - entry->res->start + 1; if (pcie_beg < lowest_pcie_addr) lowest_pcie_addr = pcie_beg; } if (lowest_pcie_addr == ~(u64)0) { dev_err(dev, "DT node has no dma-ranges\n"); return -EINVAL; } ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1, PCIE_BRCM_MAX_MEMC); if (ret <= 0) { /* Make an educated guess */ pcie->num_memc = 1; pcie->memc_size[0] = 1ULL << fls64(size - 1); } else { pcie->num_memc = ret; } /* Each memc is viewed through a "port" that is a power of 2 */ for (i = 0, size = 0; i < pcie->num_memc; i++) size += pcie->memc_size[i]; /* System memory starts at this address in PCIe-space */ *rc_bar2_offset = lowest_pcie_addr; /* The sum of all memc views must also be a power of 2 */ *rc_bar2_size = 1ULL << fls64(size - 1); /* * We validate the inbound memory view even though we should trust * whatever the device-tree provides. This is because of an HW issue on * early Raspberry Pi 4's revisions (bcm2711). It turns out its * firmware has to dynamically edit dma-ranges due to a bug on the * PCIe controller integration, which prohibits any access above the * lower 3GB of memory. 
Given this, we decided to keep the dma-ranges * in check, avoiding hard to debug device-tree related issues in the * future: * * The PCIe host controller by design must set the inbound viewport to * be a contiguous arrangement of all of the system's memory. In * addition, its size mut be a power of two. To further complicate * matters, the viewport must start on a pcie-address that is aligned * on a multiple of its size. If a portion of the viewport does not * represent system memory -- e.g. 3GB of memory requires a 4GB * viewport -- we can map the outbound memory in or after 3GB and even * though the viewport will overlap the outbound memory the controller * will know to send outbound memory downstream and everything else * upstream. * * For example: * * - The best-case scenario, memory up to 3GB, is to place the inbound * region in the first 4GB of pcie-space, as some legacy devices can * only address 32bits. We would also like to put the MSI under 4GB * as well, since some devices require a 32bit MSI target address. * * - If the system memory is 4GB or larger we cannot start the inbound * region at location 0 (since we have to allow some space for * outbound memory @ 3GB). So instead it will start at the 1x * multiple of its size */ if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) || (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) { dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n", *rc_bar2_size, *rc_bar2_offset); return -EINVAL; } return 0; } static int brcm_pcie_setup(struct brcm_pcie *pcie) { u64 rc_bar2_offset, rc_bar2_size; void __iomem *base = pcie->base; struct pci_host_bridge *bridge; struct resource_entry *entry; u32 tmp, burst, aspm_support; int num_out_wins = 0; int ret, memc; /* Reset the bridge */ pcie->bridge_sw_init_set(pcie, 1); /* Ensure that PERST# is asserted; some bootloaders may deassert it. */ if (pcie->type == BCM2711) pcie->perst_set(pcie, 1); usleep_range(100, 200); /* Take the bridge out of reset */ pcie->bridge_sw_init_set(pcie, 0); tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); if (is_bmips(pcie)) tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; else tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); /* Wait for SerDes to be stable */ usleep_range(100, 200); /* * SCB_MAX_BURST_SIZE is a two bit field. For GENERIC chips it * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it * is encoded as 0=Rsvd, 1=128, 2=256, 3=512. 
*/ if (is_bmips(pcie)) burst = 0x1; /* 256 bytes */ else if (pcie->type == BCM2711) burst = 0x0; /* 128 bytes */ else if (pcie->type == BCM7278) burst = 0x3; /* 512 bytes */ else burst = 0x2; /* 512 bytes */ /* * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN, * RCB_MPS_MODE, RCB_64B_MODE */ tmp = readl(base + PCIE_MISC_MISC_CTRL); u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK); u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK); u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK); u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK); u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK); writel(tmp, base + PCIE_MISC_MISC_CTRL); ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size, &rc_bar2_offset); if (ret) return ret; tmp = lower_32_bits(rc_bar2_offset); u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size), PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK); writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO); writel(upper_32_bits(rc_bar2_offset), base + PCIE_MISC_RC_BAR2_CONFIG_HI); tmp = readl(base + PCIE_MISC_MISC_CTRL); for (memc = 0; memc < pcie->num_memc; memc++) { u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15; if (memc == 0) u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0)); else if (memc == 1) u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1)); else if (memc == 2) u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2)); } writel(tmp, base + PCIE_MISC_MISC_CTRL); /* * We ideally want the MSI target address to be located in the 32bit * addressable memory area. Some devices might depend on it. This is * possible either when the inbound window is located above the lower * 4GB or when the inbound area is smaller than 4GB (taking into * account the rounding-up we're forced to perform). */ if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G) pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB; else pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB; if (!brcm_pcie_rc_mode(pcie)) { dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n"); return -EINVAL; } /* disable the PCIe->GISB memory window (RC_BAR1) */ tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO); tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK; writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO); /* disable the PCIe->SCB memory window (RC_BAR3) */ tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO); tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK; writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO); /* Don't advertise L0s capability if 'aspm-no-l0s' */ aspm_support = PCIE_LINK_STATE_L1; if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) aspm_support |= PCIE_LINK_STATE_L0S; tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); u32p_replace_bits(&tmp, aspm_support, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); /* * For config space accesses on the RC, show the right class for * a PCIe-PCIe bridge (the default setting is to be EP mode). 
*/ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3); u32p_replace_bits(&tmp, 0x060400, PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); bridge = pci_host_bridge_from_priv(pcie); resource_list_for_each_entry(entry, &bridge->windows) { struct resource *res = entry->res; if (resource_type(res) != IORESOURCE_MEM) continue; if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) { dev_err(pcie->dev, "too many outbound wins\n"); return -EINVAL; } if (is_bmips(pcie)) { u64 start = res->start; unsigned int j, nwins = resource_size(res) / SZ_128M; /* bmips PCIe outbound windows have a 128MB max size */ if (nwins > BRCM_NUM_PCIE_OUT_WINS) nwins = BRCM_NUM_PCIE_OUT_WINS; for (j = 0; j < nwins; j++, start += SZ_128M) brcm_pcie_set_outbound_win(pcie, j, start, start - entry->offset, SZ_128M); break; } brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start, res->start - entry->offset, resource_size(res)); num_out_wins++; } /* PCIe->SCB endian mode for BAR */ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); return 0; } static int brcm_pcie_start_link(struct brcm_pcie *pcie) { struct device *dev = pcie->dev; void __iomem *base = pcie->base; u16 nlw, cls, lnksta; bool ssc_good = false; u32 tmp; int ret, i; /* Unassert the fundamental reset */ pcie->perst_set(pcie, 0); /* * Wait for 100ms after PERST# deassertion; see PCIe CEM specification * sections 2.2, PCIe r5.0, 6.6.1. */ msleep(100); /* * Give the RC/EP even more time to wake up, before trying to * configure RC. Intermittently check status for link-up, up to a * total of 100ms. */ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) msleep(5); if (!brcm_pcie_link_up(pcie)) { dev_err(dev, "link down\n"); return -ENODEV; } if (pcie->gen) brcm_pcie_set_gen(pcie, pcie->gen); if (pcie->ssc) { ret = brcm_pcie_set_ssc(pcie); if (ret == 0) ssc_good = true; else dev_err(dev, "failed attempt to enter ssc mode\n"); } lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA); cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta); nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); dev_info(dev, "link up, %s x%u %s\n", pci_speed_string(pcie_link_speed[cls]), nlw, ssc_good ? "(SSC)" : "(!SSC)"); /* * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1. 
*/ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); return 0; } static const char * const supplies[] = { "vpcie3v3", "vpcie3v3aux", "vpcie12v", }; static void *alloc_subdev_regulators(struct device *dev) { const size_t size = sizeof(struct subdev_regulators) + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies); struct subdev_regulators *sr; int i; sr = devm_kzalloc(dev, size, GFP_KERNEL); if (sr) { sr->num_supplies = ARRAY_SIZE(supplies); for (i = 0; i < ARRAY_SIZE(supplies); i++) sr->supplies[i].supply = supplies[i]; } return sr; } static int brcm_pcie_add_bus(struct pci_bus *bus) { struct brcm_pcie *pcie = bus->sysdata; struct device *dev = &bus->dev; struct subdev_regulators *sr; int ret; if (!bus->parent || !pci_is_root_bus(bus->parent)) return 0; if (dev->of_node) { sr = alloc_subdev_regulators(dev); if (!sr) { dev_info(dev, "Can't allocate regulators for downstream device\n"); goto no_regulators; } pcie->sr = sr; ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) { dev_info(dev, "No regulators for downstream device\n"); goto no_regulators; } ret = regulator_bulk_enable(sr->num_supplies, sr->supplies); if (ret) { dev_err(dev, "Can't enable regulators for downstream device\n"); regulator_bulk_free(sr->num_supplies, sr->supplies); pcie->sr = NULL; } } no_regulators: brcm_pcie_start_link(pcie); return 0; } static void brcm_pcie_remove_bus(struct pci_bus *bus) { struct brcm_pcie *pcie = bus->sysdata; struct subdev_regulators *sr = pcie->sr; struct device *dev = &bus->dev; if (!sr) return; if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) dev_err(dev, "Failed to disable regulators for downstream device\n"); regulator_bulk_free(sr->num_supplies, sr->supplies); pcie->sr = NULL; } /* L23 is a low-power PCIe link state */ static void brcm_pcie_enter_l23(struct brcm_pcie *pcie) { void __iomem *base = pcie->base; int l23, i; u32 tmp; /* Assert request for L23 */ tmp = readl(base + PCIE_MISC_PCIE_CTRL); u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK); writel(tmp, base + PCIE_MISC_PCIE_CTRL); /* Wait up to 36 msec for L23 */ tmp = readl(base + PCIE_MISC_PCIE_STATUS); l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp); for (i = 0; i < 15 && !l23; i++) { usleep_range(2000, 2400); tmp = readl(base + PCIE_MISC_PCIE_STATUS); l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp); } if (!l23) dev_err(pcie->dev, "failed to enter low-power link state\n"); } static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start) { static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = { PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT, PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT, PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,}; static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = { PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK, PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK, PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,}; const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1; const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1; u32 tmp, combined_mask = 0; u32 val; void __iomem *base = pcie->base; int i, ret; for (i = beg; i != end; start ? i++ : i--) { val = start ? 
BIT_MASK(shifts[i]) : 0; tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL); tmp = (tmp & ~masks[i]) | (val & masks[i]); writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL); usleep_range(50, 200); combined_mask |= masks[i]; } tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL); val = start ? combined_mask : 0; ret = (tmp & combined_mask) == val ? 0 : -EIO; if (ret) dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop")); return ret; } static inline int brcm_phy_start(struct brcm_pcie *pcie) { return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0; } static inline int brcm_phy_stop(struct brcm_pcie *pcie) { return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0; } static void brcm_pcie_turn_off(struct brcm_pcie *pcie) { void __iomem *base = pcie->base; int tmp; if (brcm_pcie_link_up(pcie)) brcm_pcie_enter_l23(pcie); /* Assert fundamental reset */ pcie->perst_set(pcie, 1); /* Deassert request for L23 in case it was asserted */ tmp = readl(base + PCIE_MISC_PCIE_CTRL); u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK); writel(tmp, base + PCIE_MISC_PCIE_CTRL); /* Turn off SerDes */ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); /* Shutdown PCIe bridge */ pcie->bridge_sw_init_set(pcie, 1); } static int pci_dev_may_wakeup(struct pci_dev *dev, void *data) { bool *ret = data; if (device_may_wakeup(&dev->dev)) { *ret = true; dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n"); } return (int) *ret; } static int brcm_pcie_suspend_noirq(struct device *dev) { struct brcm_pcie *pcie = dev_get_drvdata(dev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); int ret; brcm_pcie_turn_off(pcie); /* * If brcm_phy_stop() returns an error, just dev_err(). If we * return the error it will cause the suspend to fail and this is a * forgivable offense that will probably be erased on resume. */ if (brcm_phy_stop(pcie)) dev_err(dev, "Could not stop phy for suspend\n"); ret = reset_control_rearm(pcie->rescal); if (ret) { dev_err(dev, "Could not rearm rescal reset\n"); return ret; } if (pcie->sr) { /* * Now turn off the regulators, but if at least one * downstream device is enabled as a wake-up source, do not * turn off regulators. */ pcie->ep_wakeup_capable = false; pci_walk_bus(bridge->bus, pci_dev_may_wakeup, &pcie->ep_wakeup_capable); if (!pcie->ep_wakeup_capable) { ret = regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies); if (ret) { dev_err(dev, "Could not turn off regulators\n"); reset_control_reset(pcie->rescal); return ret; } } } clk_disable_unprepare(pcie->clk); return 0; } static int brcm_pcie_resume_noirq(struct device *dev) { struct brcm_pcie *pcie = dev_get_drvdata(dev); void __iomem *base; u32 tmp; int ret; base = pcie->base; ret = clk_prepare_enable(pcie->clk); if (ret) return ret; ret = reset_control_reset(pcie->rescal); if (ret) goto err_disable_clk; ret = brcm_phy_start(pcie); if (ret) goto err_reset; /* Take bridge out of reset so we can access the SERDES reg */ pcie->bridge_sw_init_set(pcie, 0); /* SERDES_IDDQ = 0 */ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); /* wait for serdes to be stable */ udelay(100); ret = brcm_pcie_setup(pcie); if (ret) goto err_reset; if (pcie->sr) { if (pcie->ep_wakeup_capable) { /* * We are resuming from a suspend. 
In the suspend we * did not disable the power supplies, so there is * no need to enable them (and falsely increase their * usage count). */ pcie->ep_wakeup_capable = false; } else { ret = regulator_bulk_enable(pcie->sr->num_supplies, pcie->sr->supplies); if (ret) { dev_err(dev, "Could not turn on regulators\n"); goto err_reset; } } } ret = brcm_pcie_start_link(pcie); if (ret) goto err_regulator; if (pcie->msi) brcm_msi_set_regs(pcie->msi); return 0; err_regulator: if (pcie->sr) regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies); err_reset: reset_control_rearm(pcie->rescal); err_disable_clk: clk_disable_unprepare(pcie->clk); return ret; } static void __brcm_pcie_remove(struct brcm_pcie *pcie) { brcm_msi_remove(pcie); brcm_pcie_turn_off(pcie); if (brcm_phy_stop(pcie)) dev_err(pcie->dev, "Could not stop phy\n"); if (reset_control_rearm(pcie->rescal)) dev_err(pcie->dev, "Could not rearm rescal reset\n"); clk_disable_unprepare(pcie->clk); } static void brcm_pcie_remove(struct platform_device *pdev) { struct brcm_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); __brcm_pcie_remove(pcie); } static const int pcie_offsets[] = { [RGR1_SW_INIT_1] = 0x9210, [EXT_CFG_INDEX] = 0x9000, [EXT_CFG_DATA] = 0x9004, }; static const int pcie_offsets_bmips_7425[] = { [RGR1_SW_INIT_1] = 0x8010, [EXT_CFG_INDEX] = 0x8300, [EXT_CFG_DATA] = 0x8304, }; static const struct pcie_cfg_data generic_cfg = { .offsets = pcie_offsets, .type = GENERIC, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; static const struct pcie_cfg_data bcm7425_cfg = { .offsets = pcie_offsets_bmips_7425, .type = BCM7425, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; static const struct pcie_cfg_data bcm7435_cfg = { .offsets = pcie_offsets, .type = BCM7435, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; static const struct pcie_cfg_data bcm4908_cfg = { .offsets = pcie_offsets, .type = BCM4908, .perst_set = brcm_pcie_perst_set_4908, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; static const int pcie_offset_bcm7278[] = { [RGR1_SW_INIT_1] = 0xc010, [EXT_CFG_INDEX] = 0x9000, [EXT_CFG_DATA] = 0x9004, }; static const struct pcie_cfg_data bcm7278_cfg = { .offsets = pcie_offset_bcm7278, .type = BCM7278, .perst_set = brcm_pcie_perst_set_7278, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, }; static const struct pcie_cfg_data bcm2711_cfg = { .offsets = pcie_offsets, .type = BCM2711, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, }; static const struct of_device_id brcm_pcie_match[] = { { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg }, { .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg }, { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg }, { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg }, { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg }, { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg }, { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg }, { .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg }, {}, }; static struct pci_ops brcm_pcie_ops = { .map_bus = brcm_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, .add_bus = brcm_pcie_add_bus, .remove_bus = brcm_pcie_remove_bus, 
}; static struct pci_ops brcm7425_pcie_ops = { .map_bus = brcm7425_pcie_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write32, .add_bus = brcm_pcie_add_bus, .remove_bus = brcm_pcie_remove_bus, }; static int brcm_pcie_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node, *msi_np; struct pci_host_bridge *bridge; const struct pcie_cfg_data *data; struct brcm_pcie *pcie; int ret; bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; data = of_device_get_match_data(&pdev->dev); if (!data) { pr_err("failed to look up compatible string\n"); return -EINVAL; } pcie = pci_host_bridge_priv(bridge); pcie->dev = &pdev->dev; pcie->np = np; pcie->reg_offsets = data->offsets; pcie->type = data->type; pcie->perst_set = data->perst_set; pcie->bridge_sw_init_set = data->bridge_sw_init_set; pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie"); if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); ret = of_pci_get_max_link_speed(np); pcie->gen = (ret < 0) ? 0 : ret; pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc"); ret = clk_prepare_enable(pcie->clk); if (ret) { dev_err(&pdev->dev, "could not enable clock\n"); return ret; } pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal"); if (IS_ERR(pcie->rescal)) { clk_disable_unprepare(pcie->clk); return PTR_ERR(pcie->rescal); } pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst"); if (IS_ERR(pcie->perst_reset)) { clk_disable_unprepare(pcie->clk); return PTR_ERR(pcie->perst_reset); } ret = reset_control_reset(pcie->rescal); if (ret) dev_err(&pdev->dev, "failed to deassert 'rescal'\n"); ret = brcm_phy_start(pcie); if (ret) { reset_control_rearm(pcie->rescal); clk_disable_unprepare(pcie->clk); return ret; } ret = brcm_pcie_setup(pcie); if (ret) goto fail; pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n"); ret = -ENODEV; goto fail; } msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); if (pci_msi_enabled() && msi_np == pcie->np) { ret = brcm_pcie_enable_msi(pcie); if (ret) { dev_err(pcie->dev, "probe of internal MSI failed"); goto fail; } } bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops; bridge->sysdata = pcie; platform_set_drvdata(pdev, pcie); ret = pci_host_probe(bridge); if (!ret && !brcm_pcie_link_up(pcie)) ret = -ENODEV; if (ret) { brcm_pcie_remove(pdev); return ret; } return 0; fail: __brcm_pcie_remove(pcie); return ret; } MODULE_DEVICE_TABLE(of, brcm_pcie_match); static const struct dev_pm_ops brcm_pcie_pm_ops = { .suspend_noirq = brcm_pcie_suspend_noirq, .resume_noirq = brcm_pcie_resume_noirq, }; static struct platform_driver brcm_pcie_driver = { .probe = brcm_pcie_probe, .remove_new = brcm_pcie_remove, .driver = { .name = "brcm-pcie", .of_match_table = brcm_pcie_match, .pm = &brcm_pcie_pm_ops, }, }; module_platform_driver(brcm_pcie_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom STB PCIe RC driver"); MODULE_AUTHOR("Broadcom");
linux-master
drivers/pci/controller/pcie-brcmstb.c
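The Broadcom STB probe and resume paths above lean on goto-based error unwinding: each resource (clock, reset, regulators, PHY) is released in reverse order of acquisition when a later step fails. Below is a minimal, self-contained C sketch of the same pattern; the resource names and acquire/release helpers are hypothetical stand-ins, not the driver's API.

#include <stdio.h>

/* Hypothetical resources standing in for clk/reset/regulator handles. */
static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

static int demo_probe(void)
{
	int ret;

	ret = acquire("clock");
	if (ret)
		return ret;

	ret = acquire("reset");
	if (ret)
		goto err_clock;

	ret = acquire("regulator");
	if (ret)
		goto err_reset;

	return 0;		/* all resources held */

err_reset:
	release("reset");
err_clock:
	release("clock");
	return ret;
}

int main(void)
{
	return demo_probe();
}

The point of the ordering is that each label undoes exactly the steps that succeeded before the failing one, so the cleanup chain stays correct as new resources are added at the bottom.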
// SPDX-License-Identifier: GPL-2.0 /* * PCIe driver for Renesas R-Car SoCs * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd * * Author: Phil Edworthy <[email protected]> */ #include <linux/delay.h> #include <linux/pci.h> #include "pcie-rcar.h" void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg) { writel(val, pcie->base + reg); } u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg) { return readl(pcie->base + reg); } void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) { unsigned int shift = BITS_PER_BYTE * (where & 3); u32 val = rcar_pci_read_reg(pcie, where & ~3); val &= ~(mask << shift); val |= data << shift; rcar_pci_write_reg(pcie, val, where & ~3); } int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie) { unsigned int timeout = 10; while (timeout--) { if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY) return 0; msleep(5); } return -ETIMEDOUT; } int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) { unsigned int timeout = 10000; while (timeout--) { if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) return 0; udelay(5); cpu_relax(); } return -ETIMEDOUT; } void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win, struct resource_entry *window) { /* Setup PCIe address space mappings for each resource */ struct resource *res = window->res; resource_size_t res_start; resource_size_t size; u32 mask; rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); /* * The PAMR mask is calculated in units of 128Bytes, which * keeps things pretty simple. */ size = resource_size(res); if (size > 128) mask = (roundup_pow_of_two(size) / SZ_128) - 1; else mask = 0x0; rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); if (res->flags & IORESOURCE_IO) res_start = pci_pio_to_address(res->start) - window->offset; else res_start = res->start - window->offset; rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, PCIEPALR(win)); /* First resource is for IO */ mask = PAR_ENABLE; if (res->flags & IORESOURCE_IO) mask |= IO_SPACE; rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); } void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr, u64 pci_addr, u64 flags, int idx, bool host) { /* * Set up 64-bit inbound regions as the range parser doesn't * distinguish between 32 and 64-bit types. */ if (host) rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); rcar_pci_write_reg(pcie, flags, PCIELAMR(idx)); if (host) rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx + 1)); rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx + 1)); rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); }
linux-master
drivers/pci/controller/pcie-rcar.c
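rcar_rmw32() above performs a read-modify-write on the naturally aligned 32-bit word that contains a byte offset, shifting the mask and data by the byte lane. Here is a stand-alone sketch of the same arithmetic against an in-memory register file; the mmio array is a stand-in for a real MMIO base and exists only for illustration.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

static uint32_t mmio[4];	/* fake register file indexed by 32-bit word */

static uint32_t read32(unsigned int where) { return mmio[(where & ~3u) / 4]; }
static void write32(uint32_t val, unsigned int where) { mmio[(where & ~3u) / 4] = val; }

/* Same shape as rcar_rmw32(): update 'mask' bits at byte offset 'where'. */
static void rmw32(unsigned int where, uint32_t mask, uint32_t data)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	uint32_t val = read32(where);

	val &= ~(mask << shift);
	val |= data << shift;
	write32(val, where);
}

int main(void)
{
	mmio[1] = 0xAABBCCDD;
	rmw32(6, 0xFF, 0x12);	/* patch the byte at offset 6 (word 1, lane 2) */
	printf("0x%08X\n", (unsigned)mmio[1]);	/* prints 0xAA12CCDD */
	return 0;
}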
// SPDX-License-Identifier: GPL-2.0 /* * Generic PCI host driver common code * * Copyright (C) 2014 ARM Limited * * Author: Will Deacon <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> static void gen_pci_unmap_cfg(void *ptr) { pci_ecam_free((struct pci_config_window *)ptr); } static struct pci_config_window *gen_pci_init(struct device *dev, struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops) { int err; struct resource cfgres; struct resource_entry *bus; struct pci_config_window *cfg; err = of_address_to_resource(dev->of_node, 0, &cfgres); if (err) { dev_err(dev, "missing \"reg\" property\n"); return ERR_PTR(err); } bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); if (!bus) return ERR_PTR(-ENODEV); cfg = pci_ecam_create(dev, &cfgres, bus->res, ops); if (IS_ERR(cfg)) return cfg; err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg); if (err) return ERR_PTR(err); return cfg; } int pci_host_common_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct pci_config_window *cfg; const struct pci_ecam_ops *ops; ops = of_device_get_match_data(&pdev->dev); if (!ops) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; platform_set_drvdata(pdev, bridge); of_pci_check_probe_only(); /* Parse and map our Configuration Space windows */ cfg = gen_pci_init(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); /* Do not reassign resources if probe only */ if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_BUS); bridge->sysdata = cfg; bridge->ops = (struct pci_ops *)&ops->pci_ops; bridge->msi_domain = true; return pci_host_probe(bridge); } EXPORT_SYMBOL_GPL(pci_host_common_probe); int pci_host_common_remove(struct platform_device *pdev) { struct pci_host_bridge *bridge = platform_get_drvdata(pdev); pci_lock_rescan_remove(); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); return 0; } EXPORT_SYMBOL_GPL(pci_host_common_remove); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pci-host-common.c
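pci-host-common.c maps a standard ECAM window and hands the generic config accessors to the bridge. In the standard ECAM layout every function owns a 4 KiB config region, so the offset into the window is derived from bus, device, function and register number. A minimal sketch of that address math follows; the constants match the PCIe ECAM definition, but the function itself is illustrative rather than the kernel's helper.

#include <stdint.h>
#include <stdio.h>

/* Standard ECAM: bus << 20 | device << 15 | function << 12 | register. */
static uint64_t ecam_offset(unsigned int bus, unsigned int dev,
			    unsigned int fn, unsigned int where)
{
	return ((uint64_t)bus << 20) | (dev << 15) | (fn << 12) | (where & 0xFFF);
}

int main(void)
{
	/* Config register 0x10 (BAR0) of bus 1, device 2, function 0. */
	printf("offset = 0x%llx\n",
	       (unsigned long long)ecam_offset(1, 2, 0, 0x10));
	return 0;
}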
// SPDX-License-Identifier: GPL-2.0+ /* * PCIe host controller driver for Xilinx Versal CPM DMA Bridge * * (C) Copyright 2019 - 2020, Xilinx, Inc. */ #include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pci-ecam.h> #include "../pci.h" /* Register definitions */ #define XILINX_CPM_PCIE_REG_IDR 0x00000E10 #define XILINX_CPM_PCIE_REG_IMR 0x00000E14 #define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C #define XILINX_CPM_PCIE_REG_RPSC 0x00000E20 #define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C #define XILINX_CPM_PCIE_REG_IDRN 0x00000E38 #define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C #define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340 #define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348 #define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1) #define XILINX_CPM_PCIE_IR_STATUS 0x000002A0 #define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8 #define XILINX_CPM_PCIE_IR_LOCAL BIT(0) /* Interrupt registers definitions */ #define XILINX_CPM_PCIE_INTR_LINK_DOWN 0 #define XILINX_CPM_PCIE_INTR_HOT_RESET 3 #define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4 #define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8 #define XILINX_CPM_PCIE_INTR_CORRECTABLE 9 #define XILINX_CPM_PCIE_INTR_NONFATAL 10 #define XILINX_CPM_PCIE_INTR_FATAL 11 #define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12 #define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15 #define XILINX_CPM_PCIE_INTR_INTX 16 #define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17 #define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20 #define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21 #define XILINX_CPM_PCIE_INTR_SLV_COMPL 22 #define XILINX_CPM_PCIE_INTR_SLV_ERRP 23 #define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24 #define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25 #define XILINX_CPM_PCIE_INTR_MST_DECERR 26 #define XILINX_CPM_PCIE_INTR_MST_SLVERR 27 #define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28 #define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x) #define XILINX_CPM_PCIE_IMR_ALL_MASK \ ( \ IMR(LINK_DOWN) | \ IMR(HOT_RESET) | \ IMR(CFG_PCIE_TIMEOUT) | \ IMR(CFG_TIMEOUT) | \ IMR(CORRECTABLE) | \ IMR(NONFATAL) | \ IMR(FATAL) | \ IMR(CFG_ERR_POISON) | \ IMR(PME_TO_ACK_RCVD) | \ IMR(INTX) | \ IMR(PM_PME_RCVD) | \ IMR(SLV_UNSUPP) | \ IMR(SLV_UNEXP) | \ IMR(SLV_COMPL) | \ IMR(SLV_ERRP) | \ IMR(SLV_CMPABT) | \ IMR(SLV_ILLBUR) | \ IMR(MST_DECERR) | \ IMR(MST_SLVERR) | \ IMR(SLV_PCIE_TIMEOUT) \ ) #define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF #define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16) #define XILINX_CPM_PCIE_IDRN_SHIFT 16 /* Root Port Error FIFO Read Register definitions */ #define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18) #define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0) #define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF /* Root Port Status/control Register definitions */ #define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0) /* Phy Status/Control Register definitions */ #define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11) enum xilinx_cpm_version { CPM, CPM5, }; /** * struct xilinx_cpm_variant - CPM variant information * @version: CPM version */ struct xilinx_cpm_variant { enum xilinx_cpm_version version; }; /** * struct xilinx_cpm_pcie - PCIe port information * @dev: Device pointer * @reg_base: Bridge Register Base * @cpm_base: CPM System Level Control and Status Register(SLCR) Base * @intx_domain: Legacy IRQ domain pointer * @cpm_domain: CPM IRQ domain pointer * 
@cfg: Holds mappings of config space window * @intx_irq: legacy interrupt number * @irq: Error interrupt number * @lock: lock protecting shared register access * @variant: CPM version check pointer */ struct xilinx_cpm_pcie { struct device *dev; void __iomem *reg_base; void __iomem *cpm_base; struct irq_domain *intx_domain; struct irq_domain *cpm_domain; struct pci_config_window *cfg; int intx_irq; int irq; raw_spinlock_t lock; const struct xilinx_cpm_variant *variant; }; static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg) { return readl_relaxed(port->reg_base + reg); } static void pcie_write(struct xilinx_cpm_pcie *port, u32 val, u32 reg) { writel_relaxed(val, port->reg_base + reg); } static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port) { return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) & XILINX_CPM_PCIE_REG_PSCR_LNKUP); } static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port) { unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR); if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) { dev_dbg(port->dev, "Requester ID %lu\n", val & XILINX_CPM_PCIE_RPEFR_REQ_ID); pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK, XILINX_CPM_PCIE_REG_RPEFR); } } static void xilinx_cpm_mask_leg_irq(struct irq_data *data) { struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data); unsigned long flags; u32 mask; u32 val; mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT); raw_spin_lock_irqsave(&port->lock, flags); val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK); pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK); raw_spin_unlock_irqrestore(&port->lock, flags); } static void xilinx_cpm_unmask_leg_irq(struct irq_data *data) { struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data); unsigned long flags; u32 mask; u32 val; mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT); raw_spin_lock_irqsave(&port->lock, flags); val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK); pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK); raw_spin_unlock_irqrestore(&port->lock, flags); } static struct irq_chip xilinx_cpm_leg_irq_chip = { .name = "INTx", .irq_mask = xilinx_cpm_mask_leg_irq, .irq_unmask = xilinx_cpm_unmask_leg_irq, }; /** * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid * @domain: IRQ domain * @irq: Virtual IRQ number * @hwirq: HW interrupt number * * Return: Always returns 0. 
*/ static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); irq_set_status_flags(irq, IRQ_LEVEL); return 0; } /* INTx IRQ Domain operations */ static const struct irq_domain_ops intx_domain_ops = { .map = xilinx_cpm_pcie_intx_map, }; static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc) { struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long val; int i; chained_irq_enter(chip, desc); val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK, pcie_read(port, XILINX_CPM_PCIE_REG_IDRN)); for_each_set_bit(i, &val, PCI_NUM_INTX) generic_handle_domain_irq(port->intx_domain, i); chained_irq_exit(chip, desc); } static void xilinx_cpm_mask_event_irq(struct irq_data *d) { struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d); u32 val; raw_spin_lock(&port->lock); val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR); val &= ~BIT(d->hwirq); pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR); raw_spin_unlock(&port->lock); } static void xilinx_cpm_unmask_event_irq(struct irq_data *d) { struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d); u32 val; raw_spin_lock(&port->lock); val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR); val |= BIT(d->hwirq); pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR); raw_spin_unlock(&port->lock); } static struct irq_chip xilinx_cpm_event_irq_chip = { .name = "RC-Event", .irq_mask = xilinx_cpm_mask_event_irq, .irq_unmask = xilinx_cpm_unmask_event_irq, }; static int xilinx_cpm_pcie_event_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); irq_set_status_flags(irq, IRQ_LEVEL); return 0; } static const struct irq_domain_ops event_domain_ops = { .map = xilinx_cpm_pcie_event_map, }; static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) { struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long val; int i; chained_irq_enter(chip, desc); val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR); val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR); for_each_set_bit(i, &val, 32) generic_handle_domain_irq(port->cpm_domain, i); pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR); if (port->variant->version == CPM5) { val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS); if (val) writel_relaxed(val, port->cpm_base + XILINX_CPM_PCIE_IR_STATUS); } /* * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to * CPM SLCR block. 
*/ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS); if (val) writel_relaxed(val, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS); chained_irq_exit(chip, desc); } #define _IC(x, s) \ [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s } static const struct { const char *sym; const char *str; } intr_cause[32] = { _IC(LINK_DOWN, "Link Down"), _IC(HOT_RESET, "Hot reset"), _IC(CFG_TIMEOUT, "ECAM access timeout"), _IC(CORRECTABLE, "Correctable error message"), _IC(NONFATAL, "Non fatal error message"), _IC(FATAL, "Fatal error message"), _IC(SLV_UNSUPP, "Slave unsupported request"), _IC(SLV_UNEXP, "Slave unexpected completion"), _IC(SLV_COMPL, "Slave completion timeout"), _IC(SLV_ERRP, "Slave Error Poison"), _IC(SLV_CMPABT, "Slave Completer Abort"), _IC(SLV_ILLBUR, "Slave Illegal Burst"), _IC(MST_DECERR, "Master decode error"), _IC(MST_SLVERR, "Master slave error"), _IC(CFG_PCIE_TIMEOUT, "PCIe ECAM access timeout"), _IC(CFG_ERR_POISON, "ECAM poisoned completion received"), _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"), _IC(PM_PME_RCVD, "PM_PME message received"), _IC(SLV_PCIE_TIMEOUT, "PCIe completion timeout received"), }; static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id) { struct xilinx_cpm_pcie *port = dev_id; struct device *dev = port->dev; struct irq_data *d; d = irq_domain_get_irq_data(port->cpm_domain, irq); switch (d->hwirq) { case XILINX_CPM_PCIE_INTR_CORRECTABLE: case XILINX_CPM_PCIE_INTR_NONFATAL: case XILINX_CPM_PCIE_INTR_FATAL: cpm_pcie_clear_err_interrupts(port); fallthrough; default: if (intr_cause[d->hwirq].str) dev_warn(dev, "%s\n", intr_cause[d->hwirq].str); else dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq); } return IRQ_HANDLED; } static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port) { if (port->intx_domain) { irq_domain_remove(port->intx_domain); port->intx_domain = NULL; } if (port->cpm_domain) { irq_domain_remove(port->cpm_domain); port->cpm_domain = NULL; } } /** * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain * @port: PCIe port information * * Return: '0' on success and error value on failure */ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port) { struct device *dev = port->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); return -EINVAL; } port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32, &event_domain_ops, port); if (!port->cpm_domain) goto out; irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS); port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); if (!port->intx_domain) goto out; irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); of_node_put(pcie_intc_node); raw_spin_lock_init(&port->lock); return 0; out: xilinx_cpm_free_irq_domains(port); of_node_put(pcie_intc_node); dev_err(dev, "Failed to allocate IRQ domains\n"); return -ENOMEM; } static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port) { struct device *dev = port->dev; struct platform_device *pdev = to_platform_device(dev); int i, irq; port->irq = platform_get_irq(pdev, 0); if (port->irq < 0) return port->irq; for (i = 0; i < ARRAY_SIZE(intr_cause); i++) { int err; if (!intr_cause[i].str) continue; irq = irq_create_mapping(port->cpm_domain, i); if (!irq) { dev_err(dev, "Failed to map interrupt\n"); return -ENXIO; } err = devm_request_irq(dev, irq, 
xilinx_cpm_pcie_intr_handler, 0, intr_cause[i].sym, port); if (err) { dev_err(dev, "Failed to request IRQ %d\n", irq); return err; } } port->intx_irq = irq_create_mapping(port->cpm_domain, XILINX_CPM_PCIE_INTR_INTX); if (!port->intx_irq) { dev_err(dev, "Failed to map INTx interrupt\n"); return -ENXIO; } /* Plug the INTx chained handler */ irq_set_chained_handler_and_data(port->intx_irq, xilinx_cpm_pcie_intx_flow, port); /* Plug the main event chained handler */ irq_set_chained_handler_and_data(port->irq, xilinx_cpm_pcie_event_flow, port); return 0; } /** * xilinx_cpm_pcie_init_port - Initialize hardware * @port: PCIe port information */ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port) { if (cpm_pcie_link_up(port)) dev_info(port->dev, "PCIe Link is UP\n"); else dev_info(port->dev, "PCIe Link is DOWN\n"); /* Disable all interrupts */ pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK, XILINX_CPM_PCIE_REG_IMR); /* Clear pending interrupts */ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) & XILINX_CPM_PCIE_IMR_ALL_MASK, XILINX_CPM_PCIE_REG_IDR); /* * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to * CPM SLCR block. */ writel(XILINX_CPM_PCIE_MISC_IR_LOCAL, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE); if (port->variant->version == CPM5) { writel(XILINX_CPM_PCIE_IR_LOCAL, port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE); } /* Enable the Bridge enable bit */ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) | XILINX_CPM_PCIE_REG_RPSC_BEN, XILINX_CPM_PCIE_REG_RPSC); } /** * xilinx_cpm_pcie_parse_dt - Parse Device tree * @port: PCIe port information * @bus_range: Bus resource * * Return: '0' on success and error value on failure */ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port, struct resource *bus_range) { struct device *dev = port->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *res; port->cpm_base = devm_platform_ioremap_resource_byname(pdev, "cpm_slcr"); if (IS_ERR(port->cpm_base)) return PTR_ERR(port->cpm_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); if (!res) return -ENXIO; port->cfg = pci_ecam_create(dev, res, bus_range, &pci_generic_ecam_ops); if (IS_ERR(port->cfg)) return PTR_ERR(port->cfg); if (port->variant->version == CPM5) { port->reg_base = devm_platform_ioremap_resource_byname(pdev, "cpm_csr"); if (IS_ERR(port->reg_base)) return PTR_ERR(port->reg_base); } else { port->reg_base = port->cfg->win; } return 0; } static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port) { irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL); irq_set_chained_handler_and_data(port->irq, NULL, NULL); } /** * xilinx_cpm_pcie_probe - Probe function * @pdev: Platform device pointer * * Return: '0' on success and error value on failure */ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) { struct xilinx_cpm_pcie *port; struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct resource_entry *bus; int err; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); if (!bridge) return -ENODEV; port = pci_host_bridge_priv(bridge); port->dev = dev; err = xilinx_cpm_pcie_init_irq_domain(port); if (err) return err; bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); if (!bus) return -ENODEV; port->variant = of_device_get_match_data(dev); err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); goto err_parse_dt; } xilinx_cpm_pcie_init_port(port); err = xilinx_cpm_setup_irq(port); if (err) { dev_err(dev, "Failed 
to set up interrupts\n"); goto err_setup_irq; } bridge->sysdata = port->cfg; bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; err = pci_host_probe(bridge); if (err < 0) goto err_host_bridge; return 0; err_host_bridge: xilinx_cpm_free_interrupts(port); err_setup_irq: pci_ecam_free(port->cfg); err_parse_dt: xilinx_cpm_free_irq_domains(port); return err; } static const struct xilinx_cpm_variant cpm_host = { .version = CPM, }; static const struct xilinx_cpm_variant cpm5_host = { .version = CPM5, }; static const struct of_device_id xilinx_cpm_pcie_of_match[] = { { .compatible = "xlnx,versal-cpm-host-1.00", .data = &cpm_host, }, { .compatible = "xlnx,versal-cpm5-host", .data = &cpm5_host, }, {} }; static struct platform_driver xilinx_cpm_pcie_driver = { .driver = { .name = "xilinx-cpm-pcie", .of_match_table = xilinx_cpm_pcie_of_match, .suppress_bind_attrs = true, }, .probe = xilinx_cpm_pcie_probe, }; builtin_platform_driver(xilinx_cpm_pcie_driver);
linux-master
drivers/pci/controller/pcie-xilinx-cpm.c
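The Xilinx CPM event and INTx flows above read a pending-interrupt register inside a chained handler and dispatch one handler per set bit. The sketch below shows that demultiplexing loop in isolation; the pending value and the handle() callback are made up for illustration and mirror for_each_set_bit() with a plain loop.

#include <stdint.h>
#include <stdio.h>

static void handle(unsigned int hwirq)
{
	printf("dispatch hwirq %u\n", hwirq);
}

/* Walk the set bits of a 32-bit pending mask, lowest bit first. */
static void demux(uint32_t pending)
{
	for (unsigned int bit = 0; bit < 32; bit++)
		if (pending & (1u << bit))
			handle(bit);
}

int main(void)
{
	demux(0x00090002);	/* bits 1, 16 and 19 pending */
	return 0;
}

In the driver the same loop runs between chained_irq_enter() and chained_irq_exit(), and each dispatched bit is translated through an irq_domain instead of being handled directly.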
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include "../pci.h" #include "pcie-iproc.h" static const struct of_device_id iproc_pcie_of_match_table[] = { { .compatible = "brcm,iproc-pcie", .data = (int *)IPROC_PCIE_PAXB, }, { .compatible = "brcm,iproc-pcie-paxb-v2", .data = (int *)IPROC_PCIE_PAXB_V2, }, { .compatible = "brcm,iproc-pcie-paxc", .data = (int *)IPROC_PCIE_PAXC, }, { .compatible = "brcm,iproc-pcie-paxc-v2", .data = (int *)IPROC_PCIE_PAXC_V2, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); static int iproc_pltfm_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct iproc_pcie *pcie; struct device_node *np = dev->of_node; struct resource reg; struct pci_host_bridge *bridge; int ret; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); ret = of_address_to_resource(np, 0, &reg); if (ret < 0) { dev_err(dev, "unable to obtain controller resources\n"); return ret; } pcie->base = devm_pci_remap_cfgspace(dev, reg.start, resource_size(&reg)); if (!pcie->base) { dev_err(dev, "unable to map controller registers\n"); return -ENOMEM; } pcie->base_addr = reg.start; if (of_property_read_bool(np, "brcm,pcie-ob")) { u32 val; ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset", &val); if (ret) { dev_err(dev, "missing brcm,pcie-ob-axi-offset property\n"); return ret; } pcie->ob.axi_offset = val; pcie->need_ob_cfg = true; } /* * DT nodes are not used by all platforms that use the iProc PCIe * core driver. For platforms that require explicit inbound mapping * configuration, "dma-ranges" would have been present in DT */ pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); /* PHY use is optional */ pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) return PTR_ERR(pcie->phy); /* PAXC doesn't support legacy IRQs, skip mapping */ switch (pcie->type) { case IPROC_PCIE_PAXC: case IPROC_PCIE_PAXC_V2: pcie->map_irq = NULL; break; default: break; } ret = iproc_pcie_setup(pcie, &bridge->windows); if (ret) { dev_err(dev, "PCIe controller setup failed\n"); return ret; } platform_set_drvdata(pdev, pcie); return 0; } static void iproc_pltfm_pcie_remove(struct platform_device *pdev) { struct iproc_pcie *pcie = platform_get_drvdata(pdev); iproc_pcie_remove(pcie); } static void iproc_pltfm_pcie_shutdown(struct platform_device *pdev) { struct iproc_pcie *pcie = platform_get_drvdata(pdev); iproc_pcie_shutdown(pcie); } static struct platform_driver iproc_pltfm_pcie_driver = { .driver = { .name = "iproc-pcie", .of_match_table = of_match_ptr(iproc_pcie_of_match_table), }, .probe = iproc_pltfm_pcie_probe, .remove_new = iproc_pltfm_pcie_remove, .shutdown = iproc_pltfm_pcie_shutdown, }; module_platform_driver(iproc_pltfm_pcie_driver); MODULE_AUTHOR("Ray Jui <[email protected]>"); MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-iproc-platform.c
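The iProc platform glue, like the Broadcom STB and MediaTek drivers earlier, selects per-SoC behaviour through of_device_get_match_data(): the matched compatible string indexes a table of constant configuration data. A small table-driven sketch of that idea, with entirely hypothetical names and values:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical per-variant data, analogous to the drivers' *_cfg structs. */
struct variant_cfg {
	const char *compatible;
	unsigned int cfg_offset;
	int has_msi;
};

static const struct variant_cfg variants[] = {
	{ "vendor,soc-a-pcie", 0x9000, 1 },
	{ "vendor,soc-b-pcie", 0x8300, 0 },
};

static const struct variant_cfg *match(const char *compatible)
{
	for (size_t i = 0; i < sizeof(variants) / sizeof(variants[0]); i++)
		if (!strcmp(variants[i].compatible, compatible))
			return &variants[i];
	return NULL;
}

int main(void)
{
	const struct variant_cfg *cfg = match("vendor,soc-b-pcie");

	if (cfg)
		printf("cfg offset 0x%x, msi %d\n", cfg->cfg_offset, cfg->has_msi);
	return 0;
}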
// SPDX-License-Identifier: GPL-2.0 /* * MediaTek PCIe host controller driver. * * Copyright (c) 2017 MediaTek Inc. * Author: Ryder Lee <[email protected]> * Honghui Zhang <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/msi.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset.h> #include "../pci.h" /* PCIe shared registers */ #define PCIE_SYS_CFG 0x00 #define PCIE_INT_ENABLE 0x0c #define PCIE_CFG_ADDR 0x20 #define PCIE_CFG_DATA 0x24 /* PCIe per port registers */ #define PCIE_BAR0_SETUP 0x10 #define PCIE_CLASS 0x34 #define PCIE_LINK_STATUS 0x50 #define PCIE_PORT_INT_EN(x) BIT(20 + (x)) #define PCIE_PORT_PERST(x) BIT(1 + (x)) #define PCIE_PORT_LINKUP BIT(0) #define PCIE_BAR_MAP_MAX GENMASK(31, 16) #define PCIE_BAR_ENABLE BIT(0) #define PCIE_REVISION_ID BIT(0) #define PCIE_CLASS_CODE (0x60400 << 8) #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ ((((regn) >> 8) & GENMASK(3, 0)) << 24)) #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) #define PCIE_CONF_ADDR(regn, fun, dev, bus) \ (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) /* MediaTek specific configuration registers */ #define PCIE_FTS_NUM 0x70c #define PCIE_FTS_NUM_MASK GENMASK(15, 8) #define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) #define PCIE_FC_CREDIT 0x73c #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) #define PCIE_FC_CREDIT_VAL(x) ((x) << 16) /* PCIe V2 share registers */ #define PCIE_SYS_CFG_V2 0x0 #define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) #define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) /* PCIe V2 per-port registers */ #define PCIE_MSI_VECTOR 0x0c0 #define PCIE_CONF_VEND_ID 0x100 #define PCIE_CONF_DEVICE_ID 0x102 #define PCIE_CONF_CLASS_ID 0x106 #define PCIE_INT_MASK 0x420 #define INTX_MASK GENMASK(19, 16) #define INTX_SHIFT 16 #define PCIE_INT_STATUS 0x424 #define MSI_STATUS BIT(23) #define PCIE_IMSI_STATUS 0x42c #define PCIE_IMSI_ADDR 0x430 #define MSI_MASK BIT(23) #define MTK_MSI_IRQS_NUM 32 #define PCIE_AHB_TRANS_BASE0_L 0x438 #define PCIE_AHB_TRANS_BASE0_H 0x43c #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) #define PCIE_AXI_WINDOW0 0x448 #define WIN_ENABLE BIT(7) /* * Define PCIe to AHB window size as 2^33 to support max 8GB address space * translate, support least 4GB DRAM size access from EP DMA(physical DRAM * start from 0x40000000). 
*/ #define PCIE2AHB_SIZE 0x21 /* PCIe V2 configuration transaction header */ #define PCIE_CFG_HEADER0 0x460 #define PCIE_CFG_HEADER1 0x464 #define PCIE_CFG_HEADER2 0x468 #define PCIE_CFG_WDATA 0x470 #define PCIE_APP_TLP_REQ 0x488 #define PCIE_CFG_RDATA 0x48c #define APP_CFG_REQ BIT(0) #define APP_CPL_STATUS GENMASK(7, 5) #define CFG_WRRD_TYPE_0 4 #define CFG_WR_FMT 2 #define CFG_RD_FMT 0 #define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) #define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) #define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) #define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) #define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) #define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) #define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) #define CFG_HEADER_DW0(type, fmt) \ (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) #define CFG_HEADER_DW1(where, size) \ (GENMASK(((size) - 1), 0) << ((where) & 0x3)) #define CFG_HEADER_DW2(regn, fun, dev, bus) \ (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) #define PCIE_RST_CTRL 0x510 #define PCIE_PHY_RSTB BIT(0) #define PCIE_PIPE_SRSTB BIT(1) #define PCIE_MAC_SRSTB BIT(2) #define PCIE_CRSTB BIT(3) #define PCIE_PERSTB BIT(8) #define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) #define PCIE_LINK_STATUS_V2 0x804 #define PCIE_PORT_LINKUP_V2 BIT(10) struct mtk_pcie_port; /** * struct mtk_pcie_soc - differentiate between host generations * @need_fix_class_id: whether this host's class ID needed to be fixed or not * @need_fix_device_id: whether this host's device ID needed to be fixed or not * @no_msi: Bridge has no MSI support, and relies on an external block * @device_id: device ID which this host need to be fixed * @ops: pointer to configuration access functions * @startup: pointer to controller setting functions * @setup_irq: pointer to initialize IRQ functions */ struct mtk_pcie_soc { bool need_fix_class_id; bool need_fix_device_id; bool no_msi; unsigned int device_id; struct pci_ops *ops; int (*startup)(struct mtk_pcie_port *port); int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); }; /** * struct mtk_pcie_port - PCIe port information * @base: IO mapped register base * @list: port list * @pcie: pointer to PCIe host info * @reset: pointer to port reset control * @sys_ck: pointer to transaction/data link layer clock * @ahb_ck: pointer to AHB slave interface operating clock for CSR access * and RC initiated MMIO access * @axi_ck: pointer to application layer MMIO channel operating clock * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock * when pcie_mac_ck/pcie_pipe_ck is turned off * @obff_ck: pointer to OBFF functional block operating clock * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock * @phy: pointer to PHY control block * @slot: port slot * @irq: GIC irq * @irq_domain: legacy INTx IRQ domain * @inner_domain: inner IRQ domain * @msi_domain: MSI IRQ domain * @lock: protect the msi_irq_in_use bitmap * @msi_irq_in_use: bit map for assigned MSI IRQ */ struct mtk_pcie_port { void __iomem *base; struct list_head list; struct mtk_pcie *pcie; struct reset_control *reset; struct clk *sys_ck; struct clk *ahb_ck; struct clk *axi_ck; struct clk *aux_ck; struct clk *obff_ck; struct clk *pipe_ck; struct phy *phy; u32 slot; int irq; struct irq_domain *irq_domain; struct irq_domain *inner_domain; struct irq_domain *msi_domain; struct mutex lock; DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); }; /** * struct mtk_pcie - PCIe 
host information * @dev: pointer to PCIe device * @base: IO mapped register base * @cfg: IO mapped register map for PCIe config * @free_ck: free-run reference clock * @mem: non-prefetchable memory resource * @ports: pointer to PCIe port information * @soc: pointer to SoC-dependent operations */ struct mtk_pcie { struct device *dev; void __iomem *base; struct regmap *cfg; struct clk *free_ck; struct list_head ports; const struct mtk_pcie_soc *soc; }; static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) { struct device *dev = pcie->dev; clk_disable_unprepare(pcie->free_ck); pm_runtime_put_sync(dev); pm_runtime_disable(dev); } static void mtk_pcie_port_free(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; struct device *dev = pcie->dev; devm_iounmap(dev, port->base); list_del(&port->list); devm_kfree(dev, port); } static void mtk_pcie_put_resources(struct mtk_pcie *pcie) { struct mtk_pcie_port *port, *tmp; list_for_each_entry_safe(port, tmp, &pcie->ports, list) { phy_power_off(port->phy); phy_exit(port->phy); clk_disable_unprepare(port->pipe_ck); clk_disable_unprepare(port->obff_ck); clk_disable_unprepare(port->axi_ck); clk_disable_unprepare(port->aux_ck); clk_disable_unprepare(port->ahb_ck); clk_disable_unprepare(port->sys_ck); mtk_pcie_port_free(port); } mtk_pcie_subsys_powerdown(pcie); } static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) { u32 val; int err; err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, !(val & APP_CFG_REQ), 10, 100 * USEC_PER_MSEC); if (err) return PCIBIOS_SET_FAILED; if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) return PCIBIOS_SET_FAILED; return PCIBIOS_SUCCESSFUL; } static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, int where, int size, u32 *val) { u32 tmp; /* Write PCIe configuration transaction header for Cfgrd */ writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT), port->base + PCIE_CFG_HEADER0); writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), port->base + PCIE_CFG_HEADER2); /* Trigger h/w to transmit Cfgrd TLP */ tmp = readl(port->base + PCIE_APP_TLP_REQ); tmp |= APP_CFG_REQ; writel(tmp, port->base + PCIE_APP_TLP_REQ); /* Check completion status */ if (mtk_pcie_check_cfg_cpld(port)) return PCIBIOS_SET_FAILED; /* Read cpld payload of Cfgrd */ *val = readl(port->base + PCIE_CFG_RDATA); if (size == 1) *val = (*val >> (8 * (where & 3))) & 0xff; else if (size == 2) *val = (*val >> (8 * (where & 3))) & 0xffff; return PCIBIOS_SUCCESSFUL; } static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, int where, int size, u32 val) { /* Write PCIe configuration transaction header for Cfgwr */ writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT), port->base + PCIE_CFG_HEADER0); writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), port->base + PCIE_CFG_HEADER2); /* Write Cfgwr data */ val = val << 8 * (where & 3); writel(val, port->base + PCIE_CFG_WDATA); /* Trigger h/w to transmit Cfgwr TLP */ val = readl(port->base + PCIE_APP_TLP_REQ); val |= APP_CFG_REQ; writel(val, port->base + PCIE_APP_TLP_REQ); /* Check completion status */ return mtk_pcie_check_cfg_cpld(port); } static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, unsigned int devfn) { struct mtk_pcie *pcie = bus->sysdata; struct mtk_pcie_port *port; struct pci_dev *dev = NULL; /* * Walk the bus hierarchy to get the devfn 
value * of the port in the root bus. */ while (bus && bus->number) { dev = bus->self; bus = dev->bus; devfn = dev->devfn; } list_for_each_entry(port, &pcie->ports, list) if (port->slot == PCI_SLOT(devfn)) return port; return NULL; } static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct mtk_pcie_port *port; u32 bn = bus->number; port = mtk_pcie_find_port(bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; return mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); } static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct mtk_pcie_port *port; u32 bn = bus->number; port = mtk_pcie_find_port(bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); } static struct pci_ops mtk_pcie_ops_v2 = { .read = mtk_pcie_config_read, .write = mtk_pcie_config_write, }; static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); phys_addr_t addr; /* MT2712/MT7622 only support 32-bit MSI addresses */ addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); msg->address_hi = 0; msg->address_lo = lower_32_bits(addr); msg->data = data->hwirq; dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)data->hwirq, msg->address_hi, msg->address_lo); } static int mtk_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static void mtk_msi_ack_irq(struct irq_data *data) { struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); u32 hwirq = data->hwirq; writel(1 << hwirq, port->base + PCIE_IMSI_STATUS); } static struct irq_chip mtk_msi_bottom_irq_chip = { .name = "MTK MSI", .irq_compose_msi_msg = mtk_compose_msi_msg, .irq_set_affinity = mtk_msi_set_affinity, .irq_ack = mtk_msi_ack_irq, }; static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct mtk_pcie_port *port = domain->host_data; unsigned long bit; WARN_ON(nr_irqs != 1); mutex_lock(&port->lock); bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); if (bit >= MTK_MSI_IRQS_NUM) { mutex_unlock(&port->lock); return -ENOSPC; } __set_bit(bit, port->msi_irq_in_use); mutex_unlock(&port->lock); irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip, domain->host_data, handle_edge_irq, NULL, NULL); return 0; } static void mtk_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d); mutex_lock(&port->lock); if (!test_bit(d->hwirq, port->msi_irq_in_use)) dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n", d->hwirq); else __clear_bit(d->hwirq, port->msi_irq_in_use); mutex_unlock(&port->lock); irq_domain_free_irqs_parent(domain, virq, nr_irqs); } static const struct irq_domain_ops msi_domain_ops = { .alloc = mtk_pcie_irq_domain_alloc, .free = mtk_pcie_irq_domain_free, }; static struct irq_chip mtk_msi_irq_chip = { .name = "MTK PCIe MSI", .irq_ack = irq_chip_ack_parent, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info mtk_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX), .chip = &mtk_msi_irq_chip, }; static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) { struct fwnode_handle *fwnode = 
of_node_to_fwnode(port->pcie->dev->of_node); mutex_init(&port->lock); port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, &msi_domain_ops, port); if (!port->inner_domain) { dev_err(port->pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, port->inner_domain); if (!port->msi_domain) { dev_err(port->pcie->dev, "failed to create MSI domain\n"); irq_domain_remove(port->inner_domain); return -ENOMEM; } return 0; } static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) { u32 val; phys_addr_t msg_addr; msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); val = lower_32_bits(msg_addr); writel(val, port->base + PCIE_IMSI_ADDR); val = readl(port->base + PCIE_INT_MASK); val &= ~MSI_MASK; writel(val, port->base + PCIE_INT_MASK); } static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie) { struct mtk_pcie_port *port, *tmp; list_for_each_entry_safe(port, tmp, &pcie->ports, list) { irq_set_chained_handler_and_data(port->irq, NULL, NULL); if (port->irq_domain) irq_domain_remove(port->irq_domain); if (IS_ENABLED(CONFIG_PCI_MSI)) { if (port->msi_domain) irq_domain_remove(port->msi_domain); if (port->inner_domain) irq_domain_remove(port->inner_domain); } irq_dispose_mapping(port->irq); } } static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = mtk_pcie_intx_map, }; static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, struct device_node *node) { struct device *dev = port->pcie->dev; struct device_node *pcie_intc_node; int ret; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "no PCIe Intc node found\n"); return -ENODEV; } port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); of_node_put(pcie_intc_node); if (!port->irq_domain) { dev_err(dev, "failed to get INTx IRQ domain\n"); return -ENODEV; } if (IS_ENABLED(CONFIG_PCI_MSI)) { ret = mtk_pcie_allocate_msi_domains(port); if (ret) return ret; } return 0; } static void mtk_pcie_intr_handler(struct irq_desc *desc) { struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned long status; u32 bit = INTX_SHIFT; chained_irq_enter(irqchip, desc); status = readl(port->base + PCIE_INT_STATUS); if (status & INTX_MASK) { for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { /* Clear the INTx */ writel(1 << bit, port->base + PCIE_INT_STATUS); generic_handle_domain_irq(port->irq_domain, bit - INTX_SHIFT); } } if (IS_ENABLED(CONFIG_PCI_MSI)) { if (status & MSI_STATUS){ unsigned long imsi_status; while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) generic_handle_domain_irq(port->inner_domain, bit); } /* Clear MSI interrupt status */ writel(MSI_STATUS, port->base + PCIE_INT_STATUS); } } chained_irq_exit(irqchip, desc); } static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, struct device_node *node) { struct mtk_pcie *pcie = port->pcie; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); int err; err = mtk_pcie_init_irq_domain(port, node); if (err) { dev_err(dev, "failed to init PCIe IRQ domain\n"); return err; } if (of_property_present(dev->of_node, 
"interrupt-names")) port->irq = platform_get_irq_byname(pdev, "pcie_irq"); else port->irq = platform_get_irq(pdev, port->slot); if (port->irq < 0) return port->irq; irq_set_chained_handler_and_data(port->irq, mtk_pcie_intr_handler, port); return 0; } static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct resource *mem = NULL; struct resource_entry *entry; const struct mtk_pcie_soc *soc = port->pcie->soc; u32 val; int err; entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); if (entry) mem = entry->res; if (!mem) return -EINVAL; /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ if (pcie->base) { val = readl(pcie->base + PCIE_SYS_CFG_V2); val |= PCIE_CSR_LTSSM_EN(port->slot) | PCIE_CSR_ASPM_L1_EN(port->slot); writel(val, pcie->base + PCIE_SYS_CFG_V2); } else if (pcie->cfg) { val = PCIE_CSR_LTSSM_EN(port->slot) | PCIE_CSR_ASPM_L1_EN(port->slot); regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val); } /* Assert all reset signals */ writel(0, port->base + PCIE_RST_CTRL); /* * Enable PCIe link down reset, if link status changed from link up to * link down, this will reset MAC control registers and configuration * space. */ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); /* * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should * be delayed 100ms (TPVPERL) for the power and clock to become stable. */ msleep(100); /* De-assert PHY, PE, PIPE, MAC and configuration reset */ val = readl(port->base + PCIE_RST_CTRL); val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | PCIE_MAC_SRSTB | PCIE_CRSTB; writel(val, port->base + PCIE_RST_CTRL); /* Set up vendor ID and class code */ if (soc->need_fix_class_id) { val = PCI_VENDOR_ID_MEDIATEK; writew(val, port->base + PCIE_CONF_VEND_ID); val = PCI_CLASS_BRIDGE_PCI; writew(val, port->base + PCIE_CONF_CLASS_ID); } if (soc->need_fix_device_id) writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID); /* 100ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, !!(val & PCIE_PORT_LINKUP_V2), 20, 100 * USEC_PER_MSEC); if (err) return -ETIMEDOUT; /* Set INTx mask */ val = readl(port->base + PCIE_INT_MASK); val &= ~INTX_MASK; writel(val, port->base + PCIE_INT_MASK); if (IS_ENABLED(CONFIG_PCI_MSI)) mtk_pcie_enable_msi(port); /* Set AHB to PCIe translation windows */ val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(resource_size(mem))); writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); val = upper_32_bits(mem->start); writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); /* Set PCIe to AXI translation memory space.*/ val = PCIE2AHB_SIZE | WIN_ENABLE; writel(val, port->base + PCIE_AXI_WINDOW0); return 0; } static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mtk_pcie *pcie = bus->sysdata; writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus->number), pcie->base + PCIE_CFG_ADDR); return pcie->base + PCIE_CFG_DATA + (where & 3); } static struct pci_ops mtk_pcie_ops = { .map_bus = mtk_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static int mtk_pcie_startup_port(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; u32 func = PCI_FUNC(port->slot); u32 slot = PCI_SLOT(port->slot << 3); u32 val; int err; /* assert port PERST_N */ val = readl(pcie->base + 
PCIE_SYS_CFG); val |= PCIE_PORT_PERST(port->slot); writel(val, pcie->base + PCIE_SYS_CFG); /* de-assert port PERST_N */ val = readl(pcie->base + PCIE_SYS_CFG); val &= ~PCIE_PORT_PERST(port->slot); writel(val, pcie->base + PCIE_SYS_CFG); /* 100ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, !!(val & PCIE_PORT_LINKUP), 20, 100 * USEC_PER_MSEC); if (err) return -ETIMEDOUT; /* enable interrupt */ val = readl(pcie->base + PCIE_INT_ENABLE); val |= PCIE_PORT_INT_EN(port->slot); writel(val, pcie->base + PCIE_INT_ENABLE); /* map to all DDR region. We need to set it before cfg operation. */ writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, port->base + PCIE_BAR0_SETUP); /* configure class code and revision ID */ writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); /* configure FC credit */ writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), pcie->base + PCIE_CFG_ADDR); val = readl(pcie->base + PCIE_CFG_DATA); val &= ~PCIE_FC_CREDIT_MASK; val |= PCIE_FC_CREDIT_VAL(0x806c); writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), pcie->base + PCIE_CFG_ADDR); writel(val, pcie->base + PCIE_CFG_DATA); /* configure RC FTS number to 250 when it leaves L0s */ writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), pcie->base + PCIE_CFG_ADDR); val = readl(pcie->base + PCIE_CFG_DATA); val &= ~PCIE_FTS_NUM_MASK; val |= PCIE_FTS_NUM_L0(0x50); writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), pcie->base + PCIE_CFG_ADDR); writel(val, pcie->base + PCIE_CFG_DATA); return 0; } static void mtk_pcie_enable_port(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; struct device *dev = pcie->dev; int err; err = clk_prepare_enable(port->sys_ck); if (err) { dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot); goto err_sys_clk; } err = clk_prepare_enable(port->ahb_ck); if (err) { dev_err(dev, "failed to enable ahb_ck%d\n", port->slot); goto err_ahb_clk; } err = clk_prepare_enable(port->aux_ck); if (err) { dev_err(dev, "failed to enable aux_ck%d\n", port->slot); goto err_aux_clk; } err = clk_prepare_enable(port->axi_ck); if (err) { dev_err(dev, "failed to enable axi_ck%d\n", port->slot); goto err_axi_clk; } err = clk_prepare_enable(port->obff_ck); if (err) { dev_err(dev, "failed to enable obff_ck%d\n", port->slot); goto err_obff_clk; } err = clk_prepare_enable(port->pipe_ck); if (err) { dev_err(dev, "failed to enable pipe_ck%d\n", port->slot); goto err_pipe_clk; } reset_control_assert(port->reset); reset_control_deassert(port->reset); err = phy_init(port->phy); if (err) { dev_err(dev, "failed to initialize port%d phy\n", port->slot); goto err_phy_init; } err = phy_power_on(port->phy); if (err) { dev_err(dev, "failed to power on port%d phy\n", port->slot); goto err_phy_on; } if (!pcie->soc->startup(port)) return; dev_info(dev, "Port%d link down\n", port->slot); phy_power_off(port->phy); err_phy_on: phy_exit(port->phy); err_phy_init: clk_disable_unprepare(port->pipe_ck); err_pipe_clk: clk_disable_unprepare(port->obff_ck); err_obff_clk: clk_disable_unprepare(port->axi_ck); err_axi_clk: clk_disable_unprepare(port->aux_ck); err_aux_clk: clk_disable_unprepare(port->ahb_ck); err_ahb_clk: clk_disable_unprepare(port->sys_ck); err_sys_clk: mtk_pcie_port_free(port); } static int mtk_pcie_parse_port(struct mtk_pcie *pcie, struct device_node *node, int slot) { struct mtk_pcie_port *port; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); char name[10]; int err; port = devm_kzalloc(dev, sizeof(*port), 
GFP_KERNEL); if (!port) return -ENOMEM; snprintf(name, sizeof(name), "port%d", slot); port->base = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(port->base)) { dev_err(dev, "failed to map port%d base\n", slot); return PTR_ERR(port->base); } snprintf(name, sizeof(name), "sys_ck%d", slot); port->sys_ck = devm_clk_get(dev, name); if (IS_ERR(port->sys_ck)) { dev_err(dev, "failed to get sys_ck%d clock\n", slot); return PTR_ERR(port->sys_ck); } /* sys_ck might be divided into the following parts in some chips */ snprintf(name, sizeof(name), "ahb_ck%d", slot); port->ahb_ck = devm_clk_get_optional(dev, name); if (IS_ERR(port->ahb_ck)) return PTR_ERR(port->ahb_ck); snprintf(name, sizeof(name), "axi_ck%d", slot); port->axi_ck = devm_clk_get_optional(dev, name); if (IS_ERR(port->axi_ck)) return PTR_ERR(port->axi_ck); snprintf(name, sizeof(name), "aux_ck%d", slot); port->aux_ck = devm_clk_get_optional(dev, name); if (IS_ERR(port->aux_ck)) return PTR_ERR(port->aux_ck); snprintf(name, sizeof(name), "obff_ck%d", slot); port->obff_ck = devm_clk_get_optional(dev, name); if (IS_ERR(port->obff_ck)) return PTR_ERR(port->obff_ck); snprintf(name, sizeof(name), "pipe_ck%d", slot); port->pipe_ck = devm_clk_get_optional(dev, name); if (IS_ERR(port->pipe_ck)) return PTR_ERR(port->pipe_ck); snprintf(name, sizeof(name), "pcie-rst%d", slot); port->reset = devm_reset_control_get_optional_exclusive(dev, name); if (PTR_ERR(port->reset) == -EPROBE_DEFER) return PTR_ERR(port->reset); /* some platforms may use default PHY setting */ snprintf(name, sizeof(name), "pcie-phy%d", slot); port->phy = devm_phy_optional_get(dev, name); if (IS_ERR(port->phy)) return PTR_ERR(port->phy); port->slot = slot; port->pcie = pcie; if (pcie->soc->setup_irq) { err = pcie->soc->setup_irq(port, node); if (err) return err; } INIT_LIST_HEAD(&port->list); list_add_tail(&port->list, &pcie->ports); return 0; } static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *regs; struct device_node *cfg_node; int err; /* get shared registers, which are optional */ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys"); if (regs) { pcie->base = devm_ioremap_resource(dev, regs); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); } cfg_node = of_find_compatible_node(NULL, NULL, "mediatek,generic-pciecfg"); if (cfg_node) { pcie->cfg = syscon_node_to_regmap(cfg_node); of_node_put(cfg_node); if (IS_ERR(pcie->cfg)) return PTR_ERR(pcie->cfg); } pcie->free_ck = devm_clk_get(dev, "free_ck"); if (IS_ERR(pcie->free_ck)) { if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) return -EPROBE_DEFER; pcie->free_ck = NULL; } pm_runtime_enable(dev); pm_runtime_get_sync(dev); /* enable top level clock */ err = clk_prepare_enable(pcie->free_ck); if (err) { dev_err(dev, "failed to enable free_ck\n"); goto err_free_ck; } return 0; err_free_ck: pm_runtime_put_sync(dev); pm_runtime_disable(dev); return err; } static int mtk_pcie_setup(struct mtk_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *node = dev->of_node, *child; struct mtk_pcie_port *port, *tmp; int err, slot; slot = of_get_pci_domain_nr(dev->of_node); if (slot < 0) { for_each_available_child_of_node(node, child) { err = of_pci_get_devfn(child); if (err < 0) { dev_err(dev, "failed to get devfn: %d\n", err); goto error_put_node; } slot = PCI_SLOT(err); err = mtk_pcie_parse_port(pcie, child, slot); if (err) goto error_put_node; } } else { err = mtk_pcie_parse_port(pcie, node, 
slot); if (err) return err; } err = mtk_pcie_subsys_powerup(pcie); if (err) return err; /* enable each port, and then check link status */ list_for_each_entry_safe(port, tmp, &pcie->ports, list) mtk_pcie_enable_port(port); /* power down PCIe subsys if slots are all empty (link down) */ if (list_empty(&pcie->ports)) mtk_pcie_subsys_powerdown(pcie); return 0; error_put_node: of_node_put(child); return err; } static int mtk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_pcie *pcie; struct pci_host_bridge *host; int err; host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!host) return -ENOMEM; pcie = pci_host_bridge_priv(host); pcie->dev = dev; pcie->soc = of_device_get_match_data(dev); platform_set_drvdata(pdev, pcie); INIT_LIST_HEAD(&pcie->ports); err = mtk_pcie_setup(pcie); if (err) return err; host->ops = pcie->soc->ops; host->sysdata = pcie; host->msi_domain = pcie->soc->no_msi; err = pci_host_probe(host); if (err) goto put_resources; return 0; put_resources: if (!list_empty(&pcie->ports)) mtk_pcie_put_resources(pcie); return err; } static void mtk_pcie_free_resources(struct mtk_pcie *pcie) { struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct list_head *windows = &host->windows; pci_free_resource_list(windows); } static void mtk_pcie_remove(struct platform_device *pdev) { struct mtk_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); pci_stop_root_bus(host->bus); pci_remove_root_bus(host->bus); mtk_pcie_free_resources(pcie); mtk_pcie_irq_teardown(pcie); mtk_pcie_put_resources(pcie); } static int mtk_pcie_suspend_noirq(struct device *dev) { struct mtk_pcie *pcie = dev_get_drvdata(dev); struct mtk_pcie_port *port; if (list_empty(&pcie->ports)) return 0; list_for_each_entry(port, &pcie->ports, list) { clk_disable_unprepare(port->pipe_ck); clk_disable_unprepare(port->obff_ck); clk_disable_unprepare(port->axi_ck); clk_disable_unprepare(port->aux_ck); clk_disable_unprepare(port->ahb_ck); clk_disable_unprepare(port->sys_ck); phy_power_off(port->phy); phy_exit(port->phy); } clk_disable_unprepare(pcie->free_ck); return 0; } static int mtk_pcie_resume_noirq(struct device *dev) { struct mtk_pcie *pcie = dev_get_drvdata(dev); struct mtk_pcie_port *port, *tmp; if (list_empty(&pcie->ports)) return 0; clk_prepare_enable(pcie->free_ck); list_for_each_entry_safe(port, tmp, &pcie->ports, list) mtk_pcie_enable_port(port); /* In case of EP was removed while system suspend. 
	 */
	if (list_empty(&pcie->ports))
		clk_disable_unprepare(pcie->free_ck);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};

static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.no_msi = true,
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
	.need_fix_class_id = true,
	.need_fix_device_id = true,
	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_ids);

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove_new = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
		.pm = &mtk_pcie_pm_ops,
	},
};
module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-mediatek.c
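A note on the clock handling in mtk_pcie_parse_port() above: only sys_ck is mandatory, while ahb_ck, axi_ck, aux_ck, obff_ck and pipe_ck are requested with devm_clk_get_optional(), which returns NULL rather than an error when the clock is not described in the device tree, and the clk consumer API treats a NULL clock as a harmless no-op. A minimal sketch of that idiom follows; the function name and the "example_ck" clock name are illustrative placeholders, not part of the driver.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Sketch only, assuming a hypothetical "example_ck" clock name.
 * devm_clk_get_optional() returns NULL when the clock is absent, and
 * clk_prepare_enable(NULL) simply returns 0, so the enable path does
 * not need a per-clock existence check.
 */
static int example_enable_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *ck;

	ck = devm_clk_get_optional(dev, "example_ck");
	if (IS_ERR(ck))
		return PTR_ERR(ck);	/* e.g. -EPROBE_DEFER */

	*out = ck;
	return clk_prepare_enable(ck);	/* no-op when ck is NULL */
}

The matching disable path can call clk_disable_unprepare() unconditionally for the same reason, which is what mtk_pcie_suspend_noirq() relies on when it tears down all six clocks per port without checking which ones actually exist.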
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 - 2016 Cavium, Inc. */ #include <linux/bitfield.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "../pci.h" #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) #define PEM_CFG_WR 0x28 #define PEM_CFG_RD 0x30 /* * Enhanced Configuration Access Mechanism (ECAM) * * N.B. This is a non-standard platform-specific ECAM bus shift value. For * standard values defined in the PCI Express Base Specification see * include/linux/pci-ecam.h. */ #define THUNDER_PCIE_ECAM_BUS_SHIFT 24 struct thunder_pem_pci { u32 ea_entry[3]; void __iomem *pem_reg_base; }; static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u64 read_val, tmp_val; struct pci_config_window *cfg = bus->sysdata; struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; if (devfn != 0 || where >= 2048) return PCIBIOS_DEVICE_NOT_FOUND; /* * 32-bit accesses only. Write the address to the low order * bits of PEM_CFG_RD, then trigger the read by reading back. * The config data lands in the upper 32-bits of PEM_CFG_RD. */ read_val = where & ~3ull; writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); read_val >>= 32; /* * The config space contains some garbage, fix it up. Also * synthesize an EA capability for the BAR used by MSI-X. */ switch (where & ~3) { case 0x40: read_val &= 0xffff00ff; read_val |= 0x00007000; /* Skip MSI CAP */ break; case 0x70: /* Express Cap */ /* * Change PME interrupt to vector 2 on T88 where it * reads as 0, else leave it alone. */ if (!(read_val & (0x1f << 25))) read_val |= (2u << 25); break; case 0xb0: /* MSI-X Cap */ /* TableSize=2 or 4, Next Cap is EA */ read_val &= 0xc00000ff; /* * If Express Cap(0x70) raw PME vector reads as 0 we are on * T88 and TableSize is reported as 4, else TableSize * is 2. */ writeq(0x70, pem_pci->pem_reg_base + PEM_CFG_RD); tmp_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); tmp_val >>= 32; if (!(tmp_val & (0x1f << 25))) read_val |= 0x0003bc00; else read_val |= 0x0001bc00; break; case 0xb4: /* Table offset=0, BIR=0 */ read_val = 0x00000000; break; case 0xb8: /* BPA offset=0xf0000, BIR=0 */ read_val = 0x000f0000; break; case 0xbc: /* EA, 1 entry, no next Cap */ read_val = 0x00010014; break; case 0xc0: /* DW2 for type-1 */ read_val = 0x00000000; break; case 0xc4: /* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */ read_val = 0x80ff0003; break; case 0xc8: read_val = pem_pci->ea_entry[0]; break; case 0xcc: read_val = pem_pci->ea_entry[1]; break; case 0xd0: read_val = pem_pci->ea_entry[2]; break; default: break; } read_val >>= (8 * (where & 3)); switch (size) { case 1: read_val &= 0xff; break; case 2: read_val &= 0xffff; break; default: break; } *val = read_val; return PCIBIOS_SUCCESSFUL; } static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct pci_config_window *cfg = bus->sysdata; if (bus->number < cfg->busr.start || bus->number > cfg->busr.end) return PCIBIOS_DEVICE_NOT_FOUND; /* * The first device on the bus is the PEM PCIe bridge. * Special case its config access. 
*/ if (bus->number == cfg->busr.start) return thunder_pem_bridge_read(bus, devfn, where, size, val); return pci_generic_config_read(bus, devfn, where, size, val); } /* * Some of the w1c_bits below also include read-only or non-writable * reserved bits, this makes the code simpler and is OK as the bits * are not affected by writing zeros to them. */ static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned) { u32 w1c_bits = 0; switch (where_aligned) { case 0x04: /* Command/Status */ case 0x1c: /* Base and I/O Limit/Secondary Status */ w1c_bits = 0xff000000; break; case 0x44: /* Power Management Control and Status */ w1c_bits = 0xfffffe00; break; case 0x78: /* Device Control/Device Status */ case 0x80: /* Link Control/Link Status */ case 0x88: /* Slot Control/Slot Status */ case 0x90: /* Root Status */ case 0xa0: /* Link Control 2 Registers/Link Status 2 */ w1c_bits = 0xffff0000; break; case 0x104: /* Uncorrectable Error Status */ case 0x110: /* Correctable Error Status */ case 0x130: /* Error Status */ case 0x160: /* Link Control 4 */ w1c_bits = 0xffffffff; break; default: break; } return w1c_bits; } /* Some bits must be written to one so they appear to be read-only. */ static u32 thunder_pem_bridge_w1_bits(u64 where_aligned) { u32 w1_bits; switch (where_aligned) { case 0x1c: /* I/O Base / I/O Limit, Secondary Status */ /* Force 32-bit I/O addressing. */ w1_bits = 0x0101; break; case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */ /* Force 64-bit addressing */ w1_bits = 0x00010001; break; default: w1_bits = 0; break; } return w1_bits; } static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct pci_config_window *cfg = bus->sysdata; struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; u64 write_val, read_val; u64 where_aligned = where & ~3ull; u32 mask = 0; if (devfn != 0 || where >= 2048) return PCIBIOS_DEVICE_NOT_FOUND; /* * 32-bit accesses only. If the write is for a size smaller * than 32-bits, we must first read the 32-bit value and merge * in the desired bits and then write the whole 32-bits back * out. */ switch (size) { case 1: writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); read_val >>= 32; mask = ~(0xff << (8 * (where & 3))); read_val &= mask; val = (val & 0xff) << (8 * (where & 3)); val |= (u32)read_val; break; case 2: writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); read_val >>= 32; mask = ~(0xffff << (8 * (where & 3))); read_val &= mask; val = (val & 0xffff) << (8 * (where & 3)); val |= (u32)read_val; break; default: break; } /* * By expanding the write width to 32 bits, we may * inadvertently hit some W1C bits that were not intended to * be written. Calculate the mask that must be applied to the * data to be written to avoid these cases. */ if (mask) { u32 w1c_bits = thunder_pem_bridge_w1c_bits(where); if (w1c_bits) { mask &= w1c_bits; val &= ~mask; } } /* * Some bits must be read-only with value of one. Since the * access method allows these to be cleared if a zero is * written, force them to one before writing. */ val |= thunder_pem_bridge_w1_bits(where_aligned); /* * Low order bits are the config address, the high order 32 * bits are the data to be written. 
*/ write_val = (((u64)val) << 32) | where_aligned; writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); return PCIBIOS_SUCCESSFUL; } static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct pci_config_window *cfg = bus->sysdata; if (bus->number < cfg->busr.start || bus->number > cfg->busr.end) return PCIBIOS_DEVICE_NOT_FOUND; /* * The first device on the bus is the PEM PCIe bridge. * Special case its config access. */ if (bus->number == cfg->busr.start) return thunder_pem_bridge_write(bus, devfn, where, size, val); return pci_generic_config_write(bus, devfn, where, size, val); } static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, struct resource *res_pem) { struct thunder_pem_pci *pem_pci; resource_size_t bar4_start; pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL); if (!pem_pci) return -ENOMEM; pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000); if (!pem_pci->pem_reg_base) return -ENOMEM; /* * The MSI-X BAR for the PEM and AER interrupts is located at * a fixed offset from the PEM register base. Generate a * fragment of the synthesized Enhanced Allocation capability * structure here for the BAR. */ bar4_start = res_pem->start + 0xf00000; pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2; pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u; pem_pci->ea_entry[2] = upper_32_bits(bar4_start); cfg->priv = pem_pci; return 0; } #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) #define PEM_RES_BASE 0x87e0c0000000ULL #define PEM_NODE_MASK GENMASK_ULL(45, 44) #define PEM_INDX_MASK GENMASK_ULL(26, 24) #define PEM_MIN_DOM_IN_NODE 4 #define PEM_MAX_DOM_IN_NODE 10 static void thunder_pem_reserve_range(struct device *dev, int seg, struct resource *r) { resource_size_t start = r->start, end = r->end; struct resource *res; const char *regionid; regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); if (!regionid) return; res = request_mem_region(start, end - start + 1, regionid); if (res) res->flags &= ~IORESOURCE_BUSY; else kfree(regionid); dev_info(dev, "%pR %s reserved\n", r, res ? "has been" : "could not be"); } static void thunder_pem_legacy_fw(struct acpi_pci_root *root, struct resource *res_pem) { int node = acpi_get_node(root->device->handle); int index; if (node == NUMA_NO_NODE) node = 0; index = root->segment - PEM_MIN_DOM_IN_NODE; index -= node * PEM_MAX_DOM_IN_NODE; res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | FIELD_PREP(PEM_INDX_MASK, index); res_pem->flags = IORESOURCE_MEM; } static int thunder_pem_acpi_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct acpi_device *adev = to_acpi_device(dev); struct acpi_pci_root *root = acpi_driver_data(adev); struct resource *res_pem; int ret; res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL); if (!res_pem) return -ENOMEM; ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem); /* * If we fail to gather resources it means that we run with old * FW where we need to calculate PEM-specific resources manually. */ if (ret) { thunder_pem_legacy_fw(root, res_pem); /* * Reserve 64K size PEM specific resources. The full 16M range * size is required for thunder_pem_init() call. */ res_pem->end = res_pem->start + SZ_64K - 1; thunder_pem_reserve_range(dev, root->segment, res_pem); res_pem->end = res_pem->start + SZ_16M - 1; /* Reserve PCI configuration space as well. 
*/ thunder_pem_reserve_range(dev, root->segment, &cfg->res); } return thunder_pem_init(dev, cfg, res_pem); } const struct pci_ecam_ops thunder_pem_ecam_ops = { .bus_shift = THUNDER_PCIE_ECAM_BUS_SHIFT, .init = thunder_pem_acpi_init, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = thunder_pem_config_read, .write = thunder_pem_config_write, } }; #endif #ifdef CONFIG_PCI_HOST_THUNDER_PEM static int thunder_pem_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct platform_device *pdev = to_platform_device(dev); struct resource *res_pem; if (!dev->of_node) return -EINVAL; /* * The second register range is the PEM bridge to the PCIe * bus. It has a different config access method than those * devices behind the bridge. */ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res_pem) { dev_err(dev, "missing \"reg[1]\"property\n"); return -EINVAL; } return thunder_pem_init(dev, cfg, res_pem); } static const struct pci_ecam_ops pci_thunder_pem_ops = { .bus_shift = THUNDER_PCIE_ECAM_BUS_SHIFT, .init = thunder_pem_platform_init, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = thunder_pem_config_read, .write = thunder_pem_config_write, } }; static const struct of_device_id thunder_pem_of_match[] = { { .compatible = "cavium,pci-host-thunder-pem", .data = &pci_thunder_pem_ops, }, { }, }; static struct platform_driver thunder_pem_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = thunder_pem_of_match, .suppress_bind_attrs = true, }, .probe = pci_host_common_probe, }; builtin_platform_driver(thunder_pem_driver); #endif #endif
linux-master
drivers/pci/controller/pci-thunder-pem.c
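One detail of the Thunder PEM bridge accessors above is worth spelling out: the hardware only supports 32-bit config accesses, so thunder_pem_bridge_write() widens 8- and 16-bit writes by reading the dword back, merging in the new bytes, and then masking off write-1-to-clear (W1C) bits that the caller did not actually address, so the read-back copy of those bits cannot clear them. Below is a minimal, user-space style sketch of just that merge-and-mask arithmetic; merge_narrow_write() is a hypothetical helper, not a function in the driver, and it assumes naturally aligned offsets as config writes are.

#include <stdint.h>

/*
 * Sketch of the widen-and-merge step: 'cur' is the current 32-bit
 * register value, 'val' the narrow value being written, 'where' the
 * config offset, 'size' is 1 or 2, and 'w1c' the mask of
 * write-1-to-clear bits for this register.
 */
static uint32_t merge_narrow_write(uint32_t cur, uint32_t val,
				   unsigned int where, unsigned int size,
				   uint32_t w1c)
{
	unsigned int shift = 8 * (where & 3);
	uint32_t field = (size == 1 ? 0xffu : 0xffffu) << shift;
	uint32_t out;

	/* keep the untouched bytes, insert the newly written bytes */
	out = (cur & ~field) | ((val << shift) & field);

	/* avoid re-writing 1s to W1C bits outside the addressed bytes */
	out &= ~(w1c & ~field);

	return out;
}

The driver additionally ORs in thunder_pem_bridge_w1_bits() after this step so that bits which must always read as one are never cleared by the widened write.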
// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Aardvark PCIe controller, used on Marvell Armada * 3700. * * Copyright (C) 2016 Marvell * * Author: Hezi Shahmoon <[email protected]> */ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/init.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_gpio.h> #include <linux/of_pci.h> #include "../pci.h" #include "../pci-bridge-emul.h" /* PCIe core registers */ #define PCIE_CORE_DEV_ID_REG 0x0 #define PCIE_CORE_CMD_STATUS_REG 0x4 #define PCIE_CORE_DEV_REV_REG 0x8 #define PCIE_CORE_SSDEV_ID_REG 0x2c #define PCIE_CORE_PCIEXP_CAP 0xc0 #define PCIE_CORE_PCIERR_CAP 0x100 #define PCIE_CORE_ERR_CAPCTL_REG 0x118 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) /* PIO registers base address and register offsets */ #define PIO_BASE_ADDR 0x4000 #define PIO_CTRL (PIO_BASE_ADDR + 0x0) #define PIO_CTRL_TYPE_MASK GENMASK(3, 0) #define PIO_CTRL_ADDR_WIN_DISABLE BIT(24) #define PIO_STAT (PIO_BASE_ADDR + 0x4) #define PIO_COMPLETION_STATUS_SHIFT 7 #define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) #define PIO_COMPLETION_STATUS_OK 0 #define PIO_COMPLETION_STATUS_UR 1 #define PIO_COMPLETION_STATUS_CRS 2 #define PIO_COMPLETION_STATUS_CA 4 #define PIO_NON_POSTED_REQ BIT(10) #define PIO_ERR_STATUS BIT(11) #define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8) #define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc) #define PIO_WR_DATA (PIO_BASE_ADDR + 0x10) #define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14) #define PIO_RD_DATA (PIO_BASE_ADDR + 0x18) #define PIO_START (PIO_BASE_ADDR + 0x1c) #define PIO_ISR (PIO_BASE_ADDR + 0x20) #define PIO_ISRM (PIO_BASE_ADDR + 0x24) /* Aardvark Control registers */ #define CONTROL_BASE_ADDR 0x4800 #define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0) #define PCIE_GEN_SEL_MSK 0x3 #define PCIE_GEN_SEL_SHIFT 0x0 #define SPEED_GEN_1 0 #define SPEED_GEN_2 1 #define SPEED_GEN_3 2 #define IS_RC_MSK 1 #define IS_RC_SHIFT 2 #define LANE_CNT_MSK 0x18 #define LANE_CNT_SHIFT 0x3 #define LANE_COUNT_1 (0 << LANE_CNT_SHIFT) #define LANE_COUNT_2 (1 << LANE_CNT_SHIFT) #define LANE_COUNT_4 (2 << LANE_CNT_SHIFT) #define LANE_COUNT_8 (3 << LANE_CNT_SHIFT) #define LINK_TRAINING_EN BIT(6) #define LEGACY_INTA BIT(28) #define LEGACY_INTB BIT(29) #define LEGACY_INTC BIT(30) #define LEGACY_INTD BIT(31) #define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4) #define HOT_RESET_GEN BIT(0) #define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8) #define PCIE_CORE_CTRL2_RESERVED 0x7 #define PCIE_CORE_CTRL2_TD_ENABLE BIT(4) #define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) #define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) #define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) #define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14) #define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1) #define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2) #define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30) #define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) #define PCIE_MSG_PM_PME_MASK BIT(7) #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) #define PCIE_ISR0_MSI_INT_PENDING BIT(24) #define PCIE_ISR0_CORR_ERR BIT(11) #define PCIE_ISR0_NFAT_ERR BIT(12) #define PCIE_ISR0_FAT_ERR BIT(13) #define 
PCIE_ISR0_ERR_MASK GENMASK(13, 11) #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) #define PCIE_ISR0_ALL_MASK GENMASK(31, 0) #define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) #define PCIE_ISR1_FLUSH BIT(5) #define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) #define PCIE_ISR1_ALL_MASK GENMASK(31, 0) #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) #define PCIE_MSI_ALL_MASK GENMASK(31, 0) #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) #define PCIE_MSI_DATA_MASK GENMASK(15, 0) /* PCIe window configuration */ #define OB_WIN_BASE_ADDR 0x4c00 #define OB_WIN_BLOCK_SIZE 0x20 #define OB_WIN_COUNT 8 #define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ OB_WIN_BLOCK_SIZE * (win) + \ (offset)) #define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) #define OB_WIN_ENABLE BIT(0) #define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) #define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) #define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) #define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) #define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) #define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) #define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4) #define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24) #define OB_WIN_FUNC_NUM_SHIFT 24 #define OB_WIN_FUNC_NUM_ENABLE BIT(23) #define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20) #define OB_WIN_BUS_NUM_BITS_SHIFT 20 #define OB_WIN_MSG_CODE_ENABLE BIT(22) #define OB_WIN_MSG_CODE_MASK GENMASK(21, 14) #define OB_WIN_MSG_CODE_SHIFT 14 #define OB_WIN_MSG_PAYLOAD_LEN BIT(12) #define OB_WIN_ATTR_ENABLE BIT(11) #define OB_WIN_ATTR_TC_MASK GENMASK(10, 8) #define OB_WIN_ATTR_TC_SHIFT 8 #define OB_WIN_ATTR_RELAXED BIT(7) #define OB_WIN_ATTR_NOSNOOP BIT(6) #define OB_WIN_ATTR_POISON BIT(5) #define OB_WIN_ATTR_IDO BIT(4) #define OB_WIN_TYPE_MASK GENMASK(3, 0) #define OB_WIN_TYPE_SHIFT 0 #define OB_WIN_TYPE_MEM 0x0 #define OB_WIN_TYPE_IO 0x4 #define OB_WIN_TYPE_CONFIG_TYPE0 0x8 #define OB_WIN_TYPE_CONFIG_TYPE1 0x9 #define OB_WIN_TYPE_MSG 0xc /* LMI registers base address and register offsets */ #define LMI_BASE_ADDR 0x6000 #define CFG_REG (LMI_BASE_ADDR + 0x0) #define LTSSM_SHIFT 24 #define LTSSM_MASK 0x3f #define RC_BAR_CONFIG 0x300 /* LTSSM values in CFG_REG */ enum { LTSSM_DETECT_QUIET = 0x0, LTSSM_DETECT_ACTIVE = 0x1, LTSSM_POLLING_ACTIVE = 0x2, LTSSM_POLLING_COMPLIANCE = 0x3, LTSSM_POLLING_CONFIGURATION = 0x4, LTSSM_CONFIG_LINKWIDTH_START = 0x5, LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6, LTSSM_CONFIG_LANENUM_ACCEPT = 0x7, LTSSM_CONFIG_LANENUM_WAIT = 0x8, LTSSM_CONFIG_COMPLETE = 0x9, LTSSM_CONFIG_IDLE = 0xa, LTSSM_RECOVERY_RCVR_LOCK = 0xb, LTSSM_RECOVERY_SPEED = 0xc, LTSSM_RECOVERY_RCVR_CFG = 0xd, LTSSM_RECOVERY_IDLE = 0xe, LTSSM_L0 = 0x10, LTSSM_RX_L0S_ENTRY = 0x11, LTSSM_RX_L0S_IDLE = 0x12, LTSSM_RX_L0S_FTS = 0x13, LTSSM_TX_L0S_ENTRY = 0x14, LTSSM_TX_L0S_IDLE = 0x15, LTSSM_TX_L0S_FTS = 0x16, LTSSM_L1_ENTRY = 0x17, LTSSM_L1_IDLE = 0x18, LTSSM_L2_IDLE = 0x19, LTSSM_L2_TRANSMIT_WAKE = 0x1a, LTSSM_DISABLED = 0x20, LTSSM_LOOPBACK_ENTRY_MASTER = 0x21, LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22, LTSSM_LOOPBACK_EXIT_MASTER = 0x23, LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24, LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25, LTSSM_LOOPBACK_EXIT_SLAVE = 0x26, 
LTSSM_HOT_RESET = 0x27, LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28, LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29, LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a, LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b, }; #define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44) /* PCIe core controller registers */ #define CTRL_CORE_BASE_ADDR 0x18000 #define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0) #define CTRL_MODE_SHIFT 0x0 #define CTRL_MODE_MASK 0x1 #define PCIE_CORE_MODE_DIRECT 0x0 #define PCIE_CORE_MODE_COMMAND 0x1 /* PCIe Central Interrupts Registers */ #define CENTRAL_INT_BASE_ADDR 0x1b000 #define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0) #define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4) #define PCIE_IRQ_CMDQ_INT BIT(0) #define PCIE_IRQ_MSI_STATUS_INT BIT(1) #define PCIE_IRQ_CMD_SENT_DONE BIT(3) #define PCIE_IRQ_DMA_INT BIT(4) #define PCIE_IRQ_IB_DXFERDONE BIT(5) #define PCIE_IRQ_OB_DXFERDONE BIT(6) #define PCIE_IRQ_OB_RXFERDONE BIT(7) #define PCIE_IRQ_COMPQ_INT BIT(12) #define PCIE_IRQ_DIR_RD_DDR_DET BIT(13) #define PCIE_IRQ_DIR_WR_DDR_DET BIT(14) #define PCIE_IRQ_CORE_INT BIT(16) #define PCIE_IRQ_CORE_INT_PIO BIT(17) #define PCIE_IRQ_DPMU_INT BIT(18) #define PCIE_IRQ_PCIE_MIS_INT BIT(19) #define PCIE_IRQ_MSI_INT1_DET BIT(20) #define PCIE_IRQ_MSI_INT2_DET BIT(21) #define PCIE_IRQ_RC_DBELL_DET BIT(22) #define PCIE_IRQ_EP_STATUS BIT(23) #define PCIE_IRQ_ALL_MASK GENMASK(31, 0) #define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT /* Transaction types */ #define PCIE_CONFIG_RD_TYPE0 0x8 #define PCIE_CONFIG_RD_TYPE1 0x9 #define PCIE_CONFIG_WR_TYPE0 0xa #define PCIE_CONFIG_WR_TYPE1 0xb #define PIO_RETRY_CNT 750000 /* 1.5 s */ #define PIO_RETRY_DELAY 2 /* 2 us*/ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 #define LINK_WAIT_USLEEP_MAX 100000 #define RETRAIN_WAIT_MAX_RETRIES 10 #define RETRAIN_WAIT_USLEEP_US 2000 #define MSI_IRQ_NUM 32 #define CFG_RD_CRS_VAL 0xffff0001 struct advk_pcie { struct platform_device *pdev; void __iomem *base; struct { phys_addr_t match; phys_addr_t remap; phys_addr_t mask; u32 actions; } wins[OB_WIN_COUNT]; u8 wins_count; struct irq_domain *rp_irq_domain; struct irq_domain *irq_domain; struct irq_chip irq_chip; raw_spinlock_t irq_lock; struct irq_domain *msi_domain; struct irq_domain *msi_inner_domain; raw_spinlock_t msi_irq_lock; DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); struct mutex msi_used_lock; int link_gen; struct pci_bridge_emul bridge; struct gpio_desc *reset_gpio; struct phy *phy; }; static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg) { writel(val, pcie->base + reg); } static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) { return readl(pcie->base + reg); } static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie) { u32 val; u8 ltssm_state; val = advk_readl(pcie, CFG_REG); ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; return ltssm_state; } static inline bool advk_pcie_link_up(struct advk_pcie *pcie) { /* check if LTSSM is in normal operation - some L* state */ u8 ltssm_state = advk_pcie_ltssm_state(pcie); return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED; } static inline bool advk_pcie_link_active(struct advk_pcie *pcie) { /* * According to PCIe Base specification 3.0, Table 4-14: Link * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle * is Link Up mapped to LTSSM Configuration.Idle, Recovery, L0, * L0s, L1 and L2 states. And according to 3.2.1. Data Link * Control and Management State Machine Rules is DL Up status * reported in DL Active state. 
*/ u8 ltssm_state = advk_pcie_ltssm_state(pcie); return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED; } static inline bool advk_pcie_link_training(struct advk_pcie *pcie) { /* * According to PCIe Base specification 3.0, Table 4-14: Link * Status Mapped to the LTSSM is Link Training mapped to LTSSM * Configuration and Recovery states. */ u8 ltssm_state = advk_pcie_ltssm_state(pcie); return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START && ltssm_state < LTSSM_L0) || (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 && ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3)); } static int advk_pcie_wait_for_link(struct advk_pcie *pcie) { int retries; /* check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (advk_pcie_link_up(pcie)) return 0; usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } return -ETIMEDOUT; } static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie) { size_t retries; for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) { if (advk_pcie_link_training(pcie)) break; udelay(RETRAIN_WAIT_USLEEP_US); } } static void advk_pcie_issue_perst(struct advk_pcie *pcie) { if (!pcie->reset_gpio) return; /* 10ms delay is needed for some cards */ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n"); gpiod_set_value_cansleep(pcie->reset_gpio, 1); usleep_range(10000, 11000); gpiod_set_value_cansleep(pcie->reset_gpio, 0); } static void advk_pcie_train_link(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; u32 reg; int ret; /* * Setup PCIe rev / gen compliance based on device tree property * 'max-link-speed' which also forces maximal link speed. */ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); reg &= ~PCIE_GEN_SEL_MSK; if (pcie->link_gen == 3) reg |= SPEED_GEN_3; else if (pcie->link_gen == 2) reg |= SPEED_GEN_2; else reg |= SPEED_GEN_1; advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); /* * Set maximal link speed value also into PCIe Link Control 2 register. * Armada 3700 Functional Specification says that default value is based * on SPEED_GEN but tests showed that default value is always 8.0 GT/s. */ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2); reg &= ~PCI_EXP_LNKCTL2_TLS; if (pcie->link_gen == 3) reg |= PCI_EXP_LNKCTL2_TLS_8_0GT; else if (pcie->link_gen == 2) reg |= PCI_EXP_LNKCTL2_TLS_5_0GT; else reg |= PCI_EXP_LNKCTL2_TLS_2_5GT; advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2); /* Enable link training after selecting PCIe generation */ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); reg |= LINK_TRAINING_EN; advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); /* * Reset PCIe card via PERST# signal. Some cards are not detected * during link training when they are in some non-initial state. */ advk_pcie_issue_perst(pcie); /* * PERST# signal could have been asserted by pinctrl subsystem before * probe() callback has been called or issued explicitly by reset gpio * function advk_pcie_issue_perst(), making the endpoint going into * fundamental reset. As required by PCI Express spec (PCI Express * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1 * Conventional Reset) a delay for at least 100ms after such a reset * before sending a Configuration Request to the device is needed. * So wait until PCIe link is up. Function advk_pcie_wait_for_link() * waits for link at least 900ms. 
*/ ret = advk_pcie_wait_for_link(pcie); if (ret < 0) dev_err(dev, "link never came up\n"); else dev_info(dev, "link up\n"); } /* * Set PCIe address window register which could be used for memory * mapping. */ static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num, phys_addr_t match, phys_addr_t remap, phys_addr_t mask, u32 actions) { advk_writel(pcie, OB_WIN_ENABLE | lower_32_bits(match), OB_WIN_MATCH_LS(win_num)); advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num)); advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num)); advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num)); advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num)); advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num)); advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num)); } static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num) { advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num)); advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num)); advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num)); advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num)); advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num)); advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num)); advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num)); } static void advk_pcie_setup_hw(struct advk_pcie *pcie) { phys_addr_t msi_addr; u32 reg; int i; /* * Configure PCIe Reference clock. Direction is from the PCIe * controller to the endpoint card, so enable transmitting of * Reference clock differential signal off-chip and disable * receiving off-chip differential signal. */ reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG); reg |= PCIE_CORE_REF_CLK_TX_ENABLE; reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE; advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG); /* Set to Direct mode */ reg = advk_readl(pcie, CTRL_CONFIG_REG); reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT); reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT); advk_writel(pcie, reg, CTRL_CONFIG_REG); /* Set PCI global control register to RC mode */ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); reg |= (IS_RC_MSK << IS_RC_SHIFT); advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); /* * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab. * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor * id in high 16 bits. Updating this register changes readback value of * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround * for erratum 4.1: "The value of device and vendor ID is incorrect". */ reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL; advk_writel(pcie, reg, VENDOR_ID_REG); /* * Change Class Code of PCI Bridge device to PCI Bridge (0x600400), * because the default value is Mass storage controller (0x010400). * * Note that this Aardvark PCI Bridge does not have compliant Type 1 * Configuration Space and it even cannot be accessed via Aardvark's * PCI config space access method. Something like config space is * available in internal Aardvark registers starting at offset 0x0 * and is reported as Type 0. In range 0x10 - 0x34 it has totally * different registers. * * Therefore driver uses emulation of PCI Bridge which emulates * access to configuration space via internal Aardvark registers or * emulated configuration buffer. 
*/ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG); reg &= ~0xffffff00; reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG); /* Disable Root Bridge I/O space, memory space and bus mastering */ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); /* Set Advanced Error Capabilities and Control PF0 register */ reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | PCIE_CORE_ERR_CAPCTL_ECRC_CHCK | PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV; advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG); /* Set PCIe Device Control register */ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL); reg &= ~PCI_EXP_DEVCTL_RELAX_EN; reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; reg &= ~PCI_EXP_DEVCTL_PAYLOAD; reg &= ~PCI_EXP_DEVCTL_READRQ; reg |= PCI_EXP_DEVCTL_PAYLOAD_512B; reg |= PCI_EXP_DEVCTL_READRQ_512B; advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL); /* Program PCIe Control 2 to disable strict ordering */ reg = PCIE_CORE_CTRL2_RESERVED | PCIE_CORE_CTRL2_TD_ENABLE; advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); /* Set lane X1 */ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); reg &= ~LANE_CNT_MSK; reg |= LANE_COUNT_1; advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); /* Set MSI address */ msi_addr = virt_to_phys(pcie); advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG); advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG); /* Enable MSI */ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); reg |= PCIE_CORE_CTRL2_MSI_ENABLE; advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); /* Clear all interrupts */ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); /* Disable All ISR0/1 and MSI Sources */ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); /* Unmask summary MSI interrupt */ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); reg &= ~PCIE_ISR0_MSI_INT_PENDING; advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); /* Unmask PME interrupt for processing of PME requester */ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); reg &= ~PCIE_MSG_PM_PME_MASK; advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); /* Enable summary interrupt for GIC SPI source */ reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); /* * Enable AXI address window location generation: * When it is enabled, the default outbound window * configurations (Default User Field: 0xD0074CFC) * are used to transparent address translation for * the outbound transactions. Thus, PCIe address * windows are not required for transparent memory * access when default outbound window configuration * is set for memory access. */ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE; advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); /* * Set memory access in Default User Field so it * is not required to configure PCIe address for * transparent memory access. */ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS); /* * Bypass the address window mapping for PIO: * Since PIO access already contains all required * info over AXI interface by PIO registers, the * address window is not required. 
*/ reg = advk_readl(pcie, PIO_CTRL); reg |= PIO_CTRL_ADDR_WIN_DISABLE; advk_writel(pcie, reg, PIO_CTRL); /* * Configure PCIe address windows for non-memory or * non-transparent access as by default PCIe uses * transparent memory access. */ for (i = 0; i < pcie->wins_count; i++) advk_pcie_set_ob_win(pcie, i, pcie->wins[i].match, pcie->wins[i].remap, pcie->wins[i].mask, pcie->wins[i].actions); /* Disable remaining PCIe outbound windows */ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++) advk_pcie_disable_ob_win(pcie, i); advk_pcie_train_link(pcie); } static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val) { struct device *dev = &pcie->pdev->dev; u32 reg; unsigned int status; char *strcomp_status, *str_posted; int ret; reg = advk_readl(pcie, PIO_STAT); status = (reg & PIO_COMPLETION_STATUS_MASK) >> PIO_COMPLETION_STATUS_SHIFT; /* * According to HW spec, the PIO status check sequence as below: * 1) even if COMPLETION_STATUS(bit9:7) indicates successful, * it still needs to check Error Status(bit11), only when this bit * indicates no error happen, the operation is successful. * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only * means a PIO write error, and for PIO read it is successful with * a read value of 0xFFFFFFFF. * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7) * only means a PIO write error, and for PIO read it is successful * with a read value of 0xFFFF0001. * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means * error for both PIO read and PIO write operation. * 5) other errors are indicated as 'unknown'. */ switch (status) { case PIO_COMPLETION_STATUS_OK: if (reg & PIO_ERR_STATUS) { strcomp_status = "COMP_ERR"; ret = -EFAULT; break; } /* Get the read result */ if (val) *val = advk_readl(pcie, PIO_RD_DATA); /* No error */ strcomp_status = NULL; ret = 0; break; case PIO_COMPLETION_STATUS_UR: strcomp_status = "UR"; ret = -EOPNOTSUPP; break; case PIO_COMPLETION_STATUS_CRS: if (allow_crs && val) { /* PCIe r4.0, sec 2.3.2, says: * If CRS Software Visibility is enabled: * For a Configuration Read Request that includes both * bytes of the Vendor ID field of a device Function's * Configuration Space Header, the Root Complex must * complete the Request to the host by returning a * read-data value of 0001h for the Vendor ID field and * all '1's for any additional bytes included in the * request. * * So CRS in this case is not an error status. */ *val = CFG_RD_CRS_VAL; strcomp_status = NULL; ret = 0; break; } /* PCIe r4.0, sec 2.3.2, says: * If CRS Software Visibility is not enabled, the Root Complex * must re-issue the Configuration Request as a new Request. * If CRS Software Visibility is enabled: For a Configuration * Write Request or for any other Configuration Read Request, * the Root Complex must re-issue the Configuration Request as * a new Request. * A Root Complex implementation may choose to limit the number * of Configuration Request/CRS Completion Status loops before * determining that something is wrong with the target of the * Request and taking appropriate action, e.g., complete the * Request to the host as a failed transaction. * * So return -EAGAIN and caller (pci-aardvark.c driver) will * re-issue request again up to the PIO_RETRY_CNT retries. 
*/ strcomp_status = "CRS"; ret = -EAGAIN; break; case PIO_COMPLETION_STATUS_CA: strcomp_status = "CA"; ret = -ECANCELED; break; default: strcomp_status = "Unknown"; ret = -EINVAL; break; } if (!strcomp_status) return ret; if (reg & PIO_NON_POSTED_REQ) str_posted = "Non-posted"; else str_posted = "Posted"; dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n", str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); return ret; } static int advk_pcie_wait_pio(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; int i; for (i = 1; i <= PIO_RETRY_CNT; i++) { u32 start, isr; start = advk_readl(pcie, PIO_START); isr = advk_readl(pcie, PIO_ISR); if (!start && isr) return i; udelay(PIO_RETRY_DELAY); } dev_err(dev, "PIO read/write transfer time out\n"); return -ETIMEDOUT; } static pci_bridge_emul_read_status_t advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { struct advk_pcie *pcie = bridge->data; switch (reg) { case PCI_COMMAND: *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); return PCI_BRIDGE_EMUL_HANDLED; case PCI_INTERRUPT_LINE: { /* * From the whole 32bit register we support reading from HW only * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR. * Other bits are retrieved only from emulated config buffer. */ __le32 *cfgspace = (__le32 *)&bridge->conf; u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK) val &= ~(PCI_BRIDGE_CTL_SERR << 16); else val |= PCI_BRIDGE_CTL_SERR << 16; if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN) val |= PCI_BRIDGE_CTL_BUS_RESET << 16; else val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16); *value = val; return PCI_BRIDGE_EMUL_HANDLED; } default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } } static void advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) { struct advk_pcie *pcie = bridge->data; switch (reg) { case PCI_COMMAND: advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG); break; case PCI_INTERRUPT_LINE: /* * According to Figure 6-3: Pseudo Logic Diagram for Error * Message Controls in PCIe base specification, SERR# Enable bit * in Bridge Control register enable receiving of ERR_* messages */ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) { u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); if (new & (PCI_BRIDGE_CTL_SERR << 16)) val &= ~PCIE_ISR0_ERR_MASK; else val |= PCIE_ISR0_ERR_MASK; advk_writel(pcie, val, PCIE_ISR0_MASK_REG); } if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG); if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) val |= HOT_RESET_GEN; else val &= ~HOT_RESET_GEN; advk_writel(pcie, val, PCIE_CORE_CTRL1_REG); } break; default: break; } } static pci_bridge_emul_read_status_t advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { struct advk_pcie *pcie = bridge->data; switch (reg) { /* * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are * also supported, but do not need to be handled here, because their * values are stored in emulated config space buffer, and we read them * from there when needed. */ case PCI_EXP_LNKCAP: { u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); /* * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0. * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag. 
*/ val |= PCI_EXP_LNKCAP_DLLLARC; *value = val; return PCI_BRIDGE_EMUL_HANDLED; } case PCI_EXP_LNKCTL: { /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) & ~(PCI_EXP_LNKSTA_LT << 16); if (advk_pcie_link_training(pcie)) val |= (PCI_EXP_LNKSTA_LT << 16); if (advk_pcie_link_active(pcie)) val |= (PCI_EXP_LNKSTA_DLLLA << 16); *value = val; return PCI_BRIDGE_EMUL_HANDLED; } case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: case PCI_EXP_DEVCAP2: case PCI_EXP_DEVCTL2: case PCI_EXP_LNKCAP2: case PCI_EXP_LNKCTL2: *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); return PCI_BRIDGE_EMUL_HANDLED; default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } } static void advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) { struct advk_pcie *pcie = bridge->data; switch (reg) { case PCI_EXP_LNKCTL: advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); if (new & PCI_EXP_LNKCTL_RL) advk_pcie_wait_for_retrain(pcie); break; case PCI_EXP_RTCTL: { u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl); /* Only emulation of PMEIE and CRSSVE bits is provided */ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE; bridge->pcie_conf.rootctl = cpu_to_le16(rootctl); break; } /* * PCI_EXP_RTSTA is also supported, but does not need to be handled * here, because its value is stored in emulated config space buffer, * and we write it there when needed. */ case PCI_EXP_DEVCTL: case PCI_EXP_DEVCTL2: case PCI_EXP_LNKCTL2: advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); break; default: break; } } static pci_bridge_emul_read_status_t advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge, int reg, u32 *value) { struct advk_pcie *pcie = bridge->data; switch (reg) { case 0: *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg); /* * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada * 3700 Functional Specification does not document registers * at those addresses. * * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error * Reporting Capability header the last Extended Capability. * If we obtain documentation for those registers in the * future, this can be changed. 
*/ *value &= 0x000fffff; return PCI_BRIDGE_EMUL_HANDLED; case PCI_ERR_UNCOR_STATUS: case PCI_ERR_UNCOR_MASK: case PCI_ERR_UNCOR_SEVER: case PCI_ERR_COR_STATUS: case PCI_ERR_COR_MASK: case PCI_ERR_CAP: case PCI_ERR_HEADER_LOG + 0: case PCI_ERR_HEADER_LOG + 4: case PCI_ERR_HEADER_LOG + 8: case PCI_ERR_HEADER_LOG + 12: case PCI_ERR_ROOT_COMMAND: case PCI_ERR_ROOT_STATUS: case PCI_ERR_ROOT_ERR_SRC: *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg); return PCI_BRIDGE_EMUL_HANDLED; default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } } static void advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) { struct advk_pcie *pcie = bridge->data; switch (reg) { /* These are W1C registers, so clear other bits */ case PCI_ERR_UNCOR_STATUS: case PCI_ERR_COR_STATUS: case PCI_ERR_ROOT_STATUS: new &= mask; fallthrough; case PCI_ERR_UNCOR_MASK: case PCI_ERR_UNCOR_SEVER: case PCI_ERR_COR_MASK: case PCI_ERR_CAP: case PCI_ERR_HEADER_LOG + 0: case PCI_ERR_HEADER_LOG + 4: case PCI_ERR_HEADER_LOG + 8: case PCI_ERR_HEADER_LOG + 12: case PCI_ERR_ROOT_COMMAND: case PCI_ERR_ROOT_ERR_SRC: advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg); break; default: break; } } static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { .read_base = advk_pci_bridge_emul_base_conf_read, .write_base = advk_pci_bridge_emul_base_conf_write, .read_pcie = advk_pci_bridge_emul_pcie_conf_read, .write_pcie = advk_pci_bridge_emul_pcie_conf_write, .read_ext = advk_pci_bridge_emul_ext_conf_read, .write_ext = advk_pci_bridge_emul_ext_conf_write, }; /* * Initialize the configuration space of the PCI-to-PCI bridge * associated with the given PCIe interface. */ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) { struct pci_bridge_emul *bridge = &pcie->bridge; bridge->conf.vendor = cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff); bridge->conf.device = cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16); bridge->conf.class_revision = cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff); /* Support 32 bits I/O addressing */ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32; bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; /* Support 64 bits memory pref */ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64); bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64); /* Support interrupt A for MSI feature */ bridge->conf.intpin = PCI_INTERRUPT_INTA; /* * Aardvark HW provides PCIe Capability structure in version 2 and * indicate slot support, which is emulated. */ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT); /* * Set Presence Detect State bit permanently since there is no support * for unplugging the card nor detecting whether it is plugged. (If a * platform exists in the future that supports it, via a GPIO for * example, it should be implemented via this bit.) * * Set physical slot number to 1 since there is only one port and zero * value is reserved for ports within the same silicon as Root Port * which is not our case. 
*/ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN, 1)); bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS); /* Indicates supports for Completion Retry Status */ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff; bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16; bridge->has_pcie = true; bridge->pcie_start = PCIE_CORE_PCIEXP_CAP; bridge->data = pcie; bridge->ops = &advk_pci_bridge_emul_ops; return pci_bridge_emul_init(bridge, 0); } static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, int devfn) { if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0) return false; /* * If the link goes down after we check for link-up, we have a problem: * if a PIO request is executed while link-down, the whole controller * gets stuck in a non-functional state, and even after link comes up * again, PIO requests won't work anymore, and a reset of the whole PCIe * controller is needed. Therefore we need to prevent sending PIO * requests while the link is down. */ if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie)) return false; return true; } static bool advk_pcie_pio_is_running(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; /* * Trying to start a new PIO transfer when previous has not completed * cause External Abort on CPU which results in kernel panic: * * SError Interrupt on CPU0, code 0xbf000002 -- SError * Kernel panic - not syncing: Asynchronous SError Interrupt * * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent * concurrent calls at the same time. But because PIO transfer may take * about 1.5s when link is down or card is disconnected, it means that * advk_pcie_wait_pio() does not always have to wait for completion. * * Some versions of ARM Trusted Firmware handles this External Abort at * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit: * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50 */ if (advk_readl(pcie, PIO_START)) { dev_err(dev, "Previous PIO read/write transfer is still running\n"); return true; } return false; } static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct advk_pcie *pcie = bus->sysdata; int retry_count; bool allow_crs; u32 reg; int ret; if (!advk_pcie_valid_device(pcie, bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_read(&pcie->bridge, where, size, val); /* * Completion Retry Status is possible to return only when reading all * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and * CRSSVE flag on Root Bridge is enabled. 
*/ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) && (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE); if (advk_pcie_pio_is_running(pcie)) goto try_crs; /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; if (pci_is_root_bus(bus->parent)) reg |= PCIE_CONFIG_RD_TYPE0; else reg |= PCIE_CONFIG_RD_TYPE1; advk_writel(pcie, reg, PIO_CTRL); /* Program the address registers */ reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4); advk_writel(pcie, reg, PIO_ADDR_LS); advk_writel(pcie, 0, PIO_ADDR_MS); /* Program the data strobe */ advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); retry_count = 0; do { /* Clear PIO DONE ISR and start the transfer */ advk_writel(pcie, 1, PIO_ISR); advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); if (ret < 0) goto try_crs; retry_count += ret; /* Check PIO status and get the read result */ ret = advk_pcie_check_pio_status(pcie, allow_crs, val); } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); if (ret < 0) goto fail; if (size == 1) *val = (*val >> (8 * (where & 3))) & 0xff; else if (size == 2) *val = (*val >> (8 * (where & 3))) & 0xffff; return PCIBIOS_SUCCESSFUL; try_crs: /* * If it is possible, return Completion Retry Status so that caller * tries to issue the request again instead of failing. */ if (allow_crs) { *val = CFG_RD_CRS_VAL; return PCIBIOS_SUCCESSFUL; } fail: *val = 0xffffffff; return PCIBIOS_SET_FAILED; } static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct advk_pcie *pcie = bus->sysdata; u32 reg; u32 data_strobe = 0x0; int retry_count; int offset; int ret; if (!advk_pcie_valid_device(pcie, bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_write(&pcie->bridge, where, size, val); if (where % size) return PCIBIOS_SET_FAILED; if (advk_pcie_pio_is_running(pcie)) return PCIBIOS_SET_FAILED; /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; if (pci_is_root_bus(bus->parent)) reg |= PCIE_CONFIG_WR_TYPE0; else reg |= PCIE_CONFIG_WR_TYPE1; advk_writel(pcie, reg, PIO_CTRL); /* Program the address registers */ reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4); advk_writel(pcie, reg, PIO_ADDR_LS); advk_writel(pcie, 0, PIO_ADDR_MS); /* Calculate the write strobe */ offset = where & 0x3; reg = val << (8 * offset); data_strobe = GENMASK(size - 1, 0) << offset; /* Program the data register */ advk_writel(pcie, reg, PIO_WR_DATA); /* Program the data strobe */ advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); retry_count = 0; do { /* Clear PIO DONE ISR and start the transfer */ advk_writel(pcie, 1, PIO_ISR); advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); if (ret < 0) return PCIBIOS_SET_FAILED; retry_count += ret; ret = advk_pcie_check_pio_status(pcie, false, NULL); } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); return ret < 0 ? 
PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL; } static struct pci_ops advk_pcie_ops = { .read = advk_pcie_rd_conf, .write = advk_pcie_wr_conf, }; static void advk_msi_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); phys_addr_t msi_addr = virt_to_phys(pcie); msg->address_lo = lower_32_bits(msi_addr); msg->address_hi = upper_32_bits(msi_addr); msg->data = data->hwirq; } static int advk_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static void advk_msi_irq_mask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 mask; raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); mask = advk_readl(pcie, PCIE_MSI_MASK_REG); mask |= BIT(hwirq); advk_writel(pcie, mask, PCIE_MSI_MASK_REG); raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); } static void advk_msi_irq_unmask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 mask; raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); mask = advk_readl(pcie, PCIE_MSI_MASK_REG); mask &= ~BIT(hwirq); advk_writel(pcie, mask, PCIE_MSI_MASK_REG); raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); } static void advk_msi_top_irq_mask(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void advk_msi_top_irq_unmask(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip advk_msi_bottom_irq_chip = { .name = "MSI", .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, .irq_set_affinity = advk_msi_set_affinity, .irq_mask = advk_msi_irq_mask, .irq_unmask = advk_msi_irq_unmask, }; static int advk_msi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct advk_pcie *pcie = domain->host_data; int hwirq, i; mutex_lock(&pcie->msi_used_lock); hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); if (hwirq < 0) return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, &advk_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); return 0; } static void advk_msi_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct advk_pcie *pcie = domain->host_data; mutex_lock(&pcie->msi_used_lock); bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); } static const struct irq_domain_ops advk_msi_domain_ops = { .alloc = advk_msi_irq_domain_alloc, .free = advk_msi_irq_domain_free, }; static void advk_pcie_irq_mask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 mask; raw_spin_lock_irqsave(&pcie->irq_lock, flags); mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); mask |= PCIE_ISR1_INTX_ASSERT(hwirq); advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static void advk_pcie_irq_unmask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 mask; raw_spin_lock_irqsave(&pcie->irq_lock, flags); mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); advk_writel(pcie, mask, 
PCIE_ISR1_MASK_REG); raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static int advk_pcie_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { struct advk_pcie *pcie = h->host_data; irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &pcie->irq_chip, handle_level_irq); irq_set_chip_data(virq, pcie); return 0; } static const struct irq_domain_ops advk_pcie_irq_domain_ops = { .map = advk_pcie_irq_map, .xlate = irq_domain_xlate_onecell, }; static struct irq_chip advk_msi_irq_chip = { .name = "advk-MSI", .irq_mask = advk_msi_top_irq_mask, .irq_unmask = advk_msi_top_irq_unmask, }; static struct msi_domain_info advk_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, .chip = &advk_msi_irq_chip, }; static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; raw_spin_lock_init(&pcie->msi_irq_lock); mutex_init(&pcie->msi_used_lock); pcie->msi_inner_domain = irq_domain_add_linear(NULL, MSI_IRQ_NUM, &advk_msi_domain_ops, pcie); if (!pcie->msi_inner_domain) return -ENOMEM; pcie->msi_domain = pci_msi_create_irq_domain(dev_fwnode(dev), &advk_msi_domain_info, pcie->msi_inner_domain); if (!pcie->msi_domain) { irq_domain_remove(pcie->msi_inner_domain); return -ENOMEM; } return 0; } static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) { irq_domain_remove(pcie->msi_domain); irq_domain_remove(pcie->msi_inner_domain); } static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; struct irq_chip *irq_chip; int ret = 0; raw_spin_lock_init(&pcie->irq_lock); pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); return -ENODEV; } irq_chip = &pcie->irq_chip; irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", dev_name(dev)); if (!irq_chip->name) { ret = -ENOMEM; goto out_put_node; } irq_chip->irq_mask = advk_pcie_irq_mask; irq_chip->irq_unmask = advk_pcie_irq_unmask; pcie->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &advk_pcie_irq_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); ret = -ENOMEM; goto out_put_node; } out_put_node: of_node_put(pcie_intc_node); return ret; } static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) { irq_domain_remove(pcie->irq_domain); } static struct irq_chip advk_rp_irq_chip = { .name = "advk-RP", }; static int advk_pcie_rp_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { struct advk_pcie *pcie = h->host_data; irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq); irq_set_chip_data(virq, pcie); return 0; } static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = { .map = advk_pcie_rp_irq_map, .xlate = irq_domain_xlate_onecell, }; static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie) { pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie); if (!pcie->rp_irq_domain) { dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n"); return -ENOMEM; } return 0; } static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie) { irq_domain_remove(pcie->rp_irq_domain); } static void advk_pcie_handle_pme(struct advk_pcie *pcie) { u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16; advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG); /* * PCIE_MSG_LOG_REG 
contains the last inbound message, so store * the requester ID only when PME was not asserted yet. * Also do not trigger PME interrupt when PME is still asserted. */ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) { pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME); /* * Trigger PME interrupt only if PMEIE bit in Root Control is set. * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0. */ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE)) return; if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL) dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); } } static void advk_pcie_handle_msi(struct advk_pcie *pcie) { u32 msi_val, msi_mask, msi_status, msi_idx; msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK); for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { if (!(BIT(msi_idx) & msi_status)) continue; advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL) dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx); } advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, PCIE_ISR0_REG); } static void advk_pcie_handle_int(struct advk_pcie *pcie) { u32 isr0_val, isr0_mask, isr0_status; u32 isr1_val, isr1_mask, isr1_status; int i; isr0_val = advk_readl(pcie, PCIE_ISR0_REG); isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); isr1_val = advk_readl(pcie, PCIE_ISR1_REG); isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); /* Process PME interrupt as the first one to do not miss PME requester id */ if (isr0_status & PCIE_MSG_PM_PME_MASK) advk_pcie_handle_pme(pcie); /* Process ERR interrupt */ if (isr0_status & PCIE_ISR0_ERR_MASK) { advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG); /* * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use * PCIe interrupt 0 */ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL) dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n"); } /* Process MSI interrupts */ if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) advk_pcie_handle_msi(pcie); /* Process legacy interrupts */ for (i = 0; i < PCI_NUM_INTX; i++) { if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) continue; advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), PCIE_ISR1_REG); if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL) dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n", (char)i + 'A'); } } static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) { struct advk_pcie *pcie = arg; u32 status; status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); if (!(status & PCIE_IRQ_CORE_INT)) return IRQ_NONE; advk_pcie_handle_int(pcie); /* Clear interrupt */ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); return IRQ_HANDLED; } static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct advk_pcie *pcie = dev->bus->sysdata; /* * Emulated root bridge has its own emulated irq chip and irq domain. * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and * hwirq for irq_create_mapping() is indexed from zero. 
*/ if (pci_is_root_bus(dev->bus)) return irq_create_mapping(pcie->rp_irq_domain, pin - 1); else return of_irq_parse_and_map_pci(dev, slot, pin); } static void advk_pcie_disable_phy(struct advk_pcie *pcie) { phy_power_off(pcie->phy); phy_exit(pcie->phy); } static int advk_pcie_enable_phy(struct advk_pcie *pcie) { int ret; if (!pcie->phy) return 0; ret = phy_init(pcie->phy); if (ret) return ret; ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE); if (ret) { phy_exit(pcie->phy); return ret; } ret = phy_power_on(pcie->phy); if (ret) { phy_exit(pcie->phy); return ret; } return 0; } static int advk_pcie_setup_phy(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *node = dev->of_node; int ret = 0; pcie->phy = devm_of_phy_get(dev, node, NULL); if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER)) return PTR_ERR(pcie->phy); /* Old bindings miss the PHY handle */ if (IS_ERR(pcie->phy)) { dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy)); pcie->phy = NULL; return 0; } ret = advk_pcie_enable_phy(pcie); if (ret) dev_err(dev, "Failed to initialize PHY (%d)\n", ret); return ret; } static int advk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct advk_pcie *pcie; struct pci_host_bridge *bridge; struct resource_entry *entry; int ret, irq; bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; platform_set_drvdata(pdev, pcie); resource_list_for_each_entry(entry, &bridge->windows) { resource_size_t start = entry->res->start; resource_size_t size = resource_size(entry->res); unsigned long type = resource_type(entry->res); u64 win_size; /* * Aardvark hardware allows to configure also PCIe window * for config type 0 and type 1 mapping, but driver uses * only PIO for issuing configuration transfers which does * not use PCIe window configuration. */ if (type != IORESOURCE_MEM && type != IORESOURCE_IO) continue; /* * Skip transparent memory resources. Default outbound access * configuration is set to transparent memory access so it * does not need window configuration. */ if (type == IORESOURCE_MEM && entry->offset == 0) continue; /* * The n-th PCIe window is configured by tuple (match, remap, mask) * and an access to address A uses this window if A matches the * match with given mask. * So every PCIe window size must be a power of two and every start * address must be aligned to window size. Minimal size is 64 KiB * because lower 16 bits of mask must be zero. Remapped address * may have set only bits from the mask. */ while (pcie->wins_count < OB_WIN_COUNT && size > 0) { /* Calculate the largest aligned window size */ win_size = (1ULL << (fls64(size)-1)) | (start ? 
(1ULL << __ffs64(start)) : 0); win_size = 1ULL << __ffs64(win_size); if (win_size < 0x10000) break; dev_dbg(dev, "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n", pcie->wins_count, (unsigned long long)start, (unsigned long long)start + win_size, type); if (type == IORESOURCE_IO) { pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO; pcie->wins[pcie->wins_count].match = pci_pio_to_address(start); } else { pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM; pcie->wins[pcie->wins_count].match = start; } pcie->wins[pcie->wins_count].remap = start - entry->offset; pcie->wins[pcie->wins_count].mask = ~(win_size - 1); if (pcie->wins[pcie->wins_count].remap & (win_size - 1)) break; start += win_size; size -= win_size; pcie->wins_count++; } if (size > 0) { dev_err(&pcie->pdev->dev, "Invalid PCIe region [0x%llx-0x%llx]\n", (unsigned long long)entry->res->start, (unsigned long long)entry->res->end + 1); return -EINVAL; } } pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", pcie); if (ret) { dev_err(dev, "Failed to register interrupt\n"); return ret; } pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); ret = PTR_ERR_OR_ZERO(pcie->reset_gpio); if (ret) { if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get reset-gpio: %i\n", ret); return ret; } ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset"); if (ret) { dev_err(dev, "Failed to set reset gpio name: %d\n", ret); return ret; } ret = of_pci_get_max_link_speed(dev->of_node); if (ret <= 0 || ret > 3) pcie->link_gen = 3; else pcie->link_gen = ret; ret = advk_pcie_setup_phy(pcie); if (ret) return ret; advk_pcie_setup_hw(pcie); ret = advk_sw_pci_bridge_init(pcie); if (ret) { dev_err(dev, "Failed to register emulated root PCI bridge\n"); return ret; } ret = advk_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed to initialize irq\n"); return ret; } ret = advk_pcie_init_msi_irq_domain(pcie); if (ret) { dev_err(dev, "Failed to initialize irq\n"); advk_pcie_remove_irq_domain(pcie); return ret; } ret = advk_pcie_init_rp_irq_domain(pcie); if (ret) { dev_err(dev, "Failed to initialize irq\n"); advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); return ret; } bridge->sysdata = pcie; bridge->ops = &advk_pcie_ops; bridge->map_irq = advk_pcie_map_irq; ret = pci_host_probe(bridge); if (ret < 0) { advk_pcie_remove_rp_irq_domain(pcie); advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); return ret; } return 0; } static void advk_pcie_remove(struct platform_device *pdev) { struct advk_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); u32 val; int i; /* Remove PCI bus with all devices */ pci_lock_rescan_remove(); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); /* Disable Root Bridge I/O space, memory space and bus mastering */ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG); /* Disable MSI */ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG); val &= ~PCIE_CORE_CTRL2_MSI_ENABLE; advk_writel(pcie, val, PCIE_CORE_CTRL2_REG); /* Clear MSI address */ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG); advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG); /* Mask all 
interrupts */ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG); /* Clear all interrupts */ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); /* Remove IRQ domains */ advk_pcie_remove_rp_irq_domain(pcie); advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); /* Free config space for emulated root bridge */ pci_bridge_emul_cleanup(&pcie->bridge); /* Assert PERST# signal which prepares PCIe card for power down */ if (pcie->reset_gpio) gpiod_set_value_cansleep(pcie->reset_gpio, 1); /* Disable link training */ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG); val &= ~LINK_TRAINING_EN; advk_writel(pcie, val, PCIE_CORE_CTRL0_REG); /* Disable outbound address windows mapping */ for (i = 0; i < OB_WIN_COUNT; i++) advk_pcie_disable_ob_win(pcie, i); /* Disable phy */ advk_pcie_disable_phy(pcie); } static const struct of_device_id advk_pcie_of_match_table[] = { { .compatible = "marvell,armada-3700-pcie", }, {}, }; MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table); static struct platform_driver advk_pcie_driver = { .driver = { .name = "advk-pcie", .of_match_table = advk_pcie_of_match_table, }, .probe = advk_pcie_probe, .remove_new = advk_pcie_remove, }; module_platform_driver(advk_pcie_driver); MODULE_DESCRIPTION("Aardvark PCIe controller"); MODULE_LICENSE("GPL v2");
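/*
 * Editor's note: a minimal, hypothetical userspace sketch of the outbound
 * window sizing rule used in advk_pcie_probe() above. Each Aardvark PCIe
 * window must be a power of two, no larger than the remaining region, and
 * aligned to its start address, so the driver repeatedly picks
 * min(largest power of two <= size, alignment of start); the real driver
 * additionally rejects windows smaller than 64 KiB. The function and
 * variable names below are illustrative only, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t largest_aligned_win(uint64_t start, uint64_t size)
{
	/* Largest power of two not exceeding the remaining size. */
	uint64_t by_size = 1ULL << (63 - __builtin_clzll(size));
	/* Alignment of the start address (start == 0 imposes no limit). */
	uint64_t by_align = start ? (1ULL << __builtin_ctzll(start)) : by_size;

	return by_size < by_align ? by_size : by_align;
}

int main(void)
{
	/* e.g. a 0xf00000-byte region at 0xe8000000 splits as 8M + 4M + 2M + 1M */
	uint64_t start = 0xe8000000ULL, size = 0xf00000ULL;

	while (size) {
		uint64_t win = largest_aligned_win(start, size);

		printf("window at 0x%llx, size 0x%llx\n",
		       (unsigned long long)start, (unsigned long long)win);
		start += win;
		size -= win;
	}
	return 0;
}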
linux-master
drivers/pci/controller/pci-aardvark.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Faraday Technology FTPC100 PCI Controller * * Copyright (C) 2017 Linus Walleij <[email protected]> * * Based on the out-of-tree OpenWRT patch for Cortina Gemini: * Copyright (C) 2009 Janos Laube <[email protected]> * Copyright (C) 2009 Paulius Zaleckas <[email protected]> * Based on SL2312 PCI controller code * Storlink (C) 2003 */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/bitops.h> #include <linux/irq.h> #include <linux/clk.h> #include "../pci.h" /* * Special configuration registers directly in the first few words * in I/O space. */ #define FTPCI_IOSIZE 0x00 #define FTPCI_PROT 0x04 /* AHB protection */ #define FTPCI_CTRL 0x08 /* PCI control signal */ #define FTPCI_SOFTRST 0x10 /* Soft reset counter and response error enable */ #define FTPCI_CONFIG 0x28 /* PCI configuration command register */ #define FTPCI_DATA 0x2C #define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ #define FARADAY_PCI_PMC 0x40 /* Power management control */ #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ #define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */ #define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */ #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ #define PCI_STATUS_66MHZ_CAPABLE BIT(21) /* Bits 31..28 gives INTD..INTA status */ #define PCI_CTRL2_INTSTS_SHIFT 28 #define PCI_CTRL2_INTMASK_CMDERR BIT(27) #define PCI_CTRL2_INTMASK_PARERR BIT(26) /* Bits 25..22 masks INTD..INTA */ #define PCI_CTRL2_INTMASK_SHIFT 22 #define PCI_CTRL2_INTMASK_MABRT_RX BIT(21) #define PCI_CTRL2_INTMASK_TABRT_RX BIT(20) #define PCI_CTRL2_INTMASK_TABRT_TX BIT(19) #define PCI_CTRL2_INTMASK_RETRY4 BIT(18) #define PCI_CTRL2_INTMASK_SERR_RX BIT(17) #define PCI_CTRL2_INTMASK_PERR_RX BIT(16) /* Bit 15 reserved */ #define PCI_CTRL2_MSTPRI_REQ6 BIT(14) #define PCI_CTRL2_MSTPRI_REQ5 BIT(13) #define PCI_CTRL2_MSTPRI_REQ4 BIT(12) #define PCI_CTRL2_MSTPRI_REQ3 BIT(11) #define PCI_CTRL2_MSTPRI_REQ2 BIT(10) #define PCI_CTRL2_MSTPRI_REQ1 BIT(9) #define PCI_CTRL2_MSTPRI_REQ0 BIT(8) /* Bits 7..4 reserved */ /* Bits 3..0 TRDYW */ /* * Memory configs: * Bit 31..20 defines the PCI side memory base * Bit 19..16 (4 bits) defines the size per below */ #define FARADAY_PCI_MEMBASE_MASK 0xfff00000 #define FARADAY_PCI_MEMSIZE_1MB 0x0 #define FARADAY_PCI_MEMSIZE_2MB 0x1 #define FARADAY_PCI_MEMSIZE_4MB 0x2 #define FARADAY_PCI_MEMSIZE_8MB 0x3 #define FARADAY_PCI_MEMSIZE_16MB 0x4 #define FARADAY_PCI_MEMSIZE_32MB 0x5 #define FARADAY_PCI_MEMSIZE_64MB 0x6 #define FARADAY_PCI_MEMSIZE_128MB 0x7 #define FARADAY_PCI_MEMSIZE_256MB 0x8 #define FARADAY_PCI_MEMSIZE_512MB 0x9 #define FARADAY_PCI_MEMSIZE_1GB 0xa #define FARADAY_PCI_MEMSIZE_2GB 0xb #define FARADAY_PCI_MEMSIZE_SHIFT 16 /* * The DMA base is set to 0x0 for all memory segments, it reflects the * fact that the memory of the host system starts at 0x0. 
*/ #define FARADAY_PCI_DMA_MEM1_BASE 0x00000000 #define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 #define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 /** * struct faraday_pci_variant - encodes IP block differences * @cascaded_irq: this host has cascaded IRQs from an interrupt controller * embedded in the host bridge. */ struct faraday_pci_variant { bool cascaded_irq; }; struct faraday_pci { struct device *dev; void __iomem *base; struct irq_domain *irqdomain; struct pci_bus *bus; struct clk *bus_clk; }; static int faraday_res_to_memcfg(resource_size_t mem_base, resource_size_t mem_size, u32 *val) { u32 outval; switch (mem_size) { case SZ_1M: outval = FARADAY_PCI_MEMSIZE_1MB; break; case SZ_2M: outval = FARADAY_PCI_MEMSIZE_2MB; break; case SZ_4M: outval = FARADAY_PCI_MEMSIZE_4MB; break; case SZ_8M: outval = FARADAY_PCI_MEMSIZE_8MB; break; case SZ_16M: outval = FARADAY_PCI_MEMSIZE_16MB; break; case SZ_32M: outval = FARADAY_PCI_MEMSIZE_32MB; break; case SZ_64M: outval = FARADAY_PCI_MEMSIZE_64MB; break; case SZ_128M: outval = FARADAY_PCI_MEMSIZE_128MB; break; case SZ_256M: outval = FARADAY_PCI_MEMSIZE_256MB; break; case SZ_512M: outval = FARADAY_PCI_MEMSIZE_512MB; break; case SZ_1G: outval = FARADAY_PCI_MEMSIZE_1GB; break; case SZ_2G: outval = FARADAY_PCI_MEMSIZE_2GB; break; default: return -EINVAL; } outval <<= FARADAY_PCI_MEMSIZE_SHIFT; /* This is probably not good */ if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK)) pr_warn("truncated PCI memory base\n"); /* Translate to bridge side address space */ outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK); pr_debug("Translated pci base @%pap, size %pap to config %08x\n", &mem_base, &mem_size, outval); *val = outval; return 0; } static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, unsigned int fn, int config, int size, u32 *value) { writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn), PCI_FUNC(fn), config), p->base + FTPCI_CONFIG); *value = readl(p->base + FTPCI_DATA); if (size == 1) *value = (*value >> (8 * (config & 3))) & 0xFF; else if (size == 2) *value = (*value >> (8 * (config & 3))) & 0xFFFF; return PCIBIOS_SUCCESSFUL; } static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, int config, int size, u32 *value) { struct faraday_pci *p = bus->sysdata; dev_dbg(&bus->dev, "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value); } static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, unsigned int fn, int config, int size, u32 value) { int ret = PCIBIOS_SUCCESSFUL; writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn), PCI_FUNC(fn), config), p->base + FTPCI_CONFIG); switch (size) { case 4: writel(value, p->base + FTPCI_DATA); break; case 2: writew(value, p->base + FTPCI_DATA + (config & 3)); break; case 1: writeb(value, p->base + FTPCI_DATA + (config & 3)); break; default: ret = PCIBIOS_BAD_REGISTER_NUMBER; } return ret; } static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, int config, int size, u32 value) { struct faraday_pci *p = bus->sysdata; dev_dbg(&bus->dev, "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); return faraday_raw_pci_write_config(p, bus->number, fn, config, size, value); } static struct pci_ops faraday_pci_ops = { .read = faraday_pci_read_config, .write = faraday_pci_write_config, }; static void faraday_pci_ack_irq(struct irq_data *d) { struct faraday_pci *p = 
irq_data_get_irq_chip_data(d); unsigned int reg; faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); } static void faraday_pci_mask_irq(struct irq_data *d) { struct faraday_pci *p = irq_data_get_irq_chip_data(d); unsigned int reg; faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); } static void faraday_pci_unmask_irq(struct irq_data *d) { struct faraday_pci *p = irq_data_get_irq_chip_data(d); unsigned int reg; faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); } static void faraday_pci_irq_handler(struct irq_desc *desc) { struct faraday_pci *p = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned int irq_stat, reg, i; faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; chained_irq_enter(irqchip, desc); for (i = 0; i < 4; i++) { if ((irq_stat & BIT(i)) == 0) continue; generic_handle_domain_irq(p->irqdomain, i); } chained_irq_exit(irqchip, desc); } static struct irq_chip faraday_pci_irq_chip = { .name = "PCI", .irq_ack = faraday_pci_ack_irq, .irq_mask = faraday_pci_mask_irq, .irq_unmask = faraday_pci_unmask_irq, }; static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops faraday_pci_irqdomain_ops = { .map = faraday_pci_irq_map, }; static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) { struct device_node *intc = of_get_next_child(p->dev->of_node, NULL); int irq; int i; if (!intc) { dev_err(p->dev, "missing child interrupt-controller node\n"); return -EINVAL; } /* All PCI IRQs cascade off this one */ irq = of_irq_get(intc, 0); if (irq <= 0) { dev_err(p->dev, "failed to get parent IRQ\n"); of_node_put(intc); return irq ?: -EINVAL; } p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, &faraday_pci_irqdomain_ops, p); of_node_put(intc); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); return -EINVAL; } irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p); for (i = 0; i < 4; i++) irq_create_mapping(p->irqdomain, i); return 0; } static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p) { struct device *dev = p->dev; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p); struct resource_entry *entry; u32 confreg[3] = { FARADAY_PCI_MEM1_BASE_SIZE, FARADAY_PCI_MEM2_BASE_SIZE, FARADAY_PCI_MEM3_BASE_SIZE, }; int i = 0; u32 val; resource_list_for_each_entry(entry, &bridge->dma_ranges) { u64 pci_addr = entry->res->start - entry->offset; u64 end = entry->res->end - entry->offset; int ret; ret = faraday_res_to_memcfg(pci_addr, resource_size(entry->res), &val); if (ret) { dev_err(dev, "DMA range %d: illegal MEM resource size\n", i); return -EINVAL; } dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", i + 1, pci_addr, end, val); if (i <= 2) { faraday_raw_pci_write_config(p, 0, 0, confreg[i], 4, 
val); } else { dev_err(dev, "ignore extraneous dma-range %d\n", i); break; } i++; } return 0; } static int faraday_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct faraday_pci_variant *variant = of_device_get_match_data(dev); struct resource_entry *win; struct faraday_pci *p; struct resource *io; struct pci_host_bridge *host; struct clk *clk; unsigned char max_bus_speed = PCI_SPEED_33MHz; unsigned char cur_bus_speed = PCI_SPEED_33MHz; int ret; u32 val; host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); if (!host) return -ENOMEM; host->ops = &faraday_pci_ops; p = pci_host_bridge_priv(host); host->sysdata = p; p->dev = dev; /* Retrieve and enable optional clocks */ clk = devm_clk_get_enabled(dev, "PCLK"); if (IS_ERR(clk)) return PTR_ERR(clk); p->bus_clk = devm_clk_get_enabled(dev, "PCICLK"); if (IS_ERR(p->bus_clk)) return PTR_ERR(p->bus_clk); p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); win = resource_list_first_type(&host->windows, IORESOURCE_IO); if (win) { io = win->res; if (!faraday_res_to_memcfg(io->start - win->offset, resource_size(io), &val)) { /* setup I/O space size */ writel(val, p->base + FTPCI_IOSIZE); } else { dev_err(dev, "illegal IO mem size\n"); return -EINVAL; } } /* Setup hostbridge */ val = readl(p->base + FTPCI_CTRL); val |= PCI_COMMAND_IO; val |= PCI_COMMAND_MEMORY; val |= PCI_COMMAND_MASTER; writel(val, p->base + FTPCI_CTRL); /* Mask and clear all interrupts */ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); if (variant->cascaded_irq) { ret = faraday_pci_setup_cascaded_irq(p); if (ret) { dev_err(dev, "failed to setup cascaded IRQ\n"); return ret; } } /* Check bus clock if we can gear up to 66 MHz */ if (!IS_ERR(p->bus_clk)) { unsigned long rate; u32 val; faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_STATUS_CMD, 4, &val); rate = clk_get_rate(p->bus_clk); if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { dev_info(dev, "33MHz bus is 66MHz capable\n"); max_bus_speed = PCI_SPEED_66MHz; ret = clk_set_rate(p->bus_clk, 66000000); if (ret) dev_err(dev, "failed to set bus clock\n"); } else { dev_info(dev, "33MHz only bus\n"); max_bus_speed = PCI_SPEED_33MHz; } /* Bumping the clock may fail so read back the rate */ rate = clk_get_rate(p->bus_clk); if (rate == 33000000) cur_bus_speed = PCI_SPEED_33MHz; if (rate == 66000000) cur_bus_speed = PCI_SPEED_66MHz; } ret = faraday_pci_parse_map_dma_ranges(p); if (ret) return ret; ret = pci_scan_root_bus_bridge(host); if (ret) { dev_err(dev, "failed to scan host: %d\n", ret); return ret; } p->bus = host->bus; p->bus->max_bus_speed = max_bus_speed; p->bus->cur_bus_speed = cur_bus_speed; pci_bus_assign_resources(p->bus); pci_bus_add_devices(p->bus); return 0; } /* * We encode bridge variants here, we have at least two so it doesn't * hurt to have infrastructure to encompass future variants as well. */ static const struct faraday_pci_variant faraday_regular = { .cascaded_irq = true, }; static const struct faraday_pci_variant faraday_dual = { .cascaded_irq = false, }; static const struct of_device_id faraday_pci_of_match[] = { { .compatible = "faraday,ftpci100", .data = &faraday_regular, }, { .compatible = "faraday,ftpci100-dual", .data = &faraday_dual, }, {}, }; static struct platform_driver faraday_pci_driver = { .driver = { .name = "ftpci100", .of_match_table = faraday_pci_of_match, .suppress_bind_attrs = true, }, .probe = faraday_pci_probe, }; builtin_platform_driver(faraday_pci_driver);
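/*
 * Editor's note: a small, hypothetical userspace sketch of the
 * FARADAY_PCI_MEMx_BASE_SIZE register encoding produced by
 * faraday_res_to_memcfg() above. Bits 31..20 hold the PCI-side base and
 * bits 19..16 a size code where 0 = 1 MiB, 1 = 2 MiB, ... 0xb = 2 GiB,
 * i.e. code = log2(size) - 20 for power-of-two sizes in that range.
 * This illustrates the bit layout only and is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define MEMBASE_MASK	0xfff00000u
#define MEMSIZE_SHIFT	16

static int res_to_memcfg(uint32_t base, uint64_t size, uint32_t *val)
{
	/* Size must be a power of two between 1 MiB and 2 GiB. */
	if (size < (1ULL << 20) || size > (1ULL << 31) || (size & (size - 1)))
		return -1;
	/* Base must be 1 MiB aligned (the driver only warns and masks it). */
	if (base & ~MEMBASE_MASK)
		return -1;

	*val = base | ((uint32_t)(63 - __builtin_clzll(size) - 20) << MEMSIZE_SHIFT);
	return 0;
}

int main(void)
{
	uint32_t val;

	/* A 256 MiB window based at 0x40000000 encodes as 0x40080000. */
	if (!res_to_memcfg(0x40000000u, 256ULL << 20, &val))
		printf("config word: 0x%08x\n", val);
	return 0;
}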
linux-master
drivers/pci/controller/pci-ftpci100.c
// SPDX-License-Identifier: GPL-2.0+ /* * Rockchip AXI PCIe endpoint controller driver * * Copyright (c) 2018 Rockchip, Inc. * * Author: Shawn Lin <[email protected]> * Simon Xue <[email protected]> */ #include <linux/configfs.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> #include <linux/pci-epf.h> #include <linux/sizes.h> #include "pcie-rockchip.h" /** * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver * @rockchip: Rockchip PCIe controller * @epc: PCI EPC device * @max_regions: maximum number of regions supported by hardware * @ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ * dedicated outbound regions is mapped. * @irq_cpu_addr: base address in the CPU space where a write access triggers * the sending of a memory write (MSI) / normal message (legacy * IRQ) TLP through the PCIe bus. * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ * dedicated outbound region. * @irq_pci_fn: the latest PCI function that has updated the mapping of * the MSI/legacy IRQ dedicated outbound region. * @irq_pending: bitmask of asserted legacy IRQs. */ struct rockchip_pcie_ep { struct rockchip_pcie rockchip; struct pci_epc *epc; u32 max_regions; unsigned long ob_region_map; phys_addr_t *ob_addr; phys_addr_t irq_phys_addr; void __iomem *irq_cpu_addr; u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; }; static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, u32 region) { rockchip_pcie_write(rockchip, 0, ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region)); rockchip_pcie_write(rockchip, 0, ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region)); rockchip_pcie_write(rockchip, 0, ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region)); rockchip_pcie_write(rockchip, 0, ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); } static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, u32 r, u64 cpu_addr, u64 pci_addr, size_t size) { int num_pass_bits = fls64(size - 1); u32 addr0, addr1, desc0; if (num_pass_bits < 8) num_pass_bits = 8; addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); addr1 = upper_32_bits(pci_addr); desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE; /* PCI bus address region */ rockchip_pcie_write(rockchip, addr0, ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); rockchip_pcie_write(rockchip, addr1, ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); rockchip_pcie_write(rockchip, desc0, ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); rockchip_pcie_write(rockchip, 0, ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); } static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_header *hdr) { u32 reg; struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; /* All functions share the same vendor ID with function 0 */ if (fn == 0) { u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) | (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16; rockchip_pcie_write(rockchip, vid_regs, PCIE_CORE_CONFIG_VENDOR); } reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID); reg = (reg & 0xFFFF) | (hdr->deviceid << 16); rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID); rockchip_pcie_write(rockchip, hdr->revid | hdr->progif_code << 8 | hdr->subclass_code << 16 | hdr->baseclass_code << 24, 
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID); rockchip_pcie_write(rockchip, hdr->cache_line_size, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_CACHE_LINE_SIZE); rockchip_pcie_write(rockchip, hdr->subsys_id << 16, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_SUBSYSTEM_VENDOR_ID); rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_INTERRUPT_LINE); return 0; } static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_bar *epf_bar) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; dma_addr_t bar_phys = epf_bar->phys_addr; enum pci_barno bar = epf_bar->barno; int flags = epf_bar->flags; u32 addr0, addr1, reg, cfg, b, aperture, ctrl; u64 sz; /* BAR size is 2^(aperture + 7) */ sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE); /* * roundup_pow_of_two() returns an unsigned long, which is not suited * for 64bit values. */ sz = 1ULL << fls64(sz - 1); aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS; } else { bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); bool is_64bits = sz > SZ_2G; if (is_64bits && (bar & 1)) return -EINVAL; if (is_64bits && is_prefetch) ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; else if (is_prefetch) ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; else if (is_64bits) ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS; else ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS; } if (bar < BAR_4) { reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); b = bar; } else { reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); b = bar - BAR_4; } addr0 = lower_32_bits(bar_phys); addr1 = upper_32_bits(bar_phys); cfg = rockchip_pcie_read(rockchip, reg); cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); rockchip_pcie_write(rockchip, cfg, reg); rockchip_pcie_write(rockchip, addr0, ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); rockchip_pcie_write(rockchip, addr1, ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); return 0; } static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_bar *epf_bar) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; u32 reg, cfg, b, ctrl; enum pci_barno bar = epf_bar->barno; if (bar < BAR_4) { reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); b = bar; } else { reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); b = bar - BAR_4; } ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED; cfg = rockchip_pcie_read(rockchip, reg); cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); rockchip_pcie_write(rockchip, cfg, reg); rockchip_pcie_write(rockchip, 0x0, ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); rockchip_pcie_write(rockchip, 0x0, ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); } static inline u32 rockchip_ob_region(phys_addr_t addr) { return (addr >> ilog2(SZ_1M)) & 0x1f; } static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr, u64 pci_addr, size_t size) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *pcie = &ep->rockchip; u32 r = 
rockchip_ob_region(addr); rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size); set_bit(r, &ep->ob_region_map); ep->ob_addr[r] = addr; return 0; } static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; u32 r; for (r = 0; r < ep->max_regions; r++) if (ep->ob_addr[r] == addr) break; if (r == ep->max_regions) return; rockchip_pcie_clear_ep_ob_atu(rockchip, r); ep->ob_addr[r] = 0; clear_bit(r, &ep->ob_region_map); } static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 multi_msg_cap) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; u32 flags; flags = rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK; flags |= (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET); flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP; rockchip_pcie_write(rockchip, flags, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); return 0; } static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; u32 flags; flags = rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) return -EINVAL; return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); } static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, u8 intx, bool do_assert) { struct rockchip_pcie *rockchip = &ep->rockchip; intx &= 3; if (do_assert) { ep->irq_pending |= BIT(intx); rockchip_pcie_write(rockchip, PCIE_CLIENT_INT_IN_ASSERT | PCIE_CLIENT_INT_PEND_ST_PEND, PCIE_CLIENT_LEGACY_INT_CTRL); } else { ep->irq_pending &= ~BIT(intx); rockchip_pcie_write(rockchip, PCIE_CLIENT_INT_IN_DEASSERT | PCIE_CLIENT_INT_PEND_ST_NORMAL, PCIE_CLIENT_LEGACY_INT_CTRL); } } static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, u8 intx) { u16 cmd; cmd = rockchip_pcie_read(&ep->rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_CMD_STATUS); if (cmd & PCI_COMMAND_INTX_DISABLE) return -EINVAL; /* * Should add some delay between toggling INTx per TRM vaguely saying * it depends on some cycles of the AHB bus clock to function it. So * add sufficient 1ms here. 
*/ rockchip_pcie_ep_assert_intx(ep, fn, intx, true); mdelay(1); rockchip_pcie_ep_assert_intx(ep, fn, intx, false); return 0; } static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, u8 interrupt_num) { struct rockchip_pcie *rockchip = &ep->rockchip; u32 flags, mme, data, data_mask; u8 msi_count; u64 pci_addr; u32 r; /* Check MSI enable bit */ flags = rockchip_pcie_read(&ep->rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) return -EINVAL; /* Get MSI numbers from MME */ mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); msi_count = 1 << mme; if (!interrupt_num || interrupt_num > msi_count) return -EINVAL; /* Set MSI private data */ data_mask = msi_count - 1; data = rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + PCI_MSI_DATA_64); data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); /* Get MSI PCI address */ pci_addr = rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + PCI_MSI_ADDRESS_HI); pci_addr <<= 32; pci_addr |= rockchip_pcie_read(rockchip, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + PCI_MSI_ADDRESS_LO); /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) || ep->irq_pci_fn != fn)) { r = rockchip_ob_region(ep->irq_phys_addr); rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, ep->irq_phys_addr, pci_addr & PCIE_ADDR_MASK, ~PCIE_ADDR_MASK + 1); ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK); ep->irq_pci_fn = fn; } writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK)); return 0; } static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, enum pci_epc_irq_type type, u16 interrupt_num) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); switch (type) { case PCI_EPC_IRQ_LEGACY: return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0); case PCI_EPC_IRQ_MSI: return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num); default: return -EINVAL; } } static int rockchip_pcie_ep_start(struct pci_epc *epc) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; struct pci_epf *epf; u32 cfg; cfg = BIT(0); list_for_each_entry(epf, &epc->pci_epf, list) cfg |= BIT(epf->func_no); rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG); return 0; } static const struct pci_epc_features rockchip_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, .align = 256, }; static const struct pci_epc_features* rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { return &rockchip_pcie_epc_features; } static const struct pci_epc_ops rockchip_pcie_epc_ops = { .write_header = rockchip_pcie_ep_write_header, .set_bar = rockchip_pcie_ep_set_bar, .clear_bar = rockchip_pcie_ep_clear_bar, .map_addr = rockchip_pcie_ep_map_addr, .unmap_addr = rockchip_pcie_ep_unmap_addr, .set_msi = rockchip_pcie_ep_set_msi, .get_msi = rockchip_pcie_ep_get_msi, .raise_irq = rockchip_pcie_ep_raise_irq, .start = rockchip_pcie_ep_start, .get_features = rockchip_pcie_ep_get_features, }; static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, struct rockchip_pcie_ep *ep) { struct device *dev = rockchip->dev; int err; err = rockchip_pcie_parse_dt(rockchip); if (err) return err; err = rockchip_pcie_get_phys(rockchip); if (err) return err; err = of_property_read_u32(dev->of_node, "rockchip,max-outbound-regions", 
&ep->max_regions); if (err < 0 || ep->max_regions > MAX_REGION_LIMIT) ep->max_regions = MAX_REGION_LIMIT; ep->ob_region_map = 0; err = of_property_read_u8(dev->of_node, "max-functions", &ep->epc->max_functions); if (err < 0) ep->epc->max_functions = 1; return 0; } static const struct of_device_id rockchip_pcie_ep_of_match[] = { { .compatible = "rockchip,rk3399-pcie-ep"}, {}, }; static int rockchip_pcie_ep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie_ep *ep; struct rockchip_pcie *rockchip; struct pci_epc *epc; size_t max_regions; struct pci_epc_mem_window *windows = NULL; int err, i; u32 cfg_msi, cfg_msix_cp; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; rockchip = &ep->rockchip; rockchip->is_rc = false; rockchip->dev = dev; epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops); if (IS_ERR(epc)) { dev_err(dev, "failed to create epc device\n"); return PTR_ERR(epc); } ep->epc = epc; epc_set_drvdata(epc, ep); err = rockchip_pcie_parse_ep_dt(rockchip, ep); if (err) return err; err = rockchip_pcie_enable_clocks(rockchip); if (err) return err; err = rockchip_pcie_init_port(rockchip); if (err) goto err_disable_clocks; /* Establish the link automatically */ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, PCIE_CLIENT_CONFIG); max_regions = ep->max_regions; ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr), GFP_KERNEL); if (!ep->ob_addr) { err = -ENOMEM; goto err_uninit_port; } /* Only enable function 0 by default */ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); windows = devm_kcalloc(dev, ep->max_regions, sizeof(struct pci_epc_mem_window), GFP_KERNEL); if (!windows) { err = -ENOMEM; goto err_uninit_port; } for (i = 0; i < ep->max_regions; i++) { windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i); windows[i].size = SZ_1M; windows[i].page_size = SZ_1M; } err = pci_epc_multi_mem_init(epc, windows, ep->max_regions); devm_kfree(dev, windows); if (err < 0) { dev_err(dev, "failed to initialize the memory space\n"); goto err_uninit_port; } ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, SZ_1M); if (!ep->irq_cpu_addr) { dev_err(dev, "failed to reserve memory space for MSI\n"); err = -ENOMEM; goto err_epc_mem_exit; } ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; /* * MSI-X is not supported but the controller still advertises the MSI-X * capability by default, which can lead to the Root Complex side * allocating MSI-X vectors which cannot be used. Avoid this by skipping * the MSI-X capability entry in the PCIe capabilities linked-list: get * the next pointer from the MSI-X entry and set that in the MSI * capability entry (which is the previous entry). This way the MSI-X * entry is skipped (left out of the linked-list) and not advertised. 
*/ cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK; cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSIX_CAP_REG) & ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK; cfg_msi |= cfg_msix_cp; rockchip_pcie_write(rockchip, cfg_msi, PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE, PCIE_CLIENT_CONFIG); return 0; err_epc_mem_exit: pci_epc_mem_exit(epc); err_uninit_port: rockchip_pcie_deinit_phys(rockchip); err_disable_clocks: rockchip_pcie_disable_clocks(rockchip); return err; } static struct platform_driver rockchip_pcie_ep_driver = { .driver = { .name = "rockchip-pcie-ep", .of_match_table = rockchip_pcie_ep_of_match, }, .probe = rockchip_pcie_ep_probe, }; builtin_platform_driver(rockchip_pcie_ep_driver);
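/*
 * Editor's note: a minimal, hypothetical userspace sketch of the BAR
 * aperture encoding used by rockchip_pcie_ep_set_bar() above. The hardware
 * expresses a BAR size as 2^(aperture + 7), i.e. 128 B -> 0, 256 B -> 1,
 * 512 B -> 2, and so on, so the driver rounds the requested size up to a
 * power of two before taking the log. The real driver also enforces its
 * MIN_EP_APERTURE lower bound, whose value is not shown in this excerpt.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int bar_aperture(uint64_t size)
{
	uint64_t sz = size < 128 ? 128 : size;

	/* Round up to the next power of two (fls64(sz - 1) in the driver). */
	sz = 1ULL << (64 - __builtin_clzll(sz - 1));

	return (unsigned int)(63 - __builtin_clzll(sz)) - 7;
}

int main(void)
{
	/* A 1 MiB BAR (2^20) encodes as aperture 13, a 4 KiB BAR as 5. */
	printf("1 MiB -> %u, 4 KiB -> %u, 300 B -> %u\n",
	       bar_aperture(1 << 20), bar_aperture(4096), bar_aperture(300));
	return 0;
}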
linux-master
drivers/pci/controller/pcie-rockchip-ep.c
// SPDX-License-Identifier: GPL-2.0 /* * MediaTek PCIe host controller driver. * * Copyright (c) 2020 MediaTek Inc. * Author: Jianjun Wang <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include "../pci.h" #define PCIE_SETTING_REG 0x80 #define PCIE_PCI_IDS_1 0x9c #define PCI_CLASS(class) (class << 8) #define PCIE_RC_MODE BIT(0) #define PCIE_CFGNUM_REG 0x140 #define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0)) #define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8)) #define PCIE_CFG_BYTE_EN(bytes) (((bytes) << 16) & GENMASK(19, 16)) #define PCIE_CFG_FORCE_BYTE_EN BIT(20) #define PCIE_CFG_OFFSET_ADDR 0x1000 #define PCIE_CFG_HEADER(bus, devfn) \ (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn)) #define PCIE_RST_CTRL_REG 0x148 #define PCIE_MAC_RSTB BIT(0) #define PCIE_PHY_RSTB BIT(1) #define PCIE_BRG_RSTB BIT(2) #define PCIE_PE_RSTB BIT(3) #define PCIE_LTSSM_STATUS_REG 0x150 #define PCIE_LTSSM_STATE_MASK GENMASK(28, 24) #define PCIE_LTSSM_STATE(val) ((val & PCIE_LTSSM_STATE_MASK) >> 24) #define PCIE_LTSSM_STATE_L2_IDLE 0x14 #define PCIE_LINK_STATUS_REG 0x154 #define PCIE_PORT_LINKUP BIT(8) #define PCIE_MSI_SET_NUM 8 #define PCIE_MSI_IRQS_PER_SET 32 #define PCIE_MSI_IRQS_NUM \ (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM) #define PCIE_INT_ENABLE_REG 0x180 #define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8) #define PCIE_MSI_SHIFT 8 #define PCIE_INTX_SHIFT 24 #define PCIE_INTX_ENABLE \ GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT) #define PCIE_INT_STATUS_REG 0x184 #define PCIE_MSI_SET_ENABLE_REG 0x190 #define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0) #define PCIE_MSI_SET_BASE_REG 0xc00 #define PCIE_MSI_SET_OFFSET 0x10 #define PCIE_MSI_SET_STATUS_OFFSET 0x04 #define PCIE_MSI_SET_ENABLE_OFFSET 0x08 #define PCIE_MSI_SET_ADDR_HI_BASE 0xc80 #define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04 #define PCIE_ICMD_PM_REG 0x198 #define PCIE_TURN_OFF_LINK BIT(4) #define PCIE_MISC_CTRL_REG 0x348 #define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1) #define PCIE_TRANS_TABLE_BASE_REG 0x800 #define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4 #define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8 #define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc #define PCIE_ATR_TRSL_PARAM_OFFSET 0x10 #define PCIE_ATR_TLB_SET_OFFSET 0x20 #define PCIE_MAX_TRANS_TABLES 8 #define PCIE_ATR_EN BIT(0) #define PCIE_ATR_SIZE(size) \ (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN) #define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0)) #define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0) #define PCIE_ATR_TYPE_IO PCIE_ATR_ID(1) #define PCIE_ATR_TLP_TYPE(type) (((type) << 16) & GENMASK(18, 16)) #define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0) #define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2) /** * struct mtk_msi_set - MSI information for each set * @base: IO mapped register base * @msg_addr: MSI message address * @saved_irq_state: IRQ enable state saved at suspend time */ struct mtk_msi_set { void __iomem *base; phys_addr_t msg_addr; u32 saved_irq_state; }; /** * struct mtk_gen3_pcie - PCIe port information * @dev: pointer to PCIe device * @base: IO mapped register base * @reg_base: physical register base * @mac_reset: MAC reset control * @phy_reset: PHY reset control * @phy: PHY controller block * 
@clks: PCIe clocks * @num_clks: PCIe clocks count for this port * @irq: PCIe controller interrupt number * @saved_irq_state: IRQ enable state saved at suspend time * @irq_lock: lock protecting IRQ register access * @intx_domain: legacy INTx IRQ domain * @msi_domain: MSI IRQ domain * @msi_bottom_domain: MSI IRQ bottom domain * @msi_sets: MSI sets information * @lock: lock protecting IRQ bit map * @msi_irq_in_use: bit map for assigned MSI IRQ */ struct mtk_gen3_pcie { struct device *dev; void __iomem *base; phys_addr_t reg_base; struct reset_control *mac_reset; struct reset_control *phy_reset; struct phy *phy; struct clk_bulk_data *clks; int num_clks; int irq; u32 saved_irq_state; raw_spinlock_t irq_lock; struct irq_domain *intx_domain; struct irq_domain *msi_domain; struct irq_domain *msi_bottom_domain; struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM]; struct mutex lock; DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM); }; /* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */ static const char *const ltssm_str[] = { "detect.quiet", /* 0x00 */ "detect.active", /* 0x01 */ "polling.active", /* 0x02 */ "polling.compliance", /* 0x03 */ "polling.configuration", /* 0x04 */ "config.linkwidthstart", /* 0x05 */ "config.linkwidthaccept", /* 0x06 */ "config.lanenumwait", /* 0x07 */ "config.lanenumaccept", /* 0x08 */ "config.complete", /* 0x09 */ "config.idle", /* 0x0A */ "recovery.receiverlock", /* 0x0B */ "recovery.equalization", /* 0x0C */ "recovery.speed", /* 0x0D */ "recovery.receiverconfig", /* 0x0E */ "recovery.idle", /* 0x0F */ "L0", /* 0x10 */ "L0s", /* 0x11 */ "L1.entry", /* 0x12 */ "L1.idle", /* 0x13 */ "L2.idle", /* 0x14 */ "L2.transmitwake", /* 0x15 */ "disable", /* 0x16 */ "loopback.entry", /* 0x17 */ "loopback.active", /* 0x18 */ "loopback.exit", /* 0x19 */ "hotreset", /* 0x1A */ }; /** * mtk_pcie_config_tlp_header() - Configure a configuration TLP header * @bus: PCI bus to query * @devfn: device/function number * @where: offset in config space * @size: data size in TLP header * * Set byte enable field and device information in configuration TLP header. 
*/ static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn, int where, int size) { struct mtk_gen3_pcie *pcie = bus->sysdata; int bytes; u32 val; bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3); val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) | PCIE_CFG_HEADER(bus->number, devfn); writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG); } static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mtk_gen3_pcie *pcie = bus->sysdata; return pcie->base + PCIE_CFG_OFFSET_ADDR + where; } static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { mtk_pcie_config_tlp_header(bus, devfn, where, size); return pci_generic_config_read32(bus, devfn, where, size, val); } static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { mtk_pcie_config_tlp_header(bus, devfn, where, size); if (size <= 2) val <<= (where & 0x3) * 8; return pci_generic_config_write32(bus, devfn, where, 4, val); } static struct pci_ops mtk_pcie_ops = { .map_bus = mtk_pcie_map_bus, .read = mtk_pcie_config_read, .write = mtk_pcie_config_write, }; static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, resource_size_t cpu_addr, resource_size_t pci_addr, resource_size_t size, unsigned long type, int num) { void __iomem *table; u32 val; if (num >= PCIE_MAX_TRANS_TABLES) { dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); return -ENODEV; } table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + num * PCIE_ATR_TLB_SET_OFFSET; writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), table); writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); if (type == IORESOURCE_IO) val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; else val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); return 0; } static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie) { int i; u32 val; for (i = 0; i < PCIE_MSI_SET_NUM; i++) { struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG + i * PCIE_MSI_SET_OFFSET; msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG + i * PCIE_MSI_SET_OFFSET; /* Configure the MSI capture address */ writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base); writel_relaxed(upper_32_bits(msi_set->msg_addr), pcie->base + PCIE_MSI_SET_ADDR_HI_BASE + i * PCIE_MSI_SET_ADDR_HI_OFFSET); } val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG); val |= PCIE_MSI_SET_ENABLE; writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG); val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val |= PCIE_MSI_ENABLE; writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); } static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) { struct resource_entry *entry; struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); unsigned int table_index = 0; int err; u32 val; /* Set as RC mode */ val = readl_relaxed(pcie->base + PCIE_SETTING_REG); val |= PCIE_RC_MODE; writel_relaxed(val, pcie->base + PCIE_SETTING_REG); /* Set class code */ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); val &= ~GENMASK(31, 8); val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL); writel_relaxed(val, pcie->base + 
PCIE_PCI_IDS_1); /* Mask all INTx interrupts */ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val &= ~PCIE_INTX_ENABLE; writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); /* Disable DVFSRC voltage request */ val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG); val |= PCIE_DISABLE_DVFSRC_VLT_REQ; writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG); /* Assert all reset signals */ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB; writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); /* * Described in PCIe CEM specification sections 2.2 (PERST# Signal) * and 2.2.1 (Initial Power-Up (G3 to S0)). * The deassertion of PERST# should be delayed 100ms (TPVPERL) * for the power and clock to become stable. */ msleep(100); /* De-assert reset signals */ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB); writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); /* Check if the link is up or not */ err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, !!(val & PCIE_PORT_LINKUP), 20, PCI_PM_D3COLD_WAIT * USEC_PER_MSEC); if (err) { const char *ltssm_state; int ltssm_index; val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); ltssm_index = PCIE_LTSSM_STATE(val); ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ? "Unknown state" : ltssm_str[ltssm_index]; dev_err(pcie->dev, "PCIe link down, current LTSSM state: %s (%#x)\n", ltssm_state, val); return err; } mtk_pcie_enable_msi(pcie); /* Set PCIe translation windows */ resource_list_for_each_entry(entry, &host->windows) { struct resource *res = entry->res; unsigned long type = resource_type(res); resource_size_t cpu_addr; resource_size_t pci_addr; resource_size_t size; const char *range_type; if (type == IORESOURCE_IO) { cpu_addr = pci_pio_to_address(res->start); range_type = "IO"; } else if (type == IORESOURCE_MEM) { cpu_addr = res->start; range_type = "MEM"; } else { continue; } pci_addr = res->start - entry->offset; size = resource_size(res); err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size, type, table_index); if (err) return err; dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", range_type, table_index, (unsigned long long)cpu_addr, (unsigned long long)pci_addr, (unsigned long long)size); table_index++; } return 0; } static int mtk_pcie_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { return -EINVAL; } static void mtk_pcie_msi_irq_mask(struct irq_data *data) { pci_msi_mask_irq(data); irq_chip_mask_parent(data); } static void mtk_pcie_msi_irq_unmask(struct irq_data *data) { pci_msi_unmask_irq(data); irq_chip_unmask_parent(data); } static struct irq_chip mtk_msi_irq_chip = { .irq_ack = irq_chip_ack_parent, .irq_mask = mtk_pcie_msi_irq_mask, .irq_unmask = mtk_pcie_msi_irq_unmask, .name = "MSI", }; static struct msi_domain_info mtk_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), .chip = &mtk_msi_irq_chip, }; static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); struct mtk_gen3_pcie *pcie = data->domain->host_data; unsigned long hwirq; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; msg->address_hi = upper_32_bits(msi_set->msg_addr); msg->address_lo = lower_32_bits(msi_set->msg_addr); msg->data = hwirq; dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", hwirq, 
msg->address_hi, msg->address_lo, msg->data); } static void mtk_msi_bottom_irq_ack(struct irq_data *data) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); unsigned long hwirq; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET); } static void mtk_msi_bottom_irq_mask(struct irq_data *data) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); struct mtk_gen3_pcie *pcie = data->domain->host_data; unsigned long hwirq, flags; u32 val; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; raw_spin_lock_irqsave(&pcie->irq_lock, flags); val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); val &= ~BIT(hwirq); writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static void mtk_msi_bottom_irq_unmask(struct irq_data *data) { struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); struct mtk_gen3_pcie *pcie = data->domain->host_data; unsigned long hwirq, flags; u32 val; hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; raw_spin_lock_irqsave(&pcie->irq_lock, flags); val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); val |= BIT(hwirq); writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static struct irq_chip mtk_msi_bottom_irq_chip = { .irq_ack = mtk_msi_bottom_irq_ack, .irq_mask = mtk_msi_bottom_irq_mask, .irq_unmask = mtk_msi_bottom_irq_unmask, .irq_compose_msi_msg = mtk_compose_msi_msg, .irq_set_affinity = mtk_pcie_set_affinity, .name = "MSI", }; static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct mtk_gen3_pcie *pcie = domain->host_data; struct mtk_msi_set *msi_set; int i, hwirq, set_idx; mutex_lock(&pcie->lock); hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM, order_base_2(nr_irqs)); mutex_unlock(&pcie->lock); if (hwirq < 0) return -ENOSPC; set_idx = hwirq / PCIE_MSI_IRQS_PER_SET; msi_set = &pcie->msi_sets[set_idx]; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, &mtk_msi_bottom_irq_chip, msi_set, handle_edge_irq, NULL, NULL); return 0; } static void mtk_msi_bottom_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct mtk_gen3_pcie *pcie = domain->host_data; struct irq_data *data = irq_domain_get_irq_data(domain, virq); mutex_lock(&pcie->lock); bitmap_release_region(pcie->msi_irq_in_use, data->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->lock); irq_domain_free_irqs_common(domain, virq, nr_irqs); } static const struct irq_domain_ops mtk_msi_bottom_domain_ops = { .alloc = mtk_msi_bottom_domain_alloc, .free = mtk_msi_bottom_domain_free, }; static void mtk_intx_mask(struct irq_data *data) { struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; u32 val; raw_spin_lock_irqsave(&pcie->irq_lock, flags); val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT); writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static void mtk_intx_unmask(struct irq_data *data) { struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; u32 val; raw_spin_lock_irqsave(&pcie->irq_lock, flags); val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); val |= BIT(data->hwirq + PCIE_INTX_SHIFT); writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); 
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } /** * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt * @data: pointer to chip specific data * * As an emulated level IRQ, its interrupt status will remain * until the corresponding de-assert message is received; hence that * the status can only be cleared when the interrupt has been serviced. */ static void mtk_intx_eoi(struct irq_data *data) { struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long hwirq; hwirq = data->hwirq + PCIE_INTX_SHIFT; writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG); } static struct irq_chip mtk_intx_irq_chip = { .irq_mask = mtk_intx_mask, .irq_unmask = mtk_intx_unmask, .irq_eoi = mtk_intx_eoi, .irq_set_affinity = mtk_pcie_set_affinity, .name = "INTx", }; static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_data(irq, domain->host_data); irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip, handle_fasteoi_irq, "INTx"); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = mtk_pcie_intx_map, }; static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *intc_node, *node = dev->of_node; int ret; raw_spin_lock_init(&pcie->irq_lock); /* Setup INTx */ intc_node = of_get_child_by_name(node, "interrupt-controller"); if (!intc_node) { dev_err(dev, "missing interrupt-controller node\n"); return -ENODEV; } pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, &intx_domain_ops, pcie); if (!pcie->intx_domain) { dev_err(dev, "failed to create INTx IRQ domain\n"); ret = -ENODEV; goto out_put_node; } /* Setup MSI */ mutex_init(&pcie->lock); pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, &mtk_msi_bottom_domain_ops, pcie); if (!pcie->msi_bottom_domain) { dev_err(dev, "failed to create MSI bottom domain\n"); ret = -ENODEV; goto err_msi_bottom_domain; } pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, &mtk_msi_domain_info, pcie->msi_bottom_domain); if (!pcie->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); ret = -ENODEV; goto err_msi_domain; } of_node_put(intc_node); return 0; err_msi_domain: irq_domain_remove(pcie->msi_bottom_domain); err_msi_bottom_domain: irq_domain_remove(pcie->intx_domain); out_put_node: of_node_put(intc_node); return ret; } static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie) { irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); if (pcie->intx_domain) irq_domain_remove(pcie->intx_domain); if (pcie->msi_domain) irq_domain_remove(pcie->msi_domain); if (pcie->msi_bottom_domain) irq_domain_remove(pcie->msi_bottom_domain); irq_dispose_mapping(pcie->irq); } static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx) { struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx]; unsigned long msi_enable, msi_status; irq_hw_number_t bit, hwirq; msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); do { msi_status = readl_relaxed(msi_set->base + PCIE_MSI_SET_STATUS_OFFSET); msi_status &= msi_enable; if (!msi_status) break; for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) { hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET; generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq); } } while (true); } static void mtk_pcie_irq_handler(struct irq_desc *desc) { struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned long status; irq_hw_number_t irq_bit = 
PCIE_INTX_SHIFT; chained_irq_enter(irqchip, desc); status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG); for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX + PCIE_INTX_SHIFT) generic_handle_domain_irq(pcie->intx_domain, irq_bit - PCIE_INTX_SHIFT); irq_bit = PCIE_MSI_SHIFT; for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM + PCIE_MSI_SHIFT) { mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT); writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG); } chained_irq_exit(irqchip, desc); } static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); int err; err = mtk_pcie_init_irq_domains(pcie); if (err) return err; pcie->irq = platform_get_irq(pdev, 0); if (pcie->irq < 0) return pcie->irq; irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie); return 0; } static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *regs; int ret; regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); if (!regs) return -EINVAL; pcie->base = devm_ioremap_resource(dev, regs); if (IS_ERR(pcie->base)) { dev_err(dev, "failed to map register base\n"); return PTR_ERR(pcie->base); } pcie->reg_base = regs->start; pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy"); if (IS_ERR(pcie->phy_reset)) { ret = PTR_ERR(pcie->phy_reset); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get PHY reset\n"); return ret; } pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); if (IS_ERR(pcie->mac_reset)) { ret = PTR_ERR(pcie->mac_reset); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get MAC reset\n"); return ret; } pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) { ret = PTR_ERR(pcie->phy); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get PHY\n"); return ret; } pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); if (pcie->num_clks < 0) { dev_err(dev, "failed to get clocks\n"); return pcie->num_clks; } return 0; } static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) { struct device *dev = pcie->dev; int err; /* PHY power on and enable pipe clock */ reset_control_deassert(pcie->phy_reset); err = phy_init(pcie->phy); if (err) { dev_err(dev, "failed to initialize PHY\n"); goto err_phy_init; } err = phy_power_on(pcie->phy); if (err) { dev_err(dev, "failed to power on PHY\n"); goto err_phy_on; } /* MAC power on and enable transaction layer clocks */ reset_control_deassert(pcie->mac_reset); pm_runtime_enable(dev); pm_runtime_get_sync(dev); err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); if (err) { dev_err(dev, "failed to enable clocks\n"); goto err_clk_init; } return 0; err_clk_init: pm_runtime_put_sync(dev); pm_runtime_disable(dev); reset_control_assert(pcie->mac_reset); phy_power_off(pcie->phy); err_phy_on: phy_exit(pcie->phy); err_phy_init: reset_control_assert(pcie->phy_reset); return err; } static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) { clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); reset_control_assert(pcie->mac_reset); phy_power_off(pcie->phy); phy_exit(pcie->phy); reset_control_assert(pcie->phy_reset); } static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) { int err; err = mtk_pcie_parse_port(pcie); if (err) return err; /* * The controller may have been left out of reset by the bootloader * so make sure that 
we get a clean start by asserting resets here. */ reset_control_assert(pcie->phy_reset); reset_control_assert(pcie->mac_reset); usleep_range(10, 20); /* Don't touch the hardware registers before power up */ err = mtk_pcie_power_up(pcie); if (err) return err; /* Try link up */ err = mtk_pcie_startup_port(pcie); if (err) goto err_setup; err = mtk_pcie_setup_irq(pcie); if (err) goto err_setup; return 0; err_setup: mtk_pcie_power_down(pcie); return err; } static int mtk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_gen3_pcie *pcie; struct pci_host_bridge *host; int err; host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!host) return -ENOMEM; pcie = pci_host_bridge_priv(host); pcie->dev = dev; platform_set_drvdata(pdev, pcie); err = mtk_pcie_setup(pcie); if (err) return err; host->ops = &mtk_pcie_ops; host->sysdata = pcie; err = pci_host_probe(host); if (err) { mtk_pcie_irq_teardown(pcie); mtk_pcie_power_down(pcie); return err; } return 0; } static void mtk_pcie_remove(struct platform_device *pdev) { struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); pci_lock_rescan_remove(); pci_stop_root_bus(host->bus); pci_remove_root_bus(host->bus); pci_unlock_rescan_remove(); mtk_pcie_irq_teardown(pcie); mtk_pcie_power_down(pcie); } static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) { int i; raw_spin_lock(&pcie->irq_lock); pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); for (i = 0; i < PCIE_MSI_SET_NUM; i++) { struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; msi_set->saved_irq_state = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); } raw_spin_unlock(&pcie->irq_lock); } static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) { int i; raw_spin_lock(&pcie->irq_lock); writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG); for (i = 0; i < PCIE_MSI_SET_NUM; i++) { struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; writel_relaxed(msi_set->saved_irq_state, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); } raw_spin_unlock(&pcie->irq_lock); } static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) { u32 val; val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG); val |= PCIE_TURN_OFF_LINK; writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG); /* Check the link is L2 */ return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val, (PCIE_LTSSM_STATE(val) == PCIE_LTSSM_STATE_L2_IDLE), 20, 50 * USEC_PER_MSEC); } static int mtk_pcie_suspend_noirq(struct device *dev) { struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; u32 val; /* Trigger link to L2 state */ err = mtk_pcie_turn_off_link(pcie); if (err) { dev_err(pcie->dev, "cannot enter L2 state\n"); return err; } /* Pull down the PERST# pin */ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); val |= PCIE_PE_RSTB; writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); dev_dbg(pcie->dev, "entered L2 states successfully"); mtk_pcie_irq_save(pcie); mtk_pcie_power_down(pcie); return 0; } static int mtk_pcie_resume_noirq(struct device *dev) { struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; err = mtk_pcie_power_up(pcie); if (err) return err; err = mtk_pcie_startup_port(pcie); if (err) { mtk_pcie_power_down(pcie); return err; } mtk_pcie_irq_restore(pcie); return 0; } static const struct dev_pm_ops mtk_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, mtk_pcie_resume_noirq) }; static const struct of_device_id mtk_pcie_of_match[] = { { .compatible = 
"mediatek,mt8192-pcie" }, {}, }; MODULE_DEVICE_TABLE(of, mtk_pcie_of_match); static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, .remove_new = mtk_pcie_remove, .driver = { .name = "mtk-pcie-gen3", .of_match_table = mtk_pcie_of_match, .pm = &mtk_pcie_pm_ops, }, }; module_platform_driver(mtk_pcie_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-mediatek-gen3.c
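A minimal standalone sketch (not part of the driver above) of the MSI hwirq arithmetic used by mtk_msi_bottom_domain_alloc() and mtk_compose_msi_msg(): a flat hwirq is split into an MSI set index and a bit within that set, and the per-set bit is what ends up in the MSI message data. The set and vector counts below are assumptions standing in for the driver's PCIE_MSI_SET_NUM and PCIE_MSI_IRQS_PER_SET macros, which are defined earlier in that file and not shown here.

#include <stdio.h>

/* Assumed stand-ins for the driver's PCIE_MSI_SET_NUM / PCIE_MSI_IRQS_PER_SET */
#define MSI_SET_NUM		8
#define MSI_IRQS_PER_SET	32

int main(void)
{
	unsigned int hwirq;

	for (hwirq = 0; hwirq < MSI_SET_NUM * MSI_IRQS_PER_SET; hwirq += 37) {
		unsigned int set_idx = hwirq / MSI_IRQS_PER_SET; /* which mtk_msi_set register block */
		unsigned int bit = hwirq % MSI_IRQS_PER_SET;     /* bit in that set's enable/status register, also the MSI data */

		printf("hwirq %3u -> set %u, bit %2u\n", hwirq, set_idx, bit);
	}
	return 0;
}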
// SPDX-License-Identifier: GPL-2.0+ /* * APM X-Gene PCIe Driver * * Copyright (c) 2014 Applied Micro Circuits Corporation. * * Author: Tanmay Inamdar <[email protected]>. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/memblock.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "../pci.h" #define PCIECORE_CTLANDSTATUS 0x50 #define PIM1_1L 0x80 #define IBAR2 0x98 #define IR2MSK 0x9c #define PIM2_1L 0xa0 #define IBAR3L 0xb4 #define IR3MSKL 0xbc #define PIM3_1L 0xc4 #define OMR1BARL 0x100 #define OMR2BARL 0x118 #define OMR3BARL 0x130 #define CFGBARL 0x154 #define CFGBARH 0x158 #define CFGCTL 0x15c #define RTDID 0x160 #define BRIDGE_CFG_0 0x2000 #define BRIDGE_CFG_4 0x2010 #define BRIDGE_STATUS_0 0x2600 #define LINK_UP_MASK 0x00000100 #define AXI_EP_CFG_ACCESS 0x10000 #define EN_COHERENCY 0xF0000000 #define EN_REG 0x00000001 #define OB_LO_IO 0x00000002 #define XGENE_PCIE_DEVICEID 0xE004 #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) #define XGENE_V1_PCI_EXP_CAP 0x40 /* PCIe IP version */ #define XGENE_PCIE_IP_VER_UNKN 0 #define XGENE_PCIE_IP_VER_1 1 #define XGENE_PCIE_IP_VER_2 2 #if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) struct xgene_pcie { struct device_node *node; struct device *dev; struct clk *clk; void __iomem *csr_base; void __iomem *cfg_base; unsigned long cfg_addr; bool link_up; u32 version; }; static u32 xgene_pcie_readl(struct xgene_pcie *port, u32 reg) { return readl(port->csr_base + reg); } static void xgene_pcie_writel(struct xgene_pcie *port, u32 reg, u32 val) { writel(val, port->csr_base + reg); } static inline u32 pcie_bar_low_val(u32 addr, u32 flags) { return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; } static inline struct xgene_pcie *pcie_bus_to_port(struct pci_bus *bus) { struct pci_config_window *cfg; if (acpi_disabled) return (struct xgene_pcie *)(bus->sysdata); cfg = bus->sysdata; return (struct xgene_pcie *)(cfg->priv); } /* * When the address bit [17:16] is 2'b01, the Configuration access will be * treated as Type 1 and it will be forwarded to external PCIe device. */ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) { struct xgene_pcie *port = pcie_bus_to_port(bus); if (bus->number >= (bus->primary + 1)) return port->cfg_base + AXI_EP_CFG_ACCESS; return port->cfg_base; } /* * For Configuration request, RTDID register is used as Bus Number, * Device Number and Function number of the header fields. */ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) { struct xgene_pcie *port = pcie_bus_to_port(bus); unsigned int b, d, f; u32 rtdid_val = 0; b = bus->number; d = PCI_SLOT(devfn); f = PCI_FUNC(devfn); if (!pci_is_root_bus(bus)) rtdid_val = (b << 8) | (d << 3) | f; xgene_pcie_writel(port, RTDID, rtdid_val); /* read the register back to ensure flush */ xgene_pcie_readl(port, RTDID); } /* * X-Gene PCIe port uses BAR0-BAR1 of RC's configuration space as * the translation from PCI bus to native BUS. Entire DDR region * is mapped into PCIe space using these registers, so it can be * reached by DMA from EP devices. The BAR0/1 of bridge should be * hidden during enumeration to avoid the sizing and resource allocation * by PCIe core. 
*/ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) { if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) || (offset == PCI_BASE_ADDRESS_1))) return true; return false; } static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { if ((pci_is_root_bus(bus) && devfn != 0) || xgene_pcie_hide_rc_bars(bus, offset)) return NULL; xgene_pcie_set_rtdid_reg(bus, devfn); return xgene_pcie_get_cfg_base(bus) + offset; } static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct xgene_pcie *port = pcie_bus_to_port(bus); if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != PCIBIOS_SUCCESSFUL) return PCIBIOS_DEVICE_NOT_FOUND; /* * The v1 controller has a bug in its Configuration Request Retry * Status (CRS) logic: when CRS Software Visibility is enabled and * we read the Vendor and Device ID of a non-existent device, the * controller fabricates return data of 0xFFFF0001 ("device exists * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE) * ("device does not exist"). This causes the PCI core to retry * the read until it times out. Avoid this by not claiming to * support CRS SV. */ if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL)) *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); if (size <= 2) *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); return PCIBIOS_SUCCESSFUL; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) static int xgene_get_csr_resource(struct acpi_device *adev, struct resource *res) { struct device *dev = &adev->dev; struct resource_entry *entry; struct list_head list; unsigned long flags; int ret; INIT_LIST_HEAD(&list); flags = IORESOURCE_MEM; ret = acpi_dev_get_resources(adev, &list, acpi_dev_filter_resource_type_cb, (void *) flags); if (ret < 0) { dev_err(dev, "failed to parse _CRS method, error code %d\n", ret); return ret; } if (ret == 0) { dev_err(dev, "no IO and memory resources present in _CRS\n"); return -EINVAL; } entry = list_first_entry(&list, struct resource_entry, node); *res = *entry->res; acpi_dev_free_resource_list(&list); return 0; } static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) { struct device *dev = cfg->parent; struct acpi_device *adev = to_acpi_device(dev); struct xgene_pcie *port; struct resource csr; int ret; port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; ret = xgene_get_csr_resource(adev, &csr); if (ret) { dev_err(dev, "can't get CSR resource\n"); return ret; } port->csr_base = devm_pci_remap_cfg_resource(dev, &csr); if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); port->cfg_base = cfg->win; port->version = ipversion; cfg->priv = port; return 0; } static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg) { return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1); } const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = { .init = xgene_v1_pcie_ecam_init, .pci_ops = { .map_bus = xgene_pcie_map_bus, .read = xgene_pcie_config_read32, .write = pci_generic_config_write, } }; static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg) { return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2); } const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { .init = xgene_v2_pcie_ecam_init, .pci_ops = { .map_bus = xgene_pcie_map_bus, .read = xgene_pcie_config_read32, .write = pci_generic_config_write, } }; #endif #if defined(CONFIG_PCI_XGENE) static u64 
xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr, u32 flags, u64 size) { u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; u32 val32 = 0; u32 val; val32 = xgene_pcie_readl(port, addr); val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); xgene_pcie_writel(port, addr, val); val32 = xgene_pcie_readl(port, addr + 0x04); val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); xgene_pcie_writel(port, addr + 0x04, val); val32 = xgene_pcie_readl(port, addr + 0x04); val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); xgene_pcie_writel(port, addr + 0x04, val); val32 = xgene_pcie_readl(port, addr + 0x08); val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16); xgene_pcie_writel(port, addr + 0x08, val); return mask; } static void xgene_pcie_linkup(struct xgene_pcie *port, u32 *lanes, u32 *speed) { u32 val32; port->link_up = false; val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS); if (val32 & LINK_UP_MASK) { port->link_up = true; *speed = PIPE_PHY_RATE_RD(val32); val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0); *lanes = val32 >> 26; } } static int xgene_pcie_init_port(struct xgene_pcie *port) { struct device *dev = port->dev; int rc; port->clk = clk_get(dev, NULL); if (IS_ERR(port->clk)) { dev_err(dev, "clock not available\n"); return -ENODEV; } rc = clk_prepare_enable(port->clk); if (rc) { dev_err(dev, "clock enable failed\n"); return rc; } return 0; } static int xgene_pcie_map_reg(struct xgene_pcie *port, struct platform_device *pdev) { struct device *dev = port->dev; struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); port->csr_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); port->cfg_base = devm_ioremap_resource(dev, res); if (IS_ERR(port->cfg_base)) return PTR_ERR(port->cfg_base); port->cfg_addr = res->start; return 0; } static void xgene_pcie_setup_ob_reg(struct xgene_pcie *port, struct resource *res, u32 offset, u64 cpu_addr, u64 pci_addr) { struct device *dev = port->dev; resource_size_t size = resource_size(res); u64 restype = resource_type(res); u64 mask = 0; u32 min_size; u32 flag = EN_REG; if (restype == IORESOURCE_MEM) { min_size = SZ_128M; } else { min_size = 128; flag |= OB_LO_IO; } if (size >= min_size) mask = ~(size - 1) | flag; else dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n", (u64)size, min_size); xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr)); xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr)); xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask)); xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask)); xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr)); xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr)); } static void xgene_pcie_setup_cfg_reg(struct xgene_pcie *port) { u64 addr = port->cfg_addr; xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr)); xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr)); xgene_pcie_writel(port, CFGCTL, EN_REG); } static int xgene_pcie_map_ranges(struct xgene_pcie *port) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); struct resource_entry *window; struct device *dev = port->dev; resource_list_for_each_entry(window, &bridge->windows) { struct resource *res = window->res; u64 restype = resource_type(res); dev_dbg(dev, "%pR\n", res); switch (restype) { case IORESOURCE_IO: xgene_pcie_setup_ob_reg(port, res, OMR3BARL, pci_pio_to_address(res->start), res->start - 
window->offset); break; case IORESOURCE_MEM: if (res->flags & IORESOURCE_PREFETCH) xgene_pcie_setup_ob_reg(port, res, OMR2BARL, res->start, res->start - window->offset); else xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start, res->start - window->offset); break; case IORESOURCE_BUS: break; default: dev_err(dev, "invalid resource %pR\n", res); return -EINVAL; } } xgene_pcie_setup_cfg_reg(port); return 0; } static void xgene_pcie_setup_pims(struct xgene_pcie *port, u32 pim_reg, u64 pim, u64 size) { xgene_pcie_writel(port, pim_reg, lower_32_bits(pim)); xgene_pcie_writel(port, pim_reg + 0x04, upper_32_bits(pim) | EN_COHERENCY); xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size)); xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size)); } /* * X-Gene PCIe support maximum 3 inbound memory regions * This function helps to select a region based on size of region */ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) { if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) { *ib_reg_mask |= (1 << 1); return 1; } if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) { *ib_reg_mask |= (1 << 2); return 2; } return -EINVAL; } static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port, struct of_pci_range *range, u8 *ib_reg_mask) { void __iomem *cfg_base = port->cfg_base; struct device *dev = port->dev; void __iomem *bar_addr; u32 pim_reg; u64 cpu_addr = range->cpu_addr; u64 pci_addr = range->pci_addr; u64 size = range->size; u64 mask = ~(size - 1) | EN_REG; u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; u32 bar_low; int region; region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { dev_warn(dev, "invalid pcie dma-range config\n"); return; } if (range->flags & IORESOURCE_PREFETCH) flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; bar_low = pcie_bar_low_val((u32)cpu_addr, flags); switch (region) { case 0: xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size); bar_addr = cfg_base + PCI_BASE_ADDRESS_0; writel(bar_low, bar_addr); writel(upper_32_bits(cpu_addr), bar_addr + 0x4); pim_reg = PIM1_1L; break; case 1: xgene_pcie_writel(port, IBAR2, bar_low); xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask)); pim_reg = PIM2_1L; break; case 2: xgene_pcie_writel(port, IBAR3L, bar_low); xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr)); xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask)); xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask)); pim_reg = PIM3_1L; break; } xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1)); } static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port) { struct device_node *np = port->node; struct of_pci_range range; struct of_pci_range_parser parser; struct device *dev = port->dev; u8 ib_reg_mask = 0; if (of_pci_dma_range_parser_init(&parser, np)) { dev_err(dev, "missing dma-ranges property\n"); return -EINVAL; } /* Get the dma-ranges from DT */ for_each_of_pci_range(&parser, &range) { u64 end = range.cpu_addr + range.size - 1; dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", range.flags, range.cpu_addr, end, range.pci_addr); xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); } return 0; } /* clear BAR configuration which was done by firmware */ static void xgene_pcie_clear_config(struct xgene_pcie *port) { int i; for (i = PIM1_1L; i <= CFGCTL; i += 4) xgene_pcie_writel(port, i, 0); } static int xgene_pcie_setup(struct xgene_pcie *port) { struct device *dev = 
port->dev; u32 val, lanes = 0, speed = 0; int ret; xgene_pcie_clear_config(port); /* setup the vendor and device IDs correctly */ val = (XGENE_PCIE_DEVICEID << 16) | PCI_VENDOR_ID_AMCC; xgene_pcie_writel(port, BRIDGE_CFG_0, val); ret = xgene_pcie_map_ranges(port); if (ret) return ret; ret = xgene_pcie_parse_map_dma_ranges(port); if (ret) return ret; xgene_pcie_linkup(port, &lanes, &speed); if (!port->link_up) dev_info(dev, "(rc) link down\n"); else dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1); return 0; } static struct pci_ops xgene_pcie_ops = { .map_bus = xgene_pcie_map_bus, .read = xgene_pcie_config_read32, .write = pci_generic_config_write32, }; static int xgene_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node; struct xgene_pcie *port; struct pci_host_bridge *bridge; int ret; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); if (!bridge) return -ENOMEM; port = pci_host_bridge_priv(bridge); port->node = of_node_get(dn); port->dev = dev; port->version = XGENE_PCIE_IP_VER_UNKN; if (of_device_is_compatible(port->node, "apm,xgene-pcie")) port->version = XGENE_PCIE_IP_VER_1; ret = xgene_pcie_map_reg(port, pdev); if (ret) return ret; ret = xgene_pcie_init_port(port); if (ret) return ret; ret = xgene_pcie_setup(port); if (ret) return ret; bridge->sysdata = port; bridge->ops = &xgene_pcie_ops; return pci_host_probe(bridge); } static const struct of_device_id xgene_pcie_match_table[] = { {.compatible = "apm,xgene-pcie",}, {}, }; static struct platform_driver xgene_pcie_driver = { .driver = { .name = "xgene-pcie", .of_match_table = xgene_pcie_match_table, .suppress_bind_attrs = true, }, .probe = xgene_pcie_probe, }; builtin_platform_driver(xgene_pcie_driver); #endif
linux-master
drivers/pci/controller/pci-xgene.c
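For illustration only (not part of the X-Gene driver above), a small user-space sketch of the encoding performed by xgene_pcie_set_rtdid_reg(): the bus number lands in bits [15:8], the device number in bits [7:3] and the function number in bits [2:0] of RTDID, with root-bus accesses programming zero.

#include <stdio.h>
#include <stdint.h>

/* Pack bus/device/function the way xgene_pcie_set_rtdid_reg() programs RTDID. */
static uint32_t rtdid_encode(unsigned int bus, unsigned int dev, unsigned int fn)
{
	return (bus << 8) | (dev << 3) | fn;
}

int main(void)
{
	printf("bus 1, dev 0, fn 0 -> RTDID %#06x\n", rtdid_encode(1, 0, 0));
	printf("bus 2, dev 3, fn 1 -> RTDID %#06x\n", rtdid_encode(2, 3, 1));
	return 0;
}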
// SPDX-License-Identifier: GPL-2.0 /* * Volume Management Device driver * Copyright (c) 2015, Intel Corporation. */ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include <linux/srcu.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <asm/irqdomain.h> #define VMD_CFGBAR 0 #define VMD_MEMBAR1 2 #define VMD_MEMBAR2 4 #define PCI_REG_VMCAP 0x40 #define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) #define PCI_REG_VMCONFIG 0x44 #define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) #define VMCONFIG_MSI_REMAP 0x2 #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) #define MB2_SHADOW_OFFSET 0x2000 #define MB2_SHADOW_SIZE 16 enum vmd_features { /* * Device may contain registers which hint the physical location of the * membars, in order to allow proper address translation during * resource assignment to enable guest virtualization */ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), /* * Device may provide root port configuration information which limits * bus numbering */ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), /* * Device contains physical location shadow registers in * vendor-specific capability space */ VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP = (1 << 2), /* * Device may use MSI-X vector 0 for software triggering and will not * be used for MSI remapping */ VMD_FEAT_OFFSET_FIRST_VECTOR = (1 << 3), /* * Device can bypass remapping MSI-X transactions into its MSI-X table, * avoiding the requirement of a VMD MSI domain for child device * interrupt handling. */ VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4), /* * Enable ASPM on the PCIE root ports and set the default LTR of the * storage devices on platforms where these values are not configured by * BIOS. This is needed for laptops, which require these settings for * proper power management of the SoC. */ VMD_FEAT_BIOS_PM_QUIRK = (1 << 5), }; #define VMD_BIOS_PM_QUIRK_LTR 0x1003 /* 3145728 ns */ #define VMD_FEATS_CLIENT (VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | \ VMD_FEAT_HAS_BUS_RESTRICTIONS | \ VMD_FEAT_OFFSET_FIRST_VECTOR | \ VMD_FEAT_BIOS_PM_QUIRK) static DEFINE_IDA(vmd_instance_ida); /* * Lock for manipulating VMD IRQ lists. */ static DEFINE_RAW_SPINLOCK(list_lock); /** * struct vmd_irq - private data to map driver IRQ to the VMD shared vector * @node: list item for parent traversal. * @irq: back pointer to parent. * @enabled: true if driver enabled IRQ * @virq: the virtual IRQ value provided to the requesting driver. * * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to * a VMD IRQ using this structure. */ struct vmd_irq { struct list_head node; struct vmd_irq_list *irq; bool enabled; unsigned int virq; }; /** * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector * @irq_list: the list of irq's the VMD one demuxes to. * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. 
* @virq: The underlying VMD Linux interrupt number */ struct vmd_irq_list { struct list_head irq_list; struct srcu_struct srcu; unsigned int count; unsigned int virq; }; struct vmd_dev { struct pci_dev *dev; spinlock_t cfg_lock; void __iomem *cfgbar; int msix_count; struct vmd_irq_list *irqs; struct pci_sysdata sysdata; struct resource resources[3]; struct irq_domain *irq_domain; struct pci_bus *bus; u8 busn_start; u8 first_vec; char *name; int instance; }; static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) { return container_of(bus->sysdata, struct vmd_dev, sysdata); } static inline unsigned int index_from_irqs(struct vmd_dev *vmd, struct vmd_irq_list *irqs) { return irqs - vmd->irqs; } /* * Drivers managing a device in a VMD domain allocate their own IRQs as before, * but the MSI entry for the hardware it's driving will be programmed with a * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its * domain into one of its own, and the VMD driver de-muxes these for the * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations * and irq_chip to set this up. */ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct vmd_irq *vmdirq = data->chip_data; struct vmd_irq_list *irq = vmdirq->irq; struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); memset(msg, 0, sizeof(*msg)); msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH; msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW; msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq); } /* * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. */ static void vmd_irq_enable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&list_lock, flags); WARN_ON(vmdirq->enabled); list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); vmdirq->enabled = true; raw_spin_unlock_irqrestore(&list_lock, flags); data->chip->irq_unmask(data); } static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; unsigned long flags; data->chip->irq_mask(data); raw_spin_lock_irqsave(&list_lock, flags); if (vmdirq->enabled) { list_del_rcu(&vmdirq->node); vmdirq->enabled = false; } raw_spin_unlock_irqrestore(&list_lock, flags); } /* * XXX: Stubbed until we develop acceptable way to not create conflicts with * other devices sharing the same vector. */ static int vmd_irq_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { return -EINVAL; } static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", .irq_enable = vmd_irq_enable, .irq_disable = vmd_irq_disable, .irq_compose_msi_msg = vmd_compose_msi_msg, .irq_set_affinity = vmd_irq_set_affinity, }; static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, msi_alloc_info_t *arg) { return 0; } /* * XXX: We can be even smarter selecting the best IRQ once we solve the * affinity problem. */ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) { unsigned long flags; int i, best; if (vmd->msix_count == 1 + vmd->first_vec) return &vmd->irqs[vmd->first_vec]; /* * White list for fast-interrupt handlers. All others will share the * "slow" interrupt vector. 
*/ switch (msi_desc_to_pci_dev(desc)->class) { case PCI_CLASS_STORAGE_EXPRESS: break; default: return &vmd->irqs[vmd->first_vec]; } raw_spin_lock_irqsave(&list_lock, flags); best = vmd->first_vec + 1; for (i = best; i < vmd->msix_count; i++) if (vmd->irqs[i].count < vmd->irqs[best].count) best = i; vmd->irqs[best].count++; raw_spin_unlock_irqrestore(&list_lock, flags); return &vmd->irqs[best]; } static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { struct msi_desc *desc = arg->desc; struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); if (!vmdirq) return -ENOMEM; INIT_LIST_HEAD(&vmdirq->node); vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq, handle_untracked_irq, vmd, NULL); return 0; } static void vmd_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq) { struct vmd_irq *vmdirq = irq_get_chip_data(virq); unsigned long flags; synchronize_srcu(&vmdirq->irq->srcu); /* XXX: Potential optimization to rebalance */ raw_spin_lock_irqsave(&list_lock, flags); vmdirq->irq->count--; raw_spin_unlock_irqrestore(&list_lock, flags); kfree(vmdirq); } static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = vmd_from_bus(pdev->bus); if (nvec > vmd->msix_count) return vmd->msix_count; memset(arg, 0, sizeof(*arg)); return 0; } static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; } static struct msi_domain_ops vmd_msi_domain_ops = { .get_hwirq = vmd_get_hwirq, .msi_init = vmd_msi_init, .msi_free = vmd_msi_free, .msi_prepare = vmd_msi_prepare, .set_desc = vmd_set_desc, }; static struct msi_domain_info vmd_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX, .ops = &vmd_msi_domain_ops, .chip = &vmd_msi_controller, }; static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable) { u16 reg; pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg); reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) : (reg | VMCONFIG_MSI_REMAP); pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg); } static int vmd_create_irq_domain(struct vmd_dev *vmd) { struct fwnode_handle *fn; fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); if (!fn) return -ENODEV; vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL); if (!vmd->irq_domain) { irq_domain_free_fwnode(fn); return -ENODEV; } return 0; } static void vmd_remove_irq_domain(struct vmd_dev *vmd) { /* * Some production BIOS won't enable remapping between soft reboots. * Ensure remapping is restored before unloading the driver. 
*/ if (!vmd->msix_count) vmd_set_msi_remapping(vmd, true); if (vmd->irq_domain) { struct fwnode_handle *fn = vmd->irq_domain->fwnode; irq_domain_remove(vmd->irq_domain); irq_domain_free_fwnode(fn); } } static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, unsigned int devfn, int reg, int len) { unsigned int busnr_ecam = bus->number - vmd->busn_start; u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg); if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR])) return NULL; return vmd->cfgbar + offset; } /* * CPU may deadlock if config space is not serialized on some versions of this * hardware, so all config space access is done under a spinlock. */ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, int len, u32 *value) { struct vmd_dev *vmd = vmd_from_bus(bus); void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); unsigned long flags; int ret = 0; if (!addr) return -EFAULT; spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: *value = readb(addr); break; case 2: *value = readw(addr); break; case 4: *value = readl(addr); break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } /* * VMD h/w converts non-posted config writes to posted memory writes. The * read-back in this function forces the completion so it returns only after * the config space was written, as expected. */ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, int len, u32 value) { struct vmd_dev *vmd = vmd_from_bus(bus); void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); unsigned long flags; int ret = 0; if (!addr) return -EFAULT; spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: writeb(value, addr); readb(addr); break; case 2: writew(value, addr); readw(addr); break; case 4: writel(value, addr); readl(addr); break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } static struct pci_ops vmd_ops = { .read = vmd_pci_read, .write = vmd_pci_write, }; #ifdef CONFIG_ACPI static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev) { struct pci_host_bridge *bridge; u32 busnr, addr; if (pci_dev->bus->ops != &vmd_ops) return NULL; bridge = pci_find_host_bridge(pci_dev->bus); busnr = pci_dev->bus->number - bridge->bus->number; /* * The address computation below is only applicable to relative bus * numbers below 32. */ if (busnr > 31) return NULL; addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU; dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n", addr); return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr, false); } static bool hook_installed; static void vmd_acpi_begin(void) { if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion)) return; hook_installed = true; } static void vmd_acpi_end(void) { if (!hook_installed) return; pci_acpi_clear_companion_lookup_hook(); hook_installed = false; } #else static inline void vmd_acpi_begin(void) { } static inline void vmd_acpi_end(void) { } #endif /* CONFIG_ACPI */ static void vmd_domain_reset(struct vmd_dev *vmd) { u16 bus, max_buses = resource_size(&vmd->resources[0]); u8 dev, functions, fn, hdr_type; char __iomem *base; for (bus = 0; bus < max_buses; bus++) { for (dev = 0; dev < 32; dev++) { base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus, PCI_DEVFN(dev, 0), 0); hdr_type = readb(base + PCI_HEADER_TYPE) & PCI_HEADER_TYPE_MASK; functions = (hdr_type & 0x80) ? 
8 : 1; for (fn = 0; fn < functions; fn++) { base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus, PCI_DEVFN(dev, fn), 0); hdr_type = readb(base + PCI_HEADER_TYPE) & PCI_HEADER_TYPE_MASK; if (hdr_type != PCI_HEADER_TYPE_BRIDGE || (readw(base + PCI_CLASS_DEVICE) != PCI_CLASS_BRIDGE_PCI)) continue; /* * Temporarily disable the I/O range before updating * PCI_IO_BASE. */ writel(0x0000ffff, base + PCI_IO_BASE_UPPER16); /* Update lower 16 bits of I/O base/limit */ writew(0x00f0, base + PCI_IO_BASE); /* Update upper 16 bits of I/O base/limit */ writel(0, base + PCI_IO_BASE_UPPER16); /* MMIO Base/Limit */ writel(0x0000fff0, base + PCI_MEMORY_BASE); /* Prefetchable MMIO Base/Limit */ writel(0, base + PCI_PREF_LIMIT_UPPER32); writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE); writel(0xffffffff, base + PCI_PREF_BASE_UPPER32); } } } } static void vmd_attach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; } static void vmd_detach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = NULL; vmd->dev->resource[VMD_MEMBAR2].child = NULL; } /* * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower * 16 bits are the PCI Segment Group (domain) number. Other bits are * currently reserved. */ static int vmd_find_free_domain(void) { int domain = 0xffff; struct pci_bus *bus = NULL; while ((bus = pci_find_next_bus(bus)) != NULL) domain = max_t(int, domain, pci_domain_nr(bus)); return domain + 1; } static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint, resource_size_t *offset1, resource_size_t *offset2) { struct pci_dev *dev = vmd->dev; u64 phys1, phys2; if (native_hint) { u32 vmlock; int ret; ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock); if (ret || PCI_POSSIBLE_ERROR(vmlock)) return -ENODEV; if (MB2_SHADOW_EN(vmlock)) { void __iomem *membar2; membar2 = pci_iomap(dev, VMD_MEMBAR2, 0); if (!membar2) return -ENOMEM; phys1 = readq(membar2 + MB2_SHADOW_OFFSET); phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8); pci_iounmap(dev, membar2); } else return 0; } else { /* Hypervisor-Emulated Vendor-Specific Capability */ int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); u32 reg, regu; pci_read_config_dword(dev, pos + 4, &reg); /* "SHDW" */ if (pos && reg == 0x53484457) { pci_read_config_dword(dev, pos + 8, &reg); pci_read_config_dword(dev, pos + 12, &regu); phys1 = (u64) regu << 32 | reg; pci_read_config_dword(dev, pos + 16, &reg); pci_read_config_dword(dev, pos + 20, &regu); phys2 = (u64) regu << 32 | reg; } else return 0; } *offset1 = dev->resource[VMD_MEMBAR1].start - (phys1 & PCI_BASE_ADDRESS_MEM_MASK); *offset2 = dev->resource[VMD_MEMBAR2].start - (phys2 & PCI_BASE_ADDRESS_MEM_MASK); return 0; } static int vmd_get_bus_number_start(struct vmd_dev *vmd) { struct pci_dev *dev = vmd->dev; u16 reg; pci_read_config_word(dev, PCI_REG_VMCAP, &reg); if (BUS_RESTRICT_CAP(reg)) { pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg); switch (BUS_RESTRICT_CFG(reg)) { case 0: vmd->busn_start = 0; break; case 1: vmd->busn_start = 128; break; case 2: vmd->busn_start = 224; break; default: pci_err(dev, "Unknown Bus Offset Setting (%d)\n", BUS_RESTRICT_CFG(reg)); return -ENODEV; } } return 0; } static irqreturn_t vmd_irq(int irq, void *data) { struct vmd_irq_list *irqs = data; struct vmd_irq *vmdirq; int idx; idx = srcu_read_lock(&irqs->srcu); list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) 
generic_handle_irq(vmdirq->virq); srcu_read_unlock(&irqs->srcu, idx); return IRQ_HANDLED; } static int vmd_alloc_irqs(struct vmd_dev *vmd) { struct pci_dev *dev = vmd->dev; int i, err; vmd->msix_count = pci_msix_vec_count(dev); if (vmd->msix_count < 0) return -ENODEV; vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1, vmd->msix_count, PCI_IRQ_MSIX); if (vmd->msix_count < 0) return vmd->msix_count; vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), GFP_KERNEL); if (!vmd->irqs) return -ENOMEM; for (i = 0; i < vmd->msix_count; i++) { err = init_srcu_struct(&vmd->irqs[i].srcu); if (err) return err; INIT_LIST_HEAD(&vmd->irqs[i].irq_list); vmd->irqs[i].virq = pci_irq_vector(dev, i); err = devm_request_irq(&dev->dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) return err; } return 0; } /* * Since VMD is an aperture to regular PCIe root ports, only allow it to * control features that the OS is allowed to control on the physical PCI bus. */ static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge, struct pci_host_bridge *vmd_bridge) { vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug; vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug; vmd_bridge->native_aer = root_bridge->native_aer; vmd_bridge->native_pme = root_bridge->native_pme; vmd_bridge->native_ltr = root_bridge->native_ltr; vmd_bridge->native_dpc = root_bridge->native_dpc; } /* * Enable ASPM and LTR settings on devices that aren't configured by BIOS. */ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata) { unsigned long features = *(unsigned long *)userdata; u16 ltr = VMD_BIOS_PM_QUIRK_LTR; u32 ltr_reg; int pos; if (!(features & VMD_FEAT_BIOS_PM_QUIRK)) return 0; pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL); pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR); if (!pos) return 0; /* * Skip if the max snoop LTR is non-zero, indicating BIOS has set it * so the LTR quirk is not needed. */ pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg); if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK))) return 0; /* * Set the default values to the maximum required by the platform to * allow the deepest power management savings. Write as a DWORD where * the lower word is the max snoop latency and the upper word is the * max non-snoop latency. */ ltr_reg = (ltr << 16) | ltr; pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg); pci_info(pdev, "VMD: Default LTR value set by driver\n"); return 0; } static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) { struct pci_sysdata *sd = &vmd->sysdata; struct resource *res; u32 upper_bits; unsigned long flags; LIST_HEAD(resources); resource_size_t offset[2] = {0}; resource_size_t membar2_offset = 0x2000; struct pci_bus *child; struct pci_dev *dev; int ret; /* * Shadow registers may exist in certain VMD device ids which allow * guests to correctly assign host physical addresses to the root ports * and child devices. These registers will either return the host value * or 0, depending on an enable bit in the VMD device. 
*/ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]); if (ret) return ret; } else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) { ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]); if (ret) return ret; } /* * Certain VMD devices may have a root port configuration option which * limits the bus range to between 0-127, 128-255, or 224-255 */ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { ret = vmd_get_bus_number_start(vmd); if (ret) return ret; } res = &vmd->dev->resource[VMD_CFGBAR]; vmd->resources[0] = (struct resource) { .name = "VMD CFGBAR", .start = vmd->busn_start, .end = vmd->busn_start + (resource_size(res) >> 20) - 1, .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, }; /* * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can * put 32-bit resources in the window. * * There's no hardware reason why a 64-bit window *couldn't* * contain a 32-bit resource, but pbus_size_mem() computes the * bridge window size assuming a 64-bit window will contain no * 32-bit resources. __pci_assign_resource() enforces that * artificial restriction to make sure everything will fit. * * The only way we could use a 64-bit non-prefetchable MEMBAR is * if its address is <4GB so that we can convert it to a 32-bit * resource. To be visible to the host OS, all VMD endpoints must * be initially configured by platform BIOS, which includes setting * up these resources. We can assume the device is configured * according to the platform needs. */ res = &vmd->dev->resource[VMD_MEMBAR1]; upper_bits = upper_32_bits(res->end); flags = res->flags & ~IORESOURCE_SIZEALIGN; if (!upper_bits) flags &= ~IORESOURCE_MEM_64; vmd->resources[1] = (struct resource) { .name = "VMD MEMBAR1", .start = res->start, .end = res->end, .flags = flags, .parent = res, }; res = &vmd->dev->resource[VMD_MEMBAR2]; upper_bits = upper_32_bits(res->end); flags = res->flags & ~IORESOURCE_SIZEALIGN; if (!upper_bits) flags &= ~IORESOURCE_MEM_64; vmd->resources[2] = (struct resource) { .name = "VMD MEMBAR2", .start = res->start + membar2_offset, .end = res->end, .flags = flags, .parent = res, }; sd->vmd_dev = vmd->dev; sd->domain = vmd_find_free_domain(); if (sd->domain < 0) return sd->domain; sd->node = pcibus_to_node(vmd->dev->bus); /* * Currently MSI remapping must be enabled in guest passthrough mode * due to some missing interrupt remapping plumbing. This is probably * acceptable because the guest is usually CPU-limited and MSI * remapping doesn't become a performance bottleneck. */ if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) || offset[0] || offset[1]) { ret = vmd_alloc_irqs(vmd); if (ret) return ret; vmd_set_msi_remapping(vmd, true); ret = vmd_create_irq_domain(vmd); if (ret) return ret; /* * Override the IRQ domain bus token so the domain can be * distinguished from a regular PCI/MSI domain. 
*/ irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI); } else { vmd_set_msi_remapping(vmd, false); } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, &vmd_ops, sd, &resources); if (!vmd->bus) { pci_free_resource_list(&resources); vmd_remove_irq_domain(vmd); return -ENODEV; } vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus), to_pci_host_bridge(vmd->bus->bridge)); vmd_attach_resources(vmd); if (vmd->irq_domain) dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); else dev_set_msi_domain(&vmd->bus->dev, dev_get_msi_domain(&vmd->dev->dev)); vmd_acpi_begin(); pci_scan_child_bus(vmd->bus); vmd_domain_reset(vmd); /* When Intel VMD is enabled, the OS does not discover the Root Ports * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies * a reset to the parent of the PCI device supplied as argument. This * is why we pass a child device, so the reset can be triggered at * the Intel bridge level and propagated to all the children in the * hierarchy. */ list_for_each_entry(child, &vmd->bus->children, node) { if (!list_empty(&child->devices)) { dev = list_first_entry(&child->devices, struct pci_dev, bus_list); ret = pci_reset_bus(dev); if (ret) pci_warn(dev, "can't reset device: %d\n", ret); break; } } pci_assign_unassigned_bus_resources(vmd->bus); pci_walk_bus(vmd->bus, vmd_pm_enable_quirk, &features); /* * VMD root buses are virtual and don't return true on pci_is_pcie() * and will fail pcie_bus_configure_settings() early. It can instead be * run on each of the real root ports. */ list_for_each_entry(child, &vmd->bus->children, node) pcie_bus_configure_settings(child); pci_bus_add_devices(vmd->bus); vmd_acpi_end(); WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, "domain"), "Can't create symlink to domain\n"); return 0; } static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned long features = (unsigned long) id->driver_data; struct vmd_dev *vmd; int err; if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) return -ENOMEM; vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); if (!vmd) return -ENOMEM; vmd->dev = dev; vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL); if (vmd->instance < 0) return vmd->instance; vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d", vmd->instance); if (!vmd->name) { err = -ENOMEM; goto out_release_instance; } err = pcim_enable_device(dev); if (err < 0) goto out_release_instance; vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); if (!vmd->cfgbar) { err = -ENOMEM; goto out_release_instance; } pci_set_master(dev); if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) { err = -ENODEV; goto out_release_instance; } if (features & VMD_FEAT_OFFSET_FIRST_VECTOR) vmd->first_vec = 1; spin_lock_init(&vmd->cfg_lock); pci_set_drvdata(dev, vmd); err = vmd_enable_domain(vmd, features); if (err) goto out_release_instance; dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", vmd->sysdata.domain); return 0; out_release_instance: ida_simple_remove(&vmd_instance_ida, vmd->instance); return err; } static void vmd_cleanup_srcu(struct vmd_dev *vmd) { int i; for (i = 0; i < vmd->msix_count; i++) cleanup_srcu_struct(&vmd->irqs[i].srcu); } static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = 
pci_get_drvdata(dev); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); vmd_cleanup_srcu(vmd); vmd_detach_resources(vmd); vmd_remove_irq_domain(vmd); ida_simple_remove(&vmd_instance_ida, vmd->instance); } static void vmd_shutdown(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); vmd_remove_irq_domain(vmd); } #ifdef CONFIG_PM_SLEEP static int vmd_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = pci_get_drvdata(pdev); int i; for (i = 0; i < vmd->msix_count; i++) devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]); return 0; } static int vmd_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = pci_get_drvdata(pdev); int err, i; if (vmd->irq_domain) vmd_set_msi_remapping(vmd, true); else vmd_set_msi_remapping(vmd, false); for (i = 0; i < vmd->msix_count; i++) { err = devm_request_irq(dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) return err; } return 0; } #endif static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); static const struct pci_device_id vmd_ids[] = { {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_201D), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS | VMD_FEAT_CAN_BYPASS_MSI_REMAP,}, {PCI_VDEVICE(INTEL, 0x467f), .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, 0x4c3d), .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, 0xa77f), .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, 0x7d0b), .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, 0xad0b), .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), .driver_data = VMD_FEATS_CLIENT,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); static struct pci_driver vmd_drv = { .name = "vmd", .id_table = vmd_ids, .probe = vmd_probe, .remove = vmd_remove, .shutdown = vmd_shutdown, .driver = { .pm = &vmd_dev_pm_ops, }, }; module_pci_driver(vmd_drv); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.6");
linux-master
drivers/pci/controller/vmd.c
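As a worked example of the address computation in vmd_acpi_find_companion() above (a standalone sketch, not part of the driver), the code below builds the ACPI lookup address from the bus number relative to the VMD root bus and the child's devfn; as in the driver, relative bus numbers above 31 are rejected.

#include <stdio.h>
#include <stdint.h>

/* Mirror the address math in vmd_acpi_find_companion():
 * relative bus in bits [31:24], devfn in bits [23:16], low bits fixed. */
static uint32_t vmd_companion_addr(unsigned int rel_bus, unsigned int devfn)
{
	if (rel_bus > 31)
		return 0; /* the driver gives up and returns NULL here */

	return (rel_bus << 24) | ((uint32_t)devfn << 16) | 0x8000FFFFu;
}

int main(void)
{
	printf("bus +0, devfn 0x00 -> %#010x\n", vmd_companion_addr(0, 0x00));
	printf("bus +1, devfn 0x08 -> %#010x\n", vmd_companion_addr(1, 0x08));
	return 0;
}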
// SPDX-License-Identifier: GPL-2.0+ /* * Rockchip AXI PCIe host controller driver * * Copyright (c) 2016 Rockchip, Inc. * * Author: Shawn Lin <[email protected]> * Wenrui Li <[email protected]> * * Bits taken from Synopsys DesignWare Host controller driver and * ARM PCI Host generic driver. */ #include <linux/bitrev.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/regmap.h> #include "../pci.h" #include "pcie-rockchip.h" static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) { u32 status; status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); } static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) { u32 status; status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); } static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) { u32 val; /* Update Tx credit maximum update interval */ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); } static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, struct pci_bus *bus, int dev) { /* * Access only one slot on each root port. * Do not read more than one device on the bus directly attached * to RC's downstream side. */ if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent)) return dev == 0; return 1; } static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) { u32 val; u8 map; if (rockchip->legacy_phy) return GENMASK(MAX_LANE_NUM - 1, 0); val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); map = val & PCIE_CORE_LANE_MAP_MASK; /* The link may be using a reverse-indexed mapping. */ if (val & PCIE_CORE_LANE_MAP_REVERSE) map = bitrev8(map) >> 4; return map; } static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 *val) { void __iomem *addr; addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } if (size == 4) { *val = readl(addr); } else if (size == 2) { *val = readw(addr); } else if (size == 1) { *val = readb(addr); } else { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 val) { u32 mask, tmp, offset; void __iomem *addr; offset = where & ~0x3; addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; if (size == 4) { writel(val, addr); return PCIBIOS_SUCCESSFUL; } mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); /* * N.B. This read/modify/write isn't safe in general because it can * corrupt RW1C bits in adjacent registers. But the hardware * doesn't support smaller writes. 
*/ tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { void __iomem *addr; addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE1_CFG); if (size == 4) { *val = readl(addr); } else if (size == 2) { *val = readw(addr); } else if (size == 1) { *val = readb(addr); } else { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { void __iomem *addr; addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); if (!IS_ALIGNED((uintptr_t)addr, size)) return PCIBIOS_BAD_REGISTER_NUMBER; if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE1_CFG); if (size == 4) writel(val, addr); else if (size == 2) writew(val, addr); else if (size == 1) writeb(val, addr); else return PCIBIOS_BAD_REGISTER_NUMBER; return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct rockchip_pcie *rockchip = bus->sysdata; if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; if (pci_is_root_bus(bus)) return rockchip_pcie_rd_own_conf(rockchip, where, size, val); return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val); } static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct rockchip_pcie *rockchip = bus->sysdata; if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; if (pci_is_root_bus(bus)) return rockchip_pcie_wr_own_conf(rockchip, where, size, val); return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val); } static struct pci_ops rockchip_pcie_ops = { .read = rockchip_pcie_rd_conf, .write = rockchip_pcie_wr_conf, }; static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) { int curr; u32 status, scale, power; if (IS_ERR(rockchip->vpcie3v3)) return; /* * Set RC's captured slot power limit and scale if * vpcie3v3 available. The default values are both zero * which means the software should set these two according * to the actual power supply. 
*/ curr = regulator_get_current_limit(rockchip->vpcie3v3); if (curr <= 0) return; scale = 3; /* 0.001x */ curr = curr / 1000; /* convert to mA */ power = (curr * 3300) / 1000; /* milliwatt */ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { if (!scale) { dev_warn(rockchip->dev, "invalid power supply\n"); return; } scale--; power = power / 10; } status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); } /** * rockchip_pcie_host_init_port - Initialize hardware * @rockchip: PCIe port information */ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err, i = MAX_LANE_NUM; u32 status; gpiod_set_value_cansleep(rockchip->ep_gpio, 0); err = rockchip_pcie_init_port(rockchip); if (err) return err; /* Fix the transmitted FTS count desired to exit from L0s. */ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); rockchip_pcie_set_power_limit(rockchip); /* Set RC's clock architecture as common clock */ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= PCI_EXP_LNKSTA_SLC << 16; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); /* Set RC's RCB to 128 */ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= PCI_EXP_LNKCTL_RCB; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); /* Enable Gen1 training */ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, PCIE_CLIENT_CONFIG); gpiod_set_value_cansleep(rockchip->ep_gpio, 1); /* 500ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, status, PCIE_LINK_UP(status), 20, 500 * USEC_PER_MSEC); if (err) { dev_err(dev, "PCIe link training gen1 timeout!\n"); goto err_power_off_phy; } if (rockchip->link_gen == 2) { /* * Enable retrain for gen2. This should be configured only after * gen1 finished. 
*/ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= PCI_EXP_LNKCTL_RL; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, status, PCIE_LINK_IS_GEN2(status), 20, 500 * USEC_PER_MSEC); if (err) dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); } /* Check the final link width from negotiated lane counter from MGMT */ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> PCIE_CORE_PL_CONF_LANE_SHIFT); dev_dbg(dev, "current link width is x%d\n", status); /* Power off unused lane(s) */ rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); for (i = 0; i < MAX_LANE_NUM; i++) { if (!(rockchip->lanes_map & BIT(i))) { dev_dbg(dev, "idling lane %d\n", i); phy_power_off(rockchip->phys[i]); } } rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, PCIE_RC_CONFIG_RID_CCR); /* Clear THP cap's next cap pointer to remove L1 substate cap */ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); /* Clear L0s from RC's link cap */ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); } status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; status |= PCIE_RC_CONFIG_DCSR_MPS_256; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); return 0; err_power_off_phy: while (i--) phy_power_off(rockchip->phys[i]); i = MAX_LANE_NUM; while (i--) phy_exit(rockchip->phys[i]); return err; } static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) { struct rockchip_pcie *rockchip = arg; struct device *dev = rockchip->dev; u32 reg; u32 sub_reg; reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); if (reg & PCIE_CLIENT_INT_LOCAL) { dev_dbg(dev, "local interrupt received\n"); sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); if (sub_reg & PCIE_CORE_INT_PRFPE) dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); if (sub_reg & PCIE_CORE_INT_CRFPE) dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); if (sub_reg & PCIE_CORE_INT_RRPE) dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); if (sub_reg & PCIE_CORE_INT_PRFO) dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); if (sub_reg & PCIE_CORE_INT_CRFO) dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); if (sub_reg & PCIE_CORE_INT_RT) dev_dbg(dev, "replay timer timed out\n"); if (sub_reg & PCIE_CORE_INT_RTR) dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); if (sub_reg & PCIE_CORE_INT_PE) dev_dbg(dev, "phy error detected on receive side\n"); if (sub_reg & PCIE_CORE_INT_MTR) dev_dbg(dev, "malformed TLP received from the link\n"); if (sub_reg & PCIE_CORE_INT_UCR) dev_dbg(dev, "malformed TLP received from the link\n"); if (sub_reg & PCIE_CORE_INT_FCE) dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); if (sub_reg & PCIE_CORE_INT_CT) dev_dbg(dev, "a request timed out waiting for completion\n"); if (sub_reg & PCIE_CORE_INT_UTC) dev_dbg(dev, "unmapped TC error\n"); if 
(sub_reg & PCIE_CORE_INT_MMVC) dev_dbg(dev, "MSI mask register changes\n"); rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); } else if (reg & PCIE_CLIENT_INT_PHY) { dev_dbg(dev, "phy link changes\n"); rockchip_pcie_update_txcredit_mui(rockchip); rockchip_pcie_clr_bw_int(rockchip); } rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, PCIE_CLIENT_INT_STATUS); return IRQ_HANDLED; } static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) { struct rockchip_pcie *rockchip = arg; struct device *dev = rockchip->dev; u32 reg; reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); if (reg & PCIE_CLIENT_INT_LEGACY_DONE) dev_dbg(dev, "legacy done interrupt received\n"); if (reg & PCIE_CLIENT_INT_MSG) dev_dbg(dev, "message done interrupt received\n"); if (reg & PCIE_CLIENT_INT_HOT_RST) dev_dbg(dev, "hot reset interrupt received\n"); if (reg & PCIE_CLIENT_INT_DPA) dev_dbg(dev, "dpa interrupt received\n"); if (reg & PCIE_CLIENT_INT_FATAL_ERR) dev_dbg(dev, "fatal error interrupt received\n"); if (reg & PCIE_CLIENT_INT_NFATAL_ERR) dev_dbg(dev, "no fatal error interrupt received\n"); if (reg & PCIE_CLIENT_INT_CORR_ERR) dev_dbg(dev, "correctable error interrupt received\n"); if (reg & PCIE_CLIENT_INT_PHY) dev_dbg(dev, "phy interrupt received\n"); rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_NFATAL_ERR | PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_PHY), PCIE_CLIENT_INT_STATUS); return IRQ_HANDLED; } static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); struct device *dev = rockchip->dev; u32 reg; u32 hwirq; int ret; chained_irq_enter(chip, desc); reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; while (reg) { hwirq = ffs(reg) - 1; reg &= ~BIT(hwirq); ret = generic_handle_domain_irq(rockchip->irq_domain, hwirq); if (ret) dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); } chained_irq_exit(chip, desc); } static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) { int irq, err; struct device *dev = rockchip->dev; struct platform_device *pdev = to_platform_device(dev); irq = platform_get_irq_byname(pdev, "sys"); if (irq < 0) return irq; err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, IRQF_SHARED, "pcie-sys", rockchip); if (err) { dev_err(dev, "failed to request PCIe subsystem IRQ\n"); return err; } irq = platform_get_irq_byname(pdev, "legacy"); if (irq < 0) return irq; irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, rockchip); irq = platform_get_irq_byname(pdev, "client"); if (irq < 0) return irq; err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, IRQF_SHARED, "pcie-client", rockchip); if (err) { dev_err(dev, "failed to request PCIe client IRQ\n"); return err; } return 0; } /** * rockchip_pcie_parse_host_dt - Parse Device Tree * @rockchip: PCIe port information * * Return: '0' on success and error value on failure */ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err; err = rockchip_pcie_parse_dt(rockchip); if (err) return err; rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); if (IS_ERR(rockchip->vpcie12v)) { if (PTR_ERR(rockchip->vpcie12v) != -ENODEV) return PTR_ERR(rockchip->vpcie12v); 
dev_info(dev, "no vpcie12v regulator found\n"); } rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); if (IS_ERR(rockchip->vpcie3v3)) { if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) return PTR_ERR(rockchip->vpcie3v3); dev_info(dev, "no vpcie3v3 regulator found\n"); } rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8"); if (IS_ERR(rockchip->vpcie1v8)) return PTR_ERR(rockchip->vpcie1v8); rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9"); if (IS_ERR(rockchip->vpcie0v9)) return PTR_ERR(rockchip->vpcie0v9); return 0; } static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err; if (!IS_ERR(rockchip->vpcie12v)) { err = regulator_enable(rockchip->vpcie12v); if (err) { dev_err(dev, "fail to enable vpcie12v regulator\n"); goto err_out; } } if (!IS_ERR(rockchip->vpcie3v3)) { err = regulator_enable(rockchip->vpcie3v3); if (err) { dev_err(dev, "fail to enable vpcie3v3 regulator\n"); goto err_disable_12v; } } err = regulator_enable(rockchip->vpcie1v8); if (err) { dev_err(dev, "fail to enable vpcie1v8 regulator\n"); goto err_disable_3v3; } err = regulator_enable(rockchip->vpcie0v9); if (err) { dev_err(dev, "fail to enable vpcie0v9 regulator\n"); goto err_disable_1v8; } return 0; err_disable_1v8: regulator_disable(rockchip->vpcie1v8); err_disable_3v3: if (!IS_ERR(rockchip->vpcie3v3)) regulator_disable(rockchip->vpcie3v3); err_disable_12v: if (!IS_ERR(rockchip->vpcie12v)) regulator_disable(rockchip->vpcie12v); err_out: return err; } static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) { rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), PCIE_CORE_INT_MASK); rockchip_pcie_enable_bw_int(rockchip); } static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = rockchip_pcie_intx_map, }; static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; struct device_node *intc = of_get_next_child(dev->of_node, NULL); if (!intc) { dev_err(dev, "missing child interrupt-controller node\n"); return -EINVAL; } rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, &intx_domain_ops, rockchip); of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); return -EINVAL; } return 0; } static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, int region_no, int type, u8 num_pass_bits, u32 lower_addr, u32 upper_addr) { u32 ob_addr_0; u32 ob_addr_1; u32 ob_desc_0; u32 aw_offset; if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) return -EINVAL; if (num_pass_bits + 1 < 8) return -EINVAL; if (num_pass_bits > 63) return -EINVAL; if (region_no == 0) { if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) return -EINVAL; } if (region_no != 0) { if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) return -EINVAL; } aw_offset = (region_no << OB_REG_SIZE_SHIFT); ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; ob_addr_1 = upper_addr; ob_desc_0 = (1 << 23 | type); rockchip_pcie_write(rockchip, ob_addr_0, PCIE_CORE_OB_REGION_ADDR0 + aw_offset); rockchip_pcie_write(rockchip, ob_addr_1, PCIE_CORE_OB_REGION_ADDR1 + aw_offset); 
rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0 + aw_offset); rockchip_pcie_write(rockchip, 0, PCIE_CORE_OB_REGION_DESC1 + aw_offset); return 0; } static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, int region_no, u8 num_pass_bits, u32 lower_addr, u32 upper_addr) { u32 ib_addr_0; u32 ib_addr_1; u32 aw_offset; if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) return -EINVAL; if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) return -EINVAL; if (num_pass_bits > 63) return -EINVAL; aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; ib_addr_1 = upper_addr; rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); return 0; } static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip); struct resource_entry *entry; u64 pci_addr, size; int offset; int err; int reg_no; rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM); if (!entry) return -ENODEV; size = resource_size(entry->res); pci_addr = entry->res->start - entry->offset; rockchip->msg_bus_addr = pci_addr; for (reg_no = 0; reg_no < (size >> 20); reg_no++) { err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, AXI_WRAPPER_MEM_WRITE, 20 - 1, pci_addr + (reg_no << 20), 0); if (err) { dev_err(dev, "program RC mem outbound ATU failed\n"); return err; } } err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); if (err) { dev_err(dev, "program RC mem inbound ATU failed\n"); return err; } entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO); if (!entry) return -ENODEV; /* store the register number offset to program RC io outbound ATU */ offset = size >> 20; size = resource_size(entry->res); pci_addr = entry->res->start - entry->offset; for (reg_no = 0; reg_no < (size >> 20); reg_no++) { err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, AXI_WRAPPER_IO_WRITE, 20 - 1, pci_addr + (reg_no << 20), 0); if (err) { dev_err(dev, "program RC io outbound ATU failed\n"); return err; } } /* assign message regions */ rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, AXI_WRAPPER_NOR_MSG, 20 - 1, 0, 0); rockchip->msg_bus_addr += ((reg_no + offset) << 20); return err; } static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) { u32 value; int err; /* send PME_TURN_OFF message */ writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF); /* read LTSSM and wait for falling into L2 link state */ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, value, PCIE_LINK_IS_L2(value), 20, jiffies_to_usecs(5 * HZ)); if (err) { dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n"); return err; } return 0; } static int rockchip_pcie_suspend_noirq(struct device *dev) { struct rockchip_pcie *rockchip = dev_get_drvdata(dev); int ret; /* disable core and cli int since we don't need to ack PME_ACK */ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) | PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK); rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK); ret = rockchip_pcie_wait_l2(rockchip); if (ret) { rockchip_pcie_enable_interrupts(rockchip); return ret; } rockchip_pcie_deinit_phys(rockchip); rockchip_pcie_disable_clocks(rockchip); 
regulator_disable(rockchip->vpcie0v9); return ret; } static int rockchip_pcie_resume_noirq(struct device *dev) { struct rockchip_pcie *rockchip = dev_get_drvdata(dev); int err; err = regulator_enable(rockchip->vpcie0v9); if (err) { dev_err(dev, "fail to enable vpcie0v9 regulator\n"); return err; } err = rockchip_pcie_enable_clocks(rockchip); if (err) goto err_disable_0v9; err = rockchip_pcie_host_init_port(rockchip); if (err) goto err_pcie_resume; err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_err_deinit_port; /* Need this to enter L1 again */ rockchip_pcie_update_txcredit_mui(rockchip); rockchip_pcie_enable_interrupts(rockchip); return 0; err_err_deinit_port: rockchip_pcie_deinit_phys(rockchip); err_pcie_resume: rockchip_pcie_disable_clocks(rockchip); err_disable_0v9: regulator_disable(rockchip->vpcie0v9); return err; } static int rockchip_pcie_probe(struct platform_device *pdev) { struct rockchip_pcie *rockchip; struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; int err; if (!dev->of_node) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); if (!bridge) return -ENOMEM; rockchip = pci_host_bridge_priv(bridge); platform_set_drvdata(pdev, rockchip); rockchip->dev = dev; rockchip->is_rc = true; err = rockchip_pcie_parse_host_dt(rockchip); if (err) return err; err = rockchip_pcie_enable_clocks(rockchip); if (err) return err; err = rockchip_pcie_set_vpcie(rockchip); if (err) { dev_err(dev, "failed to set vpcie regulator\n"); goto err_set_vpcie; } err = rockchip_pcie_host_init_port(rockchip); if (err) goto err_vpcie; err = rockchip_pcie_init_irq_domain(rockchip); if (err < 0) goto err_deinit_port; err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_remove_irq_domain; rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); if (!rockchip->msg_region) { err = -ENOMEM; goto err_remove_irq_domain; } bridge->sysdata = rockchip; bridge->ops = &rockchip_pcie_ops; err = rockchip_pcie_setup_irq(rockchip); if (err) goto err_remove_irq_domain; rockchip_pcie_enable_interrupts(rockchip); err = pci_host_probe(bridge); if (err < 0) goto err_remove_irq_domain; return 0; err_remove_irq_domain: irq_domain_remove(rockchip->irq_domain); err_deinit_port: rockchip_pcie_deinit_phys(rockchip); err_vpcie: if (!IS_ERR(rockchip->vpcie12v)) regulator_disable(rockchip->vpcie12v); if (!IS_ERR(rockchip->vpcie3v3)) regulator_disable(rockchip->vpcie3v3); regulator_disable(rockchip->vpcie1v8); regulator_disable(rockchip->vpcie0v9); err_set_vpcie: rockchip_pcie_disable_clocks(rockchip); return err; } static void rockchip_pcie_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie *rockchip = dev_get_drvdata(dev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); irq_domain_remove(rockchip->irq_domain); rockchip_pcie_deinit_phys(rockchip); rockchip_pcie_disable_clocks(rockchip); if (!IS_ERR(rockchip->vpcie12v)) regulator_disable(rockchip->vpcie12v); if (!IS_ERR(rockchip->vpcie3v3)) regulator_disable(rockchip->vpcie3v3); regulator_disable(rockchip->vpcie1v8); regulator_disable(rockchip->vpcie0v9); } static const struct dev_pm_ops rockchip_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, rockchip_pcie_resume_noirq) }; static const struct of_device_id rockchip_pcie_of_match[] = { { .compatible = "rockchip,rk3399-pcie", }, {} }; MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); static struct platform_driver 
rockchip_pcie_driver = {
	.driver = {
		.name = "rockchip-pcie",
		.of_match_table = rockchip_pcie_of_match,
		.pm = &rockchip_pcie_pm_ops,
	},
	.probe = rockchip_pcie_probe,
	.remove_new = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Rockchip Inc");
MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-rockchip-host.c
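Note: rockchip_pcie_set_power_limit() in the file above derives the Captured Slot Power Limit value/scale pair from the vpcie3v3 regulator's current limit. The standalone sketch below mirrors that arithmetic in userspace so the encoding is easy to verify; the CSPL_LIMIT constant and the example current figure are assumptions for illustration, not values taken from the driver's private header.

#include <stdio.h>

/*
 * Assumed register field limit for the Captured Slot Power Limit value;
 * the driver compares against PCIE_RC_CONFIG_DCR_CSPL_LIMIT from its
 * own header, whose exact value is not shown in this excerpt.
 */
#define CSPL_LIMIT 0xff

/*
 * Mirror of the driver's scaling loop: convert a 3.3 V supply current
 * limit (in microamps, as regulator_get_current_limit() returns it)
 * into a power value plus a scale encoding (3 = 0.001x ... 0 = 1x).
 * Returns 0 on success, -1 if the supply cannot be represented.
 */
static int encode_slot_power_limit(int curr_ua, unsigned int *value,
				   unsigned int *scale)
{
	unsigned int power;
	unsigned int s = 3;	/* start at 0.001x, i.e. count in milliwatts */

	if (curr_ua <= 0)
		return -1;

	power = ((unsigned int)curr_ua / 1000) * 3300 / 1000; /* mW at 3.3 V */

	while (power > CSPL_LIMIT) {
		if (!s)
			return -1;	/* "invalid power supply" in the driver */
		s--;
		power /= 10;
	}

	*value = power;
	*scale = s;
	return 0;
}

int main(void)
{
	unsigned int value, scale;

	/* e.g. a 3 A limit: 9900 mW -> value 99 at scale 1 (0.1x) with the assumed limit */
	if (!encode_slot_power_limit(3000000, &value, &scale))
		printf("CSPL value %u, scale %u\n", value, scale);
	return 0;
}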
// SPDX-License-Identifier: GPL-2.0 /* * Altera PCIe MSI support * * Author: Ley Foon Tan <[email protected]> * * Copyright Altera Corporation (C) 2013-2015. All rights reserved */ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/init.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #define MSI_STATUS 0x0 #define MSI_ERROR 0x4 #define MSI_INTMASK 0x8 #define MAX_MSI_VECTORS 32 struct altera_msi { DECLARE_BITMAP(used, MAX_MSI_VECTORS); struct mutex lock; /* protect "used" bitmap */ struct platform_device *pdev; struct irq_domain *msi_domain; struct irq_domain *inner_domain; void __iomem *csr_base; void __iomem *vector_base; phys_addr_t vector_phy; u32 num_of_vectors; int irq; }; static inline void msi_writel(struct altera_msi *msi, const u32 value, const u32 reg) { writel_relaxed(value, msi->csr_base + reg); } static inline u32 msi_readl(struct altera_msi *msi, const u32 reg) { return readl_relaxed(msi->csr_base + reg); } static void altera_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct altera_msi *msi; unsigned long status; u32 bit; int ret; chained_irq_enter(chip, desc); msi = irq_desc_get_handler_data(desc); while ((status = msi_readl(msi, MSI_STATUS)) != 0) { for_each_set_bit(bit, &status, msi->num_of_vectors) { /* Dummy read from vector to clear the interrupt */ readl_relaxed(msi->vector_base + (bit * sizeof(u32))); ret = generic_handle_domain_irq(msi->inner_domain, bit); if (ret) dev_err_ratelimited(&msi->pdev->dev, "unexpected MSI\n"); } } chained_irq_exit(chip, desc); } static struct irq_chip altera_msi_irq_chip = { .name = "Altera PCIe MSI", .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info altera_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX), .chip = &altera_msi_irq_chip, }; static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct altera_msi *msi = irq_data_get_irq_chip_data(data); phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32)); msg->address_lo = lower_32_bits(addr); msg->address_hi = upper_32_bits(addr); msg->data = data->hwirq; dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)data->hwirq, msg->address_hi, msg->address_lo); } static int altera_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static struct irq_chip altera_msi_bottom_irq_chip = { .name = "Altera MSI", .irq_compose_msi_msg = altera_compose_msi_msg, .irq_set_affinity = altera_msi_set_affinity, }; static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct altera_msi *msi = domain->host_data; unsigned long bit; u32 mask; WARN_ON(nr_irqs != 1); mutex_lock(&msi->lock); bit = find_first_zero_bit(msi->used, msi->num_of_vectors); if (bit >= msi->num_of_vectors) { mutex_unlock(&msi->lock); return -ENOSPC; } set_bit(bit, msi->used); mutex_unlock(&msi->lock); irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); mask = msi_readl(msi, MSI_INTMASK); mask |= 1 << bit; msi_writel(msi, mask, MSI_INTMASK); return 0; } static void altera_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct 
irq_data *d = irq_domain_get_irq_data(domain, virq); struct altera_msi *msi = irq_data_get_irq_chip_data(d); u32 mask; mutex_lock(&msi->lock); if (!test_bit(d->hwirq, msi->used)) { dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n", d->hwirq); } else { __clear_bit(d->hwirq, msi->used); mask = msi_readl(msi, MSI_INTMASK); mask &= ~(1 << d->hwirq); msi_writel(msi, mask, MSI_INTMASK); } mutex_unlock(&msi->lock); } static const struct irq_domain_ops msi_domain_ops = { .alloc = altera_irq_domain_alloc, .free = altera_irq_domain_free, }; static int altera_allocate_domains(struct altera_msi *msi) { struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, &msi_domain_ops, msi); if (!msi->inner_domain) { dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &altera_msi_domain_info, msi->inner_domain); if (!msi->msi_domain) { dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); irq_domain_remove(msi->inner_domain); return -ENOMEM; } return 0; } static void altera_free_domains(struct altera_msi *msi) { irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } static void altera_msi_remove(struct platform_device *pdev) { struct altera_msi *msi = platform_get_drvdata(pdev); msi_writel(msi, 0, MSI_INTMASK); irq_set_chained_handler_and_data(msi->irq, NULL, NULL); altera_free_domains(msi); platform_set_drvdata(pdev, NULL); } static int altera_msi_probe(struct platform_device *pdev) { struct altera_msi *msi; struct device_node *np = pdev->dev.of_node; struct resource *res; int ret; msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi), GFP_KERNEL); if (!msi) return -ENOMEM; mutex_init(&msi->lock); msi->pdev = pdev; msi->csr_base = devm_platform_ioremap_resource_byname(pdev, "csr"); if (IS_ERR(msi->csr_base)) { dev_err(&pdev->dev, "failed to map csr memory\n"); return PTR_ERR(msi->csr_base); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vector_slave"); msi->vector_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msi->vector_base)) return PTR_ERR(msi->vector_base); msi->vector_phy = res->start; if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) { dev_err(&pdev->dev, "failed to parse the number of vectors\n"); return -EINVAL; } ret = altera_allocate_domains(msi); if (ret) return ret; msi->irq = platform_get_irq(pdev, 0); if (msi->irq < 0) { ret = msi->irq; goto err; } irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi); platform_set_drvdata(pdev, msi); return 0; err: altera_msi_remove(pdev); return ret; } static const struct of_device_id altera_msi_of_match[] = { { .compatible = "altr,msi-1.0", NULL }, { }, }; static struct platform_driver altera_msi_driver = { .driver = { .name = "altera-msi", .of_match_table = altera_msi_of_match, }, .probe = altera_msi_probe, .remove_new = altera_msi_remove, }; static int __init altera_msi_init(void) { return platform_driver_register(&altera_msi_driver); } static void __exit altera_msi_exit(void) { platform_driver_unregister(&altera_msi_driver); } subsys_initcall(altera_msi_init); MODULE_DEVICE_TABLE(of, altera_msi_of_match); module_exit(altera_msi_exit); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-altera-msi.c
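Note: altera_irq_domain_alloc()/altera_irq_domain_free() above pair a "used" bitmap with the MSI_INTMASK register: allocating a vector claims the first free bit and unmasks it, freeing clears both. The userspace sketch below illustrates that pattern with a plain 32-bit word standing in for the kernel bitmap helpers and the hardware register; names and the simplified scan loop are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define MAX_MSI_VECTORS 32

static uint32_t used;		/* stands in for DECLARE_BITMAP(used, ...) */
static uint32_t intmask;	/* stands in for the MSI_INTMASK register */

/* Claim the lowest free vector and unmask it; -1 plays the role of -ENOSPC. */
static int msi_vector_alloc(void)
{
	int bit;

	for (bit = 0; bit < MAX_MSI_VECTORS; bit++) {
		if (!(used & (1u << bit))) {
			used |= 1u << bit;
			intmask |= 1u << bit;
			return bit;
		}
	}
	return -1;
}

/* Release a vector and mask it again in the controller. */
static void msi_vector_free(int bit)
{
	used &= ~(1u << bit);
	intmask &= ~(1u << bit);
}

int main(void)
{
	int a = msi_vector_alloc();
	int b = msi_vector_alloc();

	printf("allocated vectors %d and %d, intmask %#x\n", a, b, intmask);
	msi_vector_free(a);
	printf("after free, intmask %#x\n", intmask);
	return 0;
}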
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation */ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/pci.h> #include "pcie-iproc.h" #define IPROC_MSI_INTR_EN_SHIFT 11 #define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT) #define IPROC_MSI_INT_N_EVENT_SHIFT 1 #define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT) #define IPROC_MSI_EQ_EN_SHIFT 0 #define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT) #define IPROC_MSI_EQ_MASK 0x3f /* Max number of GIC interrupts */ #define NR_HW_IRQS 6 /* Number of entries in each event queue */ #define EQ_LEN 64 /* Size of each event queue memory region */ #define EQ_MEM_REGION_SIZE SZ_4K /* Size of each MSI address region */ #define MSI_MEM_REGION_SIZE SZ_4K enum iproc_msi_reg { IPROC_MSI_EQ_PAGE = 0, IPROC_MSI_EQ_PAGE_UPPER, IPROC_MSI_PAGE, IPROC_MSI_PAGE_UPPER, IPROC_MSI_CTRL, IPROC_MSI_EQ_HEAD, IPROC_MSI_EQ_TAIL, IPROC_MSI_INTS_EN, IPROC_MSI_REG_SIZE, }; struct iproc_msi; /** * struct iproc_msi_grp - iProc MSI group * * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI * event queue. * * @msi: pointer to iProc MSI data * @gic_irq: GIC interrupt * @eq: Event queue number */ struct iproc_msi_grp { struct iproc_msi *msi; int gic_irq; unsigned int eq; }; /** * struct iproc_msi - iProc event queue based MSI * * Only meant to be used on platforms without MSI support integrated into the * GIC. * * @pcie: pointer to iProc PCIe data * @reg_offsets: MSI register offsets * @grps: MSI groups * @nr_irqs: number of total interrupts connected to GIC * @nr_cpus: number of toal CPUs * @has_inten_reg: indicates the MSI interrupt enable register needs to be * set explicitly (required for some legacy platforms) * @bitmap: MSI vector bitmap * @bitmap_lock: lock to protect access to the MSI bitmap * @nr_msi_vecs: total number of MSI vectors * @inner_domain: inner IRQ domain * @msi_domain: MSI IRQ domain * @nr_eq_region: required number of 4K aligned memory region for MSI event * queues * @nr_msi_region: required number of 4K aligned address region for MSI posted * writes * @eq_cpu: pointer to allocated memory region for MSI event queues * @eq_dma: DMA address of MSI event queues * @msi_addr: MSI address */ struct iproc_msi { struct iproc_pcie *pcie; const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE]; struct iproc_msi_grp *grps; int nr_irqs; int nr_cpus; bool has_inten_reg; unsigned long *bitmap; struct mutex bitmap_lock; unsigned int nr_msi_vecs; struct irq_domain *inner_domain; struct irq_domain *msi_domain; unsigned int nr_eq_region; unsigned int nr_msi_region; void *eq_cpu; dma_addr_t eq_dma; phys_addr_t msi_addr; }; static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 }, { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 }, { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 }, { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 }, { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 }, { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 }, }; static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 }, { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 }, { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 }, { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c }, }; static inline u32 iproc_msi_read_reg(struct iproc_msi *msi, 
enum iproc_msi_reg reg, unsigned int eq) { struct iproc_pcie *pcie = msi->pcie; return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]); } static inline void iproc_msi_write_reg(struct iproc_msi *msi, enum iproc_msi_reg reg, int eq, u32 val) { struct iproc_pcie *pcie = msi->pcie; writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]); } static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq) { return (hwirq % msi->nr_irqs); } static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi, unsigned long hwirq) { if (msi->nr_msi_region > 1) return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE; else return hwirq_to_group(msi, hwirq) * sizeof(u32); } static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) { if (msi->nr_eq_region > 1) return eq * EQ_MEM_REGION_SIZE; else return eq * EQ_LEN * sizeof(u32); } static struct irq_chip iproc_msi_irq_chip = { .name = "iProc-MSI", }; static struct msi_domain_info iproc_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX, .chip = &iproc_msi_irq_chip, }; /* * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a * dedicated event queue. Each MSI group can support up to 64 MSI vectors. * * The number of MSI groups varies between different iProc SoCs. The total * number of CPU cores also varies. To support MSI IRQ affinity, we * distribute GIC interrupts across all available CPUs. MSI vector is moved * from one GIC interrupt to another to steer to the target CPU. * * Assuming: * - the number of MSI groups is M * - the number of CPU cores is N * - M is always a multiple of N * * Total number of raw MSI vectors = M * 64 * Total number of supported MSI vectors = (M * 64) / N */ static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq) { return (hwirq % msi->nr_cpus); } static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi, unsigned long hwirq) { return (hwirq - hwirq_to_cpu(msi, hwirq)); } static int iproc_msi_irq_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct iproc_msi *msi = irq_data_get_irq_chip_data(data); int target_cpu = cpumask_first(mask); int curr_cpu; int ret; curr_cpu = hwirq_to_cpu(msi, data->hwirq); if (curr_cpu == target_cpu) ret = IRQ_SET_MASK_OK_DONE; else { /* steer MSI to the target CPU */ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; ret = IRQ_SET_MASK_OK; } irq_data_update_effective_affinity(data, cpumask_of(target_cpu)); return ret; } static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct iproc_msi *msi = irq_data_get_irq_chip_data(data); dma_addr_t addr; addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq); msg->address_lo = lower_32_bits(addr); msg->address_hi = upper_32_bits(addr); msg->data = data->hwirq << 5; } static struct irq_chip iproc_msi_bottom_irq_chip = { .name = "MSI", .irq_set_affinity = iproc_msi_irq_set_affinity, .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg, }; static int iproc_msi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct iproc_msi *msi = domain->host_data; int hwirq, i; if (msi->nr_cpus > 1 && nr_irqs > 1) return -EINVAL; mutex_lock(&msi->bitmap_lock); /* * Allocate 'nr_irqs' multiplied by 'nr_cpus' number of MSI vectors * each time */ hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs, order_base_2(msi->nr_cpus * nr_irqs)); mutex_unlock(&msi->bitmap_lock); if 
(hwirq < 0) return -ENOSPC; for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, hwirq + i, &iproc_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); } return 0; } static void iproc_msi_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *data = irq_domain_get_irq_data(domain, virq); struct iproc_msi *msi = irq_data_get_irq_chip_data(data); unsigned int hwirq; mutex_lock(&msi->bitmap_lock); hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq); bitmap_release_region(msi->bitmap, hwirq, order_base_2(msi->nr_cpus * nr_irqs)); mutex_unlock(&msi->bitmap_lock); irq_domain_free_irqs_parent(domain, virq, nr_irqs); } static const struct irq_domain_ops msi_domain_ops = { .alloc = iproc_msi_irq_domain_alloc, .free = iproc_msi_irq_domain_free, }; static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) { u32 __iomem *msg; u32 hwirq; unsigned int offs; offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32); msg = (u32 __iomem *)(msi->eq_cpu + offs); hwirq = readl(msg); hwirq = (hwirq >> 5) + (hwirq & 0x1f); /* * Since we have multiple hwirq mapped to a single MSI vector, * now we need to derive the hwirq at CPU0. It can then be used to * mapped back to virq. */ return hwirq_to_canonical_hwirq(msi, hwirq); } static void iproc_msi_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct iproc_msi_grp *grp; struct iproc_msi *msi; u32 eq, head, tail, nr_events; unsigned long hwirq; chained_irq_enter(chip, desc); grp = irq_desc_get_handler_data(desc); msi = grp->msi; eq = grp->eq; /* * iProc MSI event queue is tracked by head and tail pointers. Head * pointer indicates the next entry (MSI data) to be consumed by SW in * the queue and needs to be updated by SW. iProc MSI core uses the * tail pointer as the next data insertion point. * * Entries between head and tail pointers contain valid MSI data. MSI * data is guaranteed to be in the event queue memory before the tail * pointer is updated by the iProc MSI core. */ head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, eq) & IPROC_MSI_EQ_MASK; do { tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL, eq) & IPROC_MSI_EQ_MASK; /* * Figure out total number of events (MSI data) to be * processed. */ nr_events = (tail < head) ? (EQ_LEN - (head - tail)) : (tail - head); if (!nr_events) break; /* process all outstanding events */ while (nr_events--) { hwirq = decode_msi_hwirq(msi, eq, head); generic_handle_domain_irq(msi->inner_domain, hwirq); head++; head %= EQ_LEN; } /* * Now all outstanding events have been processed. Update the * head pointer. */ iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head); /* * Now go read the tail pointer again to see if there are new * outstanding events that came in during the above window. 
*/ } while (true); chained_irq_exit(chip, desc); } static void iproc_msi_enable(struct iproc_msi *msi) { int i, eq; u32 val; /* Program memory region for each event queue */ for (i = 0; i < msi->nr_eq_region; i++) { dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE); iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i, lower_32_bits(addr)); iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i, upper_32_bits(addr)); } /* Program address region for MSI posted writes */ for (i = 0; i < msi->nr_msi_region; i++) { phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE); iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i, lower_32_bits(addr)); iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i, upper_32_bits(addr)); } for (eq = 0; eq < msi->nr_irqs; eq++) { /* Enable MSI event queue */ val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | IPROC_MSI_EQ_EN; iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); /* * Some legacy platforms require the MSI interrupt enable * register to be set explicitly. */ if (msi->has_inten_reg) { val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); val |= BIT(eq); iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); } } } static void iproc_msi_disable(struct iproc_msi *msi) { u32 eq, val; for (eq = 0; eq < msi->nr_irqs; eq++) { if (msi->has_inten_reg) { val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); val &= ~BIT(eq); iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); } val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq); val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | IPROC_MSI_EQ_EN); iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); } } static int iproc_msi_alloc_domains(struct device_node *node, struct iproc_msi *msi) { msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, &msi_domain_ops, msi); if (!msi->inner_domain) return -ENOMEM; msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &iproc_msi_domain_info, msi->inner_domain); if (!msi->msi_domain) { irq_domain_remove(msi->inner_domain); return -ENOMEM; } return 0; } static void iproc_msi_free_domains(struct iproc_msi *msi) { if (msi->msi_domain) irq_domain_remove(msi->msi_domain); if (msi->inner_domain) irq_domain_remove(msi->inner_domain); } static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu) { int i; for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { irq_set_chained_handler_and_data(msi->grps[i].gic_irq, NULL, NULL); } } static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu) { int i, ret; cpumask_var_t mask; struct iproc_pcie *pcie = msi->pcie; for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { irq_set_chained_handler_and_data(msi->grps[i].gic_irq, iproc_msi_handler, &msi->grps[i]); /* Dedicate GIC interrupt to each CPU core */ if (alloc_cpumask_var(&mask, GFP_KERNEL)) { cpumask_clear(mask); cpumask_set_cpu(cpu, mask); ret = irq_set_affinity(msi->grps[i].gic_irq, mask); if (ret) dev_err(pcie->dev, "failed to set affinity for IRQ%d\n", msi->grps[i].gic_irq); free_cpumask_var(mask); } else { dev_err(pcie->dev, "failed to alloc CPU mask\n"); ret = -EINVAL; } if (ret) { /* Free all configured/unconfigured IRQs */ iproc_msi_irq_free(msi, cpu); return ret; } } return 0; } int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) { struct iproc_msi *msi; int i, ret; unsigned int cpu; if (!of_device_is_compatible(node, "brcm,iproc-msi")) return -ENODEV; if (!of_property_read_bool(node, "msi-controller")) return -ENODEV; if (pcie->msi) return -EBUSY; msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL); if (!msi) return -ENOMEM; 
msi->pcie = pcie; pcie->msi = msi; msi->msi_addr = pcie->base_addr; mutex_init(&msi->bitmap_lock); msi->nr_cpus = num_possible_cpus(); if (msi->nr_cpus == 1) iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI; msi->nr_irqs = of_irq_count(node); if (!msi->nr_irqs) { dev_err(pcie->dev, "found no MSI GIC interrupt\n"); return -ENODEV; } if (msi->nr_irqs > NR_HW_IRQS) { dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n", msi->nr_irqs); msi->nr_irqs = NR_HW_IRQS; } if (msi->nr_irqs < msi->nr_cpus) { dev_err(pcie->dev, "not enough GIC interrupts for MSI affinity\n"); return -EINVAL; } if (msi->nr_irqs % msi->nr_cpus != 0) { msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus; dev_warn(pcie->dev, "Reducing number of interrupts to %d\n", msi->nr_irqs); } switch (pcie->type) { case IPROC_PCIE_PAXB_BCMA: case IPROC_PCIE_PAXB: msi->reg_offsets = iproc_msi_reg_paxb; msi->nr_eq_region = 1; msi->nr_msi_region = 1; break; case IPROC_PCIE_PAXC: msi->reg_offsets = iproc_msi_reg_paxc; msi->nr_eq_region = msi->nr_irqs; msi->nr_msi_region = msi->nr_irqs; break; default: dev_err(pcie->dev, "incompatible iProc PCIe interface\n"); return -EINVAL; } msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten"); msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs, GFP_KERNEL); if (!msi->bitmap) return -ENOMEM; msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps), GFP_KERNEL); if (!msi->grps) return -ENOMEM; for (i = 0; i < msi->nr_irqs; i++) { unsigned int irq = irq_of_parse_and_map(node, i); if (!irq) { dev_err(pcie->dev, "unable to parse/map interrupt\n"); ret = -ENODEV; goto free_irqs; } msi->grps[i].gic_irq = irq; msi->grps[i].msi = msi; msi->grps[i].eq = i; } /* Reserve memory for event queue and make sure memories are zeroed */ msi->eq_cpu = dma_alloc_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, &msi->eq_dma, GFP_KERNEL); if (!msi->eq_cpu) { ret = -ENOMEM; goto free_irqs; } ret = iproc_msi_alloc_domains(node, msi); if (ret) { dev_err(pcie->dev, "failed to create MSI domains\n"); goto free_eq_dma; } for_each_online_cpu(cpu) { ret = iproc_msi_irq_setup(msi, cpu); if (ret) goto free_msi_irq; } iproc_msi_enable(msi); return 0; free_msi_irq: for_each_online_cpu(cpu) iproc_msi_irq_free(msi, cpu); iproc_msi_free_domains(msi); free_eq_dma: dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, msi->eq_cpu, msi->eq_dma); free_irqs: for (i = 0; i < msi->nr_irqs; i++) { if (msi->grps[i].gic_irq) irq_dispose_mapping(msi->grps[i].gic_irq); } pcie->msi = NULL; return ret; } EXPORT_SYMBOL(iproc_msi_init); void iproc_msi_exit(struct iproc_pcie *pcie) { struct iproc_msi *msi = pcie->msi; unsigned int i, cpu; if (!msi) return; iproc_msi_disable(msi); for_each_online_cpu(cpu) iproc_msi_irq_free(msi, cpu); iproc_msi_free_domains(msi); dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, msi->eq_cpu, msi->eq_dma); for (i = 0; i < msi->nr_irqs; i++) { if (msi->grps[i].gic_irq) irq_dispose_mapping(msi->grps[i].gic_irq); } } EXPORT_SYMBOL(iproc_msi_exit);
linux-master
drivers/pci/controller/pcie-iproc-msi.c
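Note: the comment block above iproc_msi_irq_set_affinity() in the file above explains the vector bookkeeping: with M MSI groups (one per GIC interrupt, 64-entry event queue each) and N CPUs, there are M * 64 raw vectors but only (M * 64) / N usable ones, and steering a vector to another CPU means shifting it within its canonical group of N consecutive hwirqs. The sketch below reproduces that arithmetic outside the kernel; the example figures (4 groups, 2 CPUs) are illustrative assumptions.

#include <stdio.h>

#define NR_GROUPS	4	/* M: MSI groups / GIC interrupts (assumed) */
#define NR_CPUS		2	/* N: CPU cores (assumed) */
#define EQ_LEN		64	/* entries per event queue, as in the driver */

/* Same mappings as the driver's hwirq_to_cpu()/hwirq_to_group() helpers. */
static unsigned long hwirq_to_cpu(unsigned long hwirq)
{
	return hwirq % NR_CPUS;
}

static unsigned long hwirq_to_group(unsigned long hwirq)
{
	return hwirq % NR_GROUPS;
}

/* Canonical hwirq: the CPU0 member of the vector's group of N hwirqs. */
static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return hwirq - hwirq_to_cpu(hwirq);
}

int main(void)
{
	unsigned long hwirq = 5;

	printf("raw vectors: %d, usable vectors: %d\n",
	       NR_GROUPS * EQ_LEN, NR_GROUPS * EQ_LEN / NR_CPUS);
	printf("hwirq %lu: cpu %lu, group %lu, canonical %lu\n",
	       hwirq, hwirq_to_cpu(hwirq), hwirq_to_group(hwirq),
	       hwirq_to_canonical_hwirq(hwirq));

	/* steer the vector to CPU 0: canonical hwirq + target CPU */
	printf("steered to cpu 0 -> hwirq %lu\n",
	       hwirq_to_canonical_hwirq(hwirq) + 0);
	return 0;
}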
// SPDX-License-Identifier: GPL-2.0+ /* * Rockchip AXI PCIe host controller driver * * Copyright (c) 2016 Rockchip, Inc. * * Author: Shawn Lin <[email protected]> * Wenrui Li <[email protected]> * * Bits taken from Synopsys DesignWare Host controller driver and * ARM PCI Host generic driver. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/iopoll.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include "../pci.h" #include "pcie-rockchip.h" int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *node = dev->of_node; struct resource *regs; int err; if (rockchip->is_rc) { regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "axi-base"); rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); if (IS_ERR(rockchip->reg_base)) return PTR_ERR(rockchip->reg_base); } else { rockchip->mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem-base"); if (!rockchip->mem_res) return -EINVAL; } rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb-base"); if (IS_ERR(rockchip->apb_base)) return PTR_ERR(rockchip->apb_base); err = rockchip_pcie_get_phys(rockchip); if (err) return err; rockchip->lanes = 1; err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); if (!err && (rockchip->lanes == 0 || rockchip->lanes == 3 || rockchip->lanes > 4)) { dev_warn(dev, "invalid num-lanes, default to use one lane\n"); rockchip->lanes = 1; } rockchip->link_gen = of_pci_get_max_link_speed(node); if (rockchip->link_gen < 0 || rockchip->link_gen > 2) rockchip->link_gen = 2; rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); if (IS_ERR(rockchip->core_rst)) { if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) dev_err(dev, "missing core reset property in node\n"); return PTR_ERR(rockchip->core_rst); } rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); if (IS_ERR(rockchip->mgmt_rst)) { if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) dev_err(dev, "missing mgmt reset property in node\n"); return PTR_ERR(rockchip->mgmt_rst); } rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev, "mgmt-sticky"); if (IS_ERR(rockchip->mgmt_sticky_rst)) { if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) dev_err(dev, "missing mgmt-sticky reset property in node\n"); return PTR_ERR(rockchip->mgmt_sticky_rst); } rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); if (IS_ERR(rockchip->pipe_rst)) { if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) dev_err(dev, "missing pipe reset property in node\n"); return PTR_ERR(rockchip->pipe_rst); } rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); if (IS_ERR(rockchip->pm_rst)) { if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) dev_err(dev, "missing pm reset property in node\n"); return PTR_ERR(rockchip->pm_rst); } rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); if (IS_ERR(rockchip->pclk_rst)) { if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) dev_err(dev, "missing pclk reset property in node\n"); return PTR_ERR(rockchip->pclk_rst); } rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); if (IS_ERR(rockchip->aclk_rst)) { if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) dev_err(dev, "missing aclk reset property in node\n"); return PTR_ERR(rockchip->aclk_rst); } if (rockchip->is_rc) { rockchip->ep_gpio = 
devm_gpiod_get_optional(dev, "ep", GPIOD_OUT_HIGH); if (IS_ERR(rockchip->ep_gpio)) return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio), "failed to get ep GPIO\n"); } rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); if (IS_ERR(rockchip->aclk_pcie)) { dev_err(dev, "aclk clock not found\n"); return PTR_ERR(rockchip->aclk_pcie); } rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); if (IS_ERR(rockchip->aclk_perf_pcie)) { dev_err(dev, "aclk_perf clock not found\n"); return PTR_ERR(rockchip->aclk_perf_pcie); } rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); if (IS_ERR(rockchip->hclk_pcie)) { dev_err(dev, "hclk clock not found\n"); return PTR_ERR(rockchip->hclk_pcie); } rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); if (IS_ERR(rockchip->clk_pcie_pm)) { dev_err(dev, "pm clock not found\n"); return PTR_ERR(rockchip->clk_pcie_pm); } return 0; } EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt); #define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr) /* 100 ms max wait time for PHY PLLs to lock */ #define RK_PHY_PLL_LOCK_TIMEOUT_US 100000 /* Sleep should be less than 20ms */ #define RK_PHY_PLL_LOCK_SLEEP_US 1000 int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err, i; u32 regs; err = reset_control_assert(rockchip->aclk_rst); if (err) { dev_err(dev, "assert aclk_rst err %d\n", err); return err; } err = reset_control_assert(rockchip->pclk_rst); if (err) { dev_err(dev, "assert pclk_rst err %d\n", err); return err; } err = reset_control_assert(rockchip->pm_rst); if (err) { dev_err(dev, "assert pm_rst err %d\n", err); return err; } for (i = 0; i < MAX_LANE_NUM; i++) { err = phy_init(rockchip->phys[i]); if (err) { dev_err(dev, "init phy%d err %d\n", i, err); goto err_exit_phy; } } err = reset_control_assert(rockchip->core_rst); if (err) { dev_err(dev, "assert core_rst err %d\n", err); goto err_exit_phy; } err = reset_control_assert(rockchip->mgmt_rst); if (err) { dev_err(dev, "assert mgmt_rst err %d\n", err); goto err_exit_phy; } err = reset_control_assert(rockchip->mgmt_sticky_rst); if (err) { dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); goto err_exit_phy; } err = reset_control_assert(rockchip->pipe_rst); if (err) { dev_err(dev, "assert pipe_rst err %d\n", err); goto err_exit_phy; } udelay(10); err = reset_control_deassert(rockchip->pm_rst); if (err) { dev_err(dev, "deassert pm_rst err %d\n", err); goto err_exit_phy; } err = reset_control_deassert(rockchip->aclk_rst); if (err) { dev_err(dev, "deassert aclk_rst err %d\n", err); goto err_exit_phy; } err = reset_control_deassert(rockchip->pclk_rst); if (err) { dev_err(dev, "deassert pclk_rst err %d\n", err); goto err_exit_phy; } if (rockchip->link_gen == 2) rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, PCIE_CLIENT_CONFIG); else rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, PCIE_CLIENT_CONFIG); regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE | PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes); if (rockchip->is_rc) regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; else regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP; rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG); for (i = 0; i < MAX_LANE_NUM; i++) { err = phy_power_on(rockchip->phys[i]); if (err) { dev_err(dev, "power on phy%d err %d\n", i, err); goto err_power_off_phy; } } err = readx_poll_timeout(rockchip_pcie_read_addr, PCIE_CLIENT_SIDE_BAND_STATUS, regs, !(regs & PCIE_CLIENT_PHY_ST), RK_PHY_PLL_LOCK_SLEEP_US, RK_PHY_PLL_LOCK_TIMEOUT_US); if (err) { dev_err(dev, "PHY PLLs could not 
lock, %d\n", err); goto err_power_off_phy; } /* * Please don't reorder the deassert sequence of the following * four reset pins. */ err = reset_control_deassert(rockchip->mgmt_sticky_rst); if (err) { dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); goto err_power_off_phy; } err = reset_control_deassert(rockchip->core_rst); if (err) { dev_err(dev, "deassert core_rst err %d\n", err); goto err_power_off_phy; } err = reset_control_deassert(rockchip->mgmt_rst); if (err) { dev_err(dev, "deassert mgmt_rst err %d\n", err); goto err_power_off_phy; } err = reset_control_deassert(rockchip->pipe_rst); if (err) { dev_err(dev, "deassert pipe_rst err %d\n", err); goto err_power_off_phy; } return 0; err_power_off_phy: while (i--) phy_power_off(rockchip->phys[i]); i = MAX_LANE_NUM; err_exit_phy: while (i--) phy_exit(rockchip->phys[i]); return err; } EXPORT_SYMBOL_GPL(rockchip_pcie_init_port); int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; struct phy *phy; char *name; u32 i; phy = devm_phy_get(dev, "pcie-phy"); if (!IS_ERR(phy)) { rockchip->legacy_phy = true; rockchip->phys[0] = phy; dev_warn(dev, "legacy phy model is deprecated!\n"); return 0; } if (PTR_ERR(phy) == -EPROBE_DEFER) return PTR_ERR(phy); dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n"); for (i = 0; i < MAX_LANE_NUM; i++) { name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i); if (!name) return -ENOMEM; phy = devm_of_phy_get(dev, dev->of_node, name); kfree(name); if (IS_ERR(phy)) { if (PTR_ERR(phy) != -EPROBE_DEFER) dev_err(dev, "missing phy for lane %d: %ld\n", i, PTR_ERR(phy)); return PTR_ERR(phy); } rockchip->phys[i] = phy; } return 0; } EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys); void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) { int i; for (i = 0; i < MAX_LANE_NUM; i++) { /* inactive lanes are already powered off */ if (rockchip->lanes_map & BIT(i)) phy_power_off(rockchip->phys[i]); phy_exit(rockchip->phys[i]); } } EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys); int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err; err = clk_prepare_enable(rockchip->aclk_pcie); if (err) { dev_err(dev, "unable to enable aclk_pcie clock\n"); return err; } err = clk_prepare_enable(rockchip->aclk_perf_pcie); if (err) { dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); goto err_aclk_perf_pcie; } err = clk_prepare_enable(rockchip->hclk_pcie); if (err) { dev_err(dev, "unable to enable hclk_pcie clock\n"); goto err_hclk_pcie; } err = clk_prepare_enable(rockchip->clk_pcie_pm); if (err) { dev_err(dev, "unable to enable clk_pcie_pm clock\n"); goto err_clk_pcie_pm; } return 0; err_clk_pcie_pm: clk_disable_unprepare(rockchip->hclk_pcie); err_hclk_pcie: clk_disable_unprepare(rockchip->aclk_perf_pcie); err_aclk_perf_pcie: clk_disable_unprepare(rockchip->aclk_pcie); return err; } EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks); void rockchip_pcie_disable_clocks(void *data) { struct rockchip_pcie *rockchip = data; clk_disable_unprepare(rockchip->clk_pcie_pm); clk_disable_unprepare(rockchip->hclk_pcie); clk_disable_unprepare(rockchip->aclk_perf_pcie); clk_disable_unprepare(rockchip->aclk_pcie); } EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks); void rockchip_pcie_cfg_configuration_accesses( struct rockchip_pcie *rockchip, u32 type) { u32 ob_desc_0; /* Configuration Accesses for region 0 */ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); rockchip_pcie_write(rockchip, (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), 
PCIE_CORE_OB_REGION_ADDR0); rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, PCIE_CORE_OB_REGION_ADDR1); ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); ob_desc_0 |= (type | (0x1 << 23)); rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); } EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
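/*
 * Illustrative sketch, not part of the upstream driver: a minimal probe-time
 * sequence showing how the EXPORT_SYMBOL_GPL helpers above are typically
 * chained together by a front end such as pcie-rockchip-host.c. The function
 * name, the use of a devm cleanup action and the exact ordering are
 * assumptions for illustration only; the helper declarations are assumed to
 * come from "pcie-rockchip.h".
 */
static int rockchip_pcie_example_probe_hw(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err;

	/* Look up PHYs, resets, clocks and the optional ep GPIO from DT. */
	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	/* Gate the four bus clocks on for the lifetime of the device. */
	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	/*
	 * rockchip_pcie_disable_clocks() takes a void *, so it can be used
	 * directly as a devm cleanup action (illustrative pairing).
	 */
	err = devm_add_action_or_reset(dev, rockchip_pcie_disable_clocks,
				       rockchip);
	if (err)
		return err;

	/* Bring the PHY lanes and the controller out of reset. */
	return rockchip_pcie_init_port(rockchip);
}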
linux-master
drivers/pci/controller/pcie-rockchip.c
// SPDX-License-Identifier: GPL-2.0+ /* * PCIe host controller driver for Tegra SoCs * * Copyright (c) 2010, CompuLab, Ltd. * Author: Mike Rapoport <[email protected]> * * Based on NVIDIA PCIe driver * Copyright (c) 2008-2009, NVIDIA Corporation. * * Bits taken from arch/arm/mach-dove/pcie.c * * Author: Thierry Reding <[email protected]> */ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/regulator/consumer.h> #include <soc/tegra/cpuidle.h> #include <soc/tegra/pmc.h> #include "../pci.h" #define INT_PCI_MSI_NR (8 * 32) /* register definitions */ #define AFI_AXI_BAR0_SZ 0x00 #define AFI_AXI_BAR1_SZ 0x04 #define AFI_AXI_BAR2_SZ 0x08 #define AFI_AXI_BAR3_SZ 0x0c #define AFI_AXI_BAR4_SZ 0x10 #define AFI_AXI_BAR5_SZ 0x14 #define AFI_AXI_BAR0_START 0x18 #define AFI_AXI_BAR1_START 0x1c #define AFI_AXI_BAR2_START 0x20 #define AFI_AXI_BAR3_START 0x24 #define AFI_AXI_BAR4_START 0x28 #define AFI_AXI_BAR5_START 0x2c #define AFI_FPCI_BAR0 0x30 #define AFI_FPCI_BAR1 0x34 #define AFI_FPCI_BAR2 0x38 #define AFI_FPCI_BAR3 0x3c #define AFI_FPCI_BAR4 0x40 #define AFI_FPCI_BAR5 0x44 #define AFI_CACHE_BAR0_SZ 0x48 #define AFI_CACHE_BAR0_ST 0x4c #define AFI_CACHE_BAR1_SZ 0x50 #define AFI_CACHE_BAR1_ST 0x54 #define AFI_MSI_BAR_SZ 0x60 #define AFI_MSI_FPCI_BAR_ST 0x64 #define AFI_MSI_AXI_BAR_ST 0x68 #define AFI_MSI_VEC(x) (0x6c + ((x) * 4)) #define AFI_MSI_EN_VEC(x) (0x8c + ((x) * 4)) #define AFI_CONFIGURATION 0xac #define AFI_CONFIGURATION_EN_FPCI (1 << 0) #define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31) #define AFI_FPCI_ERROR_MASKS 0xb0 #define AFI_INTR_MASK 0xb4 #define AFI_INTR_MASK_INT_MASK (1 << 0) #define AFI_INTR_MASK_MSI_MASK (1 << 8) #define AFI_INTR_CODE 0xb8 #define AFI_INTR_CODE_MASK 0xf #define AFI_INTR_INI_SLAVE_ERROR 1 #define AFI_INTR_INI_DECODE_ERROR 2 #define AFI_INTR_TARGET_ABORT 3 #define AFI_INTR_MASTER_ABORT 4 #define AFI_INTR_INVALID_WRITE 5 #define AFI_INTR_LEGACY 6 #define AFI_INTR_FPCI_DECODE_ERROR 7 #define AFI_INTR_AXI_DECODE_ERROR 8 #define AFI_INTR_FPCI_TIMEOUT 9 #define AFI_INTR_PE_PRSNT_SENSE 10 #define AFI_INTR_PE_CLKREQ_SENSE 11 #define AFI_INTR_CLKCLAMP_SENSE 12 #define AFI_INTR_RDY4PD_SENSE 13 #define AFI_INTR_P2P_ERROR 14 #define AFI_INTR_SIGNATURE 0xbc #define AFI_UPPER_FPCI_ADDRESS 0xc0 #define AFI_SM_INTR_ENABLE 0xc4 #define AFI_SM_INTR_INTA_ASSERT (1 << 0) #define AFI_SM_INTR_INTB_ASSERT (1 << 1) #define AFI_SM_INTR_INTC_ASSERT (1 << 2) #define AFI_SM_INTR_INTD_ASSERT (1 << 3) #define AFI_SM_INTR_INTA_DEASSERT (1 << 4) #define AFI_SM_INTR_INTB_DEASSERT (1 << 5) #define AFI_SM_INTR_INTC_DEASSERT (1 << 6) #define AFI_SM_INTR_INTD_DEASSERT (1 << 7) #define AFI_AFI_INTR_ENABLE 0xc8 #define AFI_INTR_EN_INI_SLVERR (1 << 0) #define AFI_INTR_EN_INI_DECERR (1 << 1) #define AFI_INTR_EN_TGT_SLVERR (1 << 2) #define AFI_INTR_EN_TGT_DECERR (1 << 3) #define AFI_INTR_EN_TGT_WRERR (1 << 4) #define AFI_INTR_EN_DFPCI_DECERR (1 << 5) #define 
AFI_INTR_EN_AXI_DECERR (1 << 6) #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) #define AFI_INTR_EN_PRSNT_SENSE (1 << 8) #define AFI_PCIE_PME 0xf0 #define AFI_PCIE_CONFIG 0x0f8 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20) #define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29)) #define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29) #define AFI_FUSE 0x104 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) #define AFI_PEX0_CTRL 0x110 #define AFI_PEX1_CTRL 0x118 #define AFI_PEX_CTRL_RST (1 << 0) #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) #define AFI_PLLE_CONTROL 0x160 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) #define AFI_PEXBIAS_CTRL_0 0x168 #define RP_ECTL_2_R1 0x00000e84 #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff #define RP_ECTL_4_R1 0x00000e8c #define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16) #define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16 #define RP_ECTL_5_R1 0x00000e90 #define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff #define RP_ECTL_6_R1 0x00000e94 #define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff #define RP_ECTL_2_R2 0x00000ea4 #define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff #define RP_ECTL_4_R2 0x00000eac #define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16) #define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16 #define RP_ECTL_5_R2 0x00000eb0 #define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff #define RP_ECTL_6_R2 0x00000eb4 #define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff #define RP_VEND_XP 0x00000f00 #define RP_VEND_XP_DL_UP (1 << 30) #define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27) #define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28) #define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18) #define RP_VEND_CTL0 0x00000f44 #define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12) #define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12) #define RP_VEND_CTL1 0x00000f48 #define RP_VEND_CTL1_ERPT (1 << 13) #define RP_VEND_XP_BIST 0x00000f4c #define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28) #define RP_VEND_CTL2 0x00000fa8 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7) #define RP_PRIV_MISC 0x00000fe0 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) #define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16) #define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16) #define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23) #define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24) #define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24) #define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31) #define RP_LINK_CONTROL_STATUS 0x00000090 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 #define RP_LINK_CONTROL_STATUS_2 
0x000000b0 #define PADS_CTL_SEL 0x0000009c #define PADS_CTL 0x000000a0 #define PADS_CTL_IDDQ_1L (1 << 0) #define PADS_CTL_TX_DATA_EN_1L (1 << 6) #define PADS_CTL_RX_DATA_EN_1L (1 << 10) #define PADS_PLL_CTL_TEGRA20 0x000000b8 #define PADS_PLL_CTL_TEGRA30 0x000000b4 #define PADS_PLL_CTL_RST_B4SM (1 << 1) #define PADS_PLL_CTL_LOCKDET (1 << 8) #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22) #define PADS_REFCLK_CFG0 0x000000c8 #define PADS_REFCLK_CFG1 0x000000cc #define PADS_REFCLK_BIAS 0x000000d0 /* * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit * entries, one entry per PCIe port. These field definitions and desired * values aren't in the TRM, but do come from NVIDIA. */ #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */ #define PADS_REFCLK_CFG_E_TERM_SHIFT 7 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ #define PME_ACK_TIMEOUT 10000 #define LINK_RETRAIN_TIMEOUT 100000 /* in usec */ struct tegra_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; spinlock_t mask_lock; void *virt; dma_addr_t phys; int irq; }; /* used to differentiate between Tegra SoC generations */ struct tegra_pcie_port_soc { struct { u8 turnoff_bit; u8 ack_bit; } pme; }; struct tegra_pcie_soc { unsigned int num_ports; const struct tegra_pcie_port_soc *ports; unsigned int msi_base_shift; unsigned long afi_pex2_ctrl; u32 pads_pll_ctl; u32 tx_ref_sel; u32 pads_refclk_cfg0; u32 pads_refclk_cfg1; u32 update_fc_threshold; bool has_pex_clkreq_en; bool has_pex_bias_ctrl; bool has_intr_prsnt_sense; bool has_cml_clk; bool has_gen2; bool force_pca_enable; bool program_uphy; bool update_clamp_threshold; bool program_deskew_time; bool update_fc_timer; bool has_cache_bars; struct { struct { u32 rp_ectl_2_r1; u32 rp_ectl_4_r1; u32 rp_ectl_5_r1; u32 rp_ectl_6_r1; u32 rp_ectl_2_r2; u32 rp_ectl_4_r2; u32 rp_ectl_5_r2; u32 rp_ectl_6_r2; } regs; bool enable; } ectl; }; struct tegra_pcie { struct device *dev; void __iomem *pads; void __iomem *afi; void __iomem *cfg; int irq; struct resource cs; struct clk *pex_clk; struct clk *afi_clk; struct clk *pll_e; struct clk *cml_clk; struct reset_control *pex_rst; struct reset_control *afi_rst; struct reset_control *pcie_xrst; bool legacy_phy; struct phy *phy; struct tegra_msi msi; struct list_head ports; u32 xbar_config; struct regulator_bulk_data *supplies; unsigned int num_supplies; const struct tegra_pcie_soc *soc; struct dentry *debugfs; }; static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi) { return container_of(msi, struct tegra_pcie, msi); } struct tegra_pcie_port { struct tegra_pcie *pcie; struct device_node *np; struct list_head list; struct resource regs; void __iomem *base; unsigned int index; unsigned int lanes; struct phy **phys; struct gpio_desc *reset_gpio; }; static inline void afi_writel(struct tegra_pcie *pcie, u32 value, unsigned long offset) { writel(value, pcie->afi + offset); } static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset) { return readl(pcie->afi + offset); } static inline void pads_writel(struct tegra_pcie *pcie, u32 value, unsigned long offset) { writel(value, pcie->pads + offset); } static 
inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) { return readl(pcie->pads + offset); } /* * The configuration space mapping on Tegra is somewhat similar to the ECAM * defined by PCIe. However it deviates a bit in how the 4 bits for extended * register accesses are mapped: * * [27:24] extended register number * [23:16] bus number * [15:11] device number * [10: 8] function number * [ 7: 0] register number * * Mapping the whole extended configuration space would require 256 MiB of * virtual address space, only a small part of which will actually be used. * * To work around this, a 4 KiB region is used to generate the required * configuration transaction with relevant B:D:F and register offset values. * This is achieved by dynamically programming base address and size of * AFI_AXI_BAR used for end point config space mapping to make sure that the * address (access to which generates correct config transaction) falls in * this 4 KiB region. */ static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, unsigned int where) { return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | (PCI_FUNC(devfn) << 8) | (where & 0xff); } static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct tegra_pcie *pcie = bus->sysdata; void __iomem *addr = NULL; if (bus->number == 0) { unsigned int slot = PCI_SLOT(devfn); struct tegra_pcie_port *port; list_for_each_entry(port, &pcie->ports, list) { if (port->index + 1 == slot) { addr = port->base + (where & ~3); break; } } } else { unsigned int offset; u32 base; offset = tegra_pcie_conf_offset(bus->number, devfn, where); /* move 4 KiB window to offset within the FPCI region */ base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); afi_writel(pcie, base, AFI_FPCI_BAR0); /* move to correct offset within the 4 KiB page */ addr = pcie->cfg + (offset & (SZ_4K - 1)); } return addr; } static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { if (bus->number == 0) return pci_generic_config_read32(bus, devfn, where, size, value); return pci_generic_config_read(bus, devfn, where, size, value); } static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { if (bus->number == 0) return pci_generic_config_write32(bus, devfn, where, size, value); return pci_generic_config_write(bus, devfn, where, size, value); } static struct pci_ops tegra_pcie_ops = { .map_bus = tegra_pcie_map_bus, .read = tegra_pcie_config_read, .write = tegra_pcie_config_write, }; static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) { const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long ret = 0; switch (port->index) { case 0: ret = AFI_PEX0_CTRL; break; case 1: ret = AFI_PEX1_CTRL; break; case 2: ret = soc->afi_pex2_ctrl; break; } return ret; } static void tegra_pcie_port_reset(struct tegra_pcie_port *port) { unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); unsigned long value; /* pulse reset signal */ if (port->reset_gpio) { gpiod_set_value(port->reset_gpio, 1); } else { value = afi_readl(port->pcie, ctrl); value &= ~AFI_PEX_CTRL_RST; afi_writel(port->pcie, value, ctrl); } usleep_range(1000, 2000); if (port->reset_gpio) { gpiod_set_value(port->reset_gpio, 0); } else { value = afi_readl(port->pcie, ctrl); value |= AFI_PEX_CTRL_RST; afi_writel(port->pcie, value, ctrl); } } static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port) { const struct tegra_pcie_soc *soc = 
port->pcie->soc; u32 value; /* Enable AER capability */ value = readl(port->base + RP_VEND_CTL1); value |= RP_VEND_CTL1_ERPT; writel(value, port->base + RP_VEND_CTL1); /* Optimal settings to enhance bandwidth */ value = readl(port->base + RP_VEND_XP); value |= RP_VEND_XP_OPPORTUNISTIC_ACK; value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC; writel(value, port->base + RP_VEND_XP); /* * LTSSM will wait for DLLP to finish before entering L1 or L2, * to avoid truncation of PM messages which results in receiver errors */ value = readl(port->base + RP_VEND_XP_BIST); value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE; writel(value, port->base + RP_VEND_XP_BIST); value = readl(port->base + RP_PRIV_MISC); value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE; value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE; if (soc->update_clamp_threshold) { value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK | RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK); value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD | RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD; } writel(value, port->base + RP_PRIV_MISC); } static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port) { const struct tegra_pcie_soc *soc = port->pcie->soc; u32 value; value = readl(port->base + RP_ECTL_2_R1); value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK; value |= soc->ectl.regs.rp_ectl_2_r1; writel(value, port->base + RP_ECTL_2_R1); value = readl(port->base + RP_ECTL_4_R1); value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK; value |= soc->ectl.regs.rp_ectl_4_r1 << RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT; writel(value, port->base + RP_ECTL_4_R1); value = readl(port->base + RP_ECTL_5_R1); value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK; value |= soc->ectl.regs.rp_ectl_5_r1; writel(value, port->base + RP_ECTL_5_R1); value = readl(port->base + RP_ECTL_6_R1); value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK; value |= soc->ectl.regs.rp_ectl_6_r1; writel(value, port->base + RP_ECTL_6_R1); value = readl(port->base + RP_ECTL_2_R2); value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK; value |= soc->ectl.regs.rp_ectl_2_r2; writel(value, port->base + RP_ECTL_2_R2); value = readl(port->base + RP_ECTL_4_R2); value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK; value |= soc->ectl.regs.rp_ectl_4_r2 << RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT; writel(value, port->base + RP_ECTL_4_R2); value = readl(port->base + RP_ECTL_5_R2); value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK; value |= soc->ectl.regs.rp_ectl_5_r2; writel(value, port->base + RP_ECTL_5_R2); value = readl(port->base + RP_ECTL_6_R2); value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK; value |= soc->ectl.regs.rp_ectl_6_r2; writel(value, port->base + RP_ECTL_6_R2); } static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) { const struct tegra_pcie_soc *soc = port->pcie->soc; u32 value; /* * Sometimes link speed change from Gen2 to Gen1 fails due to * instability in deskew logic on lane-0. Increase the deskew * retry time to resolve this issue. */ if (soc->program_deskew_time) { value = readl(port->base + RP_VEND_CTL0); value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK; value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH; writel(value, port->base + RP_VEND_CTL0); } if (soc->update_fc_timer) { value = readl(port->base + RP_VEND_XP); value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; value |= soc->update_fc_threshold; writel(value, port->base + RP_VEND_XP); } /* * PCIe link doesn't come up with few legacy PCIe endpoints if * root port advertises both Gen-1 and Gen-2 speeds in Tegra. 
* Hence, the strategy followed here is to initially advertise * only Gen-1 and after link is up, retrain link to Gen-2 speed */ value = readl(port->base + RP_LINK_CONTROL_STATUS_2); value &= ~PCI_EXP_LNKSTA_CLS; value |= PCI_EXP_LNKSTA_CLS_2_5GB; writel(value, port->base + RP_LINK_CONTROL_STATUS_2); } static void tegra_pcie_port_enable(struct tegra_pcie_port *port) { unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long value; /* enable reference clock */ value = afi_readl(port->pcie, ctrl); value |= AFI_PEX_CTRL_REFCLK_EN; if (soc->has_pex_clkreq_en) value |= AFI_PEX_CTRL_CLKREQ_EN; value |= AFI_PEX_CTRL_OVERRIDE_EN; afi_writel(port->pcie, value, ctrl); tegra_pcie_port_reset(port); if (soc->force_pca_enable) { value = readl(port->base + RP_VEND_CTL2); value |= RP_VEND_CTL2_PCA_ENABLE; writel(value, port->base + RP_VEND_CTL2); } tegra_pcie_enable_rp_features(port); if (soc->ectl.enable) tegra_pcie_program_ectl_settings(port); tegra_pcie_apply_sw_fixup(port); } static void tegra_pcie_port_disable(struct tegra_pcie_port *port) { unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); const struct tegra_pcie_soc *soc = port->pcie->soc; unsigned long value; /* assert port reset */ value = afi_readl(port->pcie, ctrl); value &= ~AFI_PEX_CTRL_RST; afi_writel(port->pcie, value, ctrl); /* disable reference clock */ value = afi_readl(port->pcie, ctrl); if (soc->has_pex_clkreq_en) value &= ~AFI_PEX_CTRL_CLKREQ_EN; value &= ~AFI_PEX_CTRL_REFCLK_EN; afi_writel(port->pcie, value, ctrl); /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */ value = afi_readl(port->pcie, AFI_PCIE_CONFIG); value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); afi_writel(port->pcie, value, AFI_PCIE_CONFIG); } static void tegra_pcie_port_free(struct tegra_pcie_port *port) { struct tegra_pcie *pcie = port->pcie; struct device *dev = pcie->dev; devm_iounmap(dev, port->base); devm_release_mem_region(dev, port->regs.start, resource_size(&port->regs)); list_del(&port->list); devm_kfree(dev, port); } /* Tegra PCIE root complex wrongly reports device class */ static void tegra_pcie_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); /* Tegra20 and Tegra30 PCIE requires relaxed ordering */ static void tegra_pcie_relax_enable(struct pci_dev *dev) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable); static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { struct tegra_pcie *pcie = pdev->bus->sysdata; int irq; tegra_cpuidle_pcie_irqs_in_use(); irq = of_irq_parse_and_map_pci(pdev, slot, pin); if (!irq) irq = pcie->irq; return irq; } static irqreturn_t tegra_pcie_isr(int irq, void *arg) { static const char * const err_msg[] = { "Unknown", "AXI slave error", "AXI decode error", "Target abort", "Master 
abort", "Invalid write", "Legacy interrupt", "Response decoding error", "AXI response decoding error", "Transaction timeout", "Slot present pin change", "Slot clock request change", "TMS clock ramp change", "TMS ready for power down", "Peer2Peer error", }; struct tegra_pcie *pcie = arg; struct device *dev = pcie->dev; u32 code, signature; code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK; signature = afi_readl(pcie, AFI_INTR_SIGNATURE); afi_writel(pcie, 0, AFI_INTR_CODE); if (code == AFI_INTR_LEGACY) return IRQ_NONE; if (code >= ARRAY_SIZE(err_msg)) code = 0; /* * do not pollute kernel log with master abort reports since they * happen a lot during enumeration */ if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE) dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature); else dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature); if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_FPCI_DECODE_ERROR) { u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff; u64 address = (u64)fpci << 32 | (signature & 0xfffffffc); if (code == AFI_INTR_MASTER_ABORT) dev_dbg(dev, " FPCI address: %10llx\n", address); else dev_err(dev, " FPCI address: %10llx\n", address); } return IRQ_HANDLED; } /* * FPCI map is as follows: * - 0xfdfc000000: I/O space * - 0xfdfe000000: type 0 configuration space * - 0xfdff000000: type 1 configuration space * - 0xfe00000000: type 0 extended configuration space * - 0xfe10000000: type 1 extended configuration space */ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) { u32 size; struct resource_entry *entry; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); /* Bar 0: type 1 extended configuration space */ size = resource_size(&pcie->cs); afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); resource_list_for_each_entry(entry, &bridge->windows) { u32 fpci_bar, axi_address; struct resource *res = entry->res; size = resource_size(res); switch (resource_type(res)) { case IORESOURCE_IO: /* Bar 1: downstream IO bar */ fpci_bar = 0xfdfc0000; axi_address = pci_pio_to_address(res->start); afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); break; case IORESOURCE_MEM: fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1; axi_address = res->start; if (res->flags & IORESOURCE_PREFETCH) { /* Bar 2: prefetchable memory BAR */ afi_writel(pcie, axi_address, AFI_AXI_BAR2_START); afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ); afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2); } else { /* Bar 3: non prefetchable memory BAR */ afi_writel(pcie, axi_address, AFI_AXI_BAR3_START); afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ); afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3); } break; } } /* NULL out the remaining BARs as they are not used */ afi_writel(pcie, 0, AFI_AXI_BAR4_START); afi_writel(pcie, 0, AFI_AXI_BAR4_SZ); afi_writel(pcie, 0, AFI_FPCI_BAR4); afi_writel(pcie, 0, AFI_AXI_BAR5_START); afi_writel(pcie, 0, AFI_AXI_BAR5_SZ); afi_writel(pcie, 0, AFI_FPCI_BAR5); if (pcie->soc->has_cache_bars) { /* map all upstream transactions as uncached */ afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); } /* MSI translations are setup only when needed */ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST); afi_writel(pcie, 0, AFI_MSI_BAR_SZ); afi_writel(pcie, 0, 
AFI_MSI_AXI_BAR_ST); afi_writel(pcie, 0, AFI_MSI_BAR_SZ); } static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) { const struct tegra_pcie_soc *soc = pcie->soc; u32 value; timeout = jiffies + msecs_to_jiffies(timeout); while (time_before(jiffies, timeout)) { value = pads_readl(pcie, soc->pads_pll_ctl); if (value & PADS_PLL_CTL_LOCKDET) return 0; } return -ETIMEDOUT; } static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; u32 value; int err; /* initialize internal PHY, enable up to 16 PCIE lanes */ pads_writel(pcie, 0x0, PADS_CTL_SEL); /* override IDDQ to 1 on all 4 lanes */ value = pads_readl(pcie, PADS_CTL); value |= PADS_CTL_IDDQ_1L; pads_writel(pcie, value, PADS_CTL); /* * Set up PHY PLL inputs select PLLE output as refclock, * set TX ref sel to div10 (not div5). */ value = pads_readl(pcie, soc->pads_pll_ctl); value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK); value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel; pads_writel(pcie, value, soc->pads_pll_ctl); /* reset PLL */ value = pads_readl(pcie, soc->pads_pll_ctl); value &= ~PADS_PLL_CTL_RST_B4SM; pads_writel(pcie, value, soc->pads_pll_ctl); usleep_range(20, 100); /* take PLL out of reset */ value = pads_readl(pcie, soc->pads_pll_ctl); value |= PADS_PLL_CTL_RST_B4SM; pads_writel(pcie, value, soc->pads_pll_ctl); /* wait for the PLL to lock */ err = tegra_pcie_pll_wait(pcie, 500); if (err < 0) { dev_err(dev, "PLL failed to lock: %d\n", err); return err; } /* turn off IDDQ override */ value = pads_readl(pcie, PADS_CTL); value &= ~PADS_CTL_IDDQ_1L; pads_writel(pcie, value, PADS_CTL); /* enable TX/RX data */ value = pads_readl(pcie, PADS_CTL); value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L; pads_writel(pcie, value, PADS_CTL); return 0; } static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; u32 value; /* disable TX/RX data */ value = pads_readl(pcie, PADS_CTL); value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L); pads_writel(pcie, value, PADS_CTL); /* override IDDQ */ value = pads_readl(pcie, PADS_CTL); value |= PADS_CTL_IDDQ_1L; pads_writel(pcie, value, PADS_CTL); /* reset PLL */ value = pads_readl(pcie, soc->pads_pll_ctl); value &= ~PADS_PLL_CTL_RST_B4SM; pads_writel(pcie, value, soc->pads_pll_ctl); usleep_range(20, 100); return 0; } static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port) { struct device *dev = port->pcie->dev; unsigned int i; int err; for (i = 0; i < port->lanes; i++) { err = phy_power_on(port->phys[i]); if (err < 0) { dev_err(dev, "failed to power on PHY#%u: %d\n", i, err); return err; } } return 0; } static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) { struct device *dev = port->pcie->dev; unsigned int i; int err; for (i = 0; i < port->lanes; i++) { err = phy_power_off(port->phys[i]); if (err < 0) { dev_err(dev, "failed to power off PHY#%u: %d\n", i, err); return err; } } return 0; } static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct tegra_pcie_port *port; int err; if (pcie->legacy_phy) { if (pcie->phy) err = phy_power_on(pcie->phy); else err = tegra_pcie_phy_enable(pcie); if (err < 0) dev_err(dev, "failed to power on PHY: %d\n", err); return err; } list_for_each_entry(port, &pcie->ports, list) { err = tegra_pcie_port_phy_power_on(port); if (err < 0) { dev_err(dev, "failed to power on PCIe port %u PHY: %d\n", port->index, err); return 
err; } } return 0; } static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct tegra_pcie_port *port; int err; if (pcie->legacy_phy) { if (pcie->phy) err = phy_power_off(pcie->phy); else err = tegra_pcie_phy_disable(pcie); if (err < 0) dev_err(dev, "failed to power off PHY: %d\n", err); return err; } list_for_each_entry(port, &pcie->ports, list) { err = tegra_pcie_port_phy_power_off(port); if (err < 0) { dev_err(dev, "failed to power off PCIe port %u PHY: %d\n", port->index, err); return err; } } return 0; } static void tegra_pcie_enable_controller(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_pcie_port *port; unsigned long value; /* enable PLL power down */ if (pcie->phy) { value = afi_readl(pcie, AFI_PLLE_CONTROL); value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL; value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN; afi_writel(pcie, value, AFI_PLLE_CONTROL); } /* power down PCIe slot clock bias pad */ if (soc->has_pex_bias_ctrl) afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0); /* configure mode and disable all ports */ value = afi_readl(pcie, AFI_PCIE_CONFIG); value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL; list_for_each_entry(port, &pcie->ports, list) { value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index); } afi_writel(pcie, value, AFI_PCIE_CONFIG); if (soc->has_gen2) { value = afi_readl(pcie, AFI_FUSE); value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; afi_writel(pcie, value, AFI_FUSE); } else { value = afi_readl(pcie, AFI_FUSE); value |= AFI_FUSE_PCIE_T0_GEN2_DIS; afi_writel(pcie, value, AFI_FUSE); } /* Disable AFI dynamic clock gating and enable PCIe */ value = afi_readl(pcie, AFI_CONFIGURATION); value |= AFI_CONFIGURATION_EN_FPCI; value |= AFI_CONFIGURATION_CLKEN_OVERRIDE; afi_writel(pcie, value, AFI_CONFIGURATION); value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR | AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR; if (soc->has_intr_prsnt_sense) value |= AFI_INTR_EN_PRSNT_SENSE; afi_writel(pcie, value, AFI_AFI_INTR_ENABLE); afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE); /* don't enable MSI for now, only when needed */ afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK); /* disable all exceptions */ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS); } static void tegra_pcie_power_off(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; int err; reset_control_assert(pcie->afi_rst); clk_disable_unprepare(pcie->pll_e); if (soc->has_cml_clk) clk_disable_unprepare(pcie->cml_clk); clk_disable_unprepare(pcie->afi_clk); if (!dev->pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies); if (err < 0) dev_warn(dev, "failed to disable regulators: %d\n", err); } static int tegra_pcie_power_on(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; int err; reset_control_assert(pcie->pcie_xrst); reset_control_assert(pcie->afi_rst); reset_control_assert(pcie->pex_rst); if (!dev->pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); /* enable regulators */ err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies); if (err < 0) dev_err(dev, "failed to enable regulators: %d\n", err); if (!dev->pm_domain) { err = 
tegra_powergate_power_on(TEGRA_POWERGATE_PCIE); if (err) { dev_err(dev, "failed to power ungate: %d\n", err); goto regulator_disable; } err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE); if (err) { dev_err(dev, "failed to remove clamp: %d\n", err); goto powergate; } } err = clk_prepare_enable(pcie->afi_clk); if (err < 0) { dev_err(dev, "failed to enable AFI clock: %d\n", err); goto powergate; } if (soc->has_cml_clk) { err = clk_prepare_enable(pcie->cml_clk); if (err < 0) { dev_err(dev, "failed to enable CML clock: %d\n", err); goto disable_afi_clk; } } err = clk_prepare_enable(pcie->pll_e); if (err < 0) { dev_err(dev, "failed to enable PLLE clock: %d\n", err); goto disable_cml_clk; } reset_control_deassert(pcie->afi_rst); return 0; disable_cml_clk: if (soc->has_cml_clk) clk_disable_unprepare(pcie->cml_clk); disable_afi_clk: clk_disable_unprepare(pcie->afi_clk); powergate: if (!dev->pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); regulator_disable: regulator_bulk_disable(pcie->num_supplies, pcie->supplies); return err; } static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; /* Configure the reference clock driver */ pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); if (soc->num_ports > 2) pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); } static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; const struct tegra_pcie_soc *soc = pcie->soc; pcie->pex_clk = devm_clk_get(dev, "pex"); if (IS_ERR(pcie->pex_clk)) return PTR_ERR(pcie->pex_clk); pcie->afi_clk = devm_clk_get(dev, "afi"); if (IS_ERR(pcie->afi_clk)) return PTR_ERR(pcie->afi_clk); pcie->pll_e = devm_clk_get(dev, "pll_e"); if (IS_ERR(pcie->pll_e)) return PTR_ERR(pcie->pll_e); if (soc->has_cml_clk) { pcie->cml_clk = devm_clk_get(dev, "cml"); if (IS_ERR(pcie->cml_clk)) return PTR_ERR(pcie->cml_clk); } return 0; } static int tegra_pcie_resets_get(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex"); if (IS_ERR(pcie->pex_rst)) return PTR_ERR(pcie->pex_rst); pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi"); if (IS_ERR(pcie->afi_rst)) return PTR_ERR(pcie->afi_rst); pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x"); if (IS_ERR(pcie->pcie_xrst)) return PTR_ERR(pcie->pcie_xrst); return 0; } static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; int err; pcie->phy = devm_phy_optional_get(dev, "pcie"); if (IS_ERR(pcie->phy)) { err = PTR_ERR(pcie->phy); dev_err(dev, "failed to get PHY: %d\n", err); return err; } err = phy_init(pcie->phy); if (err < 0) { dev_err(dev, "failed to initialize PHY: %d\n", err); return err; } pcie->legacy_phy = true; return 0; } static struct phy *devm_of_phy_optional_get_index(struct device *dev, struct device_node *np, const char *consumer, unsigned int index) { struct phy *phy; char *name; name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index); if (!name) return ERR_PTR(-ENOMEM); phy = devm_of_phy_optional_get(dev, np, name); kfree(name); return phy; } static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) { struct device *dev = port->pcie->dev; struct phy *phy; unsigned int i; int err; port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL); if (!port->phys) return -ENOMEM; for (i = 0; i < port->lanes; i++) { phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i); if (IS_ERR(phy)) { dev_err(dev, "failed to get 
PHY#%u: %ld\n", i, PTR_ERR(phy)); return PTR_ERR(phy); } err = phy_init(phy); if (err < 0) { dev_err(dev, "failed to initialize PHY#%u: %d\n", i, err); return err; } port->phys[i] = phy; } return 0; } static int tegra_pcie_phys_get(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; struct device_node *np = pcie->dev->of_node; struct tegra_pcie_port *port; int err; if (!soc->has_gen2 || of_property_present(np, "phys")) return tegra_pcie_phys_get_legacy(pcie); list_for_each_entry(port, &pcie->ports, list) { err = tegra_pcie_port_get_phys(port); if (err < 0) return err; } return 0; } static void tegra_pcie_phys_put(struct tegra_pcie *pcie) { struct tegra_pcie_port *port; struct device *dev = pcie->dev; int err, i; if (pcie->legacy_phy) { err = phy_exit(pcie->phy); if (err < 0) dev_err(dev, "failed to teardown PHY: %d\n", err); return; } list_for_each_entry(port, &pcie->ports, list) { for (i = 0; i < port->lanes; i++) { err = phy_exit(port->phys[i]); if (err < 0) dev_err(dev, "failed to teardown PHY#%u: %d\n", i, err); } } } static int tegra_pcie_get_resources(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *res; const struct tegra_pcie_soc *soc = pcie->soc; int err; err = tegra_pcie_clocks_get(pcie); if (err) { dev_err(dev, "failed to get clocks: %d\n", err); return err; } err = tegra_pcie_resets_get(pcie); if (err) { dev_err(dev, "failed to get resets: %d\n", err); return err; } if (soc->program_uphy) { err = tegra_pcie_phys_get(pcie); if (err < 0) { dev_err(dev, "failed to get PHYs: %d\n", err); return err; } } pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads"); if (IS_ERR(pcie->pads)) { err = PTR_ERR(pcie->pads); goto phys_put; } pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi"); if (IS_ERR(pcie->afi)) { err = PTR_ERR(pcie->afi); goto phys_put; } /* request configuration space, but remap later, on demand */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs"); if (!res) { err = -EADDRNOTAVAIL; goto phys_put; } pcie->cs = *res; /* constrain configuration space to 4 KiB */ pcie->cs.end = pcie->cs.start + SZ_4K - 1; pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); if (IS_ERR(pcie->cfg)) { err = PTR_ERR(pcie->cfg); goto phys_put; } /* request interrupt */ err = platform_get_irq_byname(pdev, "intr"); if (err < 0) goto phys_put; pcie->irq = err; err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie); if (err) { dev_err(dev, "failed to register IRQ: %d\n", err); goto phys_put; } return 0; phys_put: if (soc->program_uphy) tegra_pcie_phys_put(pcie); return err; } static int tegra_pcie_put_resources(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; if (pcie->irq > 0) free_irq(pcie->irq, pcie); if (soc->program_uphy) tegra_pcie_phys_put(pcie); return 0; } static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port) { struct tegra_pcie *pcie = port->pcie; const struct tegra_pcie_soc *soc = pcie->soc; int err; u32 val; u8 ack_bit; val = afi_readl(pcie, AFI_PCIE_PME); val |= (0x1 << soc->ports[port->index].pme.turnoff_bit); afi_writel(pcie, val, AFI_PCIE_PME); ack_bit = soc->ports[port->index].pme.ack_bit; err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val, val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT); if (err) dev_err(pcie->dev, "PME Ack is not received on port: %d\n", port->index); usleep_range(10000, 11000); val = afi_readl(pcie, AFI_PCIE_PME); val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit); 
afi_writel(pcie, val, AFI_PCIE_PME); } static void tegra_pcie_msi_irq(struct irq_desc *desc) { struct tegra_pcie *pcie = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct tegra_msi *msi = &pcie->msi; struct device *dev = pcie->dev; unsigned int i; chained_irq_enter(chip, desc); for (i = 0; i < 8; i++) { unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i)); while (reg) { unsigned int offset = find_first_bit(&reg, 32); unsigned int index = i * 32 + offset; int ret; ret = generic_handle_domain_irq(msi->domain->parent, index); if (ret) { /* * that's weird who triggered this? * just clear it */ dev_info(dev, "unexpected MSI\n"); afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index)); } /* see if there's any more pending in this vector */ reg = afi_readl(pcie, AFI_MSI_VEC(i)); } } chained_irq_exit(chip, desc); } static void tegra_msi_top_irq_ack(struct irq_data *d) { irq_chip_ack_parent(d); } static void tegra_msi_top_irq_mask(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void tegra_msi_top_irq_unmask(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip tegra_msi_top_chip = { .name = "Tegra PCIe MSI", .irq_ack = tegra_msi_top_irq_ack, .irq_mask = tegra_msi_top_irq_mask, .irq_unmask = tegra_msi_top_irq_unmask, }; static void tegra_msi_irq_ack(struct irq_data *d) { struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; /* clear the interrupt */ afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index)); } static void tegra_msi_irq_mask(struct irq_data *d) { struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; unsigned long flags; u32 value; spin_lock_irqsave(&msi->mask_lock, flags); value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); value &= ~BIT(d->hwirq % 32); afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); spin_unlock_irqrestore(&msi->mask_lock, flags); } static void tegra_msi_irq_unmask(struct irq_data *d) { struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; unsigned long flags; u32 value; spin_lock_irqsave(&msi->mask_lock, flags); value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); value |= BIT(d->hwirq % 32); afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); spin_unlock_irqrestore(&msi->mask_lock, flags); } static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { return -EINVAL; } static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct tegra_msi *msi = irq_data_get_irq_chip_data(data); msg->address_lo = lower_32_bits(msi->phys); msg->address_hi = upper_32_bits(msi->phys); msg->data = data->hwirq; } static struct irq_chip tegra_msi_bottom_chip = { .name = "Tegra MSI", .irq_ack = tegra_msi_irq_ack, .irq_mask = tegra_msi_irq_mask, .irq_unmask = tegra_msi_irq_unmask, .irq_set_affinity = tegra_msi_set_affinity, .irq_compose_msi_msg = tegra_compose_msi_msg, }; static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct tegra_msi *msi = domain->host_data; unsigned int i; int hwirq; mutex_lock(&msi->map_lock); hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs)); mutex_unlock(&msi->map_lock); if (hwirq < 0) return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, 
&tegra_msi_bottom_chip, domain->host_data, handle_edge_irq, NULL, NULL); tegra_cpuidle_pcie_irqs_in_use(); return 0; } static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct tegra_msi *msi = domain->host_data; mutex_lock(&msi->map_lock); bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&msi->map_lock); } static const struct irq_domain_ops tegra_msi_domain_ops = { .alloc = tegra_msi_domain_alloc, .free = tegra_msi_domain_free, }; static struct msi_domain_info tegra_msi_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX), .chip = &tegra_msi_top_chip, }; static int tegra_allocate_domains(struct tegra_msi *msi) { struct tegra_pcie *pcie = msi_to_pcie(msi); struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); struct irq_domain *parent; parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, &tegra_msi_domain_ops, msi); if (!parent) { dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent); if (!msi->domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); irq_domain_remove(parent); return -ENOMEM; } return 0; } static void tegra_free_domains(struct tegra_msi *msi) { struct irq_domain *parent = msi->domain->parent; irq_domain_remove(msi->domain); irq_domain_remove(parent); } static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); struct tegra_msi *msi = &pcie->msi; struct device *dev = pcie->dev; int err; mutex_init(&msi->map_lock); spin_lock_init(&msi->mask_lock); if (IS_ENABLED(CONFIG_PCI_MSI)) { err = tegra_allocate_domains(msi); if (err) return err; } err = platform_get_irq_byname(pdev, "msi"); if (err < 0) goto free_irq_domain; msi->irq = err; irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie); /* Though the PCIe controller can address >32-bit address space, to * facilitate endpoints that support only 32-bit MSI target address, * the mask is set to 32-bit to make sure that MSI target address is * always a 32-bit address */ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); if (err < 0) { dev_err(dev, "failed to set DMA coherent mask: %d\n", err); goto free_irq; } msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING); if (!msi->virt) { dev_err(dev, "failed to allocate DMA memory for MSI\n"); err = -ENOMEM; goto free_irq; } return 0; free_irq: irq_set_chained_handler_and_data(msi->irq, NULL, NULL); free_irq_domain: if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_free_domains(msi); return err; } static void tegra_pcie_enable_msi(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; u32 reg, msi_state[INT_PCI_MSI_NR / 32]; int i; afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); /* this register is in 4K increments */ afi_writel(pcie, 1, AFI_MSI_BAR_SZ); /* Restore the MSI allocation state */ bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR); for (i = 0; i < ARRAY_SIZE(msi_state); i++) afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i)); /* and unmask the MSI interrupt */ reg = afi_readl(pcie, AFI_INTR_MASK); reg |= AFI_INTR_MASK_MSI_MASK; afi_writel(pcie, reg, AFI_INTR_MASK); } static void tegra_pcie_msi_teardown(struct 
tegra_pcie *pcie) { struct tegra_msi *msi = &pcie->msi; unsigned int i, irq; dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys, DMA_ATTR_NO_KERNEL_MAPPING); for (i = 0; i < INT_PCI_MSI_NR; i++) { irq = irq_find_mapping(msi->domain, i); if (irq > 0) irq_domain_free_irqs(irq, 1); } irq_set_chained_handler_and_data(msi->irq, NULL, NULL); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_free_domains(msi); } static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) { u32 value; /* mask the MSI interrupt */ value = afi_readl(pcie, AFI_INTR_MASK); value &= ~AFI_INTR_MASK_MSI_MASK; afi_writel(pcie, value, AFI_INTR_MASK); return 0; } static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie) { u32 value; value = afi_readl(pcie, AFI_INTR_MASK); value &= ~AFI_INTR_MASK_INT_MASK; afi_writel(pcie, value, AFI_INTR_MASK); } static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, u32 *xbar) { struct device *dev = pcie->dev; struct device_node *np = dev->of_node; if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { switch (lanes) { case 0x010004: dev_info(dev, "4x1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401; return 0; case 0x010102: dev_info(dev, "2x1, 1X1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211; return 0; case 0x010101: dev_info(dev, "1x1, 1x1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111; return 0; default: dev_info(dev, "wrong configuration updated in DT, " "switching to default 2x1, 1x1, 1x1 " "configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211; return 0; } } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") || of_device_is_compatible(np, "nvidia,tegra210-pcie")) { switch (lanes) { case 0x0000104: dev_info(dev, "4x1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1; return 0; case 0x0000102: dev_info(dev, "2x1, 1x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1; return 0; } } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { switch (lanes) { case 0x00000204: dev_info(dev, "4x1, 2x1 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420; return 0; case 0x00020202: dev_info(dev, "2x3 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222; return 0; case 0x00010104: dev_info(dev, "4x1, 1x2 configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411; return 0; } } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { switch (lanes) { case 0x00000004: dev_info(dev, "single-mode configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE; return 0; case 0x00000202: dev_info(dev, "dual-mode configuration\n"); *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; return 0; } } return -EINVAL; } /* * Check whether a given set of supplies is available in a device tree node. * This is used to check whether the new or the legacy device tree bindings * should be used. */ static bool of_regulator_bulk_available(struct device_node *np, struct regulator_bulk_data *supplies, unsigned int num_supplies) { char property[32]; unsigned int i; for (i = 0; i < num_supplies; i++) { snprintf(property, 32, "%s-supply", supplies[i].supply); if (!of_property_present(np, property)) return false; } return true; } /* * Old versions of the device tree binding for this device used a set of power * supplies that didn't match the hardware inputs. This happened to work for a * number of cases but is not future proof. 
However to preserve backwards- * compatibility with old device trees, this function will try to use the old * set of supplies. */ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *np = dev->of_node; if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) pcie->num_supplies = 3; else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) pcie->num_supplies = 2; if (pcie->num_supplies == 0) { dev_err(dev, "device %pOF not supported in legacy mode\n", np); return -ENODEV; } pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) return -ENOMEM; pcie->supplies[0].supply = "pex-clk"; pcie->supplies[1].supply = "vdd"; if (pcie->num_supplies > 2) pcie->supplies[2].supply = "avdd"; return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); } /* * Obtains the list of regulators required for a particular generation of the * IP block. * * This would've been nice to do simply by providing static tables for use * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB) * and either seems to be optional depending on which ports are being used. */ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) { struct device *dev = pcie->dev; struct device_node *np = dev->of_node; unsigned int i = 0; if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { pcie->num_supplies = 4; pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) return -ENOMEM; pcie->supplies[i++].supply = "dvdd-pex"; pcie->supplies[i++].supply = "hvdd-pex-pll"; pcie->supplies[i++].supply = "hvdd-pex"; pcie->supplies[i++].supply = "vddio-pexctl-aud"; } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { pcie->num_supplies = 3; pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) return -ENOMEM; pcie->supplies[i++].supply = "hvddio-pex"; pcie->supplies[i++].supply = "dvddio-pex"; pcie->supplies[i++].supply = "vddio-pex-ctl"; } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { pcie->num_supplies = 4; pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) return -ENOMEM; pcie->supplies[i++].supply = "avddio-pex"; pcie->supplies[i++].supply = "dvddio-pex"; pcie->supplies[i++].supply = "hvdd-pex"; pcie->supplies[i++].supply = "vddio-pex-ctl"; } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { bool need_pexa = false, need_pexb = false; /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ if (lane_mask & 0x0f) need_pexa = true; /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */ if (lane_mask & 0x30) need_pexb = true; pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + (need_pexb ? 
2 : 0); pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) return -ENOMEM; pcie->supplies[i++].supply = "avdd-pex-pll"; pcie->supplies[i++].supply = "hvdd-pex"; pcie->supplies[i++].supply = "vddio-pex-ctl"; pcie->supplies[i++].supply = "avdd-plle"; if (need_pexa) { pcie->supplies[i++].supply = "avdd-pexa"; pcie->supplies[i++].supply = "vdd-pexa"; } if (need_pexb) { pcie->supplies[i++].supply = "avdd-pexb"; pcie->supplies[i++].supply = "vdd-pexb"; } } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { pcie->num_supplies = 5; pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), GFP_KERNEL); if (!pcie->supplies) return -ENOMEM; pcie->supplies[0].supply = "avdd-pex"; pcie->supplies[1].supply = "vdd-pex"; pcie->supplies[2].supply = "avdd-pex-pll"; pcie->supplies[3].supply = "avdd-plle"; pcie->supplies[4].supply = "vddio-pex-clk"; } if (of_regulator_bulk_available(dev->of_node, pcie->supplies, pcie->num_supplies)) return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); /* * If not all regulators are available for this new scheme, assume * that the device tree complies with an older version of the device * tree binding. */ dev_info(dev, "using legacy DT binding for power supplies\n"); devm_kfree(dev, pcie->supplies); pcie->num_supplies = 0; return tegra_pcie_get_legacy_regulators(pcie); } static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *np = dev->of_node, *port; const struct tegra_pcie_soc *soc = pcie->soc; u32 lanes = 0, mask = 0; unsigned int lane = 0; int err; /* parse root ports */ for_each_child_of_node(np, port) { struct tegra_pcie_port *rp; unsigned int index; u32 value; char *label; err = of_pci_get_devfn(port); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); goto err_node_put; } index = PCI_SLOT(err); if (index < 1 || index > soc->num_ports) { dev_err(dev, "invalid port number: %d\n", index); err = -EINVAL; goto err_node_put; } index--; err = of_property_read_u32(port, "nvidia,num-lanes", &value); if (err < 0) { dev_err(dev, "failed to parse # of lanes: %d\n", err); goto err_node_put; } if (value > 16) { dev_err(dev, "invalid # of lanes: %u\n", value); err = -EINVAL; goto err_node_put; } lanes |= value << (index << 3); if (!of_device_is_available(port)) { lane += value; continue; } mask |= ((1 << value) - 1) << lane; lane += value; rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); if (!rp) { err = -ENOMEM; goto err_node_put; } err = of_address_to_resource(port, 0, &rp->regs); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); goto err_node_put; } INIT_LIST_HEAD(&rp->list); rp->index = index; rp->lanes = value; rp->pcie = pcie; rp->np = port; rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); if (IS_ERR(rp->base)) { err = PTR_ERR(rp->base); goto err_node_put; } label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index); if (!label) { err = -ENOMEM; goto err_node_put; } /* * Returns -ENOENT if reset-gpios property is not populated * and in this case fall back to using AFI per port register * to toggle PERST# SFIO line. 
*/ rp->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(port), "reset", GPIOD_OUT_LOW, label); if (IS_ERR(rp->reset_gpio)) { if (PTR_ERR(rp->reset_gpio) == -ENOENT) { rp->reset_gpio = NULL; } else { dev_err(dev, "failed to get reset GPIO: %ld\n", PTR_ERR(rp->reset_gpio)); err = PTR_ERR(rp->reset_gpio); goto err_node_put; } } list_add_tail(&rp->list, &pcie->ports); } err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); if (err < 0) { dev_err(dev, "invalid lane configuration\n"); return err; } err = tegra_pcie_get_regulators(pcie, mask); if (err < 0) return err; return 0; err_node_put: of_node_put(port); return err; } /* * FIXME: If there are no PCIe cards attached, then calling this function * can result in the increase of the bootup time as there are big timeout * loops. */ #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) { struct device *dev = port->pcie->dev; unsigned int retries = 3; unsigned long value; /* override presence detection */ value = readl(port->base + RP_PRIV_MISC); value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT; value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT; writel(value, port->base + RP_PRIV_MISC); do { unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT; do { value = readl(port->base + RP_VEND_XP); if (value & RP_VEND_XP_DL_UP) break; usleep_range(1000, 2000); } while (--timeout); if (!timeout) { dev_dbg(dev, "link %u down, retrying\n", port->index); goto retry; } timeout = TEGRA_PCIE_LINKUP_TIMEOUT; do { value = readl(port->base + RP_LINK_CONTROL_STATUS); if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) return true; usleep_range(1000, 2000); } while (--timeout); retry: tegra_pcie_port_reset(port); } while (--retries); return false; } static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct tegra_pcie_port *port; ktime_t deadline; u32 value; list_for_each_entry(port, &pcie->ports, list) { /* * "Supported Link Speeds Vector" in "Link Capabilities 2" * is not supported by Tegra. tegra_pcie_change_link_speed() * is called only for Tegra chips which support Gen2. * So there no harm if supported link speed is not verified. */ value = readl(port->base + RP_LINK_CONTROL_STATUS_2); value &= ~PCI_EXP_LNKSTA_CLS; value |= PCI_EXP_LNKSTA_CLS_5_0GB; writel(value, port->base + RP_LINK_CONTROL_STATUS_2); /* * Poll until link comes back from recovery to avoid race * condition. 
*/ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT); while (ktime_before(ktime_get(), deadline)) { value = readl(port->base + RP_LINK_CONTROL_STATUS); if ((value & PCI_EXP_LNKSTA_LT) == 0) break; usleep_range(2000, 3000); } if (value & PCI_EXP_LNKSTA_LT) dev_warn(dev, "PCIe port %u link is in recovery\n", port->index); /* Retrain the link */ value = readl(port->base + RP_LINK_CONTROL_STATUS); value |= PCI_EXP_LNKCTL_RL; writel(value, port->base + RP_LINK_CONTROL_STATUS); deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT); while (ktime_before(ktime_get(), deadline)) { value = readl(port->base + RP_LINK_CONTROL_STATUS); if ((value & PCI_EXP_LNKSTA_LT) == 0) break; usleep_range(2000, 3000); } if (value & PCI_EXP_LNKSTA_LT) dev_err(dev, "failed to retrain link of port %u\n", port->index); } } static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct tegra_pcie_port *port, *tmp; list_for_each_entry_safe(port, tmp, &pcie->ports, list) { dev_info(dev, "probing port %u, using %u lanes\n", port->index, port->lanes); tegra_pcie_port_enable(port); } /* Start LTSSM from Tegra side */ reset_control_deassert(pcie->pcie_xrst); list_for_each_entry_safe(port, tmp, &pcie->ports, list) { if (tegra_pcie_port_check_link(port)) continue; dev_info(dev, "link %u down, ignoring\n", port->index); tegra_pcie_port_disable(port); tegra_pcie_port_free(port); } if (pcie->soc->has_gen2) tegra_pcie_change_link_speed(pcie); } static void tegra_pcie_disable_ports(struct tegra_pcie *pcie) { struct tegra_pcie_port *port, *tmp; reset_control_assert(pcie->pcie_xrst); list_for_each_entry_safe(port, tmp, &pcie->ports, list) tegra_pcie_port_disable(port); } static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = { { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, }; static const struct tegra_pcie_soc tegra20_pcie = { .num_ports = 2, .ports = tegra20_pcie_ports, .msi_base_shift = 0, .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, .pads_refclk_cfg0 = 0xfa5cfa5c, .has_pex_clkreq_en = false, .has_pex_bias_ctrl = false, .has_intr_prsnt_sense = false, .has_cml_clk = false, .has_gen2 = false, .force_pca_enable = false, .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, .update_fc_timer = false, .has_cache_bars = true, .ectl.enable = false, }; static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = { { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, { .pme.turnoff_bit = 16, .pme.ack_bit = 18 }, }; static const struct tegra_pcie_soc tegra30_pcie = { .num_ports = 3, .ports = tegra30_pcie_ports, .msi_base_shift = 8, .afi_pex2_ctrl = 0x128, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0xfa5cfa5c, .pads_refclk_cfg1 = 0xfa5cfa5c, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, .has_cml_clk = true, .has_gen2 = false, .force_pca_enable = false, .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, }; static const struct tegra_pcie_soc tegra124_pcie = { .num_ports = 2, .ports = tegra20_pcie_ports, .msi_base_shift = 8, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, .has_cml_clk = true, 
.has_gen2 = true, .force_pca_enable = false, .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, }; static const struct tegra_pcie_soc tegra210_pcie = { .num_ports = 2, .ports = tegra20_pcie_ports, .msi_base_shift = 8, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x90b890b8, /* FC threshold is bit[25:18] */ .update_fc_threshold = 0x01800000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, .has_cml_clk = true, .has_gen2 = true, .force_pca_enable = true, .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = true, .update_fc_timer = true, .has_cache_bars = false, .ectl = { .regs = { .rp_ectl_2_r1 = 0x0000000f, .rp_ectl_4_r1 = 0x00000067, .rp_ectl_5_r1 = 0x55010000, .rp_ectl_6_r1 = 0x00000001, .rp_ectl_2_r2 = 0x0000008f, .rp_ectl_4_r2 = 0x000000c7, .rp_ectl_5_r2 = 0x55010000, .rp_ectl_6_r2 = 0x00000001, }, .enable = true, }, }; static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = { { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, { .pme.turnoff_bit = 12, .pme.ack_bit = 14 }, }; static const struct tegra_pcie_soc tegra186_pcie = { .num_ports = 3, .ports = tegra186_pcie_ports, .msi_base_shift = 8, .afi_pex2_ctrl = 0x19c, .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x80b880b8, .pads_refclk_cfg1 = 0x000480b8, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, .has_cml_clk = false, .has_gen2 = true, .force_pca_enable = false, .program_uphy = false, .update_clamp_threshold = false, .program_deskew_time = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, }; static const struct of_device_id tegra_pcie_of_match[] = { { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie }, { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie }, { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie }, { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie }, { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie }, { }, }; MODULE_DEVICE_TABLE(of, tegra_pcie_of_match); static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) { struct tegra_pcie *pcie = s->private; if (list_empty(&pcie->ports)) return NULL; seq_puts(s, "Index Status\n"); return seq_list_start(&pcie->ports, *pos); } static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct tegra_pcie *pcie = s->private; return seq_list_next(v, &pcie->ports, pos); } static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v) { } static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v) { bool up = false, active = false; struct tegra_pcie_port *port; unsigned int value; port = list_entry(v, struct tegra_pcie_port, list); value = readl(port->base + RP_VEND_XP); if (value & RP_VEND_XP_DL_UP) up = true; value = readl(port->base + RP_LINK_CONTROL_STATUS); if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) active = true; seq_printf(s, "%2u ", port->index); if (up) seq_puts(s, "up"); if (active) { if (up) seq_puts(s, ", "); seq_puts(s, "active"); } seq_puts(s, "\n"); return 0; } static const struct seq_operations tegra_pcie_ports_sops = { .start = tegra_pcie_ports_seq_start, .next = tegra_pcie_ports_seq_next, .stop = tegra_pcie_ports_seq_stop, .show = tegra_pcie_ports_seq_show, }; 
DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports); static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie) { debugfs_remove_recursive(pcie->debugfs); pcie->debugfs = NULL; } static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie) { pcie->debugfs = debugfs_create_dir("pcie", NULL); debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie, &tegra_pcie_ports_fops); } static int tegra_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pci_host_bridge *host; struct tegra_pcie *pcie; int err; host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!host) return -ENOMEM; pcie = pci_host_bridge_priv(host); host->sysdata = pcie; platform_set_drvdata(pdev, pcie); pcie->soc = of_device_get_match_data(dev); INIT_LIST_HEAD(&pcie->ports); pcie->dev = dev; err = tegra_pcie_parse_dt(pcie); if (err < 0) return err; err = tegra_pcie_get_resources(pcie); if (err < 0) { dev_err(dev, "failed to request resources: %d\n", err); return err; } err = tegra_pcie_msi_setup(pcie); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); goto put_resources; } pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(dev, "fail to enable pcie controller: %d\n", err); goto pm_runtime_put; } host->ops = &tegra_pcie_ops; host->map_irq = tegra_pcie_map_irq; err = pci_host_probe(host); if (err < 0) { dev_err(dev, "failed to register host: %d\n", err); goto pm_runtime_put; } if (IS_ENABLED(CONFIG_DEBUG_FS)) tegra_pcie_debugfs_init(pcie); return 0; pm_runtime_put: pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); tegra_pcie_msi_teardown(pcie); put_resources: tegra_pcie_put_resources(pcie); return err; } static void tegra_pcie_remove(struct platform_device *pdev) { struct tegra_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct tegra_pcie_port *port, *tmp; if (IS_ENABLED(CONFIG_DEBUG_FS)) tegra_pcie_debugfs_exit(pcie); pci_stop_root_bus(host->bus); pci_remove_root_bus(host->bus); pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_msi_teardown(pcie); tegra_pcie_put_resources(pcie); list_for_each_entry_safe(port, tmp, &pcie->ports, list) tegra_pcie_port_free(port); } static int tegra_pcie_pm_suspend(struct device *dev) { struct tegra_pcie *pcie = dev_get_drvdata(dev); struct tegra_pcie_port *port; int err; list_for_each_entry(port, &pcie->ports, list) tegra_pcie_pme_turnoff(port); tegra_pcie_disable_ports(pcie); /* * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to * avoid unwanted interrupts raised by AFI after pex_rst is asserted. 
*/ tegra_pcie_disable_interrupts(pcie); if (pcie->soc->program_uphy) { err = tegra_pcie_phy_power_off(pcie); if (err < 0) dev_err(dev, "failed to power off PHY(s): %d\n", err); } reset_control_assert(pcie->pex_rst); clk_disable_unprepare(pcie->pex_clk); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_disable_msi(pcie); pinctrl_pm_select_idle_state(dev); tegra_pcie_power_off(pcie); return 0; } static int tegra_pcie_pm_resume(struct device *dev) { struct tegra_pcie *pcie = dev_get_drvdata(dev); int err; err = tegra_pcie_power_on(pcie); if (err) { dev_err(dev, "tegra pcie power on fail: %d\n", err); return err; } err = pinctrl_pm_select_default_state(dev); if (err < 0) { dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err); goto poweroff; } tegra_pcie_enable_controller(pcie); tegra_pcie_setup_translations(pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_enable_msi(pcie); err = clk_prepare_enable(pcie->pex_clk); if (err) { dev_err(dev, "failed to enable PEX clock: %d\n", err); goto pex_dpd_enable; } reset_control_deassert(pcie->pex_rst); if (pcie->soc->program_uphy) { err = tegra_pcie_phy_power_on(pcie); if (err < 0) { dev_err(dev, "failed to power on PHY(s): %d\n", err); goto disable_pex_clk; } } tegra_pcie_apply_pad_settings(pcie); tegra_pcie_enable_ports(pcie); return 0; disable_pex_clk: reset_control_assert(pcie->pex_rst); clk_disable_unprepare(pcie->pex_clk); pex_dpd_enable: pinctrl_pm_select_idle_state(dev); poweroff: tegra_pcie_power_off(pcie); return err; } static const struct dev_pm_ops tegra_pcie_pm_ops = { RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume) }; static struct platform_driver tegra_pcie_driver = { .driver = { .name = "tegra-pcie", .of_match_table = tegra_pcie_of_match, .suppress_bind_attrs = true, .pm = &tegra_pcie_pm_ops, }, .probe = tegra_pcie_probe, .remove_new = tegra_pcie_remove, }; module_platform_driver(tegra_pcie_driver);
linux-master
drivers/pci/controller/pci-tegra.c
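In the tegra_pcie_parse_dt() routine above, each root port's nvidia,num-lanes value is packed into one byte of a 32-bit word (lanes |= value << (index << 3)) and a contiguous bitmask of the PHY lanes in use is accumulated; the packed word is later matched against the SoC's XBAR configurations. A minimal standalone sketch of that packing, using made-up lane counts for two ports (this mirrors only the arithmetic, not the device-tree parsing):

#include <stdio.h>

int main(void)
{
	/* hypothetical nvidia,num-lanes values for root ports 0 and 1 */
	unsigned int num_lanes[] = { 4, 1 };
	unsigned int lanes = 0, mask = 0, lane = 0;
	unsigned int index;

	for (index = 0; index < 2; index++) {
		unsigned int value = num_lanes[index];

		lanes |= value << (index << 3);      /* byte "index" holds the lane count */
		mask |= ((1u << value) - 1) << lane; /* contiguous PHY lane bits */
		lane += value;
	}

	printf("lanes = 0x%08x, mask = 0x%02x\n", lanes, mask);
	/* prints: lanes = 0x00000104, mask = 0x1f */
	return 0;
}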
// SPDX-License-Identifier: GPL-2.0 /* * Simple, generic PCI host controller driver targeting firmware-initialised * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). * * Copyright (C) 2014 ARM Limited * * Author: Will Deacon <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { .bus_shift = 16, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn) { struct pci_config_window *cfg = bus->sysdata; /* * The Synopsys DesignWare PCIe controller in ECAM mode will not filter * type 0 config TLPs sent to devices 1 and up on its downstream port, * resulting in devices appearing multiple times on bus 0 unless we * filter out those accesses here. */ if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0) return false; return true; } static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { if (!pci_dw_valid_device(bus, devfn)) return NULL; return pci_ecam_map_bus(bus, devfn, where); } static const struct pci_ecam_ops pci_dw_ecam_bus_ops = { .pci_ops = { .map_bus = pci_dw_ecam_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; static const struct of_device_id gen_pci_of_match[] = { { .compatible = "pci-host-cam-generic", .data = &gen_pci_cfg_cam_bus_ops }, { .compatible = "pci-host-ecam-generic", .data = &pci_generic_ecam_ops }, { .compatible = "marvell,armada8k-pcie-ecam", .data = &pci_dw_ecam_bus_ops }, { .compatible = "socionext,synquacer-pcie-ecam", .data = &pci_dw_ecam_bus_ops }, { .compatible = "snps,dw-pcie-ecam", .data = &pci_dw_ecam_bus_ops }, { }, }; MODULE_DEVICE_TABLE(of, gen_pci_of_match); static struct platform_driver gen_pci_driver = { .driver = { .name = "pci-host-generic", .of_match_table = gen_pci_of_match, }, .probe = pci_host_common_probe, .remove = pci_host_common_remove, }; module_platform_driver(gen_pci_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pci-host-generic.c
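The generic host driver above delegates config accesses to pci_ecam_map_bus(). As a rough sketch of the address arithmetic behind ECAM (assuming the standard layout of 4 KiB of config space per function; this is the address math only, not the kernel implementation), the offset into the ECAM window for a given bus/device/function/register is:

#include <stdio.h>

/* bus in bits 27:20, device in 19:15, function in 14:12, register in 11:0 */
static unsigned long ecam_offset(unsigned int bus, unsigned int dev,
				 unsigned int fn, unsigned int where)
{
	return ((unsigned long)bus << 20) | (dev << 15) | (fn << 12) | where;
}

int main(void)
{
	/* device 1, function 0, register 0 on bus 0 */
	printf("offset = 0x%lx\n", ecam_offset(0, 1, 0, 0));    /* 0x8000 */
	/* bus 1, device 0, function 0, register 0x10 (BAR0) */
	printf("offset = 0x%lx\n", ecam_offset(1, 0, 0, 0x10)); /* 0x100010 */
	return 0;
}

The "CAM" variant in the same driver differs only in using a 16-bit bus shift instead of the 20-bit ECAM one.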
// SPDX-License-Identifier: GPL-2.0 /* * Copyright Altera Corporation (C) 2013-2015. All rights reserved * * Author: Ley Foon Tan <[email protected]> * Description: Altera PCIe host controller driver */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/init.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "../pci.h" #define RP_TX_REG0 0x2000 #define RP_TX_REG1 0x2004 #define RP_TX_CNTRL 0x2008 #define RP_TX_EOP 0x2 #define RP_TX_SOP 0x1 #define RP_RXCPL_STATUS 0x2010 #define RP_RXCPL_EOP 0x2 #define RP_RXCPL_SOP 0x1 #define RP_RXCPL_REG0 0x2014 #define RP_RXCPL_REG1 0x2018 #define P2A_INT_STATUS 0x3060 #define P2A_INT_STS_ALL 0xf #define P2A_INT_ENABLE 0x3070 #define P2A_INT_ENA_ALL 0xf #define RP_LTSSM 0x3c64 #define RP_LTSSM_MASK 0x1f #define LTSSM_L0 0xf #define S10_RP_TX_CNTRL 0x2004 #define S10_RP_RXCPL_REG 0x2008 #define S10_RP_RXCPL_STATUS 0x200C #define S10_RP_CFG_ADDR(pcie, reg) \ (((pcie)->hip_base) + (reg) + (1 << 20)) #define S10_RP_SECONDARY(pcie) \ readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) /* TLP configuration type 0 and 1 */ #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ #define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ #define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ #define TLP_PAYLOAD_SIZE 0x01 #define TLP_READ_TAG 0x1d #define TLP_WRITE_TAG 0x10 #define RP_DEVFN 0 #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) #define TLP_CFG_DW0(pcie, cfg) \ (((cfg) << 24) | \ TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ (((bus) << 24) | ((devfn) << 16) | (offset)) #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) #define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff) #define TLP_HDR_SIZE 3 #define TLP_LOOP 500 #define LINK_UP_TIMEOUT HZ #define LINK_RETRAIN_TIMEOUT HZ #define DWORD_MASK 3 #define S10_TLP_FMTTYPE_CFGRD0 0x05 #define S10_TLP_FMTTYPE_CFGRD1 0x04 #define S10_TLP_FMTTYPE_CFGWR0 0x45 #define S10_TLP_FMTTYPE_CFGWR1 0x44 enum altera_pcie_version { ALTERA_PCIE_V1 = 0, ALTERA_PCIE_V2, }; struct altera_pcie { struct platform_device *pdev; void __iomem *cra_base; void __iomem *hip_base; int irq; u8 root_bus_nr; struct irq_domain *irq_domain; struct resource bus_range; const struct altera_pcie_data *pcie_data; }; struct altera_pcie_ops { int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value); void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers, u32 data, bool align); bool (*get_link_status)(struct altera_pcie *pcie); int (*rp_read_cfg)(struct altera_pcie *pcie, int where, int size, u32 *value); int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno, int where, int size, u32 value); }; struct altera_pcie_data { const struct altera_pcie_ops *ops; enum altera_pcie_version version; u32 cap_offset; /* PCIe capability structure register offset */ u32 cfgrd0; u32 cfgrd1; u32 cfgwr0; u32 cfgwr1; }; struct tlp_rp_regpair_t { u32 ctrl; u32 reg0; u32 reg1; }; static inline void cra_writel(struct altera_pcie *pcie, const u32 value, const u32 reg) { writel_relaxed(value, pcie->cra_base + reg); } static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) { return readl_relaxed(pcie->cra_base + reg); } static bool 
altera_pcie_link_up(struct altera_pcie *pcie) { return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); } static bool s10_altera_pcie_link_up(struct altera_pcie *pcie) { void __iomem *addr = S10_RP_CFG_ADDR(pcie, pcie->pcie_data->cap_offset + PCI_EXP_LNKSTA); return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA); } /* * Altera PCIe port uses BAR0 of RC's configuration space as the translation * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space * using these registers, so it can be reached by DMA from EP devices. * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge * should be hidden during enumeration to avoid the sizing and resource * allocation by PCIe core. */ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, int offset) { if (pci_is_root_bus(bus) && (devfn == 0) && (offset == PCI_BASE_ADDRESS_0)) return true; return false; } static void tlp_write_tx(struct altera_pcie *pcie, struct tlp_rp_regpair_t *tlp_rp_regdata) { cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0); cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1); cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); } static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl) { cra_writel(pcie, reg0, RP_TX_REG0); cra_writel(pcie, ctrl, S10_RP_TX_CNTRL); } static bool altera_pcie_valid_device(struct altera_pcie *pcie, struct pci_bus *bus, int dev) { /* If there is no link, then there is no device */ if (bus->number != pcie->root_bus_nr) { if (!pcie->pcie_data->ops->get_link_status(pcie)) return false; } /* access only one slot on each root port */ if (bus->number == pcie->root_bus_nr && dev > 0) return false; return true; } static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) { int i; bool sop = false; u32 ctrl; u32 reg0, reg1; u32 comp_status = 1; /* * Minimum 2 loops to read TLP headers and 1 loop to read data * payload. 
*/ for (i = 0; i < TLP_LOOP; i++) { ctrl = cra_readl(pcie, RP_RXCPL_STATUS); if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { reg0 = cra_readl(pcie, RP_RXCPL_REG0); reg1 = cra_readl(pcie, RP_RXCPL_REG1); if (ctrl & RP_RXCPL_SOP) { sop = true; comp_status = TLP_COMP_STATUS(reg1); } if (ctrl & RP_RXCPL_EOP) { if (comp_status) return PCIBIOS_DEVICE_NOT_FOUND; if (value) *value = reg0; return PCIBIOS_SUCCESSFUL; } } udelay(5); } return PCIBIOS_DEVICE_NOT_FOUND; } static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value) { u32 ctrl; u32 comp_status; u32 dw[4]; u32 count; struct device *dev = &pcie->pdev->dev; for (count = 0; count < TLP_LOOP; count++) { ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS); if (ctrl & RP_RXCPL_SOP) { /* Read first DW */ dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG); break; } udelay(5); } /* SOP detection failed, return error */ if (count == TLP_LOOP) return PCIBIOS_DEVICE_NOT_FOUND; count = 1; /* Poll for EOP */ while (count < ARRAY_SIZE(dw)) { ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS); dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG); if (ctrl & RP_RXCPL_EOP) { comp_status = TLP_COMP_STATUS(dw[1]); if (comp_status) return PCIBIOS_DEVICE_NOT_FOUND; if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) && count == 4) *value = dw[3]; return PCIBIOS_SUCCESSFUL; } } dev_warn(dev, "Malformed TLP packet\n"); return PCIBIOS_DEVICE_NOT_FOUND; } static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, u32 data, bool align) { struct tlp_rp_regpair_t tlp_rp_regdata; tlp_rp_regdata.reg0 = headers[0]; tlp_rp_regdata.reg1 = headers[1]; tlp_rp_regdata.ctrl = RP_TX_SOP; tlp_write_tx(pcie, &tlp_rp_regdata); if (align) { tlp_rp_regdata.reg0 = headers[2]; tlp_rp_regdata.reg1 = 0; tlp_rp_regdata.ctrl = 0; tlp_write_tx(pcie, &tlp_rp_regdata); tlp_rp_regdata.reg0 = data; tlp_rp_regdata.reg1 = 0; } else { tlp_rp_regdata.reg0 = headers[2]; tlp_rp_regdata.reg1 = data; } tlp_rp_regdata.ctrl = RP_TX_EOP; tlp_write_tx(pcie, &tlp_rp_regdata); } static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers, u32 data, bool dummy) { s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP); s10_tlp_write_tx(pcie, headers[1], 0); s10_tlp_write_tx(pcie, headers[2], 0); s10_tlp_write_tx(pcie, data, RP_TX_EOP); } static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn, int where, u8 byte_en, bool read, u32 *headers) { u8 cfg; u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0; u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1; u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG; if (pcie->pcie_data->version == ALTERA_PCIE_V1) cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1; else cfg = (bus > S10_RP_SECONDARY(pcie)) ? 
cfg0 : cfg1; headers[0] = TLP_CFG_DW0(pcie, cfg); headers[1] = TLP_CFG_DW1(pcie, tag, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); } static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, int where, u8 byte_en, u32 *value) { u32 headers[TLP_HDR_SIZE]; get_tlp_header(pcie, bus, devfn, where, byte_en, true, headers); pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false); return pcie->pcie_data->ops->tlp_read_pkt(pcie, value); } static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, int where, u8 byte_en, u32 value) { u32 headers[TLP_HDR_SIZE]; int ret; get_tlp_header(pcie, bus, devfn, where, byte_en, false, headers); /* check alignment to Qword */ if ((where & 0x7) == 0) pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, value, true); else pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, value, false); ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL); if (ret != PCIBIOS_SUCCESSFUL) return ret; /* * Monitor changes to PCI_PRIMARY_BUS register on root port * and update local copy of root bus number accordingly. */ if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS)) pcie->root_bus_nr = (u8)(value); return PCIBIOS_SUCCESSFUL; } static int s10_rp_read_cfg(struct altera_pcie *pcie, int where, int size, u32 *value) { void __iomem *addr = S10_RP_CFG_ADDR(pcie, where); switch (size) { case 1: *value = readb(addr); break; case 2: *value = readw(addr); break; default: *value = readl(addr); break; } return PCIBIOS_SUCCESSFUL; } static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno, int where, int size, u32 value) { void __iomem *addr = S10_RP_CFG_ADDR(pcie, where); switch (size) { case 1: writeb(value, addr); break; case 2: writew(value, addr); break; default: writel(value, addr); break; } /* * Monitor changes to PCI_PRIMARY_BUS register on root port * and update local copy of root bus number accordingly. 
*/ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS) pcie->root_bus_nr = value & 0xff; return PCIBIOS_SUCCESSFUL; } static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int where, int size, u32 *value) { int ret; u32 data; u8 byte_en; if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg) return pcie->pcie_data->ops->rp_read_cfg(pcie, where, size, value); switch (size) { case 1: byte_en = 1 << (where & 3); break; case 2: byte_en = 3 << (where & 3); break; default: byte_en = 0xf; break; } ret = tlp_cfg_dword_read(pcie, busno, devfn, (where & ~DWORD_MASK), byte_en, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; switch (size) { case 1: *value = (data >> (8 * (where & 0x3))) & 0xff; break; case 2: *value = (data >> (8 * (where & 0x2))) & 0xffff; break; default: *value = data; break; } return PCIBIOS_SUCCESSFUL; } static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int where, int size, u32 value) { u32 data32; u32 shift = 8 * (where & 3); u8 byte_en; if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg) return pcie->pcie_data->ops->rp_write_cfg(pcie, busno, where, size, value); switch (size) { case 1: data32 = (value & 0xff) << shift; byte_en = 1 << (where & 3); break; case 2: data32 = (value & 0xffff) << shift; byte_en = 3 << (where & 3); break; default: data32 = value; byte_en = 0xf; break; } return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK), byte_en, data32); } static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { struct altera_pcie *pcie = bus->sysdata; if (altera_pcie_hide_rc_bar(bus, devfn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, value); } static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct altera_pcie *pcie = bus->sysdata; if (altera_pcie_hide_rc_bar(bus, devfn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, value); } static struct pci_ops altera_pcie_ops = { .read = altera_pcie_cfg_read, .write = altera_pcie_cfg_write, }; static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int offset, u16 *value) { u32 data; int ret; ret = _altera_pcie_cfg_read(pcie, busno, devfn, pcie->pcie_data->cap_offset + offset, sizeof(*value), &data); *value = data; return ret; } static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int offset, u16 value) { return _altera_pcie_cfg_write(pcie, busno, devfn, pcie->pcie_data->cap_offset + offset, sizeof(value), value); } static void altera_wait_link_retrain(struct altera_pcie *pcie) { struct device *dev = &pcie->pdev->dev; u16 reg16; unsigned long start_jiffies; /* Wait for link training end. 
*/ start_jiffies = jiffies; for (;;) { altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, &reg16); if (!(reg16 & PCI_EXP_LNKSTA_LT)) break; if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { dev_err(dev, "link retrain timeout\n"); break; } udelay(100); } /* Wait for link is up */ start_jiffies = jiffies; for (;;) { if (pcie->pcie_data->ops->get_link_status(pcie)) break; if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { dev_err(dev, "link up timeout\n"); break; } udelay(100); } } static void altera_pcie_retrain(struct altera_pcie *pcie) { u16 linkcap, linkstat, linkctl; if (!pcie->pcie_data->ops->get_link_status(pcie)) return; /* * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but * current speed is 2.5 GB/s. */ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP, &linkcap); if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) return; altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, &linkstat); if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCTL, &linkctl); linkctl |= PCI_EXP_LNKCTL_RL; altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCTL, linkctl); altera_wait_link_retrain(pcie); } } static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = altera_pcie_intx_map, .xlate = pci_irqd_intx_xlate, }; static void altera_pcie_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct altera_pcie *pcie; struct device *dev; unsigned long status; u32 bit; int ret; chained_irq_enter(chip, desc); pcie = irq_desc_get_handler_data(desc); dev = &pcie->pdev->dev; while ((status = cra_readl(pcie, P2A_INT_STATUS) & P2A_INT_STS_ALL) != 0) { for_each_set_bit(bit, &status, PCI_NUM_INTX) { /* clear interrupts */ cra_writel(pcie, 1 << bit, P2A_INT_STATUS); ret = generic_handle_domain_irq(pcie->irq_domain, bit); if (ret) dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit); } } chained_irq_exit(chip, desc); } static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *node = dev->of_node; /* Setup INTx */ pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, &intx_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; } return 0; } static void altera_pcie_irq_teardown(struct altera_pcie *pcie) { irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); irq_domain_remove(pcie->irq_domain); irq_dispose_mapping(pcie->irq); } static int altera_pcie_parse_dt(struct altera_pcie *pcie) { struct platform_device *pdev = pcie->pdev; pcie->cra_base = devm_platform_ioremap_resource_byname(pdev, "Cra"); if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); if (pcie->pcie_data->version == ALTERA_PCIE_V2) { pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip"); if (IS_ERR(pcie->hip_base)) return PTR_ERR(pcie->hip_base); } /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); if (pcie->irq < 0) return pcie->irq; irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); return 0; } static void altera_pcie_host_init(struct altera_pcie *pcie) { altera_pcie_retrain(pcie); } static const struct altera_pcie_ops 
altera_pcie_ops_1_0 = { .tlp_read_pkt = tlp_read_packet, .tlp_write_pkt = tlp_write_packet, .get_link_status = altera_pcie_link_up, }; static const struct altera_pcie_ops altera_pcie_ops_2_0 = { .tlp_read_pkt = s10_tlp_read_packet, .tlp_write_pkt = s10_tlp_write_packet, .get_link_status = s10_altera_pcie_link_up, .rp_read_cfg = s10_rp_read_cfg, .rp_write_cfg = s10_rp_write_cfg, }; static const struct altera_pcie_data altera_pcie_1_0_data = { .ops = &altera_pcie_ops_1_0, .cap_offset = 0x80, .version = ALTERA_PCIE_V1, .cfgrd0 = TLP_FMTTYPE_CFGRD0, .cfgrd1 = TLP_FMTTYPE_CFGRD1, .cfgwr0 = TLP_FMTTYPE_CFGWR0, .cfgwr1 = TLP_FMTTYPE_CFGWR1, }; static const struct altera_pcie_data altera_pcie_2_0_data = { .ops = &altera_pcie_ops_2_0, .version = ALTERA_PCIE_V2, .cap_offset = 0x70, .cfgrd0 = S10_TLP_FMTTYPE_CFGRD0, .cfgrd1 = S10_TLP_FMTTYPE_CFGRD1, .cfgwr0 = S10_TLP_FMTTYPE_CFGWR0, .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1, }; static const struct of_device_id altera_pcie_of_match[] = { {.compatible = "altr,pcie-root-port-1.0", .data = &altera_pcie_1_0_data }, {.compatible = "altr,pcie-root-port-2.0", .data = &altera_pcie_2_0_data }, {}, }; static int altera_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct altera_pcie *pcie; struct pci_host_bridge *bridge; int ret; const struct altera_pcie_data *data; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; platform_set_drvdata(pdev, pcie); data = of_device_get_match_data(&pdev->dev); if (!data) return -ENODEV; pcie->pcie_data = data; ret = altera_pcie_parse_dt(pcie); if (ret) { dev_err(dev, "Parsing DT failed\n"); return ret; } ret = altera_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed creating IRQ Domain\n"); return ret; } /* clear all interrupts */ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); /* enable all interrupts */ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); altera_pcie_host_init(pcie); bridge->sysdata = pcie; bridge->busnr = pcie->root_bus_nr; bridge->ops = &altera_pcie_ops; return pci_host_probe(bridge); } static void altera_pcie_remove(struct platform_device *pdev) { struct altera_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); altera_pcie_irq_teardown(pcie); } static struct platform_driver altera_pcie_driver = { .probe = altera_pcie_probe, .remove_new = altera_pcie_remove, .driver = { .name = "altera-pcie", .of_match_table = altera_pcie_of_match, }, }; MODULE_DEVICE_TABLE(of, altera_pcie_of_match); module_platform_driver(altera_pcie_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-altera.c
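In the Altera driver above, _altera_pcie_cfg_read() and _altera_pcie_cfg_write() widen 1- and 2-byte config accesses into aligned dword TLPs, selecting the target bytes with a byte-enable field and shifting the completion payload back out. A small userspace sketch of that arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	int where = 0x06;                /* a 2-byte read at config offset 6 */
	int size = 2;
	unsigned int dword = 0x12345678; /* pretend TLP completion payload */
	unsigned int byte_en, value;

	switch (size) {
	case 1:
		byte_en = 1 << (where & 3);
		break;
	case 2:
		byte_en = 3 << (where & 3);
		break;
	default:
		byte_en = 0xf;
		break;
	}

	/* extract the requested bytes from the aligned dword */
	if (size == 1)
		value = (dword >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		value = (dword >> (8 * (where & 2))) & 0xffff;
	else
		value = dword;

	printf("byte_en = 0x%x, value = 0x%x\n", byte_en, value);
	/* prints: byte_en = 0xc, value = 0x1234 */
	return 0;
}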
// SPDX-License-Identifier: GPL-2.0+ /* * BRIEF MODULE DESCRIPTION * PCI init for Ralink RT2880 solution * * Copyright 2007 Ralink Inc. ([email protected]) * * May 2007 Bruce Chang * Initial Release * * May 2009 Bruce Chang * support RT2880/RT3883 PCIe * * May 2011 Bruce Chang * support RT6855/MT7620 PCIe */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/sys_soc.h> #include "../pci.h" /* MediaTek-specific configuration registers */ #define PCIE_FTS_NUM 0x70c #define PCIE_FTS_NUM_MASK GENMASK(15, 8) #define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8) /* Host-PCI bridge registers */ #define RALINK_PCI_PCICFG_ADDR 0x0000 #define RALINK_PCI_PCIMSK_ADDR 0x000c #define RALINK_PCI_CONFIG_ADDR 0x0020 #define RALINK_PCI_CONFIG_DATA 0x0024 #define RALINK_PCI_MEMBASE 0x0028 #define RALINK_PCI_IOBASE 0x002c /* PCIe RC control registers */ #define RALINK_PCI_ID 0x0030 #define RALINK_PCI_CLASS 0x0034 #define RALINK_PCI_SUBID 0x0038 #define RALINK_PCI_STATUS 0x0050 /* Some definition values */ #define PCIE_REVISION_ID BIT(0) #define PCIE_CLASS_CODE (0x60400 << 8) #define PCIE_BAR_MAP_MAX GENMASK(30, 16) #define PCIE_BAR_ENABLE BIT(0) #define PCIE_PORT_INT_EN(x) BIT(20 + (x)) #define PCIE_PORT_LINKUP BIT(0) #define PCIE_PORT_CNT 3 #define INIT_PORTS_DELAY_MS 100 #define PERST_DELAY_MS 100 /** * struct mt7621_pcie_port - PCIe port information * @base: I/O mapped register base * @list: port list * @pcie: pointer to PCIe host info * @clk: pointer to the port clock gate * @phy: pointer to PHY control block * @pcie_rst: pointer to port reset control * @gpio_rst: gpio reset * @slot: port slot * @enabled: indicates if port is enabled */ struct mt7621_pcie_port { void __iomem *base; struct list_head list; struct mt7621_pcie *pcie; struct clk *clk; struct phy *phy; struct reset_control *pcie_rst; struct gpio_desc *gpio_rst; u32 slot; bool enabled; }; /** * struct mt7621_pcie - PCIe host information * @base: IO Mapped Register Base * @dev: Pointer to PCIe device * @ports: pointer to PCIe port information * @resets_inverted: depends on chip revision * reset lines are inverted. 
*/ struct mt7621_pcie { struct device *dev; void __iomem *base; struct list_head ports; bool resets_inverted; }; static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg) { return readl_relaxed(pcie->base + reg); } static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg) { writel_relaxed(val, pcie->base + reg); } static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg) { return readl_relaxed(port->base + reg); } static inline void pcie_port_write(struct mt7621_pcie_port *port, u32 val, u32 reg) { writel_relaxed(val, port->base + reg); } static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mt7621_pcie *pcie = bus->sysdata; u32 address = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR); return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3); } static struct pci_ops mt7621_pcie_ops = { .map_bus = mt7621_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) { u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); return pcie_read(pcie, RALINK_PCI_CONFIG_DATA); } static void write_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg, u32 val) { u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA); } static inline void mt7621_rst_gpio_pcie_assert(struct mt7621_pcie_port *port) { if (port->gpio_rst) gpiod_set_value(port->gpio_rst, 1); } static inline void mt7621_rst_gpio_pcie_deassert(struct mt7621_pcie_port *port) { if (port->gpio_rst) gpiod_set_value(port->gpio_rst, 0); } static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port) { return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0; } static inline void mt7621_control_assert(struct mt7621_pcie_port *port) { struct mt7621_pcie *pcie = port->pcie; if (pcie->resets_inverted) reset_control_assert(port->pcie_rst); else reset_control_deassert(port->pcie_rst); } static inline void mt7621_control_deassert(struct mt7621_pcie_port *port) { struct mt7621_pcie *pcie = port->pcie; if (pcie->resets_inverted) reset_control_deassert(port->pcie_rst); else reset_control_assert(port->pcie_rst); } static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie, struct device_node *node, int slot) { struct mt7621_pcie_port *port; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); char name[10]; int err; port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; port->base = devm_platform_ioremap_resource(pdev, slot + 1); if (IS_ERR(port->base)) return PTR_ERR(port->base); port->clk = devm_get_clk_from_child(dev, node, NULL); if (IS_ERR(port->clk)) { dev_err(dev, "failed to get pcie%d clock\n", slot); return PTR_ERR(port->clk); } port->pcie_rst = of_reset_control_get_exclusive(node, NULL); if (PTR_ERR(port->pcie_rst) == -EPROBE_DEFER) { dev_err(dev, "failed to get pcie%d reset control\n", slot); return PTR_ERR(port->pcie_rst); } snprintf(name, sizeof(name), "pcie-phy%d", slot); port->phy = devm_of_phy_get(dev, node, name); if (IS_ERR(port->phy)) { dev_err(dev, "failed to get pcie-phy%d\n", slot); err = PTR_ERR(port->phy); goto remove_reset; } port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot, GPIOD_OUT_LOW); if 
(IS_ERR(port->gpio_rst)) { dev_err(dev, "failed to get GPIO for PCIe%d\n", slot); err = PTR_ERR(port->gpio_rst); goto remove_reset; } port->slot = slot; port->pcie = pcie; INIT_LIST_HEAD(&port->list); list_add_tail(&port->list, &pcie->ports); return 0; remove_reset: reset_control_put(port->pcie_rst); return err; } static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *node = dev->of_node, *child; int err; pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); for_each_available_child_of_node(node, child) { int slot; err = of_pci_get_devfn(child); if (err < 0) { of_node_put(child); dev_err(dev, "failed to parse devfn: %d\n", err); return err; } slot = PCI_SLOT(err); err = mt7621_pcie_parse_port(pcie, child, slot); if (err) { of_node_put(child); return err; } } return 0; } static int mt7621_pcie_init_port(struct mt7621_pcie_port *port) { struct mt7621_pcie *pcie = port->pcie; struct device *dev = pcie->dev; u32 slot = port->slot; int err; err = phy_init(port->phy); if (err) { dev_err(dev, "failed to initialize port%d phy\n", slot); return err; } err = phy_power_on(port->phy); if (err) { dev_err(dev, "failed to power on port%d phy\n", slot); phy_exit(port->phy); return err; } port->enabled = true; return 0; } static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie) { struct mt7621_pcie_port *port; list_for_each_entry(port, &pcie->ports, list) { /* PCIe RC reset assert */ mt7621_control_assert(port); /* PCIe EP reset assert */ mt7621_rst_gpio_pcie_assert(port); } msleep(PERST_DELAY_MS); } static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie) { struct mt7621_pcie_port *port; list_for_each_entry(port, &pcie->ports, list) mt7621_control_deassert(port); } static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie) { struct mt7621_pcie_port *port; list_for_each_entry(port, &pcie->ports, list) mt7621_rst_gpio_pcie_deassert(port); msleep(PERST_DELAY_MS); } static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie) { struct device *dev = pcie->dev; struct mt7621_pcie_port *port, *tmp; u8 num_disabled = 0; int err; mt7621_pcie_reset_assert(pcie); mt7621_pcie_reset_rc_deassert(pcie); list_for_each_entry_safe(port, tmp, &pcie->ports, list) { u32 slot = port->slot; if (slot == 1) { port->enabled = true; continue; } err = mt7621_pcie_init_port(port); if (err) { dev_err(dev, "initializing port %d failed\n", slot); list_del(&port->list); } } msleep(INIT_PORTS_DELAY_MS); mt7621_pcie_reset_ep_deassert(pcie); tmp = NULL; list_for_each_entry(port, &pcie->ports, list) { u32 slot = port->slot; if (!mt7621_pcie_port_is_linkup(port)) { dev_info(dev, "pcie%d no card, disable it (RST & CLK)\n", slot); mt7621_control_assert(port); port->enabled = false; num_disabled++; if (slot == 0) { tmp = port; continue; } if (slot == 1 && tmp && !tmp->enabled) phy_power_off(tmp->phy); } } return (num_disabled != PCIE_PORT_CNT) ? 
0 : -ENODEV; } static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port) { struct mt7621_pcie *pcie = port->pcie; u32 slot = port->slot; u32 val; /* enable pcie interrupt */ val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR); val |= PCIE_PORT_INT_EN(slot); pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR); /* map 2G DDR region */ pcie_port_write(port, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, PCI_BASE_ADDRESS_0); /* configure class code and revision ID */ pcie_port_write(port, PCIE_CLASS_CODE | PCIE_REVISION_ID, RALINK_PCI_CLASS); /* configure RC FTS number to 250 when it leaves L0s */ val = read_config(pcie, slot, PCIE_FTS_NUM); val &= ~PCIE_FTS_NUM_MASK; val |= PCIE_FTS_NUM_L0(0x50); write_config(pcie, slot, PCIE_FTS_NUM, val); } static int mt7621_pcie_enable_ports(struct pci_host_bridge *host) { struct mt7621_pcie *pcie = pci_host_bridge_priv(host); struct device *dev = pcie->dev; struct mt7621_pcie_port *port; struct resource_entry *entry; int err; entry = resource_list_first_type(&host->windows, IORESOURCE_IO); if (!entry) { dev_err(dev, "cannot get io resource\n"); return -EINVAL; } /* Setup MEMWIN and IOWIN */ pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE); pcie_write(pcie, entry->res->start - entry->offset, RALINK_PCI_IOBASE); list_for_each_entry(port, &pcie->ports, list) { if (port->enabled) { err = clk_prepare_enable(port->clk); if (err) { dev_err(dev, "enabling clk pcie%d\n", port->slot); return err; } mt7621_pcie_enable_port(port); dev_info(dev, "PCIE%d enabled\n", port->slot); } } return 0; } static int mt7621_pcie_register_host(struct pci_host_bridge *host) { struct mt7621_pcie *pcie = pci_host_bridge_priv(host); host->ops = &mt7621_pcie_ops; host->sysdata = pcie; return pci_host_probe(host); } static const struct soc_device_attribute mt7621_pcie_quirks_match[] = { { .soc_id = "mt7621", .revision = "E2" }, { /* sentinel */ } }; static int mt7621_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct soc_device_attribute *attr; struct mt7621_pcie_port *port; struct mt7621_pcie *pcie; struct pci_host_bridge *bridge; int err; if (!dev->of_node) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; platform_set_drvdata(pdev, pcie); INIT_LIST_HEAD(&pcie->ports); attr = soc_device_match(mt7621_pcie_quirks_match); if (attr) pcie->resets_inverted = true; err = mt7621_pcie_parse_dt(pcie); if (err) { dev_err(dev, "parsing DT failed\n"); return err; } err = mt7621_pcie_init_ports(pcie); if (err) { dev_err(dev, "nothing connected in virtual bridges\n"); return 0; } err = mt7621_pcie_enable_ports(bridge); if (err) { dev_err(dev, "error enabling pcie ports\n"); goto remove_resets; } return mt7621_pcie_register_host(bridge); remove_resets: list_for_each_entry(port, &pcie->ports, list) reset_control_put(port->pcie_rst); return err; } static void mt7621_pcie_remove(struct platform_device *pdev) { struct mt7621_pcie *pcie = platform_get_drvdata(pdev); struct mt7621_pcie_port *port; list_for_each_entry(port, &pcie->ports, list) reset_control_put(port->pcie_rst); } static const struct of_device_id mt7621_pcie_ids[] = { { .compatible = "mediatek,mt7621-pci" }, {}, }; MODULE_DEVICE_TABLE(of, mt7621_pcie_ids); static struct platform_driver mt7621_pcie_driver = { .probe = mt7621_pcie_probe, .remove_new = mt7621_pcie_remove, .driver = { .name = "mt7621-pci", .of_match_table = mt7621_pcie_ids, }, }; builtin_platform_driver(mt7621_pcie_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-mt7621.c
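mt7621_pcie_map_bus() above programs a "conf1"-style address into RALINK_PCI_CONFIG_ADDR and then accesses the data window at RALINK_PCI_CONFIG_DATA. As a sketch only, assuming the conventional conf1 layout (enable bit 31, extended register bits 27:24, bus 23:16, device 15:11, function 10:8, dword-aligned register 7:2); the helper below is an illustration, not the kernel's PCI_CONF1_EXT_ADDRESS macro:

#include <stdio.h>

static unsigned int conf1_ext_address(unsigned int bus, unsigned int dev,
				      unsigned int fn, unsigned int reg)
{
	return (1u << 31) | ((reg & 0xf00) << 16) | (bus << 16) |
	       (dev << 11) | (fn << 8) | (reg & 0xfc);
}

int main(void)
{
	/* bus 0, device 1, function 0, register 0x10 (BAR0) */
	printf("address = 0x%08x\n", conf1_ext_address(0, 1, 0, 0x10));
	/* prints: address = 0x80000810 */
	return 0;
}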
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation * Copyright (C) 2015 Hauke Mehrtens <[email protected]> */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/phy/phy.h> #include <linux/bcma/bcma.h> #include <linux/ioport.h> #include "pcie-iproc.h" /* NS: CLASS field is R/O, and set to wrong 0x200 value */ static void bcma_pcie2_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); static int iproc_bcma_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct iproc_pcie *pcie = dev->sysdata; struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); return bcma_core_irq(bdev, 5); } static int iproc_bcma_pcie_probe(struct bcma_device *bdev) { struct device *dev = &bdev->dev; struct iproc_pcie *pcie; struct pci_host_bridge *bridge; int ret; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; pcie->type = IPROC_PCIE_PAXB_BCMA; pcie->base = bdev->io_addr; if (!pcie->base) { dev_err(dev, "no controller registers\n"); return -ENOMEM; } pcie->base_addr = bdev->addr; pcie->mem.start = bdev->addr_s[0]; pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1; pcie->mem.name = "PCIe MEM space"; pcie->mem.flags = IORESOURCE_MEM; pci_add_resource(&bridge->windows, &pcie->mem); ret = devm_request_pci_bus_resources(dev, &bridge->windows); if (ret) return ret; pcie->map_irq = iproc_bcma_pcie_map_irq; bcma_set_drvdata(bdev, pcie); return iproc_pcie_setup(pcie, &bridge->windows); } static void iproc_bcma_pcie_remove(struct bcma_device *bdev) { struct iproc_pcie *pcie = bcma_get_drvdata(bdev); iproc_pcie_remove(pcie); } static const struct bcma_device_id iproc_bcma_pcie_table[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), {}, }; MODULE_DEVICE_TABLE(bcma, iproc_bcma_pcie_table); static struct bcma_driver iproc_bcma_pcie_driver = { .name = KBUILD_MODNAME, .id_table = iproc_bcma_pcie_table, .probe = iproc_bcma_pcie_probe, .remove = iproc_bcma_pcie_remove, }; module_bcma_driver(iproc_bcma_pcie_driver); MODULE_AUTHOR("Hauke Mehrtens"); MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-iproc-bcma.c
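iproc_bcma_pcie_map_irq() above recovers the wrapping bcma_device from the embedded struct device pointer via container_of(). A toy illustration of that pattern, with stand-in structures rather than the real bcma definitions:

#include <stdio.h>
#include <stddef.h>

/* simplified container_of(); the kernel version adds type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	int id;
};

struct bcma_device {        /* stand-in, not the real bcma structure */
	int irq;
	struct device dev;
};

int main(void)
{
	struct bcma_device bdev = { .irq = 42, .dev = { .id = 0 } };
	struct device *dev = &bdev.dev;   /* what the PCI core hands back */
	struct bcma_device *back = container_of(dev, struct bcma_device, dev);

	printf("irq = %d\n", back->irq);  /* prints: irq = 42 */
	return 0;
}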
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015, 2016 Cavium, Inc. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/of_pci.h> #include <linux/of.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> #if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) static void set_val(u32 v, int where, int size, u32 *val) { int shift = (where & 3) * 8; pr_debug("set_val %04x: %08x\n", (unsigned int)(where & ~3), v); v >>= shift; if (size == 1) v &= 0xff; else if (size == 2) v &= 0xffff; *val = v; } static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { void __iomem *addr; u32 v; /* Entries are 16-byte aligned; bits[2,3] select word in entry */ int where_a = where & 0xc; if (where_a == 0) { set_val(e0, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0x4) { addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; v = readl(addr); v &= ~0xf; v |= 2; /* EA entry-1. Base-L */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0x8) { u32 barl_orig; u32 barl_rb; addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; barl_orig = readl(addr + 0); writel(0xffffffff, addr + 0); barl_rb = readl(addr + 0); writel(barl_orig, addr + 0); /* zeros in unsettable bits */ v = ~barl_rb & ~3; v |= 0xc; /* EA entry-2. Offset-L */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xc) { addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */ if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; v = readl(addr); /* EA entry-3. Base-H */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } return PCIBIOS_DEVICE_NOT_FOUND; } static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct pci_config_window *cfg = bus->sysdata; int where_a = where & ~3; void __iomem *addr; u32 node_bits; u32 v; /* EA Base[63:32] may be missing some bits ... */ switch (where_a) { case 0xa8: case 0xbc: case 0xd0: case 0xe4: break; default: return pci_generic_config_read(bus, devfn, where, size, val); } addr = bus->ops->map_bus(bus, devfn, where_a); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; v = readl(addr); /* * Bit 44 of the 64-bit Base must match the same bit in * the config space access window. Since we are working with * the high-order 32 bits, shift everything down by 32 bits. */ node_bits = upper_32_bits(cfg->res.start) & (1 << 12); v |= node_bits; set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 v; u32 vendor_device; u32 class_rev; void __iomem *addr; int cfg_type; int where_a = where & ~3; addr = bus->ops->map_bus(bus, devfn, 0xc); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; v = readl(addr); /* Check for non type-00 header */ cfg_type = (v >> 16) & 0x7f; addr = bus->ops->map_bus(bus, devfn, 8); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; class_rev = readl(addr); if (class_rev == 0xffffffff) goto no_emulation; if ((class_rev & 0xff) >= 8) { /* Pass-2 handling */ if (cfg_type) goto no_emulation; return thunder_ecam_p2_config_read(bus, devfn, where, size, val); } /* * All BARs have fixed addresses specified by the EA * capability; they must return zero on read. 
*/ if (cfg_type == 0 && ((where >= 0x10 && where < 0x2c) || (where >= 0x1a4 && where < 0x1bc))) { /* BAR or SR-IOV BAR */ *val = 0; return PCIBIOS_SUCCESSFUL; } addr = bus->ops->map_bus(bus, devfn, 0); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; vendor_device = readl(addr); if (vendor_device == 0xffffffff) goto no_emulation; pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n", vendor_device & 0xffff, vendor_device >> 16, class_rev, (unsigned int)where, devfn); /* Check for non type-00 header */ if (cfg_type == 0) { bool has_msix; bool is_nic = (vendor_device == 0xa01e177d); bool is_tns = (vendor_device == 0xa01f177d); addr = bus->ops->map_bus(bus, devfn, 0x70); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; /* E_CAP */ v = readl(addr); has_msix = (v & 0xff00) != 0; if (!has_msix && where_a == 0x70) { v |= 0xbc00; /* next capability is EA at 0xbc */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xb0) { addr = bus->ops->map_bus(bus, devfn, where_a); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; v = readl(addr); if (v & 0xff00) pr_err("Bad MSIX cap header: %08x\n", v); v |= 0xbc00; /* next capability is EA at 0xbc */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xbc) { if (is_nic) v = 0x40014; /* EA last in chain, 4 entries */ else if (is_tns) v = 0x30014; /* EA last in chain, 3 entries */ else if (has_msix) v = 0x20014; /* EA last in chain, 2 entries */ else v = 0x10014; /* EA last in chain, 1 entry */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a >= 0xc0 && where_a < 0xd0) /* EA entry-0. PP=0, BAR0 Size:3 */ return handle_ea_bar(0x80ff0003, 0x10, bus, devfn, where, size, val); if (where_a >= 0xd0 && where_a < 0xe0 && has_msix) /* EA entry-1. PP=0, BAR4 Size:3 */ return handle_ea_bar(0x80ff0043, 0x20, bus, devfn, where, size, val); if (where_a >= 0xe0 && where_a < 0xf0 && is_tns) /* EA entry-2. PP=0, BAR2, Size:3 */ return handle_ea_bar(0x80ff0023, 0x18, bus, devfn, where, size, val); if (where_a >= 0xe0 && where_a < 0xf0 && is_nic) /* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */ return handle_ea_bar(0x80ff0493, 0x1a4, bus, devfn, where, size, val); if (where_a >= 0xf0 && where_a < 0x100 && is_nic) /* EA entry-3. 
PP=4, VF_BAR4 (d), Size:3 */ return handle_ea_bar(0x80ff04d3, 0x1b4, bus, devfn, where, size, val); } else if (cfg_type == 1) { bool is_rsl_bridge = devfn == 0x08; bool is_rad_bridge = devfn == 0xa0; bool is_zip_bridge = devfn == 0xa8; bool is_dfa_bridge = devfn == 0xb0; bool is_nic_bridge = devfn == 0x10; if (where_a == 0x70) { addr = bus->ops->map_bus(bus, devfn, where_a); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; v = readl(addr); if (v & 0xff00) pr_err("Bad PCIe cap header: %08x\n", v); v |= 0xbc00; /* next capability is EA at 0xbc */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xbc) { if (is_nic_bridge) v = 0x10014; /* EA last in chain, 1 entry */ else v = 0x00014; /* EA last in chain, no entries */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xc0) { if (is_rsl_bridge || is_nic_bridge) v = 0x0101; /* subordinate:secondary = 1:1 */ else if (is_rad_bridge) v = 0x0202; /* subordinate:secondary = 2:2 */ else if (is_zip_bridge) v = 0x0303; /* subordinate:secondary = 3:3 */ else if (is_dfa_bridge) v = 0x0404; /* subordinate:secondary = 4:4 */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xc4 && is_nic_bridge) { /* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */ v = 0x80ff0564; set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xc8 && is_nic_bridge) { v = 0x00000002; /* Base-L 64-bit */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xcc && is_nic_bridge) { v = 0xfffffffe; /* MaxOffset-L 64-bit */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xd0 && is_nic_bridge) { v = 0x00008430; /* NIC Base-H */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } if (where_a == 0xd4 && is_nic_bridge) { v = 0x0000000f; /* MaxOffset-H */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; } } no_emulation: return pci_generic_config_read(bus, devfn, where, size, val); } static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { /* * All BARs have fixed addresses; ignore BAR writes so they * don't get corrupted. */ if ((where >= 0x10 && where < 0x2c) || (where >= 0x1a4 && where < 0x1bc)) /* BAR or SR-IOV BAR */ return PCIBIOS_SUCCESSFUL; return pci_generic_config_write(bus, devfn, where, size, val); } const struct pci_ecam_ops pci_thunder_ecam_ops = { .pci_ops = { .map_bus = pci_ecam_map_bus, .read = thunder_ecam_config_read, .write = thunder_ecam_config_write, } }; #ifdef CONFIG_PCI_HOST_THUNDER_ECAM static const struct of_device_id thunder_ecam_of_match[] = { { .compatible = "cavium,pci-host-thunder-ecam", .data = &pci_thunder_ecam_ops, }, { }, }; static struct platform_driver thunder_ecam_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = thunder_ecam_of_match, .suppress_bind_attrs = true, }, .probe = pci_host_common_probe, }; builtin_platform_driver(thunder_ecam_driver); #endif #endif
linux-master
drivers/pci/controller/pci-thunder-ecam.c
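/*
 * Editor's sketch (not part of the kernel sources above): the emulated
 * config-read path in thunder_ecam_config_read() builds a complete 32-bit
 * register value (for example the synthesized EA capability header 0x40014)
 * and hands it to set_val(), defined earlier in that file, to return only the
 * byte/word/dword the caller asked for.  The helper below is a hypothetical
 * userspace model of that masking, assuming set_val() shifts by the byte
 * offset within the dword and masks to the access size; the name
 * emulated_read() and the test values are illustrative only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Return the 'size'-byte slice of 'v' that a config read at 'where' sees. */
static uint32_t emulated_read(uint32_t v, int where, int size)
{
	unsigned int shift = (where & 3) * 8;	/* byte offset within the dword */

	v >>= shift;
	if (size == 1)
		return v & 0xff;
	if (size == 2)
		return v & 0xffff;
	return v;				/* size == 4 */
}

int main(void)
{
	uint32_t ea_hdr = 0x40014;	/* "EA last in chain, 4 entries" from the driver */

	assert(emulated_read(ea_hdr, 0xbc, 4) == 0x40014);
	assert(emulated_read(ea_hdr, 0xbc, 2) == 0x0014);	/* capability ID 0x14 (EA) */
	assert(emulated_read(ea_hdr, 0xbe, 1) == 0x04);		/* entry count byte */
	printf("ok\n");
	return 0;
}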
// SPDX-License-Identifier: GPL-2.0 /* * pci-rcar-gen2: internal PCI bus support * * Copyright (C) 2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc. * * Author: Valentine Barshak <[email protected]> */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sizes.h> #include <linux/slab.h> #include "../pci.h" /* AHB-PCI Bridge PCI communication registers */ #define RCAR_AHBPCI_PCICOM_OFFSET 0x800 #define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00) #define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04) #define RCAR_PCIAHB_PREFETCH0 0x0 #define RCAR_PCIAHB_PREFETCH4 0x1 #define RCAR_PCIAHB_PREFETCH8 0x2 #define RCAR_PCIAHB_PREFETCH16 0x3 #define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10) #define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14) #define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1) #define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1) #define RCAR_AHBPCI_WIN1_HOST (1 << 30) #define RCAR_AHBPCI_WIN1_DEVICE (1 << 31) #define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20) #define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24) #define RCAR_PCI_INT_SIGTABORT (1 << 0) #define RCAR_PCI_INT_SIGRETABORT (1 << 1) #define RCAR_PCI_INT_REMABORT (1 << 2) #define RCAR_PCI_INT_PERR (1 << 3) #define RCAR_PCI_INT_SIGSERR (1 << 4) #define RCAR_PCI_INT_RESERR (1 << 5) #define RCAR_PCI_INT_WIN1ERR (1 << 12) #define RCAR_PCI_INT_WIN2ERR (1 << 13) #define RCAR_PCI_INT_A (1 << 16) #define RCAR_PCI_INT_B (1 << 17) #define RCAR_PCI_INT_PME (1 << 19) #define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \ RCAR_PCI_INT_SIGRETABORT | \ RCAR_PCI_INT_REMABORT | \ RCAR_PCI_INT_PERR | \ RCAR_PCI_INT_SIGSERR | \ RCAR_PCI_INT_RESERR | \ RCAR_PCI_INT_WIN1ERR | \ RCAR_PCI_INT_WIN2ERR) #define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30) #define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0) #define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1) #define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2) #define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7) #define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17) #define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \ RCAR_AHB_BUS_MMODE_BYTE_BURST | \ RCAR_AHB_BUS_MMODE_WR_INCR | \ RCAR_AHB_BUS_MMODE_HBUS_REQ | \ RCAR_AHB_BUS_SMODE_READYCTR) #define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34) #define RCAR_USBCTR_USBH_RST (1 << 0) #define RCAR_USBCTR_PCICLK_MASK (1 << 1) #define RCAR_USBCTR_PLL_RST (1 << 2) #define RCAR_USBCTR_DIRPD (1 << 8) #define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9) #define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10) #define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10) #define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10) #define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10) #define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10) #define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40) #define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0) #define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1) #define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12) #define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) struct rcar_pci { struct device *dev; void __iomem *reg; struct resource mem_res; struct resource *cfg_res; int irq; }; /* PCI configuration space operations */ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, int where) { struct rcar_pci *priv = bus->sysdata; int slot, val; if (!pci_is_root_bus(bus) 
|| PCI_FUNC(devfn)) return NULL; /* Only one EHCI/OHCI device built-in */ slot = PCI_SLOT(devfn); if (slot > 2) return NULL; /* bridge logic only has registers to 0x40 */ if (slot == 0x0 && where >= 0x40) return NULL; val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG : RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG; iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG); return priv->reg + (slot >> 1) * 0x100 + where; } #ifdef CONFIG_PCI_DEBUG /* if debug enabled, then attach an error handler irq to the bridge */ static irqreturn_t rcar_pci_err_irq(int irq, void *pw) { struct rcar_pci *priv = pw; struct device *dev = priv->dev; u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG); if (status & RCAR_PCI_INT_ALLERRORS) { dev_err(dev, "error irq: status %08x\n", status); /* clear the error(s) */ iowrite32(status & RCAR_PCI_INT_ALLERRORS, priv->reg + RCAR_PCI_INT_STATUS_REG); return IRQ_HANDLED; } return IRQ_NONE; } static void rcar_pci_setup_errirq(struct rcar_pci *priv) { struct device *dev = priv->dev; int ret; u32 val; ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq, IRQF_SHARED, "error irq", priv); if (ret) { dev_err(dev, "cannot claim IRQ for error handling\n"); return; } val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG); val |= RCAR_PCI_INT_ALLERRORS; iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG); } #else static inline void rcar_pci_setup_errirq(struct rcar_pci *priv) { } #endif /* PCI host controller setup */ static void rcar_pci_setup(struct rcar_pci *priv) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv); struct device *dev = priv->dev; void __iomem *reg = priv->reg; struct resource_entry *entry; unsigned long window_size; unsigned long window_addr; unsigned long window_pci; u32 val; entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM); if (!entry) { window_addr = 0x40000000; window_pci = 0x40000000; window_size = SZ_1G; } else { window_addr = entry->res->start; window_pci = entry->res->start - entry->offset; window_size = resource_size(entry->res); } pm_runtime_enable(dev); pm_runtime_get_sync(dev); val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); dev_info(dev, "PCI: revision %x\n", val); /* Disable Direct Power Down State and assert reset */ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST; iowrite32(val, reg + RCAR_USBCTR_REG); udelay(4); /* De-assert reset and reset PCIAHB window1 size */ val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK | RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); /* Setup PCIAHB window1 size */ switch (window_size) { case SZ_2G: val |= RCAR_USBCTR_PCIAHB_WIN1_2G; break; case SZ_1G: val |= RCAR_USBCTR_PCIAHB_WIN1_1G; break; case SZ_512M: val |= RCAR_USBCTR_PCIAHB_WIN1_512M; break; default: pr_warn("unknown window size %ld - defaulting to 256M\n", window_size); window_size = SZ_256M; fallthrough; case SZ_256M: val |= RCAR_USBCTR_PCIAHB_WIN1_256M; break; } iowrite32(val, reg + RCAR_USBCTR_REG); /* Configure AHB master and slave modes */ iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG); /* Configure PCI arbiter */ val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG); val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 | RCAR_PCI_ARBITER_PCIBP_MODE; iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); /* PCI-AHB mapping */ iowrite32(window_addr | RCAR_PCIAHB_PREFETCH16, reg + RCAR_PCIAHB_WIN1_CTR_REG); /* AHB-PCI mapping: OHCI/EHCI registers */ val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM; iowrite32(val, reg + 
RCAR_AHBPCI_WIN2_CTR_REG); /* Enable AHB-PCI bridge PCI configuration access */ iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, reg + RCAR_AHBPCI_WIN1_CTR_REG); /* Set PCI-AHB Window1 address */ iowrite32(window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, reg + PCI_BASE_ADDRESS_1); /* Set AHB-PCI bridge PCI communication area address */ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; iowrite32(val, reg + PCI_BASE_ADDRESS_0); val = ioread32(reg + PCI_COMMAND); val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; iowrite32(val, reg + PCI_COMMAND); /* Enable PCI interrupts */ iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, reg + RCAR_PCI_INT_ENABLE_REG); rcar_pci_setup_errirq(priv); } static struct pci_ops rcar_pci_ops = { .map_bus = rcar_pci_cfg_base, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static int rcar_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *cfg_res, *mem_res; struct rcar_pci *priv; struct pci_host_bridge *bridge; void __iomem *reg; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv)); if (!bridge) return -ENOMEM; priv = pci_host_bridge_priv(bridge); bridge->sysdata = priv; reg = devm_platform_get_and_ioremap_resource(pdev, 0, &cfg_res); if (IS_ERR(reg)) return PTR_ERR(reg); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!mem_res || !mem_res->start) return -ENODEV; if (mem_res->start & 0xFFFF) return -EINVAL; priv->mem_res = *mem_res; priv->cfg_res = cfg_res; priv->irq = platform_get_irq(pdev, 0); priv->reg = reg; priv->dev = dev; if (priv->irq < 0) { dev_err(dev, "no valid irq found\n"); return priv->irq; } bridge->ops = &rcar_pci_ops; pci_add_flags(PCI_REASSIGN_ALL_BUS); rcar_pci_setup(priv); return pci_host_probe(bridge); } static const struct of_device_id rcar_pci_of_match[] = { { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, { .compatible = "renesas,pci-r8a7794", }, { .compatible = "renesas,pci-rcar-gen2", }, { .compatible = "renesas,pci-rzn1", }, { }, }; static struct platform_driver rcar_pci_driver = { .driver = { .name = "pci-rcar-gen2", .suppress_bind_attrs = true, .of_match_table = rcar_pci_of_match, }, .probe = rcar_pci_probe, }; builtin_platform_driver(rcar_pci_driver);
linux-master
drivers/pci/controller/pci-rcar-gen2.c
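/*
 * Editor's sketch (not part of the kernel sources above): the pointer
 * arithmetic used by rcar_pci_cfg_base().  Slot 0 is the AHB-PCI bridge
 * itself and slots 1 and 2 are the two internal USB host controllers; the
 * WIN1 control register written just before selects whether host (bridge)
 * or device configuration space is visible, and the returned offset is
 * (slot >> 1) * 0x100 + where.  cfg_offset() is a hypothetical stand-in for
 * the offset part of that calculation.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static size_t cfg_offset(unsigned int slot, int where)
{
	return (size_t)(slot >> 1) * 0x100 + where;
}

int main(void)
{
	assert(cfg_offset(0, 0x04) == 0x004);	/* bridge, PCI_COMMAND */
	assert(cfg_offset(1, 0x00) == 0x000);	/* first USB controller */
	assert(cfg_offset(2, 0x00) == 0x100);	/* second USB controller, next 256-byte window */
	printf("ok\n");
	return 0;
}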
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host bridge driver for Apple system-on-chips. * * The HW is ECAM compliant, so once the controller is initialized, * the driver mostly deals MSI mapping and handling of per-port * interrupts (INTx, management and error signals). * * Initialization requires enabling power and clocks, along with a * number of register pokes. * * Copyright (C) 2021 Alyssa Rosenzweig <[email protected]> * Copyright (C) 2021 Google LLC * Copyright (C) 2021 Corellium LLC * Copyright (C) 2021 Mark Kettenis <[email protected]> * * Author: Alyssa Rosenzweig <[email protected]> * Author: Marc Zyngier <[email protected]> */ #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/notifier.h> #include <linux/of_irq.h> #include <linux/pci-ecam.h> #define CORE_RC_PHYIF_CTL 0x00024 #define CORE_RC_PHYIF_CTL_RUN BIT(0) #define CORE_RC_PHYIF_STAT 0x00028 #define CORE_RC_PHYIF_STAT_REFCLK BIT(4) #define CORE_RC_CTL 0x00050 #define CORE_RC_CTL_RUN BIT(0) #define CORE_RC_STAT 0x00058 #define CORE_RC_STAT_READY BIT(0) #define CORE_FABRIC_STAT 0x04000 #define CORE_FABRIC_STAT_MASK 0x001F001F #define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port)) #define CORE_LANE_CFG_REFCLK0REQ BIT(0) #define CORE_LANE_CFG_REFCLK1REQ BIT(1) #define CORE_LANE_CFG_REFCLK0ACK BIT(2) #define CORE_LANE_CFG_REFCLK1ACK BIT(3) #define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10)) #define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port)) #define CORE_LANE_CTL_CFGACC BIT(15) #define PORT_LTSSMCTL 0x00080 #define PORT_LTSSMCTL_START BIT(0) #define PORT_INTSTAT 0x00100 #define PORT_INT_TUNNEL_ERR 31 #define PORT_INT_CPL_TIMEOUT 23 #define PORT_INT_RID2SID_MAPERR 22 #define PORT_INT_CPL_ABORT 21 #define PORT_INT_MSI_BAD_DATA 19 #define PORT_INT_MSI_ERR 18 #define PORT_INT_REQADDR_GT32 17 #define PORT_INT_AF_TIMEOUT 15 #define PORT_INT_LINK_DOWN 14 #define PORT_INT_LINK_UP 12 #define PORT_INT_LINK_BWMGMT 11 #define PORT_INT_AER_MASK (15 << 4) #define PORT_INT_PORT_ERR 4 #define PORT_INT_INTx(i) i #define PORT_INT_INTx_MASK 15 #define PORT_INTMSK 0x00104 #define PORT_INTMSKSET 0x00108 #define PORT_INTMSKCLR 0x0010c #define PORT_MSICFG 0x00124 #define PORT_MSICFG_EN BIT(0) #define PORT_MSICFG_L2MSINUM_SHIFT 4 #define PORT_MSIBASE 0x00128 #define PORT_MSIBASE_1_SHIFT 16 #define PORT_MSIADDR 0x00168 #define PORT_LINKSTS 0x00208 #define PORT_LINKSTS_UP BIT(0) #define PORT_LINKSTS_BUSY BIT(2) #define PORT_LINKCMDSTS 0x00210 #define PORT_OUTS_NPREQS 0x00284 #define PORT_OUTS_NPREQS_REQ BIT(24) #define PORT_OUTS_NPREQS_CPL BIT(16) #define PORT_RXWR_FIFO 0x00288 #define PORT_RXWR_FIFO_HDR GENMASK(15, 10) #define PORT_RXWR_FIFO_DATA GENMASK(9, 0) #define PORT_RXRD_FIFO 0x0028C #define PORT_RXRD_FIFO_REQ GENMASK(6, 0) #define PORT_OUTS_CPLS 0x00290 #define PORT_OUTS_CPLS_SHRD GENMASK(14, 8) #define PORT_OUTS_CPLS_WAIT GENMASK(6, 0) #define PORT_APPCLK 0x00800 #define PORT_APPCLK_EN BIT(0) #define PORT_APPCLK_CGDIS BIT(8) #define PORT_STATUS 0x00804 #define PORT_STATUS_READY BIT(0) #define PORT_REFCLK 0x00810 #define PORT_REFCLK_EN BIT(0) #define PORT_REFCLK_CGDIS BIT(8) #define PORT_PERST 0x00814 #define PORT_PERST_OFF BIT(0) #define PORT_RID2SID(i16) (0x00828 + 4 * (i16)) #define PORT_RID2SID_VALID BIT(31) #define PORT_RID2SID_SID_SHIFT 16 #define PORT_RID2SID_BUS_SHIFT 8 #define PORT_RID2SID_DEV_SHIFT 3 #define PORT_RID2SID_FUNC_SHIFT 0 #define 
PORT_OUTS_PREQS_HDR 0x00980 #define PORT_OUTS_PREQS_HDR_MASK GENMASK(9, 0) #define PORT_OUTS_PREQS_DATA 0x00984 #define PORT_OUTS_PREQS_DATA_MASK GENMASK(15, 0) #define PORT_TUNCTRL 0x00988 #define PORT_TUNCTRL_PERST_ON BIT(0) #define PORT_TUNCTRL_PERST_ACK_REQ BIT(1) #define PORT_TUNSTAT 0x0098c #define PORT_TUNSTAT_PERST_ON BIT(0) #define PORT_TUNSTAT_PERST_ACK_PEND BIT(1) #define PORT_PREFMEM_ENABLE 0x00994 #define MAX_RID2SID 64 /* * The doorbell address is set to 0xfffff000, which by convention * matches what MacOS does, and it is possible to use any other * address (in the bottom 4GB, as the base register is only 32bit). * However, it has to be excluded from the IOVA range, and the DART * driver has to know about it. */ #define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR struct apple_pcie { struct mutex lock; struct device *dev; void __iomem *base; struct irq_domain *domain; unsigned long *bitmap; struct list_head ports; struct completion event; struct irq_fwspec fwspec; u32 nvecs; }; struct apple_pcie_port { struct apple_pcie *pcie; struct device_node *np; void __iomem *base; struct irq_domain *domain; struct list_head entry; DECLARE_BITMAP(sid_map, MAX_RID2SID); int sid_map_sz; int idx; }; static void rmw_set(u32 set, void __iomem *addr) { writel_relaxed(readl_relaxed(addr) | set, addr); } static void rmw_clear(u32 clr, void __iomem *addr) { writel_relaxed(readl_relaxed(addr) & ~clr, addr); } static void apple_msi_top_irq_mask(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void apple_msi_top_irq_unmask(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip apple_msi_top_chip = { .name = "PCIe MSI", .irq_mask = apple_msi_top_irq_mask, .irq_unmask = apple_msi_top_irq_unmask, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, .irq_set_type = irq_chip_set_type_parent, }; static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { msg->address_hi = upper_32_bits(DOORBELL_ADDR); msg->address_lo = lower_32_bits(DOORBELL_ADDR); msg->data = data->hwirq; } static struct irq_chip apple_msi_bottom_chip = { .name = "MSI", .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, .irq_set_type = irq_chip_set_type_parent, .irq_compose_msi_msg = apple_msi_compose_msg, }; static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct apple_pcie *pcie = domain->host_data; struct irq_fwspec fwspec = pcie->fwspec; unsigned int i; int ret, hwirq; mutex_lock(&pcie->lock); hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs, order_base_2(nr_irqs)); mutex_unlock(&pcie->lock); if (hwirq < 0) return -ENOSPC; fwspec.param[fwspec.param_count - 2] += hwirq; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec); if (ret) return ret; for (i = 0; i < nr_irqs; i++) { irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &apple_msi_bottom_chip, domain->host_data); } return 0; } static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct apple_pcie *pcie = domain->host_data; mutex_lock(&pcie->lock); bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->lock); } static const struct irq_domain_ops apple_msi_domain_ops = { .alloc = apple_msi_domain_alloc, .free = 
apple_msi_domain_free, }; static struct msi_domain_info apple_msi_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), .chip = &apple_msi_top_chip, }; static void apple_port_irq_mask(struct irq_data *data) { struct apple_pcie_port *port = irq_data_get_irq_chip_data(data); writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET); } static void apple_port_irq_unmask(struct irq_data *data) { struct apple_pcie_port *port = irq_data_get_irq_chip_data(data); writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR); } static bool hwirq_is_intx(unsigned int hwirq) { return BIT(hwirq) & PORT_INT_INTx_MASK; } static void apple_port_irq_ack(struct irq_data *data) { struct apple_pcie_port *port = irq_data_get_irq_chip_data(data); if (!hwirq_is_intx(data->hwirq)) writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT); } static int apple_port_irq_set_type(struct irq_data *data, unsigned int type) { /* * It doesn't seem that there is any way to configure the * trigger, so assume INTx have to be level (as per the spec), * and the rest is edge (which looks likely). */ if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK)) return -EINVAL; irqd_set_trigger_type(data, type); return 0; } static struct irq_chip apple_port_irqchip = { .name = "PCIe", .irq_ack = apple_port_irq_ack, .irq_mask = apple_port_irq_mask, .irq_unmask = apple_port_irq_unmask, .irq_set_type = apple_port_irq_set_type, }; static int apple_port_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct apple_pcie_port *port = domain->host_data; struct irq_fwspec *fwspec = args; int i; for (i = 0; i < nr_irqs; i++) { irq_flow_handler_t flow = handle_edge_irq; unsigned int type = IRQ_TYPE_EDGE_RISING; if (hwirq_is_intx(fwspec->param[0] + i)) { flow = handle_level_irq; type = IRQ_TYPE_LEVEL_HIGH; } irq_domain_set_info(domain, virq + i, fwspec->param[0] + i, &apple_port_irqchip, port, flow, NULL, NULL); irq_set_irq_type(virq + i, type); } return 0; } static void apple_port_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { int i; for (i = 0; i < nr_irqs; i++) { struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); irq_set_handler(virq + i, NULL); irq_domain_reset_irq_data(d); } } static const struct irq_domain_ops apple_port_irq_domain_ops = { .translate = irq_domain_translate_onecell, .alloc = apple_port_irq_domain_alloc, .free = apple_port_irq_domain_free, }; static void apple_port_irq_handler(struct irq_desc *desc) { struct apple_pcie_port *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long stat; int i; chained_irq_enter(chip, desc); stat = readl_relaxed(port->base + PORT_INTSTAT); for_each_set_bit(i, &stat, 32) generic_handle_domain_irq(port->domain, i); chained_irq_exit(chip, desc); } static int apple_pcie_port_setup_irq(struct apple_pcie_port *port) { struct fwnode_handle *fwnode = &port->np->fwnode; unsigned int irq; /* FIXME: consider moving each interrupt under each port */ irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)), port->idx); if (!irq) return -ENXIO; port->domain = irq_domain_create_linear(fwnode, 32, &apple_port_irq_domain_ops, port); if (!port->domain) return -ENOMEM; /* Disable all interrupts */ writel_relaxed(~0, port->base + PORT_INTMSKSET); writel_relaxed(~0, port->base + PORT_INTSTAT); irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port); /* Configure MSI 
base address */ BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR)); writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR); /* Enable MSIs, shared between all ports */ writel_relaxed(0, port->base + PORT_MSIBASE); writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) | PORT_MSICFG_EN, port->base + PORT_MSICFG); return 0; } static irqreturn_t apple_pcie_port_irq(int irq, void *data) { struct apple_pcie_port *port = data; unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq; switch (hwirq) { case PORT_INT_LINK_UP: dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n", port->np); complete_all(&port->pcie->event); break; case PORT_INT_LINK_DOWN: dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n", port->np); break; default: return IRQ_NONE; } return IRQ_HANDLED; } static int apple_pcie_port_register_irqs(struct apple_pcie_port *port) { static struct { unsigned int hwirq; const char *name; } port_irqs[] = { { PORT_INT_LINK_UP, "Link up", }, { PORT_INT_LINK_DOWN, "Link down", }, }; int i; for (i = 0; i < ARRAY_SIZE(port_irqs); i++) { struct irq_fwspec fwspec = { .fwnode = &port->np->fwnode, .param_count = 1, .param = { [0] = port_irqs[i].hwirq, }, }; unsigned int irq; int ret; irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE, &fwspec); if (WARN_ON(!irq)) continue; ret = request_irq(irq, apple_pcie_port_irq, 0, port_irqs[i].name, port); WARN_ON(ret); } return 0; } static int apple_pcie_setup_refclk(struct apple_pcie *pcie, struct apple_pcie_port *port) { u32 stat; int res; res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat, stat & CORE_RC_PHYIF_STAT_REFCLK, 100, 50000); if (res < 0) return res; rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx)); rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx)); res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx), stat, stat & CORE_LANE_CFG_REFCLK0ACK, 100, 50000); if (res < 0) return res; rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx)); res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx), stat, stat & CORE_LANE_CFG_REFCLK1ACK, 100, 50000); if (res < 0) return res; rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx)); rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx)); rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK); return 0; } static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port, int idx, u32 val) { writel_relaxed(val, port->base + PORT_RID2SID(idx)); /* Read back to ensure completion of the write */ return readl_relaxed(port->base + PORT_RID2SID(idx)); } static int apple_pcie_setup_port(struct apple_pcie *pcie, struct device_node *np) { struct platform_device *platform = to_platform_device(pcie->dev); struct apple_pcie_port *port; struct gpio_desc *reset; u32 stat, idx; int ret, i; reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset", GPIOD_OUT_LOW, "PERST#"); if (IS_ERR(reset)) return PTR_ERR(reset); port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; ret = of_property_read_u32_index(np, "reg", 0, &idx); if (ret) return ret; /* Use the first reg entry to work out the port index */ port->idx = idx >> 11; port->pcie = pcie; port->np = np; port->base = devm_platform_ioremap_resource(platform, port->idx + 2); if (IS_ERR(port->base)) return PTR_ERR(port->base); rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK); /* Assert PERST# before setting up the clock */ 
gpiod_set_value(reset, 1); ret = apple_pcie_setup_refclk(pcie, port); if (ret < 0) return ret; /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */ usleep_range(100, 200); /* Deassert PERST# */ rmw_set(PORT_PERST_OFF, port->base + PORT_PERST); gpiod_set_value(reset, 0); /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ msleep(100); ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat, stat & PORT_STATUS_READY, 100, 250000); if (ret < 0) { dev_err(pcie->dev, "port %pOF ready wait timeout\n", np); return ret; } rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK); rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK); ret = apple_pcie_port_setup_irq(port); if (ret) return ret; /* Reset all RID/SID mappings, and check for RAZ/WI registers */ for (i = 0; i < MAX_RID2SID; i++) { if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d) break; apple_pcie_rid2sid_write(port, i, 0); } dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i); port->sid_map_sz = i; list_add_tail(&port->entry, &pcie->ports); init_completion(&pcie->event); ret = apple_pcie_port_register_irqs(port); WARN_ON(ret); writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL); if (!wait_for_completion_timeout(&pcie->event, HZ / 10)) dev_warn(pcie->dev, "%pOF link didn't come up\n", np); return 0; } static int apple_msi_init(struct apple_pcie *pcie) { struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); struct of_phandle_args args = {}; struct irq_domain *parent; int ret; ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges", "#interrupt-cells", 0, &args); if (ret) return ret; ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges", args.args_count + 1, &pcie->nvecs); if (ret) return ret; of_phandle_args_to_fwspec(args.np, args.args, args.args_count, &pcie->fwspec); pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL); if (!pcie->bitmap) return -ENOMEM; parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED); if (!parent) { dev_err(pcie->dev, "failed to find parent domain\n"); return -ENXIO; } parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode, &apple_msi_domain_ops, pcie); if (!parent) { dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info, parent); if (!pcie->domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); irq_domain_remove(parent); return -ENOMEM; } return 0; } static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev) { struct pci_config_window *cfg = pdev->sysdata; struct apple_pcie *pcie = cfg->priv; struct pci_dev *port_pdev; struct apple_pcie_port *port; /* Find the root port this device is on */ port_pdev = pcie_find_root_port(pdev); /* If finding the port itself, nothing to do */ if (WARN_ON(!port_pdev) || pdev == port_pdev) return NULL; list_for_each_entry(port, &pcie->ports, entry) { if (port->idx == PCI_SLOT(port_pdev->devfn)) return port; } return NULL; } static int apple_pcie_add_device(struct apple_pcie_port *port, struct pci_dev *pdev) { u32 sid, rid = pci_dev_id(pdev); int idx, err; dev_dbg(&pdev->dev, "added to bus %s, index %d\n", pci_name(pdev->bus->self), port->idx); err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map", "iommu-map-mask", NULL, &sid); if (err) return err; mutex_lock(&port->pcie->lock); idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0); if (idx >= 0) { 
apple_pcie_rid2sid_write(port, idx, PORT_RID2SID_VALID | (sid << PORT_RID2SID_SID_SHIFT) | rid); dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n", rid, sid, idx); } mutex_unlock(&port->pcie->lock); return idx >= 0 ? 0 : -ENOSPC; } static void apple_pcie_release_device(struct apple_pcie_port *port, struct pci_dev *pdev) { u32 rid = pci_dev_id(pdev); int idx; mutex_lock(&port->pcie->lock); for_each_set_bit(idx, port->sid_map, port->sid_map_sz) { u32 val; val = readl_relaxed(port->base + PORT_RID2SID(idx)); if ((val & 0xffff) == rid) { apple_pcie_rid2sid_write(port, idx, 0); bitmap_release_region(port->sid_map, idx, 0); dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx); break; } } mutex_unlock(&port->pcie->lock); } static int apple_pcie_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct pci_dev *pdev = to_pci_dev(dev); struct apple_pcie_port *port; int err; /* * This is a bit ugly. We assume that if we get notified for * any PCI device, we must be in charge of it, and that there * is no other PCI controller in the whole system. It probably * holds for now, but who knows for how long? */ port = apple_pcie_get_port(pdev); if (!port) return NOTIFY_DONE; switch (action) { case BUS_NOTIFY_ADD_DEVICE: err = apple_pcie_add_device(port, pdev); if (err) return notifier_from_errno(err); break; case BUS_NOTIFY_DEL_DEVICE: apple_pcie_release_device(port, pdev); break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static struct notifier_block apple_pcie_nb = { .notifier_call = apple_pcie_bus_notifier, }; static int apple_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct platform_device *platform = to_platform_device(dev); struct device_node *of_port; struct apple_pcie *pcie; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->dev = dev; mutex_init(&pcie->lock); pcie->base = devm_platform_ioremap_resource(platform, 1); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); cfg->priv = pcie; INIT_LIST_HEAD(&pcie->ports); ret = apple_msi_init(pcie); if (ret) return ret; for_each_child_of_node(dev->of_node, of_port) { ret = apple_pcie_setup_port(pcie, of_port); if (ret) { dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret); of_node_put(of_port); return ret; } } return 0; } static int apple_pcie_probe(struct platform_device *pdev) { int ret; ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb); if (ret) return ret; ret = pci_host_common_probe(pdev); if (ret) bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb); return ret; } static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = { .init = apple_pcie_init, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; static const struct of_device_id apple_pcie_of_match[] = { { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops }, { } }; MODULE_DEVICE_TABLE(of, apple_pcie_of_match); static struct platform_driver apple_pcie_driver = { .probe = apple_pcie_probe, .driver = { .name = "pcie-apple", .of_match_table = apple_pcie_of_match, .suppress_bind_attrs = true, }, }; module_platform_driver(apple_pcie_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pcie-apple.c
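/*
 * Editor's sketch (not part of the kernel sources above): the layout of the
 * RID -> SID mapping word that apple_pcie_add_device() writes through
 * apple_pcie_rid2sid_write().  Bit 31 marks the entry valid, the stream ID
 * sits at PORT_RID2SID_SID_SHIFT (16), and the low 16 bits carry the PCI
 * requester ID (bus << 8 | devfn), which is also what
 * apple_pcie_release_device() matches on.  The local macros mirror the
 * driver's PORT_RID2SID_* values; rid2sid_entry() is a hypothetical helper.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RID2SID_VALID		(1u << 31)
#define RID2SID_SID_SHIFT	16

static uint32_t rid2sid_entry(uint16_t rid, uint32_t sid)
{
	return RID2SID_VALID | (sid << RID2SID_SID_SHIFT) | rid;
}

int main(void)
{
	uint16_t rid = (1 << 8) | 0;		/* device 01:00.0 */
	uint32_t entry = rid2sid_entry(rid, 0x01);

	assert(entry == 0x80010100);
	assert((entry & 0xffff) == rid);	/* the release path matches the low 16 bits */
	printf("ok\n");
	return 0;
}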
// SPDX-License-Identifier: GPL-2.0+ /* * PCIe host controller driver for Xilinx AXI PCIe Bridge * * Copyright (c) 2012 - 2014 Xilinx, Inc. * * Based on the Tegra PCIe driver * * Bits taken from Synopsys DesignWare Host controller driver and * ARM PCI Host generic driver. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/of_irq.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> #include "../pci.h" /* Register definitions */ #define XILINX_PCIE_REG_BIR 0x00000130 #define XILINX_PCIE_REG_IDR 0x00000138 #define XILINX_PCIE_REG_IMR 0x0000013c #define XILINX_PCIE_REG_PSCR 0x00000144 #define XILINX_PCIE_REG_RPSC 0x00000148 #define XILINX_PCIE_REG_MSIBASE1 0x0000014c #define XILINX_PCIE_REG_MSIBASE2 0x00000150 #define XILINX_PCIE_REG_RPEFR 0x00000154 #define XILINX_PCIE_REG_RPIFR1 0x00000158 #define XILINX_PCIE_REG_RPIFR2 0x0000015c /* Interrupt registers definitions */ #define XILINX_PCIE_INTR_LINK_DOWN BIT(0) #define XILINX_PCIE_INTR_ECRC_ERR BIT(1) #define XILINX_PCIE_INTR_STR_ERR BIT(2) #define XILINX_PCIE_INTR_HOT_RESET BIT(3) #define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8) #define XILINX_PCIE_INTR_CORRECTABLE BIT(9) #define XILINX_PCIE_INTR_NONFATAL BIT(10) #define XILINX_PCIE_INTR_FATAL BIT(11) #define XILINX_PCIE_INTR_INTX BIT(16) #define XILINX_PCIE_INTR_MSI BIT(17) #define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20) #define XILINX_PCIE_INTR_SLV_UNEXP BIT(21) #define XILINX_PCIE_INTR_SLV_COMPL BIT(22) #define XILINX_PCIE_INTR_SLV_ERRP BIT(23) #define XILINX_PCIE_INTR_SLV_CMPABT BIT(24) #define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25) #define XILINX_PCIE_INTR_MST_DECERR BIT(26) #define XILINX_PCIE_INTR_MST_SLVERR BIT(27) #define XILINX_PCIE_INTR_MST_ERRP BIT(28) #define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED #define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D #define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF /* Root Port Error FIFO Read Register definitions */ #define XILINX_PCIE_RPEFR_ERR_VALID BIT(18) #define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0) #define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF /* Root Port Interrupt FIFO Read Register 1 definitions */ #define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31) #define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30) #define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27) #define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF #define XILINX_PCIE_RPIFR1_INTR_SHIFT 27 /* Bridge Info Register definitions */ #define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16) #define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16 /* Root Port Interrupt FIFO Read Register 2 definitions */ #define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0) /* Root Port Status/control Register definitions */ #define XILINX_PCIE_REG_RPSC_BEN BIT(0) /* Phy Status/Control Register definitions */ #define XILINX_PCIE_REG_PSCR_LNKUP BIT(11) /* Number of MSI IRQs */ #define XILINX_NUM_MSI_IRQS 128 /** * struct xilinx_pcie - PCIe port information * @dev: Device pointer * @reg_base: IO Mapped Register Base * @msi_map: Bitmap of allocated MSIs * @map_lock: Mutex protecting the MSI allocation * @msi_domain: MSI IRQ domain pointer * @leg_domain: Legacy IRQ domain pointer * @resources: Bus Resources */ struct xilinx_pcie { struct device *dev; void __iomem *reg_base; unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)]; struct mutex map_lock; struct irq_domain *msi_domain; struct irq_domain *leg_domain; struct list_head 
resources; }; static inline u32 pcie_read(struct xilinx_pcie *pcie, u32 reg) { return readl(pcie->reg_base + reg); } static inline void pcie_write(struct xilinx_pcie *pcie, u32 val, u32 reg) { writel(val, pcie->reg_base + reg); } static inline bool xilinx_pcie_link_up(struct xilinx_pcie *pcie) { return (pcie_read(pcie, XILINX_PCIE_REG_PSCR) & XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; } /** * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts * @pcie: PCIe port information */ static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie *pcie) { struct device *dev = pcie->dev; unsigned long val = pcie_read(pcie, XILINX_PCIE_REG_RPEFR); if (val & XILINX_PCIE_RPEFR_ERR_VALID) { dev_dbg(dev, "Requester ID %lu\n", val & XILINX_PCIE_RPEFR_REQ_ID); pcie_write(pcie, XILINX_PCIE_RPEFR_ALL_MASK, XILINX_PCIE_REG_RPEFR); } } /** * xilinx_pcie_valid_device - Check if a valid device is present on bus * @bus: PCI Bus structure * @devfn: device/function * * Return: 'true' on success and 'false' if invalid device is found */ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { struct xilinx_pcie *pcie = bus->sysdata; /* Check if link is up when trying to access downstream pcie ports */ if (!pci_is_root_bus(bus)) { if (!xilinx_pcie_link_up(pcie)) return false; } else if (devfn > 0) { /* Only one device down on each root port */ return false; } return true; } /** * xilinx_pcie_map_bus - Get configuration base * @bus: PCI Bus structure * @devfn: Device/function * @where: Offset from base * * Return: Base address of the configuration space needed to be * accessed. */ static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct xilinx_pcie *pcie = bus->sysdata; if (!xilinx_pcie_valid_device(bus, devfn)) return NULL; return pcie->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); } /* PCIe operations */ static struct pci_ops xilinx_pcie_ops = { .map_bus = xilinx_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; /* MSI functions */ static void xilinx_msi_top_irq_ack(struct irq_data *d) { /* * xilinx_pcie_intr_handler() will have performed the Ack. * Eventually, this should be fixed and the Ack be moved in * the respective callbacks for INTx and MSI. 
*/ } static struct irq_chip xilinx_msi_top_chip = { .name = "PCIe MSI", .irq_ack = xilinx_msi_top_irq_ack, }; static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { return -EINVAL; } static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data); phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K); msg->address_lo = lower_32_bits(pa); msg->address_hi = upper_32_bits(pa); msg->data = data->hwirq; } static struct irq_chip xilinx_msi_bottom_chip = { .name = "Xilinx MSI", .irq_set_affinity = xilinx_msi_set_affinity, .irq_compose_msi_msg = xilinx_compose_msi_msg, }; static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct xilinx_pcie *pcie = domain->host_data; int hwirq, i; mutex_lock(&pcie->map_lock); hwirq = bitmap_find_free_region(pcie->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs)); mutex_unlock(&pcie->map_lock); if (hwirq < 0) return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, &xilinx_msi_bottom_chip, domain->host_data, handle_edge_irq, NULL, NULL); return 0; } static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct xilinx_pcie *pcie = domain->host_data; mutex_lock(&pcie->map_lock); bitmap_release_region(pcie->msi_map, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->map_lock); } static const struct irq_domain_ops xilinx_msi_domain_ops = { .alloc = xilinx_msi_domain_alloc, .free = xilinx_msi_domain_free, }; static struct msi_domain_info xilinx_msi_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), .chip = &xilinx_msi_top_chip, }; static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie) { struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); struct irq_domain *parent; parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS, &xilinx_msi_domain_ops, pcie); if (!parent) { dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent); if (!pcie->msi_domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); irq_domain_remove(parent); return -ENOMEM; } return 0; } static void xilinx_free_msi_domains(struct xilinx_pcie *pcie) { struct irq_domain *parent = pcie->msi_domain->parent; irq_domain_remove(pcie->msi_domain); irq_domain_remove(parent); } /* INTx Functions */ /** * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid * @domain: IRQ domain * @irq: Virtual IRQ number * @hwirq: HW interrupt number * * Return: Always returns 0. 
*/ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); return 0; } /* INTx IRQ Domain operations */ static const struct irq_domain_ops intx_domain_ops = { .map = xilinx_pcie_intx_map, .xlate = pci_irqd_intx_xlate, }; /* PCIe HW Functions */ /** * xilinx_pcie_intr_handler - Interrupt Service Handler * @irq: IRQ number * @data: PCIe port information * * Return: IRQ_HANDLED on success and IRQ_NONE on failure */ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) { struct xilinx_pcie *pcie = (struct xilinx_pcie *)data; struct device *dev = pcie->dev; u32 val, mask, status; /* Read interrupt decode and mask registers */ val = pcie_read(pcie, XILINX_PCIE_REG_IDR); mask = pcie_read(pcie, XILINX_PCIE_REG_IMR); status = val & mask; if (!status) return IRQ_NONE; if (status & XILINX_PCIE_INTR_LINK_DOWN) dev_warn(dev, "Link Down\n"); if (status & XILINX_PCIE_INTR_ECRC_ERR) dev_warn(dev, "ECRC failed\n"); if (status & XILINX_PCIE_INTR_STR_ERR) dev_warn(dev, "Streaming error\n"); if (status & XILINX_PCIE_INTR_HOT_RESET) dev_info(dev, "Hot reset\n"); if (status & XILINX_PCIE_INTR_CFG_TIMEOUT) dev_warn(dev, "ECAM access timeout\n"); if (status & XILINX_PCIE_INTR_CORRECTABLE) { dev_warn(dev, "Correctable error message\n"); xilinx_pcie_clear_err_interrupts(pcie); } if (status & XILINX_PCIE_INTR_NONFATAL) { dev_warn(dev, "Non fatal error message\n"); xilinx_pcie_clear_err_interrupts(pcie); } if (status & XILINX_PCIE_INTR_FATAL) { dev_warn(dev, "Fatal error message\n"); xilinx_pcie_clear_err_interrupts(pcie); } if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { struct irq_domain *domain; val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR1); /* Check whether interrupt valid */ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { dev_warn(dev, "RP Intr FIFO1 read error\n"); goto error; } /* Decode the IRQ number */ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) & XILINX_PCIE_RPIFR2_MSG_DATA; domain = pcie->msi_domain->parent; } else { val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> XILINX_PCIE_RPIFR1_INTR_SHIFT; domain = pcie->leg_domain; } /* Clear interrupt FIFO register 1 */ pcie_write(pcie, XILINX_PCIE_RPIFR1_ALL_MASK, XILINX_PCIE_REG_RPIFR1); generic_handle_domain_irq(domain, val); } if (status & XILINX_PCIE_INTR_SLV_UNSUPP) dev_warn(dev, "Slave unsupported request\n"); if (status & XILINX_PCIE_INTR_SLV_UNEXP) dev_warn(dev, "Slave unexpected completion\n"); if (status & XILINX_PCIE_INTR_SLV_COMPL) dev_warn(dev, "Slave completion timeout\n"); if (status & XILINX_PCIE_INTR_SLV_ERRP) dev_warn(dev, "Slave Error Poison\n"); if (status & XILINX_PCIE_INTR_SLV_CMPABT) dev_warn(dev, "Slave Completer Abort\n"); if (status & XILINX_PCIE_INTR_SLV_ILLBUR) dev_warn(dev, "Slave Illegal Burst\n"); if (status & XILINX_PCIE_INTR_MST_DECERR) dev_warn(dev, "Master decode error\n"); if (status & XILINX_PCIE_INTR_MST_SLVERR) dev_warn(dev, "Master slave error\n"); if (status & XILINX_PCIE_INTR_MST_ERRP) dev_warn(dev, "Master error poison\n"); error: /* Clear the Interrupt Decode register */ pcie_write(pcie, status, XILINX_PCIE_REG_IDR); return IRQ_HANDLED; } /** * xilinx_pcie_init_irq_domain - Initialize IRQ domain * @pcie: PCIe port information * * Return: '0' on success and error value on failure */ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie) { struct device *dev = pcie->dev; struct device_node 
*pcie_intc_node; int ret; /* Setup INTx */ pcie_intc_node = of_get_next_child(dev->of_node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); return -ENODEV; } pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, pcie); of_node_put(pcie_intc_node); if (!pcie->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; } /* Setup MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) { phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K); ret = xilinx_allocate_msi_domains(pcie); if (ret) return ret; pcie_write(pcie, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1); pcie_write(pcie, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2); } return 0; } /** * xilinx_pcie_init_port - Initialize hardware * @pcie: PCIe port information */ static void xilinx_pcie_init_port(struct xilinx_pcie *pcie) { struct device *dev = pcie->dev; if (xilinx_pcie_link_up(pcie)) dev_info(dev, "PCIe Link is UP\n"); else dev_info(dev, "PCIe Link is DOWN\n"); /* Disable all interrupts */ pcie_write(pcie, ~XILINX_PCIE_IDR_ALL_MASK, XILINX_PCIE_REG_IMR); /* Clear pending interrupts */ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_IDR) & XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IDR); /* Enable all interrupts we handle */ pcie_write(pcie, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); /* Enable the Bridge enable bit */ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_RPSC) | XILINX_PCIE_REG_RPSC_BEN, XILINX_PCIE_REG_RPSC); } /** * xilinx_pcie_parse_dt - Parse Device tree * @pcie: PCIe port information * * Return: '0' on success and error value on failure */ static int xilinx_pcie_parse_dt(struct xilinx_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *node = dev->of_node; struct resource regs; unsigned int irq; int err; err = of_address_to_resource(node, 0, &regs); if (err) { dev_err(dev, "missing \"reg\" property\n"); return err; } pcie->reg_base = devm_pci_remap_cfg_resource(dev, &regs); if (IS_ERR(pcie->reg_base)) return PTR_ERR(pcie->reg_base); irq = irq_of_parse_and_map(node, 0); err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler, IRQF_SHARED | IRQF_NO_THREAD, "xilinx-pcie", pcie); if (err) { dev_err(dev, "unable to request irq %d\n", irq); return err; } return 0; } /** * xilinx_pcie_probe - Probe function * @pdev: Platform device pointer * * Return: '0' on success and error value on failure */ static int xilinx_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct xilinx_pcie *pcie; struct pci_host_bridge *bridge; int err; if (!dev->of_node) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENODEV; pcie = pci_host_bridge_priv(bridge); mutex_init(&pcie->map_lock); pcie->dev = dev; err = xilinx_pcie_parse_dt(pcie); if (err) { dev_err(dev, "Parsing DT failed\n"); return err; } xilinx_pcie_init_port(pcie); err = xilinx_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); return err; } bridge->sysdata = pcie; bridge->ops = &xilinx_pcie_ops; err = pci_host_probe(bridge); if (err) xilinx_free_msi_domains(pcie); return err; } static const struct of_device_id xilinx_pcie_of_match[] = { { .compatible = "xlnx,axi-pcie-host-1.00.a", }, {} }; static struct platform_driver xilinx_pcie_driver = { .driver = { .name = "xilinx-pcie", .of_match_table = xilinx_pcie_of_match, .suppress_bind_attrs = true, }, .probe = xilinx_pcie_probe, }; builtin_platform_driver(xilinx_pcie_driver);
linux-master
drivers/pci/controller/pcie-xilinx.c
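/*
 * Editor's sketch (not part of the kernel sources above): how
 * xilinx_pcie_intr_handler() decodes the Root Port Interrupt FIFO.  When the
 * valid bit is set, bit 30 chooses between an MSI (hwirq taken from the low
 * 16 bits of RPIFR2) and an INTx (2-bit index in bits 28:27 of RPIFR1).  The
 * local macros mirror the XILINX_PCIE_RPIFR1_* and RPIFR2_* definitions
 * above; decode_fifo() is a hypothetical helper, not a kernel function.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RPIFR1_INTR_VALID	(1u << 31)
#define RPIFR1_MSI_INTR		(1u << 30)
#define RPIFR1_INTR_MASK	(0x3u << 27)	/* GENMASK(28, 27) */
#define RPIFR1_INTR_SHIFT	27
#define RPIFR2_MSG_DATA		0xffffu		/* GENMASK(15, 0) */

/* Returns -1 if no valid interrupt is latched, otherwise the hwirq number. */
static int decode_fifo(uint32_t rpifr1, uint32_t rpifr2, int *is_msi)
{
	if (!(rpifr1 & RPIFR1_INTR_VALID))
		return -1;

	if (rpifr1 & RPIFR1_MSI_INTR) {
		*is_msi = 1;
		return rpifr2 & RPIFR2_MSG_DATA;	/* MSI data payload */
	}
	*is_msi = 0;
	return (rpifr1 & RPIFR1_INTR_MASK) >> RPIFR1_INTR_SHIFT;
}

int main(void)
{
	int msi;

	assert(decode_fifo(0, 0, &msi) == -1);
	assert(decode_fifo(RPIFR1_INTR_VALID | RPIFR1_MSI_INTR, 42, &msi) == 42 && msi);
	assert(decode_fifo(RPIFR1_INTR_VALID | (2u << 27), 0, &msi) == 2 && !msi);
	printf("ok\n");
	return 0;
}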
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2004 Koninklijke Philips Electronics NV * * Conversion to platform driver and DT: * Copyright 2014 Linaro Ltd. * * 14/04/2005 Initial version, [email protected] */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/platform_device.h> #include "../pci.h" static void __iomem *versatile_pci_base; static void __iomem *versatile_cfg_base[2]; #define PCI_IMAP(m) (versatile_pci_base + ((m) * 4)) #define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4)) #define PCI_SELFID (versatile_pci_base + 0xc) #define VP_PCI_DEVICE_ID 0x030010ee #define VP_PCI_CLASS_ID 0x0b400000 static u32 pci_slot_ignore; static int __init versatile_pci_slot_ignore(char *str) { int slot; while (get_option(&str, &slot)) { if ((slot < 0) || (slot > 31)) pr_err("Illegal slot value: %d\n", slot); else pci_slot_ignore |= (1 << slot); } return 1; } __setup("pci_slot_ignore=", versatile_pci_slot_ignore); static void __iomem *versatile_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { unsigned int busnr = bus->number; if (pci_slot_ignore & (1 << PCI_SLOT(devfn))) return NULL; return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset); } static struct pci_ops pci_versatile_ops = { .map_bus = versatile_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write, }; static int versatile_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct resource_entry *entry; int i, myslot = -1, mem = 1; u32 val; void __iomem *local_pci_cfg_base; struct pci_host_bridge *bridge; bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; versatile_pci_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(versatile_pci_base)) return PTR_ERR(versatile_pci_base); versatile_cfg_base[0] = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(versatile_cfg_base[0])) return PTR_ERR(versatile_cfg_base[0]); res = platform_get_resource(pdev, IORESOURCE_MEM, 2); versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(versatile_cfg_base[1])) return PTR_ERR(versatile_cfg_base[1]); resource_list_for_each_entry(entry, &bridge->windows) { if (resource_type(entry->res) == IORESOURCE_MEM) { writel(entry->res->start >> 28, PCI_IMAP(mem)); writel(__pa(PAGE_OFFSET) >> 28, PCI_SMAP(mem)); mem++; } } /* * We need to discover the PCI core first to configure itself * before the main PCI probing is performed */ for (i = 0; i < 32; i++) { if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) && (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) { myslot = i; break; } } if (myslot == -1) { dev_err(dev, "Cannot find PCI core!\n"); return -EIO; } /* * Do not to map Versatile FPGA PCI device into memory space */ pci_slot_ignore |= (1 << myslot); dev_info(dev, "PCI core found (slot %d)\n", myslot); writel(myslot, PCI_SELFID); local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); val = readl(local_pci_cfg_base + PCI_COMMAND); val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; writel(val, local_pci_cfg_base + PCI_COMMAND); /* * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM */ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_0); writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_1); writel(__pa(PAGE_OFFSET), local_pci_cfg_base + 
PCI_BASE_ADDRESS_2); /* * For many years the kernel and QEMU were symbiotically buggy * in that they both assumed the same broken IRQ mapping. * QEMU therefore attempts to auto-detect old broken kernels * so that they still work on newer QEMU as they did on old * QEMU. Since we now use the correct (ie matching-hardware) * IRQ mapping we write a definitely different value to a * PCI_INTERRUPT_LINE register to tell QEMU that we expect * real hardware behaviour and it need not be backwards * compatible for us. This write is harmless on real hardware. */ writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); pci_add_flags(PCI_REASSIGN_ALL_BUS); bridge->ops = &pci_versatile_ops; return pci_host_probe(bridge); } static const struct of_device_id versatile_pci_of_match[] = { { .compatible = "arm,versatile-pci", }, { }, }; MODULE_DEVICE_TABLE(of, versatile_pci_of_match); static struct platform_driver versatile_pci_driver = { .driver = { .name = "versatile-pci", .of_match_table = versatile_pci_of_match, .suppress_bind_attrs = true, }, .probe = versatile_pci_probe, }; module_platform_driver(versatile_pci_driver); MODULE_DESCRIPTION("Versatile PCI driver");
linux-master
drivers/pci/controller/pci-versatile.c
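/*
 * Editor's sketch (not part of the kernel sources above): the slot-ignore
 * bitmask behind the pci_slot_ignore= boot parameter.  One bit per slot
 * 0..31; versatile_map_bus() refuses to map config space for any slot whose
 * bit is set, and the probe routine sets the bit for the PCI core's own
 * slot.  ignore_slot() and slot_visible() are hypothetical helpers modelling
 * that logic in userspace.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t slot_ignore;

static void ignore_slot(int slot)
{
	if (slot < 0 || slot > 31)
		fprintf(stderr, "Illegal slot value: %d\n", slot);
	else
		slot_ignore |= 1u << slot;
}

static bool slot_visible(unsigned int devfn)
{
	unsigned int slot = (devfn >> 3) & 0x1f;	/* PCI_SLOT(devfn) */

	return !(slot_ignore & (1u << slot));
}

int main(void)
{
	ignore_slot(29);			/* e.g. the slot the PCI core itself was found in */
	assert(!slot_visible(29 << 3));		/* its config space is hidden */
	assert(slot_visible(0 << 3));		/* other slots stay visible */
	printf("ok\n");
	return 0;
}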
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <[email protected]>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};

#define CPU_AFFINITY_ALL -1ULL

/*
 * Supported protocol versions in the order of probing - highest go
 * first.
*/ static enum pci_protocol_version_t pci_protocol_versions[] = { PCI_PROTOCOL_VERSION_1_4, PCI_PROTOCOL_VERSION_1_3, PCI_PROTOCOL_VERSION_1_2, PCI_PROTOCOL_VERSION_1_1, }; #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) #define MAX_SUPPORTED_MSI_MESSAGES 0x400 #define STATUS_REVISION_MISMATCH 0xC0000059 /* space for 32bit serial number as string */ #define SLOT_NAME_SIZE 11 /* * Size of requestor for VMbus; the value is based on the observation * that having more than one request outstanding is 'rare', and so 64 * should be generous in ensuring that we don't ever run out. */ #define HV_PCI_RQSTOR_SIZE 64 /* * Message Types */ enum pci_message_type { /* * Version 1.1 */ PCI_MESSAGE_BASE = 0x42490000, PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, PCI_EJECT = PCI_MESSAGE_BASE + 0xB, PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19, PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A, PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B, PCI_MESSAGE_MAXIMUM }; /* * Structures defining the virtual PCI Express protocol. */ union pci_version { struct { u16 minor_version; u16 major_version; } parts; u32 version; } __packed; /* * Function numbers are 8-bits wide on Express, as interpreted through ARI, * which is all this driver does. This representation is the one used in * Windows, which is what is expected when sending this back and forth with * the Hyper-V parent partition. */ union win_slot_encoding { struct { u32 dev:5; u32 func:3; u32 reserved:24; } bits; u32 slot; } __packed; /* * Pretty much as defined in the PCI Specifications. */ struct pci_function_description { u16 v_id; /* vendor ID */ u16 d_id; /* device ID */ u8 rev; u8 prog_intf; u8 subclass; u8 base_class; u32 subsystem_id; union win_slot_encoding win_slot; u32 ser; /* serial number */ } __packed; enum pci_device_description_flags { HV_PCI_DEVICE_FLAG_NONE = 0x0, HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1, }; struct pci_function_description2 { u16 v_id; /* vendor ID */ u16 d_id; /* device ID */ u8 rev; u8 prog_intf; u8 subclass; u8 base_class; u32 subsystem_id; union win_slot_encoding win_slot; u32 ser; /* serial number */ u32 flags; u16 virtual_numa_node; u16 reserved; } __packed; /** * struct hv_msi_desc * @vector: IDT entry * @delivery_mode: As defined in Intel's Programmer's * Reference Manual, Volume 3, Chapter 8. 
* @vector_count: Number of contiguous entries in the * Interrupt Descriptor Table that are * occupied by this Message-Signaled * Interrupt. For "MSI", as first defined * in PCI 2.2, this can be between 1 and * 32. For "MSI-X," as first defined in PCI * 3.0, this must be 1, as each MSI-X table * entry would have its own descriptor. * @reserved: Empty space * @cpu_mask: All the target virtual processors. */ struct hv_msi_desc { u8 vector; u8 delivery_mode; u16 vector_count; u32 reserved; u64 cpu_mask; } __packed; /** * struct hv_msi_desc2 - 1.2 version of hv_msi_desc * @vector: IDT entry * @delivery_mode: As defined in Intel's Programmer's * Reference Manual, Volume 3, Chapter 8. * @vector_count: Number of contiguous entries in the * Interrupt Descriptor Table that are * occupied by this Message-Signaled * Interrupt. For "MSI", as first defined * in PCI 2.2, this can be between 1 and * 32. For "MSI-X," as first defined in PCI * 3.0, this must be 1, as each MSI-X table * entry would have its own descriptor. * @processor_count: number of bits enabled in array. * @processor_array: All the target virtual processors. */ struct hv_msi_desc2 { u8 vector; u8 delivery_mode; u16 vector_count; u16 processor_count; u16 processor_array[32]; } __packed; /* * struct hv_msi_desc3 - 1.3 version of hv_msi_desc * Everything is the same as in 'hv_msi_desc2' except that the size of the * 'vector' field is larger to support bigger vector values. For ex: LPI * vectors on ARM. */ struct hv_msi_desc3 { u32 vector; u8 delivery_mode; u8 reserved; u16 vector_count; u16 processor_count; u16 processor_array[32]; } __packed; /** * struct tran_int_desc * @reserved: unused, padding * @vector_count: same as in hv_msi_desc * @data: This is the "data payload" value that is * written by the device when it generates * a message-signaled interrupt, either MSI * or MSI-X. * @address: This is the address to which the data * payload is written on interrupt * generation. */ struct tran_int_desc { u16 reserved; u16 vector_count; u32 data; u64 address; } __packed; /* * A generic message format for virtual PCI. * Specific message formats are defined later in the file. */ struct pci_message { u32 type; } __packed; struct pci_child_message { struct pci_message message_type; union win_slot_encoding wslot; } __packed; struct pci_incoming_message { struct vmpacket_descriptor hdr; struct pci_message message_type; } __packed; struct pci_response { struct vmpacket_descriptor hdr; s32 status; /* negative values are failures */ } __packed; struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; struct pci_message message[]; }; /* * Specific message types supporting the PCI protocol. */ /* * Version negotiation message. Sent from the guest to the host. * The guest is free to try different versions until the host * accepts the version. * * pci_version: The protocol version requested. * is_last_attempt: If TRUE, this is the last version guest will request. * reservedz: Reserved field, set to zero. */ struct pci_version_request { struct pci_message message_type; u32 protocol_version; } __packed; /* * Bus D0 Entry. This is sent from the guest to the host when the virtual * bus (PCI Express port) is ready for action. 
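 *
 * mmio_base below carries the guest physical address of the MMIO range
 * that the guest set aside for the configuration window, so the host
 * knows where to back the two-page config mechanism described later in
 * this file. (This is a summary of how the field is used elsewhere in
 * the driver, not an additional protocol requirement.)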
*/ struct pci_bus_d0_entry { struct pci_message message_type; u32 reserved; u64 mmio_base; } __packed; struct pci_bus_relations { struct pci_incoming_message incoming; u32 device_count; struct pci_function_description func[]; } __packed; struct pci_bus_relations2 { struct pci_incoming_message incoming; u32 device_count; struct pci_function_description2 func[]; } __packed; struct pci_q_res_req_response { struct vmpacket_descriptor hdr; s32 status; /* negative values are failures */ u32 probed_bar[PCI_STD_NUM_BARS]; } __packed; struct pci_set_power { struct pci_message message_type; union win_slot_encoding wslot; u32 power_state; /* In Windows terms */ u32 reserved; } __packed; struct pci_set_power_response { struct vmpacket_descriptor hdr; s32 status; /* negative values are failures */ union win_slot_encoding wslot; u32 resultant_state; /* In Windows terms */ u32 reserved; } __packed; struct pci_resources_assigned { struct pci_message message_type; union win_slot_encoding wslot; u8 memory_range[0x14][6]; /* not used here */ u32 msi_descriptors; u32 reserved[4]; } __packed; struct pci_resources_assigned2 { struct pci_message message_type; union win_slot_encoding wslot; u8 memory_range[0x14][6]; /* not used here */ u32 msi_descriptor_count; u8 reserved[70]; } __packed; struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc int_desc; } __packed; struct pci_create_int_response { struct pci_response response; u32 reserved; struct tran_int_desc int_desc; } __packed; struct pci_create_interrupt2 { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc2 int_desc; } __packed; struct pci_create_interrupt3 { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc3 int_desc; } __packed; struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct tran_int_desc int_desc; } __packed; /* * Note: the VM must pass a valid block id, wslot and bytes_requested. */ struct pci_read_block { struct pci_message message_type; u32 block_id; union win_slot_encoding wslot; u32 bytes_requested; } __packed; struct pci_read_block_response { struct vmpacket_descriptor hdr; u32 status; u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX]; } __packed; /* * Note: the VM must pass a valid block id, wslot and byte_count. */ struct pci_write_block { struct pci_message message_type; u32 block_id; union win_slot_encoding wslot; u32 byte_count; u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX]; } __packed; struct pci_dev_inval_block { struct pci_incoming_message incoming; union win_slot_encoding wslot; u64 block_mask; } __packed; struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; } __packed; struct pci_eject_response { struct pci_message message_type; union win_slot_encoding wslot; u32 status; } __packed; static int pci_ring_size = (4 * PAGE_SIZE); /* * Driver specific state. 
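 *
 * A rough sketch of the state progression, derived from how the enum
 * below is used in this file (not a formal state machine definition):
 * hv_pcibus_init -> hv_pcibus_probed -> hv_pcibus_installed once the
 * root bus has been scanned and its devices added, and finally
 * hv_pcibus_removing while the bus is being torn down.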
*/ enum hv_pcibus_state { hv_pcibus_init = 0, hv_pcibus_probed, hv_pcibus_installed, hv_pcibus_removing, hv_pcibus_maximum }; struct hv_pcibus_device { #ifdef CONFIG_X86 struct pci_sysdata sysdata; #elif defined(CONFIG_ARM64) struct pci_config_window sysdata; #endif struct pci_host_bridge *bridge; struct fwnode_handle *fwnode; /* Protocol version negotiated with the host */ enum pci_protocol_version_t protocol_version; struct mutex state_lock; enum hv_pcibus_state state; struct hv_device *hdev; resource_size_t low_mmio_space; resource_size_t high_mmio_space; struct resource *mem_config; struct resource *low_mmio_res; struct resource *high_mmio_res; struct completion *survey_event; struct pci_bus *pci_bus; spinlock_t config_lock; /* Avoid two threads writing index page */ spinlock_t device_list_lock; /* Protect lists below */ void __iomem *cfg_addr; struct list_head children; struct list_head dr_list; struct msi_domain_info msi_info; struct irq_domain *irq_domain; struct workqueue_struct *wq; /* Highest slot of child device with resources allocated */ int wslot_res_allocated; bool use_calls; /* Use hypercalls to access mmio cfg space */ }; /* * Tracks "Device Relations" messages from the host, which must be both * processed in order and deferred so that they don't run in the context * of the incoming packet callback. */ struct hv_dr_work { struct work_struct wrk; struct hv_pcibus_device *bus; }; struct hv_pcidev_description { u16 v_id; /* vendor ID */ u16 d_id; /* device ID */ u8 rev; u8 prog_intf; u8 subclass; u8 base_class; u32 subsystem_id; union win_slot_encoding win_slot; u32 ser; /* serial number */ u32 flags; u16 virtual_numa_node; }; struct hv_dr_state { struct list_head list_entry; u32 device_count; struct hv_pcidev_description func[]; }; struct hv_pci_dev { /* List protected by pci_rescan_remove_lock */ struct list_head list_entry; refcount_t refs; struct pci_slot *pci_slot; struct hv_pcidev_description desc; bool reported_missing; struct hv_pcibus_device *hbus; struct work_struct wrk; void (*block_invalidate)(void *context, u64 block_mask); void *invalidate_context; /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. */ u32 probed_bar[PCI_STD_NUM_BARS]; }; struct hv_pci_compl { struct completion host_event; s32 completion_status; }; static void hv_pci_onchannelcallback(void *context); #ifdef CONFIG_X86 #define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED #define FLOW_HANDLER handle_edge_irq #define FLOW_NAME "edge" static int hv_pci_irqchip_init(void) { return 0; } static struct irq_domain *hv_pci_get_root_domain(void) { return x86_vector_domain; } static unsigned int hv_msi_get_int_vector(struct irq_data *data) { struct irq_cfg *cfg = irqd_cfg(data); return cfg->vector; } #define hv_msi_prepare pci_msi_prepare /** * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current * affinity. * @data: Describes the IRQ * * Build new a destination for the MSI and make a hypercall to * update the Interrupt Redirection Table. "Device Logical ID" * is built out of this PCI bus's instance GUID and the function * number of the device. 
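 *
 * Illustrative restatement of the "Device Logical ID" packing, matching
 * the expression used in the function body and shown here only for
 * clarity:
 *
 *	device_id = (b[5] << 24) | (b[4] << 16) | (b[7] << 8) |
 *		    (b[6] & 0xf8) | PCI_FUNC(pdev->devfn);
 *
 * where b[] are bytes of the VMBus channel instance GUID, so the low
 * three bits of the result carry the PCI function number.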
*/ static void hv_arch_irq_unmask(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct hv_retarget_device_interrupt *params; struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; const struct cpumask *dest; cpumask_var_t tmp; struct pci_bus *pbus; struct pci_dev *pdev; unsigned long flags; u32 var_size = 0; int cpu, nr_bank; u64 res; dest = irq_data_get_effective_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); int_desc = data->chip_data; if (!int_desc) { dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n", __func__, data->irq); return; } local_irq_save(flags); params = *this_cpu_ptr(hyperv_pcpu_input_arg); memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff; params->int_entry.msi_entry.data.as_uint32 = int_desc->data; params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | (hbus->hdev->dev_instance.b[6] & 0xf8) | PCI_FUNC(pdev->devfn); params->int_target.vector = hv_msi_get_int_vector(data); /* * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a * spurious interrupt storm. Not doing so does not seem to have a * negative effect (yet?). */ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { /* * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides * with >64 VP support. * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED * is not sufficient for this hypercall. */ params->int_target.flags |= HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) { res = 1; goto out; } cpumask_and(tmp, dest, cpu_online_mask); nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp); free_cpumask_var(tmp); if (nr_bank <= 0) { res = 1; goto out; } /* * var-sized hypercall, var-size starts after vp_mask (thus * vp_set.format does not count, but vp_set.valid_bank_mask * does). */ var_size = 1 + nr_bank; } else { for_each_cpu_and(cpu, dest, cpu_online_mask) { params->int_target.vp_mask |= (1ULL << hv_cpu_number_to_vp_number(cpu)); } } res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), params, NULL); out: local_irq_restore(flags); /* * During hibernation, when a CPU is offlined, the kernel tries * to move the interrupt to the remaining CPUs that haven't * been offlined yet. In this case, the below hv_do_hypercall() * always fails since the vmbus channel has been closed: * refer to cpu_disable_common() -> fixup_irqs() -> * irq_migrate_all_off_this_cpu() -> migrate_one_irq(). * * Suppress the error message for hibernation because the failure * during hibernation does not matter (at this time all the devices * have been frozen). Note: the correct affinity info is still updated * into the irqdata data structure in migrate_one_irq() -> * irq_do_set_affinity(), so later when the VM resumes, * hv_pci_restore_msi_state() is able to correctly restore the * interrupt with the correct affinity. 
*/ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing) dev_err(&hbus->hdev->device, "%s() failed: %#llx", __func__, res); } #elif defined(CONFIG_ARM64) /* * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit * of room at the start to allow for SPIs to be specified through ACPI and * starting with a power of two to satisfy power of 2 multi-MSI requirement. */ #define HV_PCI_MSI_SPI_START 64 #define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START) #define DELIVERY_MODE 0 #define FLOW_HANDLER NULL #define FLOW_NAME NULL #define hv_msi_prepare NULL struct hv_pci_chip_data { DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR); struct mutex map_lock; }; /* Hyper-V vPCI MSI GIC IRQ domain */ static struct irq_domain *hv_msi_gic_irq_domain; /* Hyper-V PCI MSI IRQ chip */ static struct irq_chip hv_arm64_msi_irq_chip = { .name = "MSI", .irq_set_affinity = irq_chip_set_affinity_parent, .irq_eoi = irq_chip_eoi_parent, .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent }; static unsigned int hv_msi_get_int_vector(struct irq_data *irqd) { return irqd->parent_data->hwirq; } /* * @nr_bm_irqs: Indicates the number of IRQs that were allocated from * the bitmap. * @nr_dom_irqs: Indicates the number of IRQs that were allocated from * the parent domain. */ static void hv_pci_vec_irq_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_bm_irqs, unsigned int nr_dom_irqs) { struct hv_pci_chip_data *chip_data = domain->host_data; struct irq_data *d = irq_domain_get_irq_data(domain, virq); int first = d->hwirq - HV_PCI_MSI_SPI_START; int i; mutex_lock(&chip_data->map_lock); bitmap_release_region(chip_data->spi_map, first, get_count_order(nr_bm_irqs)); mutex_unlock(&chip_data->map_lock); for (i = 0; i < nr_dom_irqs; i++) { if (i) d = irq_domain_get_irq_data(domain, virq + i); irq_domain_reset_irq_data(d); } irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs); } static void hv_pci_vec_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs); } static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain, unsigned int nr_irqs, irq_hw_number_t *hwirq) { struct hv_pci_chip_data *chip_data = domain->host_data; int index; /* Find and allocate region from the SPI bitmap */ mutex_lock(&chip_data->map_lock); index = bitmap_find_free_region(chip_data->spi_map, HV_PCI_MSI_SPI_NR, get_count_order(nr_irqs)); mutex_unlock(&chip_data->map_lock); if (index < 0) return -ENOSPC; *hwirq = index + HV_PCI_MSI_SPI_START; return 0; } static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { struct irq_fwspec fwspec; struct irq_data *d; int ret; fwspec.fwnode = domain->parent->fwnode; fwspec.param_count = 2; fwspec.param[0] = hwirq; fwspec.param[1] = IRQ_TYPE_EDGE_RISING; ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); if (ret) return ret; /* * Since the interrupt specifier is not coming from ACPI or DT, the * trigger type will need to be set explicitly. Otherwise, it will be * set to whatever is in the GIC configuration. 
*/ d = irq_domain_get_irq_data(domain->parent, virq); return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); } static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { irq_hw_number_t hwirq; unsigned int i; int ret; ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq); if (ret) return ret; for (i = 0; i < nr_irqs; i++) { ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (ret) { hv_pci_vec_irq_free(domain, virq, nr_irqs, i); return ret; } irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &hv_arm64_msi_irq_chip, domain->host_data); pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i); } return 0; } /* * Pick the first cpu as the irq affinity that can be temporarily used for * composing MSI from the hypervisor. GIC will eventually set the right * affinity for the irq and the 'unmask' will retarget the interrupt to that * cpu. */ static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain, struct irq_data *irqd, bool reserve) { int cpu = cpumask_first(cpu_present_mask); irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); return 0; } static const struct irq_domain_ops hv_pci_domain_ops = { .alloc = hv_pci_vec_irq_domain_alloc, .free = hv_pci_vec_irq_domain_free, .activate = hv_pci_vec_irq_domain_activate, }; static int hv_pci_irqchip_init(void) { static struct hv_pci_chip_data *chip_data; struct fwnode_handle *fn = NULL; int ret = -ENOMEM; chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); if (!chip_data) return ret; mutex_init(&chip_data->map_lock); fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64"); if (!fn) goto free_chip; /* * IRQ domain once enabled, should not be removed since there is no * way to ensure that all the corresponding devices are also gone and * no interrupts will be generated. */ hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, fn, &hv_pci_domain_ops, chip_data); if (!hv_msi_gic_irq_domain) { pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); goto free_chip; } return 0; free_chip: kfree(chip_data); if (fn) irq_domain_free_fwnode(fn); return ret; } static struct irq_domain *hv_pci_get_root_domain(void) { return hv_msi_gic_irq_domain; } /* * SPIs are used for interrupts of PCI devices and SPIs is managed via GICD * registers which Hyper-V already supports, so no hypercall needed. */ static void hv_arch_irq_unmask(struct irq_data *data) { } #endif /* CONFIG_ARM64 */ /** * hv_pci_generic_compl() - Invoked for a completion packet * @context: Set up by the sender of the packet. * @resp: The response packet * @resp_packet_size: Size in bytes of the packet * * This function is used to trigger an event and report status * for any message for which the completion packet contains a * status and nothing else. */ static void hv_pci_generic_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; comp_pkt->completion_status = resp->status; complete(&comp_pkt->host_event); } static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, u32 wslot); static void get_pcichild(struct hv_pci_dev *hpdev) { refcount_inc(&hpdev->refs); } static void put_pcichild(struct hv_pci_dev *hpdev) { if (refcount_dec_and_test(&hpdev->refs)) kfree(hpdev); } /* * There is no good way to get notified from vmbus_onoffer_rescind(), * so let's use polling here, since this is not a hot path. 
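 *
 * Concretely, wait_for_response() below sleeps in slices of HZ/10
 * (about 100 ms) and re-checks channel->rescind between slices, so a
 * rescind that arrives while we are waiting is noticed within roughly
 * one slice instead of blocking forever.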
*/ static int wait_for_response(struct hv_device *hdev, struct completion *comp) { while (true) { if (hdev->channel->rescind) { dev_warn_once(&hdev->device, "The device is gone.\n"); return -ENODEV; } if (wait_for_completion_timeout(comp, HZ / 10)) break; } return 0; } /** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot * * Windows uses a slightly different representation of PCI slot. * * Return: The Windows representation */ static u32 devfn_to_wslot(int devfn) { union win_slot_encoding wslot; wslot.slot = 0; wslot.bits.dev = PCI_SLOT(devfn); wslot.bits.func = PCI_FUNC(devfn); return wslot.slot; } /** * wslot_to_devfn() - Convert from Windows PCI slot to Linux * @wslot: The Windows representation of PCI slot * * Windows uses a slightly different representation of PCI slot. * * Return: The Linux representation */ static int wslot_to_devfn(u32 wslot) { union win_slot_encoding slot_no; slot_no.slot = wslot; return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func); } static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val) { struct hv_mmio_read_input *in; struct hv_mmio_read_output *out; u64 ret; /* * Must be called with interrupts disabled so it is safe * to use the per-cpu input argument page. Use it for * both input and output. */ in = *this_cpu_ptr(hyperv_pcpu_input_arg); out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in); in->gpa = gpa; in->size = size; ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out); if (hv_result_success(ret)) { switch (size) { case 1: *val = *(u8 *)(out->data); break; case 2: *val = *(u16 *)(out->data); break; default: *val = *(u32 *)(out->data); break; } } else dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n", ret, gpa, size); } static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val) { struct hv_mmio_write_input *in; u64 ret; /* * Must be called with interrupts disabled so it is safe * to use the per-cpu input argument memory. */ in = *this_cpu_ptr(hyperv_pcpu_input_arg); in->gpa = gpa; in->size = size; switch (size) { case 1: *(u8 *)(in->data) = val; break; case 2: *(u16 *)(in->data) = val; break; default: *(u32 *)(in->data) = val; break; } ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL); if (!hv_result_success(ret)) dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n", ret, gpa, size); } /* * PCI Configuration Space for these root PCI buses is implemented as a pair * of pages in memory-mapped I/O space. Writing to the first page chooses * the PCI function being written or read. Once the first page has been * written to, the following page maps in the entire configuration space of * the function. */ /** * _hv_pcifront_read_config() - Internal PCI config read * @hpdev: The PCI driver's representation of the device * @where: Offset within config space * @size: Size of the transfer * @val: Pointer to the buffer receiving the data */ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size, u32 *val) { struct hv_pcibus_device *hbus = hpdev->hbus; struct device *dev = &hbus->hdev->device; int offset = where + CFG_PAGE_OFFSET; unsigned long flags; /* * If the attempt is to read the IDs or the ROM BAR, simulate that. 
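 *
 * Roughly, the emulated ranges are (summarizing the checks below, not
 * an independent specification): vendor/device ID and class/revision
 * come from the cached device description, as do the subsystem IDs;
 * the ROM BAR and Interrupt Line/Pin read back as zero; and everything
 * else within CFG_PAGE_SIZE is forwarded to the function's real config
 * space through the shared window.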
*/ if (where + size <= PCI_COMMAND) { memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size); } else if (where >= PCI_CLASS_REVISION && where + size <= PCI_CACHE_LINE_SIZE) { memcpy(val, ((u8 *)&hpdev->desc.rev) + where - PCI_CLASS_REVISION, size); } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <= PCI_ROM_ADDRESS) { memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where - PCI_SUBSYSTEM_VENDOR_ID, size); } else if (where >= PCI_ROM_ADDRESS && where + size <= PCI_CAPABILITY_LIST) { /* ROM BARs are unimplemented */ *val = 0; } else if (where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled * interrupts. */ *val = 0; } else if (where + size <= CFG_PAGE_SIZE) { spin_lock_irqsave(&hbus->config_lock, flags); if (hbus->use_calls) { phys_addr_t addr = hbus->mem_config->start + offset; hv_pci_write_mmio(dev, hbus->mem_config->start, 4, hpdev->desc.win_slot.slot); hv_pci_read_mmio(dev, addr, size, val); } else { void __iomem *addr = hbus->cfg_addr + offset; /* Choose the function to be read. (See comment above) */ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); /* Make sure the function was chosen before reading. */ mb(); /* Read from that function's config space. */ switch (size) { case 1: *val = readb(addr); break; case 2: *val = readw(addr); break; default: *val = readl(addr); break; } /* * Make sure the read was done before we release the * spinlock allowing consecutive reads/writes. */ mb(); } spin_unlock_irqrestore(&hbus->config_lock, flags); } else { dev_err(dev, "Attempt to read beyond a function's config space.\n"); } } static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) { struct hv_pcibus_device *hbus = hpdev->hbus; struct device *dev = &hbus->hdev->device; u32 val; u16 ret; unsigned long flags; spin_lock_irqsave(&hbus->config_lock, flags); if (hbus->use_calls) { phys_addr_t addr = hbus->mem_config->start + CFG_PAGE_OFFSET + PCI_VENDOR_ID; hv_pci_write_mmio(dev, hbus->mem_config->start, 4, hpdev->desc.win_slot.slot); hv_pci_read_mmio(dev, addr, 2, &val); ret = val; /* Truncates to 16 bits */ } else { void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET + PCI_VENDOR_ID; /* Choose the function to be read. (See comment above) */ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); /* Make sure the function was chosen before we start reading. */ mb(); /* Read from that function's config space. */ ret = readw(addr); /* * mb() is not required here, because the * spin_unlock_irqrestore() is a barrier. 
*/ } spin_unlock_irqrestore(&hbus->config_lock, flags); return ret; } /** * _hv_pcifront_write_config() - Internal PCI config write * @hpdev: The PCI driver's representation of the device * @where: Offset within config space * @size: Size of the transfer * @val: The data being transferred */ static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size, u32 val) { struct hv_pcibus_device *hbus = hpdev->hbus; struct device *dev = &hbus->hdev->device; int offset = where + CFG_PAGE_OFFSET; unsigned long flags; if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <= PCI_CAPABILITY_LIST) { /* SSIDs and ROM BARs are read-only */ } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) { spin_lock_irqsave(&hbus->config_lock, flags); if (hbus->use_calls) { phys_addr_t addr = hbus->mem_config->start + offset; hv_pci_write_mmio(dev, hbus->mem_config->start, 4, hpdev->desc.win_slot.slot); hv_pci_write_mmio(dev, addr, size, val); } else { void __iomem *addr = hbus->cfg_addr + offset; /* Choose the function to write. (See comment above) */ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr); /* Make sure the function was chosen before writing. */ wmb(); /* Write to that function's config space. */ switch (size) { case 1: writeb(val, addr); break; case 2: writew(val, addr); break; default: writel(val, addr); break; } /* * Make sure the write was done before we release the * spinlock allowing consecutive reads/writes. */ mb(); } spin_unlock_irqrestore(&hbus->config_lock, flags); } else { dev_err(dev, "Attempt to write beyond a function's config space.\n"); } } /** * hv_pcifront_read_config() - Read configuration space * @bus: PCI Bus structure * @devfn: Device/function * @where: Offset from base * @size: Byte/word/dword * @val: Value to be read * * Return: PCIBIOS_SUCCESSFUL on success * PCIBIOS_DEVICE_NOT_FOUND on failure */ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct hv_pcibus_device *hbus = container_of(bus->sysdata, struct hv_pcibus_device, sysdata); struct hv_pci_dev *hpdev; hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); if (!hpdev) return PCIBIOS_DEVICE_NOT_FOUND; _hv_pcifront_read_config(hpdev, where, size, val); put_pcichild(hpdev); return PCIBIOS_SUCCESSFUL; } /** * hv_pcifront_write_config() - Write configuration space * @bus: PCI Bus structure * @devfn: Device/function * @where: Offset from base * @size: Byte/word/dword * @val: Value to be written to device * * Return: PCIBIOS_SUCCESSFUL on success * PCIBIOS_DEVICE_NOT_FOUND on failure */ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct hv_pcibus_device *hbus = container_of(bus->sysdata, struct hv_pcibus_device, sysdata); struct hv_pci_dev *hpdev; hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); if (!hpdev) return PCIBIOS_DEVICE_NOT_FOUND; _hv_pcifront_write_config(hpdev, where, size, val); put_pcichild(hpdev); return PCIBIOS_SUCCESSFUL; } /* PCIe operations */ static struct pci_ops hv_pcifront_ops = { .read = hv_pcifront_read_config, .write = hv_pcifront_write_config, }; /* * Paravirtual backchannel * * Hyper-V SR-IOV provides a backchannel mechanism in software for * communication between a VF driver and a PF driver. These * "configuration blocks" are similar in concept to PCI configuration space, * but instead of doing reads and writes in 32-bit chunks through a very slow * path, packets of up to 128 bytes can be sent or received asynchronously. 
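 *
 * (For scale: HV_CONFIG_BLOCK_SIZE_MAX, used by the read/write helpers
 * below to bound each transfer, is the per-packet payload limit being
 * referred to here.)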
* * Nearly every SR-IOV device contains just such a communications channel in * hardware, so using this one in software is usually optional. Using the * software channel, however, allows driver implementers to leverage software * tools that fuzz the communications channel looking for vulnerabilities. * * The usage model for these packets puts the responsibility for reading or * writing on the VF driver. The VF driver sends a read or a write packet, * indicating which "block" is being referred to by number. * * If the PF driver wishes to initiate communication, it can "invalidate" one or * more of the first 64 blocks. This invalidation is delivered via a callback * supplied by the VF driver by this driver. * * No protocol is implied, except that supplied by the PF and VF drivers. */ struct hv_read_config_compl { struct hv_pci_compl comp_pkt; void *buf; unsigned int len; unsigned int bytes_returned; }; /** * hv_pci_read_config_compl() - Invoked when a response packet * for a read config block operation arrives. * @context: Identifies the read config operation * @resp: The response packet itself * @resp_packet_size: Size in bytes of the response packet */ static void hv_pci_read_config_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_read_config_compl *comp = context; struct pci_read_block_response *read_resp = (struct pci_read_block_response *)resp; unsigned int data_len, hdr_len; hdr_len = offsetof(struct pci_read_block_response, bytes); if (resp_packet_size < hdr_len) { comp->comp_pkt.completion_status = -1; goto out; } data_len = resp_packet_size - hdr_len; if (data_len > 0 && read_resp->status == 0) { comp->bytes_returned = min(comp->len, data_len); memcpy(comp->buf, read_resp->bytes, comp->bytes_returned); } else { comp->bytes_returned = 0; } comp->comp_pkt.completion_status = read_resp->status; out: complete(&comp->comp_pkt.host_event); } /** * hv_read_config_block() - Sends a read config block request to * the back-end driver running in the Hyper-V parent partition. * @pdev: The PCI driver's representation for this device. * @buf: Buffer into which the config block will be copied. * @len: Size in bytes of buf. * @block_id: Identifies the config block which has been requested. * @bytes_returned: Size which came back from the back-end driver. 
* * Return: 0 on success, -errno on failure */ static int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len, unsigned int block_id, unsigned int *bytes_returned) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, sysdata); struct { struct pci_packet pkt; char buf[sizeof(struct pci_read_block)]; } pkt; struct hv_read_config_compl comp_pkt; struct pci_read_block *read_blk; int ret; if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) return -EINVAL; init_completion(&comp_pkt.comp_pkt.host_event); comp_pkt.buf = buf; comp_pkt.len = len; memset(&pkt, 0, sizeof(pkt)); pkt.pkt.completion_func = hv_pci_read_config_compl; pkt.pkt.compl_ctxt = &comp_pkt; read_blk = (struct pci_read_block *)&pkt.pkt.message; read_blk->message_type.type = PCI_READ_BLOCK; read_blk->wslot.slot = devfn_to_wslot(pdev->devfn); read_blk->block_id = block_id; read_blk->bytes_requested = len; ret = vmbus_sendpacket(hbus->hdev->channel, read_blk, sizeof(*read_blk), (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) return ret; ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event); if (ret) return ret; if (comp_pkt.comp_pkt.completion_status != 0 || comp_pkt.bytes_returned == 0) { dev_err(&hbus->hdev->device, "Read Config Block failed: 0x%x, bytes_returned=%d\n", comp_pkt.comp_pkt.completion_status, comp_pkt.bytes_returned); return -EIO; } *bytes_returned = comp_pkt.bytes_returned; return 0; } /** * hv_pci_write_config_compl() - Invoked when a response packet for a write * config block operation arrives. * @context: Identifies the write config operation * @resp: The response packet itself * @resp_packet_size: Size in bytes of the response packet */ static void hv_pci_write_config_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; comp_pkt->completion_status = resp->status; complete(&comp_pkt->host_event); } /** * hv_write_config_block() - Sends a write config block request to the * back-end driver running in the Hyper-V parent partition. * @pdev: The PCI driver's representation for this device. * @buf: Buffer from which the config block will be copied. * @len: Size in bytes of buf. * @block_id: Identifies the config block which is being written. * * Return: 0 on success, -errno on failure */ static int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, unsigned int block_id) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, sysdata); struct { struct pci_packet pkt; char buf[sizeof(struct pci_write_block)]; u32 reserved; } pkt; struct hv_pci_compl comp_pkt; struct pci_write_block *write_blk; u32 pkt_size; int ret; if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) return -EINVAL; init_completion(&comp_pkt.host_event); memset(&pkt, 0, sizeof(pkt)); pkt.pkt.completion_func = hv_pci_write_config_compl; pkt.pkt.compl_ctxt = &comp_pkt; write_blk = (struct pci_write_block *)&pkt.pkt.message; write_blk->message_type.type = PCI_WRITE_BLOCK; write_blk->wslot.slot = devfn_to_wslot(pdev->devfn); write_blk->block_id = block_id; write_blk->byte_count = len; memcpy(write_blk->bytes, buf, len); pkt_size = offsetof(struct pci_write_block, bytes) + len; /* * This quirk is required on some hosts shipped around 2018, because * these hosts don't check the pkt_size correctly (new hosts have been * fixed since early 2019). 
The quirk is also safe on very old hosts * and new hosts, because, on them, what really matters is the length * specified in write_blk->byte_count. */ pkt_size += sizeof(pkt.reserved); ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size, (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) return ret; ret = wait_for_response(hbus->hdev, &comp_pkt.host_event); if (ret) return ret; if (comp_pkt.completion_status != 0) { dev_err(&hbus->hdev->device, "Write Config Block failed: 0x%x\n", comp_pkt.completion_status); return -EIO; } return 0; } /** * hv_register_block_invalidate() - Invoked when a config block invalidation * arrives from the back-end driver. * @pdev: The PCI driver's representation for this device. * @context: Identifies the device. * @block_invalidate: Identifies all of the blocks being invalidated. * * Return: 0 on success, -errno on failure */ static int hv_register_block_invalidate(struct pci_dev *pdev, void *context, void (*block_invalidate)(void *context, u64 block_mask)) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, sysdata); struct hv_pci_dev *hpdev; hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); if (!hpdev) return -ENODEV; hpdev->block_invalidate = block_invalidate; hpdev->invalidate_context = context; put_pcichild(hpdev); return 0; } /* Interrupt management hooks */ static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct tran_int_desc *int_desc) { struct pci_delete_interrupt *int_pkt; struct { struct pci_packet pkt; u8 buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; if (!int_desc->vector_count) { kfree(int_desc); return; } memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = hpdev->desc.win_slot.slot; int_pkt->int_desc = *int_desc; vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt), 0, VM_PKT_DATA_INBAND, 0); kfree(int_desc); } /** * hv_msi_free() - Free the MSI. * @domain: The interrupt domain pointer * @info: Extra MSI-related context * @irq: Identifies the IRQ. * * The Hyper-V parent partition and hypervisor are tracking the * messages that are in use, keeping the interrupt redirection * table up to date. This callback sends a message that frees * the IRT entry and related tracking nonsense. 
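 *
 * In practice (see the body below) the per-IRQ chip_data holds the
 * struct tran_int_desc that the host returned when the message was
 * composed; freeing it here also sends a PCI_DELETE_INTERRUPT_MESSAGE
 * via hv_int_desc_free() so the host can drop its IRT entry.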
*/ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsigned int irq) { struct hv_pcibus_device *hbus; struct hv_pci_dev *hpdev; struct pci_dev *pdev; struct tran_int_desc *int_desc; struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq); struct msi_desc *msi = irq_data_get_msi_desc(irq_data); pdev = msi_desc_to_pci_dev(msi); hbus = info->data; int_desc = irq_data_get_irq_chip_data(irq_data); if (!int_desc) return; irq_data->chip_data = NULL; hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); if (!hpdev) { kfree(int_desc); return; } hv_int_desc_free(hpdev, int_desc); put_pcichild(hpdev); } static void hv_irq_mask(struct irq_data *data) { pci_msi_mask_irq(data); if (data->parent_data->chip->irq_mask) irq_chip_mask_parent(data); } static void hv_irq_unmask(struct irq_data *data) { hv_arch_irq_unmask(data); if (data->parent_data->chip->irq_unmask) irq_chip_unmask_parent(data); pci_msi_unmask_irq(data); } struct compose_comp_ctxt { struct hv_pci_compl comp_pkt; struct tran_int_desc int_desc; }; static void hv_pci_compose_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct compose_comp_ctxt *comp_pkt = context; struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; if (resp_packet_size < sizeof(*int_resp)) { comp_pkt->comp_pkt.completion_status = -1; goto out; } comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; out: complete(&comp_pkt->comp_pkt.host_event); } static u32 hv_compose_msi_req_v1( struct pci_create_interrupt *int_pkt, u32 slot, u8 vector, u16 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; /* * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in * hv_irq_unmask(). */ int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; return sizeof(*int_pkt); } /* * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is * not irrelevant because Hyper-V chooses the physical CPU to handle the * interrupts based on the vCPU specified in message sent to the vPCI VSP in * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest, * but assigning too many vPCI device interrupts to the same pCPU can cause a * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V * to spread out the pCPUs that it selects. * * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu() * to always return the same dummy vCPU, because a second call to * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a * new pCPU for the interrupt. But for the multi-MSI case, the second call to * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that * the pCPUs are spread out. All interrupts for a multi-MSI device end up using * the same pCPU, even though the vCPUs will be spread out by later calls * to hv_irq_unmask(), but that is the best we can do now. 
* * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not* * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an * enhancement is planned for a future version. With that enhancement, the * dummy vCPU selection won't matter, and interrupts for the same multi-MSI * device will be spread across multiple pCPUs. */ /* * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten * by subsequent retarget in hv_irq_unmask(). */ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity) { return cpumask_first_and(affinity, cpu_online_mask); } /* * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0. */ static int hv_compose_multi_msi_req_get_cpu(void) { static DEFINE_SPINLOCK(multi_msi_cpu_lock); /* -1 means starting with CPU 0 */ static int cpu_next = -1; unsigned long flags; int cpu; spin_lock_irqsave(&multi_msi_cpu_lock, flags); cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids, false); cpu = cpu_next; spin_unlock_irqrestore(&multi_msi_cpu_lock, flags); return cpu; } static u32 hv_compose_msi_req_v2( struct pci_create_interrupt2 *int_pkt, int cpu, u32 slot, u8 vector, u16 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; int_pkt->int_desc.processor_array[0] = hv_cpu_number_to_vp_number(cpu); int_pkt->int_desc.processor_count = 1; return sizeof(*int_pkt); } static u32 hv_compose_msi_req_v3( struct pci_create_interrupt3 *int_pkt, int cpu, u32 slot, u32 vector, u16 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.reserved = 0; int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; int_pkt->int_desc.processor_array[0] = hv_cpu_number_to_vp_number(cpu); int_pkt->int_desc.processor_count = 1; return sizeof(*int_pkt); } /** * hv_compose_msi_msg() - Supplies a valid MSI address/data * @data: Everything about this MSI * @msg: Buffer that is filled in by this function * * This function unpacks the IRQ looking for target CPU set, IDT * vector and mode and sends a message to the parent partition * asking for a mapping for that tuple in this partition. The * response supplies a data value and address to which that data * should be written to trigger that interrupt. */ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct hv_pcibus_device *hbus; struct vmbus_channel *channel; struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; const struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct msi_desc *msi_desc; /* * vector_count should be u16: see hv_msi_desc, hv_msi_desc2 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3. 
*/ u16 vector_count; u32 vector; struct { struct pci_packet pci_pkt; union { struct pci_create_interrupt v1; struct pci_create_interrupt2 v2; struct pci_create_interrupt3 v3; } int_pkts; } __packed ctxt; bool multi_msi; u64 trans_id; u32 size; int ret; int cpu; msi_desc = irq_data_get_msi_desc(data); multi_msi = !msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1; /* Reuse the previous allocation */ if (data->chip_data && multi_msi) { int_desc = data->chip_data; msg->address_hi = int_desc->address >> 32; msg->address_lo = int_desc->address & 0xffffffff; msg->data = int_desc->data; return; } pdev = msi_desc_to_pci_dev(msi_desc); dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); channel = hbus->hdev->channel; hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); if (!hpdev) goto return_null_message; /* Free any previous message that might have already been composed. */ if (data->chip_data && !multi_msi) { int_desc = data->chip_data; data->chip_data = NULL; hv_int_desc_free(hpdev, int_desc); } int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference; if (multi_msi) { /* * If this is not the first MSI of Multi MSI, we already have * a mapping. Can exit early. */ if (msi_desc->irq != data->irq) { data->chip_data = int_desc; int_desc->address = msi_desc->msg.address_lo | (u64)msi_desc->msg.address_hi << 32; int_desc->data = msi_desc->msg.data + (data->irq - msi_desc->irq); msg->address_hi = msi_desc->msg.address_hi; msg->address_lo = msi_desc->msg.address_lo; msg->data = int_desc->data; put_pcichild(hpdev); return; } /* * The vector we select here is a dummy value. The correct * value gets sent to the hypervisor in unmask(). This needs * to be aligned with the count, and also not zero. Multi-msi * is powers of 2 up to 32, so 32 will always work here. */ vector = 32; vector_count = msi_desc->nvec_used; cpu = hv_compose_multi_msi_req_get_cpu(); } else { vector = hv_msi_get_int_vector(data); vector_count = 1; cpu = hv_compose_msi_req_get_cpu(dest); } /* * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector' * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly * for better readability. */ memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); ctxt.pci_pkt.completion_func = hv_pci_compose_compl; ctxt.pci_pkt.compl_ctxt = &comp; switch (hbus->protocol_version) { case PCI_PROTOCOL_VERSION_1_1: size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, hpdev->desc.win_slot.slot, (u8)vector, vector_count); break; case PCI_PROTOCOL_VERSION_1_2: case PCI_PROTOCOL_VERSION_1_3: size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, cpu, hpdev->desc.win_slot.slot, (u8)vector, vector_count); break; case PCI_PROTOCOL_VERSION_1_4: size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, cpu, hpdev->desc.win_slot.slot, vector, vector_count); break; default: /* As we only negotiate protocol versions known to this driver, * this path should never hit. However, this is it not a hot * path so we print a message to aid future updates. 
*/ dev_err(&hbus->hdev->device, "Unexpected vPCI protocol, update driver."); goto free_int_desc; } ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts, size, (unsigned long)&ctxt.pci_pkt, &trans_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { dev_err(&hbus->hdev->device, "Sending request for interrupt failed: 0x%x", comp.comp_pkt.completion_status); goto free_int_desc; } /* * Prevents hv_pci_onchannelcallback() from running concurrently * in the tasklet. */ tasklet_disable_in_atomic(&channel->callback_event); /* * Since this function is called with IRQ locks held, can't * do normal wait for completion; instead poll. */ while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { unsigned long flags; /* 0xFFFF means an invalid PCI VENDOR ID. */ if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { dev_err_once(&hbus->hdev->device, "the device has gone\n"); goto enable_tasklet; } /* * Make sure that the ring buffer data structure doesn't get * freed while we dereference the ring buffer pointer. Test * for the channel's onchannel_callback being NULL within a * sched_lock critical section. See also the inline comments * in vmbus_reset_channel_cb(). */ spin_lock_irqsave(&channel->sched_lock, flags); if (unlikely(channel->onchannel_callback == NULL)) { spin_unlock_irqrestore(&channel->sched_lock, flags); goto enable_tasklet; } hv_pci_onchannelcallback(hbus); spin_unlock_irqrestore(&channel->sched_lock, flags); udelay(100); } tasklet_enable(&channel->callback_event); if (comp.comp_pkt.completion_status < 0) { dev_err(&hbus->hdev->device, "Request for interrupt failed: 0x%x", comp.comp_pkt.completion_status); goto free_int_desc; } /* * Record the assignment so that this can be unwound later. Using * irq_set_chip_data() here would be appropriate, but the lock it takes * is already held. */ *int_desc = comp.int_desc; data->chip_data = int_desc; /* Pass up the result. */ msg->address_hi = comp.int_desc.address >> 32; msg->address_lo = comp.int_desc.address & 0xffffffff; msg->data = comp.int_desc.data; put_pcichild(hpdev); return; enable_tasklet: tasklet_enable(&channel->callback_event); /* * The completion packet on the stack becomes invalid after 'return'; * remove the ID from the VMbus requestor if the identifier is still * mapped to/associated with the packet. (The identifier could have * been 're-used', i.e., already removed and (re-)mapped.) * * Cf. hv_pci_onchannelcallback(). */ vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt); free_int_desc: kfree(int_desc); drop_reference: put_pcichild(hpdev); return_null_message: msg->address_hi = 0; msg->address_lo = 0; msg->data = 0; } /* HW Interrupt Chip Descriptor */ static struct irq_chip hv_msi_irq_chip = { .name = "Hyper-V PCIe MSI", .irq_compose_msi_msg = hv_compose_msi_msg, .irq_set_affinity = irq_chip_set_affinity_parent, #ifdef CONFIG_X86 .irq_ack = irq_chip_ack_parent, #elif defined(CONFIG_ARM64) .irq_eoi = irq_chip_eoi_parent, #endif .irq_mask = hv_irq_mask, .irq_unmask = hv_irq_unmask, }; static struct msi_domain_ops hv_msi_ops = { .msi_prepare = hv_msi_prepare, .msi_free = hv_msi_free, }; /** * hv_pcie_init_irq_domain() - Initialize IRQ domain * @hbus: The root PCI bus * * This function creates an IRQ domain which will be used for * interrupts from devices that have been passed through. These * devices only support MSI and MSI-X, not line-based interrupts * or simulations of line-based interrupts through PCIe's * fabric-layer messages. 
Because interrupts are remapped, we * can support multi-message MSI here. * * Return: '0' on success and error value on failure */ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) { hbus->msi_info.chip = &hv_msi_irq_chip; hbus->msi_info.ops = &hv_msi_ops; hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX); hbus->msi_info.handler = FLOW_HANDLER; hbus->msi_info.handler_name = FLOW_NAME; hbus->msi_info.data = hbus; hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode, &hbus->msi_info, hv_pci_get_root_domain()); if (!hbus->irq_domain) { dev_err(&hbus->hdev->device, "Failed to build an MSI IRQ domain\n"); return -ENODEV; } dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain); return 0; } /** * get_bar_size() - Get the address space consumed by a BAR * @bar_val: Value that a BAR returned after -1 was written * to it. * * This function returns the size of the BAR, rounded up to 1 * page. It has to be rounded up because the hypervisor's page * table entry that maps the BAR into the VM can't specify an * offset within a page. The invariant is that the hypervisor * must place any BARs of smaller than page length at the * beginning of a page. * * Return: Size in bytes of the consumed MMIO space. */ static u64 get_bar_size(u64 bar_val) { return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)), PAGE_SIZE); } /** * survey_child_resources() - Total all MMIO requirements * @hbus: Root PCI bus, as understood by this driver */ static void survey_child_resources(struct hv_pcibus_device *hbus) { struct hv_pci_dev *hpdev; resource_size_t bar_size = 0; unsigned long flags; struct completion *event; u64 bar_val; int i; /* If nobody is waiting on the answer, don't compute it. */ event = xchg(&hbus->survey_event, NULL); if (!event) return; /* If the answer has already been computed, go with it. */ if (hbus->low_mmio_space || hbus->high_mmio_space) { complete(event); return; } spin_lock_irqsave(&hbus->device_list_lock, flags); /* * Due to an interesting quirk of the PCI spec, all memory regions * for a child device are a power of 2 in size and aligned in memory, * so it's sufficient to just add them up without tracking alignment. */ list_for_each_entry(hpdev, &hbus->children, list_entry) { for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO) dev_err(&hbus->hdev->device, "There's an I/O BAR in this list!\n"); if (hpdev->probed_bar[i] != 0) { /* * A probed BAR has all the upper bits set that * can be changed. */ bar_val = hpdev->probed_bar[i]; if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) bar_val |= ((u64)hpdev->probed_bar[++i] << 32); else bar_val |= 0xffffffff00000000ULL; bar_size = get_bar_size(bar_val); if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) hbus->high_mmio_space += bar_size; else hbus->low_mmio_space += bar_size; } } } spin_unlock_irqrestore(&hbus->device_list_lock, flags); complete(event); } /** * prepopulate_bars() - Fill in BARs with defaults * @hbus: Root PCI bus, as understood by this driver * * The core PCI driver code seems much, much happier if the BARs * for a device have values upon first scan. So fill them in. * The algorithm below works down from large sizes to small, * attempting to pack the assignments optimally. The assumption, * enforced in other parts of the code, is that the beginning of * the memory-mapped I/O space will be aligned on the largest * BAR size. 
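 *
 * Worked example of the sizing rule used here (illustrative values
 * only): a 32-bit memory BAR that probes as 0xfff00000 is widened to
 * 0xffffffff_fff00000, so get_bar_size() computes
 * 1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK) = 0x100000, i.e. a 1 MiB
 * window, which is then rounded up to a page multiple.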
*/ static void prepopulate_bars(struct hv_pcibus_device *hbus) { resource_size_t high_size = 0; resource_size_t low_size = 0; resource_size_t high_base = 0; resource_size_t low_base = 0; resource_size_t bar_size; struct hv_pci_dev *hpdev; unsigned long flags; u64 bar_val; u32 command; bool high; int i; if (hbus->low_mmio_space) { low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); low_base = hbus->low_mmio_res->start; } if (hbus->high_mmio_space) { high_size = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space)); high_base = hbus->high_mmio_res->start; } spin_lock_irqsave(&hbus->device_list_lock, flags); /* * Clear the memory enable bit, in case it's already set. This occurs * in the suspend path of hibernation, where the device is suspended, * resumed and suspended again: see hibernation_snapshot() and * hibernation_platform_enter(). * * If the memory enable bit is already set, Hyper-V silently ignores * the below BAR updates, and the related PCI device driver can not * work, because reading from the device register(s) always returns * 0xFFFFFFFF (PCI_ERROR_RESPONSE). */ list_for_each_entry(hpdev, &hbus->children, list_entry) { _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command); command &= ~PCI_COMMAND_MEMORY; _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command); } /* Pick addresses for the BARs. */ do { list_for_each_entry(hpdev, &hbus->children, list_entry) { for (i = 0; i < PCI_STD_NUM_BARS; i++) { bar_val = hpdev->probed_bar[i]; if (bar_val == 0) continue; high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64; if (high) { bar_val |= ((u64)hpdev->probed_bar[i + 1] << 32); } else { bar_val |= 0xffffffffULL << 32; } bar_size = get_bar_size(bar_val); if (high) { if (high_size != bar_size) { i++; continue; } _hv_pcifront_write_config(hpdev, PCI_BASE_ADDRESS_0 + (4 * i), 4, (u32)(high_base & 0xffffff00)); i++; _hv_pcifront_write_config(hpdev, PCI_BASE_ADDRESS_0 + (4 * i), 4, (u32)(high_base >> 32)); high_base += bar_size; } else { if (low_size != bar_size) continue; _hv_pcifront_write_config(hpdev, PCI_BASE_ADDRESS_0 + (4 * i), 4, (u32)(low_base & 0xffffff00)); low_base += bar_size; } } if (high_size <= 1 && low_size <= 1) { /* * No need to set the PCI_COMMAND_MEMORY bit as * the core PCI driver doesn't require the bit * to be pre-set. Actually here we intentionally * keep the bit off so that the PCI BAR probing * in the core PCI driver doesn't cause Hyper-V * to unnecessarily unmap/map the virtual BARs * from/to the physical BARs multiple times. * This reduces the VM boot time significantly * if the BAR sizes are huge. */ break; } } high_size >>= 1; low_size >>= 1; } while (high_size || low_size); spin_unlock_irqrestore(&hbus->device_list_lock, flags); } /* * Assign entries in sysfs pci slot directory. * * Note that this function does not need to lock the children list * because it is called from pci_devices_present_work which * is serialized with hv_eject_device_work because they are on the * same ordered workqueue. Therefore hbus->children list will not change * even when pci_create_slot sleeps. 
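 *
 * The slot name is simply the function's 32-bit serial number printed
 * in decimal (which is why SLOT_NAME_SIZE is 11 bytes), so a device
 * with serial number 3 would show up as /sys/bus/pci/slots/3.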
*/ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) { struct hv_pci_dev *hpdev; char name[SLOT_NAME_SIZE]; int slot_nr; list_for_each_entry(hpdev, &hbus->children, list_entry) { if (hpdev->pci_slot) continue; slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot)); snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr, name, NULL); if (IS_ERR(hpdev->pci_slot)) { pr_warn("pci_create slot %s failed\n", name); hpdev->pci_slot = NULL; } } } /* * Remove entries in sysfs pci slot directory. */ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus) { struct hv_pci_dev *hpdev; list_for_each_entry(hpdev, &hbus->children, list_entry) { if (!hpdev->pci_slot) continue; pci_destroy_slot(hpdev->pci_slot); hpdev->pci_slot = NULL; } } /* * Set NUMA node for the devices on the bus */ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) { struct pci_dev *dev; struct pci_bus *bus = hbus->bridge->bus; struct hv_pci_dev *hv_dev; list_for_each_entry(dev, &bus->devices, bus_list) { hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn)); if (!hv_dev) continue; if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && hv_dev->desc.virtual_numa_node < num_possible_nodes()) /* * The kernel may boot with some NUMA nodes offline * (e.g. in a KDUMP kernel) or with NUMA disabled via * "numa=off". In those cases, adjust the host provided * NUMA node to a valid NUMA node used by the kernel. */ set_dev_node(&dev->dev, numa_map_to_online_node( hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } } /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver * * Return: 0 on success, -errno on failure */ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) { int error; struct pci_host_bridge *bridge = hbus->bridge; bridge->dev.parent = &hbus->hdev->device; bridge->sysdata = &hbus->sysdata; bridge->ops = &hv_pcifront_ops; error = pci_scan_root_bus_bridge(bridge); if (error) return error; pci_lock_rescan_remove(); hv_pci_assign_numa_node(hbus); pci_bus_assign_resources(bridge->bus); hv_pci_assign_slots(hbus); pci_bus_add_devices(bridge->bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_installed; return 0; } struct q_res_req_compl { struct completion host_event; struct hv_pci_dev *hpdev; }; /** * q_resource_requirements() - Query Resource Requirements * @context: The completion context. * @resp: The response that came from the host. * @resp_packet_size: The size in bytes of resp. * * This function is invoked on completion of a Query Resource * Requirements packet. */ static void q_resource_requirements(void *context, struct pci_response *resp, int resp_packet_size) { struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; s32 status; int i; status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status; if (status < 0) { dev_err(&completion->hpdev->hbus->hdev->device, "query resource requirements failed: %x\n", status); } else { for (i = 0; i < PCI_STD_NUM_BARS; i++) { completion->hpdev->probed_bar[i] = q_res_req->probed_bar[i]; } } complete(&completion->host_event); } /** * new_pcichild_device() - Create a new child device * @hbus: The internal struct tracking this root PCI bus. * @desc: The information supplied so far from the host * about the device. 
* * This function creates the tracking structure for a new child * device and kicks off the process of figuring out what it is. * * Return: Pointer to the new tracking struct */ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, struct hv_pcidev_description *desc) { struct hv_pci_dev *hpdev; struct pci_child_message *res_req; struct q_res_req_compl comp_pkt; struct { struct pci_packet init_packet; u8 buffer[sizeof(struct pci_child_message)]; } pkt; unsigned long flags; int ret; hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL); if (!hpdev) return NULL; hpdev->hbus = hbus; memset(&pkt, 0, sizeof(pkt)); init_completion(&comp_pkt.host_event); comp_pkt.hpdev = hpdev; pkt.init_packet.compl_ctxt = &comp_pkt; pkt.init_packet.completion_func = q_resource_requirements; res_req = (struct pci_child_message *)&pkt.init_packet.message; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.slot = desc->win_slot.slot; ret = vmbus_sendpacket(hbus->hdev->channel, res_req, sizeof(struct pci_child_message), (unsigned long)&pkt.init_packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) goto error; if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) goto error; hpdev->desc = *desc; refcount_set(&hpdev->refs, 1); get_pcichild(hpdev); spin_lock_irqsave(&hbus->device_list_lock, flags); list_add_tail(&hpdev->list_entry, &hbus->children); spin_unlock_irqrestore(&hbus->device_list_lock, flags); return hpdev; error: kfree(hpdev); return NULL; } /** * get_pcichild_wslot() - Find device from slot * @hbus: Root PCI bus, as understood by this driver * @wslot: Location on the bus * * This function looks up a PCI device and returns the internal * representation of it. It acquires a reference on it, so that * the device won't be deleted while somebody is using it. The * caller is responsible for calling put_pcichild() to release * this reference. * * Return: Internal representation of a PCI device */ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, u32 wslot) { unsigned long flags; struct hv_pci_dev *iter, *hpdev = NULL; spin_lock_irqsave(&hbus->device_list_lock, flags); list_for_each_entry(iter, &hbus->children, list_entry) { if (iter->desc.win_slot.slot == wslot) { hpdev = iter; get_pcichild(hpdev); break; } } spin_unlock_irqrestore(&hbus->device_list_lock, flags); return hpdev; } /** * pci_devices_present_work() - Handle new list of child devices * @work: Work struct embedded in struct hv_dr_work * * "Bus Relations" is the Windows term for "children of this * bus." The terminology is preserved here for people trying to * debug the interaction between Hyper-V and Linux. This * function is called when the parent partition reports a list * of functions that should be observed under this PCI Express * port (bus). * * This function updates the list, and must tolerate being * called multiple times with the same information. The typical * number of child devices is one, with very atypical cases * involving three or four, so the algorithms used here can be * simple and inefficient. * * It must also treat the omission of a previously observed device as * notification that the device no longer exists. * * Note that this function is serialized with hv_eject_device_work(), * because both are pushed to the ordered workqueue hbus->wq. 
*/ static void pci_devices_present_work(struct work_struct *work) { u32 child_no; bool found; struct hv_pcidev_description *new_desc; struct hv_pci_dev *hpdev; struct hv_pcibus_device *hbus; struct list_head removed; struct hv_dr_work *dr_wrk; struct hv_dr_state *dr = NULL; unsigned long flags; dr_wrk = container_of(work, struct hv_dr_work, wrk); hbus = dr_wrk->bus; kfree(dr_wrk); INIT_LIST_HEAD(&removed); /* Pull this off the queue and process it if it was the last one. */ spin_lock_irqsave(&hbus->device_list_lock, flags); while (!list_empty(&hbus->dr_list)) { dr = list_first_entry(&hbus->dr_list, struct hv_dr_state, list_entry); list_del(&dr->list_entry); /* Throw this away if the list still has stuff in it. */ if (!list_empty(&hbus->dr_list)) { kfree(dr); continue; } } spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (!dr) return; mutex_lock(&hbus->state_lock); /* First, mark all existing children as reported missing. */ spin_lock_irqsave(&hbus->device_list_lock, flags); list_for_each_entry(hpdev, &hbus->children, list_entry) { hpdev->reported_missing = true; } spin_unlock_irqrestore(&hbus->device_list_lock, flags); /* Next, add back any reported devices. */ for (child_no = 0; child_no < dr->device_count; child_no++) { found = false; new_desc = &dr->func[child_no]; spin_lock_irqsave(&hbus->device_list_lock, flags); list_for_each_entry(hpdev, &hbus->children, list_entry) { if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) && (hpdev->desc.v_id == new_desc->v_id) && (hpdev->desc.d_id == new_desc->d_id) && (hpdev->desc.ser == new_desc->ser)) { hpdev->reported_missing = false; found = true; } } spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (!found) { hpdev = new_pcichild_device(hbus, new_desc); if (!hpdev) dev_err(&hbus->hdev->device, "couldn't record a child device.\n"); } } /* Move missing children to a list on the stack. */ spin_lock_irqsave(&hbus->device_list_lock, flags); do { found = false; list_for_each_entry(hpdev, &hbus->children, list_entry) { if (hpdev->reported_missing) { found = true; put_pcichild(hpdev); list_move_tail(&hpdev->list_entry, &removed); break; } } } while (found); spin_unlock_irqrestore(&hbus->device_list_lock, flags); /* Delete everything that should no longer exist. */ while (!list_empty(&removed)) { hpdev = list_first_entry(&removed, struct hv_pci_dev, list_entry); list_del(&hpdev->list_entry); if (hpdev->pci_slot) pci_destroy_slot(hpdev->pci_slot); put_pcichild(hpdev); } switch (hbus->state) { case hv_pcibus_installed: /* * Tell the core to rescan bus * because there may have been changes. 
*/ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->bridge->bus); hv_pci_assign_numa_node(hbus); hv_pci_assign_slots(hbus); pci_unlock_rescan_remove(); break; case hv_pcibus_init: case hv_pcibus_probed: survey_child_resources(hbus); break; default: break; } mutex_unlock(&hbus->state_lock); kfree(dr); } /** * hv_pci_start_relations_work() - Queue work to start device discovery * @hbus: Root PCI bus, as understood by this driver * @dr: The list of children returned from host * * Return: 0 on success, -errno on failure */ static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus, struct hv_dr_state *dr) { struct hv_dr_work *dr_wrk; unsigned long flags; bool pending_dr; if (hbus->state == hv_pcibus_removing) { dev_info(&hbus->hdev->device, "PCI VMBus BUS_RELATIONS: ignored\n"); return -ENOENT; } dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT); if (!dr_wrk) return -ENOMEM; INIT_WORK(&dr_wrk->wrk, pci_devices_present_work); dr_wrk->bus = hbus; spin_lock_irqsave(&hbus->device_list_lock, flags); /* * If pending_dr is true, we have already queued a work, * which will see the new dr. Otherwise, we need to * queue a new work. */ pending_dr = !list_empty(&hbus->dr_list); list_add_tail(&dr->list_entry, &hbus->dr_list); spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (pending_dr) kfree(dr_wrk); else queue_work(hbus->wq, &dr_wrk->wrk); return 0; } /** * hv_pci_devices_present() - Handle list of new children * @hbus: Root PCI bus, as understood by this driver * @relations: Packet from host listing children * * Process a new list of devices on the bus. The list of devices is * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS, * whenever a new list of devices for this bus appears. */ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, struct pci_bus_relations *relations) { struct hv_dr_state *dr; int i; dr = kzalloc(struct_size(dr, func, relations->device_count), GFP_NOWAIT); if (!dr) return; dr->device_count = relations->device_count; for (i = 0; i < dr->device_count; i++) { dr->func[i].v_id = relations->func[i].v_id; dr->func[i].d_id = relations->func[i].d_id; dr->func[i].rev = relations->func[i].rev; dr->func[i].prog_intf = relations->func[i].prog_intf; dr->func[i].subclass = relations->func[i].subclass; dr->func[i].base_class = relations->func[i].base_class; dr->func[i].subsystem_id = relations->func[i].subsystem_id; dr->func[i].win_slot = relations->func[i].win_slot; dr->func[i].ser = relations->func[i].ser; } if (hv_pci_start_relations_work(hbus, dr)) kfree(dr); } /** * hv_pci_devices_present2() - Handle list of new children * @hbus: Root PCI bus, as understood by this driver * @relations: Packet from host listing children * * This function is the v2 version of hv_pci_devices_present() */ static void hv_pci_devices_present2(struct hv_pcibus_device *hbus, struct pci_bus_relations2 *relations) { struct hv_dr_state *dr; int i; dr = kzalloc(struct_size(dr, func, relations->device_count), GFP_NOWAIT); if (!dr) return; dr->device_count = relations->device_count; for (i = 0; i < dr->device_count; i++) { dr->func[i].v_id = relations->func[i].v_id; dr->func[i].d_id = relations->func[i].d_id; dr->func[i].rev = relations->func[i].rev; dr->func[i].prog_intf = relations->func[i].prog_intf; dr->func[i].subclass = relations->func[i].subclass; dr->func[i].base_class = relations->func[i].base_class; dr->func[i].subsystem_id = relations->func[i].subsystem_id; dr->func[i].win_slot = relations->func[i].win_slot; dr->func[i].ser = relations->func[i].ser; 
dr->func[i].flags = relations->func[i].flags; dr->func[i].virtual_numa_node = relations->func[i].virtual_numa_node; } if (hv_pci_start_relations_work(hbus, dr)) kfree(dr); } /** * hv_eject_device_work() - Asynchronously handles ejection * @work: Work struct embedded in internal device struct * * This function handles ejecting a device. Windows will * attempt to gracefully eject a device, waiting 60 seconds to * hear back from the guest OS that this completed successfully. * If this timer expires, the device will be forcibly removed. */ static void hv_eject_device_work(struct work_struct *work) { struct pci_eject_response *ejct_pkt; struct hv_pcibus_device *hbus; struct hv_pci_dev *hpdev; struct pci_dev *pdev; unsigned long flags; int wslot; struct { struct pci_packet pkt; u8 buffer[sizeof(struct pci_eject_response)]; } ctxt; hpdev = container_of(work, struct hv_pci_dev, wrk); hbus = hpdev->hbus; mutex_lock(&hbus->state_lock); /* * Ejection can come before or after the PCI bus has been set up, so * attempt to find it and tear down the bus state, if it exists. This * must be done without constructs like pci_domain_nr(hbus->bridge->bus) * because hbus->bridge->bus may not exist yet. */ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot); if (pdev) { pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); pci_dev_put(pdev); pci_unlock_rescan_remove(); } spin_lock_irqsave(&hbus->device_list_lock, flags); list_del(&hpdev->list_entry); spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (hpdev->pci_slot) pci_destroy_slot(hpdev->pci_slot); memset(&ctxt, 0, sizeof(ctxt)); ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, sizeof(*ejct_pkt), 0, VM_PKT_DATA_INBAND, 0); /* For the get_pcichild() in hv_pci_eject_device() */ put_pcichild(hpdev); /* For the two refs got in new_pcichild_device() */ put_pcichild(hpdev); put_pcichild(hpdev); /* hpdev has been freed. Do not use it any more. */ mutex_unlock(&hbus->state_lock); } /** * hv_pci_eject_device() - Handles device ejection * @hpdev: Internal device tracking struct * * This function is invoked when an ejection packet arrives. It * just schedules work so that we don't re-enter the packet * delivery code handling the ejection. */ static void hv_pci_eject_device(struct hv_pci_dev *hpdev) { struct hv_pcibus_device *hbus = hpdev->hbus; struct hv_device *hdev = hbus->hdev; if (hbus->state == hv_pcibus_removing) { dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n"); return; } get_pcichild(hpdev); INIT_WORK(&hpdev->wrk, hv_eject_device_work); queue_work(hbus->wq, &hpdev->wrk); } /** * hv_pci_onchannelcallback() - Handles incoming packets * @context: Internal bus tracking struct * * This function is invoked whenever the host sends a packet to * this channel (which is private to this root PCI bus). 
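 * The receive loop below starts with a 0x100-byte buffer; when
 * vmbus_recvpacket_raw() returns -ENOBUFS, the buffer is reallocated to the
 * size reported in bytes_recvd and the receive is retried.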
*/ static void hv_pci_onchannelcallback(void *context) { const int packet_size = 0x100; int ret; struct hv_pcibus_device *hbus = context; struct vmbus_channel *chan = hbus->hdev->channel; u32 bytes_recvd; u64 req_id, req_addr; struct vmpacket_descriptor *desc; unsigned char *buffer; int bufferlen = packet_size; struct pci_packet *comp_packet; struct pci_response *response; struct pci_incoming_message *new_message; struct pci_bus_relations *bus_rel; struct pci_bus_relations2 *bus_rel2; struct pci_dev_inval_block *inval; struct pci_dev_incoming *dev_message; struct hv_pci_dev *hpdev; unsigned long flags; buffer = kmalloc(bufferlen, GFP_ATOMIC); if (!buffer) return; while (1) { ret = vmbus_recvpacket_raw(chan, buffer, bufferlen, &bytes_recvd, &req_id); if (ret == -ENOBUFS) { kfree(buffer); /* Handle large packet */ bufferlen = bytes_recvd; buffer = kmalloc(bytes_recvd, GFP_ATOMIC); if (!buffer) return; continue; } /* Zero length indicates there are no more packets. */ if (ret || !bytes_recvd) break; /* * All incoming packets must be at least as large as a * response. */ if (bytes_recvd <= sizeof(struct pci_response)) continue; desc = (struct vmpacket_descriptor *)buffer; switch (desc->type) { case VM_PKT_COMP: lock_requestor(chan, flags); req_addr = __vmbus_request_addr_match(chan, req_id, VMBUS_RQST_ADDR_ANY); if (req_addr == VMBUS_RQST_ERROR) { unlock_requestor(chan, flags); dev_err(&hbus->hdev->device, "Invalid transaction ID %llx\n", req_id); break; } comp_packet = (struct pci_packet *)req_addr; response = (struct pci_response *)buffer; /* * Call ->completion_func() within the critical section to make * sure that the packet pointer is still valid during the call: * here 'valid' means that there's a task still waiting for the * completion, and that the packet data is still on the waiting * task's stack. Cf. hv_compose_msi_msg(). 
*/ comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_recvd); unlock_requestor(chan, flags); break; case VM_PKT_DATA_INBAND: new_message = (struct pci_incoming_message *)buffer; switch (new_message->message_type.type) { case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; if (bytes_recvd < sizeof(*bus_rel) || bytes_recvd < struct_size(bus_rel, func, bus_rel->device_count)) { dev_err(&hbus->hdev->device, "bus relations too small\n"); break; } hv_pci_devices_present(hbus, bus_rel); break; case PCI_BUS_RELATIONS2: bus_rel2 = (struct pci_bus_relations2 *)buffer; if (bytes_recvd < sizeof(*bus_rel2) || bytes_recvd < struct_size(bus_rel2, func, bus_rel2->device_count)) { dev_err(&hbus->hdev->device, "bus relations v2 too small\n"); break; } hv_pci_devices_present2(hbus, bus_rel2); break; case PCI_EJECT: dev_message = (struct pci_dev_incoming *)buffer; if (bytes_recvd < sizeof(*dev_message)) { dev_err(&hbus->hdev->device, "eject message too small\n"); break; } hpdev = get_pcichild_wslot(hbus, dev_message->wslot.slot); if (hpdev) { hv_pci_eject_device(hpdev); put_pcichild(hpdev); } break; case PCI_INVALIDATE_BLOCK: inval = (struct pci_dev_inval_block *)buffer; if (bytes_recvd < sizeof(*inval)) { dev_err(&hbus->hdev->device, "invalidate message too small\n"); break; } hpdev = get_pcichild_wslot(hbus, inval->wslot.slot); if (hpdev) { if (hpdev->block_invalidate) { hpdev->block_invalidate( hpdev->invalidate_context, inval->block_mask); } put_pcichild(hpdev); } break; default: dev_warn(&hbus->hdev->device, "Unimplemented protocol message %x\n", new_message->message_type.type); break; } break; default: dev_err(&hbus->hdev->device, "unhandled packet type %d, tid %llx len %d\n", desc->type, req_id, bytes_recvd); break; } } kfree(buffer); } /** * hv_pci_protocol_negotiation() - Set up protocol * @hdev: VMBus's tracking struct for this root PCI bus. * @version: Array of supported channel protocol versions in * the order of probing - highest go first. * @num_version: Number of elements in the version array. * * This driver is intended to support running on Windows 10 * (server) and later versions. It will not run on earlier * versions, as they assume that many of the operations which * Linux needs accomplished with a spinlock held were done via * asynchronous messaging via VMBus. Windows 10 increases the * surface area of PCI emulation so that these actions can take * place by suspending a virtual processor for their duration. * * This function negotiates the channel protocol version, * failing if the host doesn't support the necessary protocol * level. */ static int hv_pci_protocol_negotiation(struct hv_device *hdev, enum pci_protocol_version_t version[], int num_version) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_version_request *version_req; struct hv_pci_compl comp_pkt; struct pci_packet *pkt; int ret; int i; /* * Initiate the handshake with the host and negotiate * a version that the host can support. We start with the * highest version number and go down if the host cannot * support it. 
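 * A completion status of STATUS_REVISION_MISMATCH means the host rejected
 * that version and the next (lower) entry in the array is tried; any other
 * negative status aborts the negotiation.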
*/ pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL); if (!pkt) return -ENOMEM; init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&pkt->message; version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; for (i = 0; i < num_version; i++) { version_req->protocol_version = version[i]; ret = vmbus_sendpacket(hdev->channel, version_req, sizeof(struct pci_version_request), (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (!ret) ret = wait_for_response(hdev, &comp_pkt.host_event); if (ret) { dev_err(&hdev->device, "PCI Pass-through VSP failed to request version: %d", ret); goto exit; } if (comp_pkt.completion_status >= 0) { hbus->protocol_version = version[i]; dev_info(&hdev->device, "PCI VMBus probing: Using version %#x\n", hbus->protocol_version); goto exit; } if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { dev_err(&hdev->device, "PCI Pass-through VSP failed version request: %#x", comp_pkt.completion_status); ret = -EPROTO; goto exit; } reinit_completion(&comp_pkt.host_event); } dev_err(&hdev->device, "PCI pass-through VSP failed to find supported version"); ret = -EPROTO; exit: kfree(pkt); return ret; } /** * hv_pci_free_bridge_windows() - Release memory regions for the * bus * @hbus: Root PCI bus, as understood by this driver */ static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) { /* * Set the resources back to the way they looked when they * were allocated by setting IORESOURCE_BUSY again. */ if (hbus->low_mmio_space && hbus->low_mmio_res) { hbus->low_mmio_res->flags |= IORESOURCE_BUSY; vmbus_free_mmio(hbus->low_mmio_res->start, resource_size(hbus->low_mmio_res)); } if (hbus->high_mmio_space && hbus->high_mmio_res) { hbus->high_mmio_res->flags |= IORESOURCE_BUSY; vmbus_free_mmio(hbus->high_mmio_res->start, resource_size(hbus->high_mmio_res)); } } /** * hv_pci_allocate_bridge_windows() - Allocate memory regions * for the bus * @hbus: Root PCI bus, as understood by this driver * * This function calls vmbus_allocate_mmio(), which is itself a * bit of a compromise. Ideally, we might change the pnp layer * in the kernel such that it comprehends either PCI devices * which are "grandchildren of ACPI," with some intermediate bus * node (in this case, VMBus) or change it such that it * understands VMBus. The pnp layer, however, has been declared * deprecated, and not subject to change. * * The workaround, implemented here, is to ask VMBus to allocate * MMIO space for this bus. VMBus itself knows which ranges are * appropriate by looking at its own ACPI objects. Then, after * these ranges are claimed, they're modified to look like they * would have looked if the ACPI and pnp code had allocated * bridge windows. These descriptors have to exist in this form * in order to satisfy the code which will get invoked when the * endpoint PCI function driver calls request_mem_region() or * request_mem_region_exclusive(). * * Return: 0 on success, -errno on failure */ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) { resource_size_t align; int ret; if (hbus->low_mmio_space) { align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0, (u64)(u32)0xffffffff, hbus->low_mmio_space, align, false); if (ret) { dev_err(&hbus->hdev->device, "Need %#llx of low MMIO space. 
Consider reconfiguring the VM.\n", hbus->low_mmio_space); return ret; } /* Modify this resource to become a bridge window. */ hbus->low_mmio_res->flags |= IORESOURCE_WINDOW; hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY; pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res); } if (hbus->high_mmio_space) { align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space)); ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev, 0x100000000, -1, hbus->high_mmio_space, align, false); if (ret) { dev_err(&hbus->hdev->device, "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n", hbus->high_mmio_space); goto release_low_mmio; } /* Modify this resource to become a bridge window. */ hbus->high_mmio_res->flags |= IORESOURCE_WINDOW; hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY; pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res); } return 0; release_low_mmio: if (hbus->low_mmio_res) { vmbus_free_mmio(hbus->low_mmio_res->start, resource_size(hbus->low_mmio_res)); } return ret; } /** * hv_allocate_config_window() - Find MMIO space for PCI Config * @hbus: Root PCI bus, as understood by this driver * * This function claims memory-mapped I/O space for accessing * configuration space for the functions on this bus. * * Return: 0 on success, -errno on failure */ static int hv_allocate_config_window(struct hv_pcibus_device *hbus) { int ret; /* * Set up a region of MMIO space to use for accessing configuration * space. */ ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1, PCI_CONFIG_MMIO_LENGTH, 0x1000, false); if (ret) return ret; /* * vmbus_allocate_mmio() gets used for allocating both device endpoint * resource claims (those which cannot be overlapped) and the ranges * which are valid for the children of this bus, which are intended * to be overlapped by those children. Set the flag on this claim * meaning that this region can't be overlapped. */ hbus->mem_config->flags |= IORESOURCE_BUSY; return 0; } static void hv_free_config_window(struct hv_pcibus_device *hbus) { vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); } static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs); /** * hv_pci_enter_d0() - Bring the "bus" into the D0 power state * @hdev: VMBus's tracking struct for this root PCI bus * * Return: 0 on success, -errno on failure */ static int hv_pci_enter_d0(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct pci_packet *pkt; bool retry = true; int ret; enter_d0_retry: /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region * of memory-mapped I/O space has been chosen for configuration space * access. */ pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL); if (!pkt) return -ENOMEM; init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; d0_entry = (struct pci_bus_d0_entry *)&pkt->message; d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = hbus->mem_config->start; ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (!ret) ret = wait_for_response(hdev, &comp_pkt.host_event); if (ret) goto exit; /* * In certain case (Kdump) the pci device of interest was * not cleanly shut down and resource is still held on host * side, the host could return invalid device status. 
* We need to explicitly request host to release the resource * and try to enter D0 again. */ if (comp_pkt.completion_status < 0 && retry) { retry = false; dev_err(&hdev->device, "Retrying D0 Entry\n"); /* * Hv_pci_bus_exit() calls hv_send_resource_released() * to free up resources of its child devices. * In the kdump kernel we need to set the * wslot_res_allocated to 255 so it scans all child * devices to release resources allocated in the * normal kernel before panic happened. */ hbus->wslot_res_allocated = 255; ret = hv_pci_bus_exit(hdev, true); if (ret == 0) { kfree(pkt); goto enter_d0_retry; } dev_err(&hdev->device, "Retrying D0 failed with ret %d\n", ret); } if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", comp_pkt.completion_status); ret = -EPROTO; goto exit; } ret = 0; exit: kfree(pkt); return ret; } /** * hv_pci_query_relations() - Ask host to send list of child * devices * @hdev: VMBus's tracking struct for this root PCI bus * * Return: 0 on success, -errno on failure */ static int hv_pci_query_relations(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_message message; struct completion comp; int ret; /* Ask the host to send along the list of child devices */ init_completion(&comp); if (cmpxchg(&hbus->survey_event, NULL, &comp)) return -ENOTEMPTY; memset(&message, 0, sizeof(message)); message.type = PCI_QUERY_BUS_RELATIONS; ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), 0, VM_PKT_DATA_INBAND, 0); if (!ret) ret = wait_for_response(hdev, &comp); /* * In the case of fast device addition/removal, it's possible that * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we * already got a PCI_BUS_RELATIONS* message from the host and the * channel callback already scheduled a work to hbus->wq, which can be * running pci_devices_present_work() -> survey_child_resources() -> * complete(&hbus->survey_event), even after hv_pci_query_relations() * exits and the stack variable 'comp' is no longer valid; as a result, * a hang or a page fault may happen when the complete() calls * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is * -ENODEV, there can't be any more work item scheduled to hbus->wq * after the flush_workqueue(): see vmbus_onoffer_rescind() -> * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() -> * channel->rescind = true. */ flush_workqueue(hbus->wq); return ret; } /** * hv_send_resources_allocated() - Report local resource choices * @hdev: VMBus's tracking struct for this root PCI bus * * The host OS is expecting to be sent a request as a message * which contains all the resources that the device will use. * The response contains those same resources, "translated" * which is to say, the values which should be used by the * hardware, when it delivers an interrupt. (MMIO resources are * used in local terms.) This is nice for Windows, and lines up * with the FDO/PDO split, which doesn't exist in Linux. Linux * is deeply expecting to scan an emulated PCI configuration * space. So this message is sent here only to drive the state * machine on the host forward. 
* * Return: 0 on success, -errno on failure */ static int hv_send_resources_allocated(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_resources_assigned *res_assigned; struct pci_resources_assigned2 *res_assigned2; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; size_t size_res; int wslot; int ret; size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) ? sizeof(*res_assigned) : sizeof(*res_assigned2); pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); if (!pkt) return -ENOMEM; ret = 0; for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; memset(pkt, 0, sizeof(*pkt) + size_res); init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) { res_assigned = (struct pci_resources_assigned *)&pkt->message; res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.slot = hpdev->desc.win_slot.slot; } else { res_assigned2 = (struct pci_resources_assigned2 *)&pkt->message; res_assigned2->message_type.type = PCI_RESOURCES_ASSIGNED2; res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; } put_pcichild(hpdev); ret = vmbus_sendpacket(hdev->channel, &pkt->message, size_res, (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (!ret) ret = wait_for_response(hdev, &comp_pkt.host_event); if (ret) break; if (comp_pkt.completion_status < 0) { ret = -EPROTO; dev_err(&hdev->device, "resource allocated returned 0x%x", comp_pkt.completion_status); break; } hbus->wslot_res_allocated = wslot; } kfree(pkt); return ret; } /** * hv_send_resources_released() - Report local resources * released * @hdev: VMBus's tracking struct for this root PCI bus * * Return: 0 on success, -errno on failure */ static int hv_send_resources_released(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_child_message pkt; struct hv_pci_dev *hpdev; int wslot; int ret; for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; memset(&pkt, 0, sizeof(pkt)); pkt.message_type.type = PCI_RESOURCES_RELEASED; pkt.wslot.slot = hpdev->desc.win_slot.slot; put_pcichild(hpdev); ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0, VM_PKT_DATA_INBAND, 0); if (ret) return ret; hbus->wslot_res_allocated = wslot - 1; } hbus->wslot_res_allocated = -1; return 0; } #define HVPCI_DOM_MAP_SIZE (64 * 1024) static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE); /* * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0 * as invalid for passthrough PCI devices of this driver. */ #define HVPCI_DOM_INVALID 0 /** * hv_get_dom_num() - Get a valid PCI domain number * Check if the PCI domain number is in use, and return another number if * it is in use. 
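 * Allocation state is tracked in the hvpci_dom_map bitmap;
 * test_and_set_bit() makes the check-and-claim of a number atomic.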
* * @dom: Requested domain number * * return: domain number on success, HVPCI_DOM_INVALID on failure */ static u16 hv_get_dom_num(u16 dom) { unsigned int i; if (test_and_set_bit(dom, hvpci_dom_map) == 0) return dom; for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) { if (test_and_set_bit(i, hvpci_dom_map) == 0) return i; } return HVPCI_DOM_INVALID; } /** * hv_put_dom_num() - Mark the PCI domain number as free * @dom: Domain number to be freed */ static void hv_put_dom_num(u16 dom) { clear_bit(dom, hvpci_dom_map); } /** * hv_pci_probe() - New VMBus channel probe, for a root PCI bus * @hdev: VMBus's tracking struct for this root PCI bus * @dev_id: Identifies the device itself * * Return: 0 on success, -errno on failure */ static int hv_pci_probe(struct hv_device *hdev, const struct hv_vmbus_device_id *dev_id) { struct pci_host_bridge *bridge; struct hv_pcibus_device *hbus; u16 dom_req, dom; char *name; int ret; bridge = devm_pci_alloc_host_bridge(&hdev->device, 0); if (!bridge) return -ENOMEM; hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); if (!hbus) return -ENOMEM; hbus->bridge = bridge; mutex_init(&hbus->state_lock); hbus->state = hv_pcibus_init; hbus->wslot_res_allocated = -1; /* * The PCI bus "domain" is what is called "segment" in ACPI and other * specs. Pull it from the instance ID, to get something usually * unique. In rare cases of collision, we will find out another number * not in use. * * Note that, since this code only runs in a Hyper-V VM, Hyper-V * together with this guest driver can guarantee that (1) The only * domain used by Gen1 VMs for something that looks like a physical * PCI bus (which is actually emulated by the hypervisor) is domain 0. * (2) There will be no overlap between domains (after fixing possible * collisions) in the same VM. */ dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4]; dom = hv_get_dom_num(dom_req); if (dom == HVPCI_DOM_INVALID) { dev_err(&hdev->device, "Unable to use dom# 0x%x or other numbers", dom_req); ret = -EINVAL; goto free_bus; } if (dom != dom_req) dev_info(&hdev->device, "PCI dom# 0x%x has collision, using 0x%x", dom_req, dom); hbus->bridge->domain_nr = dom; #ifdef CONFIG_X86 hbus->sysdata.domain = dom; hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS); #elif defined(CONFIG_ARM64) /* * Set the PCI bus parent to be the corresponding VMbus * device. Then the VMbus device will be assigned as the * ACPI companion in pcibios_root_bridge_prepare() and * pci_dma_configure() will propagate device coherence * information to devices created on the bus. 
*/ hbus->sysdata.parent = hdev->device.parent; hbus->use_calls = false; #endif hbus->hdev = hdev; INIT_LIST_HEAD(&hbus->children); INIT_LIST_HEAD(&hbus->dr_list); spin_lock_init(&hbus->config_lock); spin_lock_init(&hbus->device_list_lock); hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, hbus->bridge->domain_nr); if (!hbus->wq) { ret = -ENOMEM; goto free_dom; } hdev->channel->next_request_id_callback = vmbus_next_request_id; hdev->channel->request_addr_callback = vmbus_request_addr; hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE; ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) goto destroy_wq; hv_set_drvdata(hdev, hbus); ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions, ARRAY_SIZE(pci_protocol_versions)); if (ret) goto close; ret = hv_allocate_config_window(hbus); if (ret) goto close; hbus->cfg_addr = ioremap(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); if (!hbus->cfg_addr) { dev_err(&hdev->device, "Unable to map a virtual address for config space\n"); ret = -ENOMEM; goto free_config; } name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance); if (!name) { ret = -ENOMEM; goto unmap; } hbus->fwnode = irq_domain_alloc_named_fwnode(name); kfree(name); if (!hbus->fwnode) { ret = -ENOMEM; goto unmap; } ret = hv_pcie_init_irq_domain(hbus); if (ret) goto free_fwnode; ret = hv_pci_query_relations(hdev); if (ret) goto free_irq_domain; mutex_lock(&hbus->state_lock); ret = hv_pci_enter_d0(hdev); if (ret) goto release_state_lock; ret = hv_pci_allocate_bridge_windows(hbus); if (ret) goto exit_d0; ret = hv_send_resources_allocated(hdev); if (ret) goto free_windows; prepopulate_bars(hbus); hbus->state = hv_pcibus_probed; ret = create_root_hv_pci_bus(hbus); if (ret) goto free_windows; mutex_unlock(&hbus->state_lock); return 0; free_windows: hv_pci_free_bridge_windows(hbus); exit_d0: (void) hv_pci_bus_exit(hdev, true); release_state_lock: mutex_unlock(&hbus->state_lock); free_irq_domain: irq_domain_remove(hbus->irq_domain); free_fwnode: irq_domain_free_fwnode(hbus->fwnode); unmap: iounmap(hbus->cfg_addr); free_config: hv_free_config_window(hbus); close: vmbus_close(hdev->channel); destroy_wq: destroy_workqueue(hbus->wq); free_dom: hv_put_dom_num(hbus->bridge->domain_nr); free_bus: kfree(hbus); return ret; } static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct vmbus_channel *chan = hdev->channel; struct { struct pci_packet teardown_packet; u8 buffer[sizeof(struct pci_message)]; } pkt; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev, *tmp; unsigned long flags; u64 trans_id; int ret; /* * After the host sends the RESCIND_CHANNEL message, it doesn't * access the per-channel ringbuffer any longer. 
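 * If the channel has already been rescinded, skip the teardown messages
 * below entirely: the host would never read them from the ring buffer.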
*/ if (chan->rescind) return 0; if (!keep_devs) { struct list_head removed; /* Move all present children to the list on stack */ INIT_LIST_HEAD(&removed); spin_lock_irqsave(&hbus->device_list_lock, flags); list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) list_move_tail(&hpdev->list_entry, &removed); spin_unlock_irqrestore(&hbus->device_list_lock, flags); /* Remove all children in the list */ list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) { list_del(&hpdev->list_entry); if (hpdev->pci_slot) pci_destroy_slot(hpdev->pci_slot); /* For the two refs got in new_pcichild_device() */ put_pcichild(hpdev); put_pcichild(hpdev); } } ret = hv_send_resources_released(hdev); if (ret) { dev_err(&hdev->device, "Couldn't send resources released packet(s)\n"); return ret; } memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); init_completion(&comp_pkt.host_event); pkt.teardown_packet.completion_func = hv_pci_generic_compl; pkt.teardown_packet.compl_ctxt = &comp_pkt; pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message, sizeof(struct pci_message), (unsigned long)&pkt.teardown_packet, &trans_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) return ret; if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) { /* * The completion packet on the stack becomes invalid after * 'return'; remove the ID from the VMbus requestor if the * identifier is still mapped to/associated with the packet. * * Cf. hv_pci_onchannelcallback(). */ vmbus_request_addr_match(chan, trans_id, (unsigned long)&pkt.teardown_packet); return -ETIMEDOUT; } return 0; } /** * hv_pci_remove() - Remove routine for this VMBus channel * @hdev: VMBus's tracking struct for this root PCI bus */ static void hv_pci_remove(struct hv_device *hdev) { struct hv_pcibus_device *hbus; hbus = hv_get_drvdata(hdev); if (hbus->state == hv_pcibus_installed) { tasklet_disable(&hdev->channel->callback_event); hbus->state = hv_pcibus_removing; tasklet_enable(&hdev->channel->callback_event); destroy_workqueue(hbus->wq); hbus->wq = NULL; /* * At this point, no work is running or can be scheduled * on hbus-wq. We can't race with hv_pci_devices_present() * or hv_pci_eject_device(), it's safe to proceed. */ /* Remove the bus from PCI's point of view. */ pci_lock_rescan_remove(); pci_stop_root_bus(hbus->bridge->bus); hv_pci_remove_slots(hbus); pci_remove_root_bus(hbus->bridge->bus); pci_unlock_rescan_remove(); } hv_pci_bus_exit(hdev, false); vmbus_close(hdev->channel); iounmap(hbus->cfg_addr); hv_free_config_window(hbus); hv_pci_free_bridge_windows(hbus); irq_domain_remove(hbus->irq_domain); irq_domain_free_fwnode(hbus->fwnode); hv_put_dom_num(hbus->bridge->domain_nr); kfree(hbus); } static int hv_pci_suspend(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); enum hv_pcibus_state old_state; int ret; /* * hv_pci_suspend() must make sure there are no pending work items * before calling vmbus_close(), since it runs in a process context * as a callback in dpm_suspend(). When it starts to run, the channel * callback hv_pci_onchannelcallback(), which runs in a tasklet * context, can be still running concurrently and scheduling new work * items onto hbus->wq in hv_pci_devices_present() and * hv_pci_eject_device(), and the work item handlers can access the * vmbus channel, which can be being closed by hv_pci_suspend(), e.g. 
* the work item handler pci_devices_present_work() -> * new_pcichild_device() writes to the vmbus channel. * * To eliminate the race, hv_pci_suspend() disables the channel * callback tasklet, sets hbus->state to hv_pcibus_removing, and * re-enables the tasklet. This way, when hv_pci_suspend() proceeds, * it knows that no new work item can be scheduled, and then it flushes * hbus->wq and safely closes the vmbus channel. */ tasklet_disable(&hdev->channel->callback_event); /* Change the hbus state to prevent new work items. */ old_state = hbus->state; if (hbus->state == hv_pcibus_installed) hbus->state = hv_pcibus_removing; tasklet_enable(&hdev->channel->callback_event); if (old_state != hv_pcibus_installed) return -EINVAL; flush_workqueue(hbus->wq); ret = hv_pci_bus_exit(hdev, true); if (ret) return ret; vmbus_close(hdev->channel); return 0; } static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg) { struct irq_data *irq_data; struct msi_desc *entry; int ret = 0; if (!pdev->msi_enabled && !pdev->msix_enabled) return 0; msi_lock_descs(&pdev->dev); msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) { irq_data = irq_get_irq_data(entry->irq); if (WARN_ON_ONCE(!irq_data)) { ret = -EINVAL; break; } hv_compose_msi_msg(irq_data, &entry->msg); } msi_unlock_descs(&pdev->dev); return ret; } /* * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg() * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg() * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping * Table entries. */ static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus) { pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL); } static int hv_pci_resume(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); enum pci_protocol_version_t version[1]; int ret; hbus->state = hv_pcibus_init; hdev->channel->next_request_id_callback = vmbus_next_request_id; hdev->channel->request_addr_callback = vmbus_request_addr; hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE; ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) return ret; /* Only use the version that was in use before hibernation. 
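 * Renegotiating from scratch could, in principle, settle on a different
 * version than the one the pre-hibernation bus state was built against.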
*/ version[0] = hbus->protocol_version; ret = hv_pci_protocol_negotiation(hdev, version, 1); if (ret) goto out; ret = hv_pci_query_relations(hdev); if (ret) goto out; mutex_lock(&hbus->state_lock); ret = hv_pci_enter_d0(hdev); if (ret) goto release_state_lock; ret = hv_send_resources_allocated(hdev); if (ret) goto release_state_lock; prepopulate_bars(hbus); hv_pci_restore_msi_state(hbus); hbus->state = hv_pcibus_installed; mutex_unlock(&hbus->state_lock); return 0; release_state_lock: mutex_unlock(&hbus->state_lock); out: vmbus_close(hdev->channel); return ret; } static const struct hv_vmbus_device_id hv_pci_id_table[] = { /* PCI Pass-through Class ID */ /* 44C4F61D-4444-4400-9D52-802E27EDE19F */ { HV_PCIE_GUID, }, { }, }; MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table); static struct hv_driver hv_pci_drv = { .name = "hv_pci", .id_table = hv_pci_id_table, .probe = hv_pci_probe, .remove = hv_pci_remove, .suspend = hv_pci_suspend, .resume = hv_pci_resume, }; static void __exit exit_hv_pci_drv(void) { vmbus_driver_unregister(&hv_pci_drv); hvpci_block_ops.read_block = NULL; hvpci_block_ops.write_block = NULL; hvpci_block_ops.reg_blk_invalidate = NULL; } static int __init init_hv_pci_drv(void) { int ret; if (!hv_is_hyperv_initialized()) return -ENODEV; ret = hv_pci_irqchip_init(); if (ret) return ret; /* Set the invalid domain number's bit, so it will not be used */ set_bit(HVPCI_DOM_INVALID, hvpci_dom_map); /* Initialize PCI block r/w interface */ hvpci_block_ops.read_block = hv_read_config_block; hvpci_block_ops.write_block = hv_write_config_block; hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate; return vmbus_driver_register(&hv_pci_drv); } module_init(init_hv_pci_drv); module_exit(exit_hv_pci_drv); MODULE_DESCRIPTION("Hyper-V PCI"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pci-hyperv.c
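/*
 * Illustrative sketch, not part of the driver above: prepopulate_bars() and
 * hv_pci_allocate_bridge_windows() both use the expression
 * 1ULL << (63 - __builtin_clzll(x)) to round a non-zero MMIO size down to
 * the largest power of two it contains; that value is used as the first BAR
 * size to place and as the allocation alignment. The standalone program
 * below (helper name and the sample value are made up for the example)
 * simply demonstrates what the expression computes.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t largest_pow2_le(uint64_t x)
{
	/* __builtin_clzll() is undefined for 0; the driver checks for that first. */
	return 1ULL << (63 - __builtin_clzll(x));
}

int main(void)
{
	/* e.g. 0x2300000 bytes of low MMIO -> first placement pass uses 0x2000000 */
	printf("%#llx\n", (unsigned long long)largest_pow2_le(0x2300000ULL));
	return 0;
}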
// SPDX-License-Identifier: GPL-2.0 /* * Loongson PCI Host Controller Driver * * Copyright (C) 2020 Jiaxun Yang <[email protected]> */ #include <linux/of.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include "../pci.h" /* Device IDs */ #define DEV_LS2K_PCIE_PORT0 0x1a05 #define DEV_LS7A_PCIE_PORT0 0x7a09 #define DEV_LS7A_PCIE_PORT1 0x7a19 #define DEV_LS7A_PCIE_PORT2 0x7a29 #define DEV_LS7A_PCIE_PORT3 0x7a39 #define DEV_LS7A_PCIE_PORT4 0x7a49 #define DEV_LS7A_PCIE_PORT5 0x7a59 #define DEV_LS7A_PCIE_PORT6 0x7a69 #define DEV_LS2K_APB 0x7a02 #define DEV_LS7A_GMAC 0x7a03 #define DEV_LS7A_DC1 0x7a06 #define DEV_LS7A_LPC 0x7a0c #define DEV_LS7A_AHCI 0x7a08 #define DEV_LS7A_CONF 0x7a10 #define DEV_LS7A_GNET 0x7a13 #define DEV_LS7A_EHCI 0x7a14 #define DEV_LS7A_DC2 0x7a36 #define DEV_LS7A_HDMI 0x7a37 #define FLAG_CFG0 BIT(0) #define FLAG_CFG1 BIT(1) #define FLAG_DEV_FIX BIT(2) #define FLAG_DEV_HIDDEN BIT(3) struct loongson_pci_data { u32 flags; struct pci_ops *ops; }; struct loongson_pci { void __iomem *cfg0_base; void __iomem *cfg1_base; struct platform_device *pdev; const struct loongson_pci_data *data; }; /* Fixup wrong class code in PCIe bridges */ static void bridge_class_quirk(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT0, bridge_class_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT1, bridge_class_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT2, bridge_class_quirk); static void system_bus_quirk(struct pci_dev *pdev) { /* * The address space consumed by these devices is outside the * resources of the host bridge. */ pdev->mmio_always_on = 1; pdev->non_compliant_bars = 1; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS2K_APB, system_bus_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_CONF, system_bus_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); static void loongson_mrrs_quirk(struct pci_dev *pdev) { /* * Some Loongson PCIe ports have h/w limitations of maximum read * request size. They can't handle anything larger than this. So * force this limit on any devices attached under these ports. 
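 * The no_inc_mrrs flag set below tells the PCI core not to increase the
 * Max_Read_Request_Size of devices under this bridge.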
*/ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); bridge->no_inc_mrrs = 1; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk); static void loongson_pci_pin_quirk(struct pci_dev *pdev) { pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_DC1, loongson_pci_pin_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_DC2, loongson_pci_pin_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_GMAC, loongson_pci_pin_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_AHCI, loongson_pci_pin_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_EHCI, loongson_pci_pin_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_GNET, loongson_pci_pin_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_HDMI, loongson_pci_pin_quirk); static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; if (acpi_disabled) return (struct loongson_pci *)(bus->sysdata); cfg = bus->sysdata; return (struct loongson_pci *)(cfg->priv); } static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus, unsigned int devfn, int where) { unsigned long addroff = 0x0; unsigned char busnum = bus->number; if (!pci_is_root_bus(bus)) { addroff |= BIT(24); /* Type 1 Access */ addroff |= (busnum << 16); } addroff |= (devfn << 8) | where; return priv->cfg0_base + addroff; } static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus, unsigned int devfn, int where) { unsigned long addroff = 0x0; unsigned char busnum = bus->number; if (!pci_is_root_bus(bus)) { addroff |= BIT(28); /* Type 1 Access */ addroff |= (busnum << 16); } addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16); return priv->cfg1_base + addroff; } static bool pdev_may_exist(struct pci_bus *bus, unsigned int device, unsigned int function) { return !(pci_is_root_bus(bus) && (device >= 9 && device <= 20) && (function > 0)); } static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { unsigned int device = PCI_SLOT(devfn); unsigned int function = PCI_FUNC(devfn); struct loongson_pci *priv = pci_bus_to_loongson_pci(bus); /* * Do not read more than one device on the bus other than * the host bus. 
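 * In practice, when FLAG_DEV_FIX is set, only device 0 is mapped on buses
 * below the root bus; config accesses to any other devfn return NULL.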
*/ if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) { if (!pci_is_root_bus(bus) && (device > 0)) return NULL; } /* Don't access non-existent devices */ if (priv->data->flags & FLAG_DEV_HIDDEN) { if (!pdev_may_exist(bus, device, function)) return NULL; } /* CFG0 can only access standard space */ if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base) return cfg0_map(priv, bus, devfn, where); /* CFG1 can access extended space */ if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base) return cfg1_map(priv, bus, devfn, where); return NULL; } #ifdef CONFIG_OF static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; u8 val; irq = of_irq_parse_and_map_pci(dev, slot, pin); if (irq > 0) return irq; /* Care i8259 legacy systems */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &val); /* i8259 only have 15 IRQs */ if (val > 15) return 0; return val; } /* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; /* RS780/SR5690 only accept 32-bit PCI config operations */ static struct pci_ops loongson_pci_ops32 = { .map_bus = pci_loongson_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write32, }; static const struct loongson_pci_data ls2k_pci_data = { .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN, .ops = &loongson_pci_ops, }; static const struct loongson_pci_data ls7a_pci_data = { .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN, .ops = &loongson_pci_ops, }; static const struct loongson_pci_data rs780e_pci_data = { .flags = FLAG_CFG0, .ops = &loongson_pci_ops32, }; static const struct of_device_id loongson_pci_of_match[] = { { .compatible = "loongson,ls2k-pci", .data = &ls2k_pci_data, }, { .compatible = "loongson,ls7a-pci", .data = &ls7a_pci_data, }, { .compatible = "loongson,rs780e-pci", .data = &rs780e_pci_data, }, {} }; static int loongson_pci_probe(struct platform_device *pdev) { struct loongson_pci *priv; struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; struct resource *regs; if (!node) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv)); if (!bridge) return -ENODEV; priv = pci_host_bridge_priv(bridge); priv->pdev = pdev; priv->data = of_device_get_match_data(dev); if (priv->data->flags & FLAG_CFG0) { regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) dev_err(dev, "missing mem resources for cfg0\n"); else { priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs); if (IS_ERR(priv->cfg0_base)) return PTR_ERR(priv->cfg0_base); } } if (priv->data->flags & FLAG_CFG1) { regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!regs) dev_info(dev, "missing mem resource for cfg1\n"); else { priv->cfg1_base = devm_pci_remap_cfg_resource(dev, regs); if (IS_ERR(priv->cfg1_base)) priv->cfg1_base = NULL; } } bridge->sysdata = priv; bridge->ops = priv->data->ops; bridge->map_irq = loongson_map_irq; return pci_host_probe(bridge); } static struct platform_driver loongson_pci_driver = { .driver = { .name = "loongson-pci", .of_match_table = loongson_pci_of_match, }, .probe = loongson_pci_probe, }; builtin_platform_driver(loongson_pci_driver); #endif #ifdef CONFIG_ACPI static int loongson_pci_ecam_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct loongson_pci *priv; struct loongson_pci_data *data; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; data = 
devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; cfg->priv = priv; data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN; priv->data = data; priv->cfg1_base = cfg->win - (cfg->busr.start << 16); return 0; } const struct pci_ecam_ops loongson_pci_ecam_ops = { .bus_shift = 16, .init = loongson_pci_ecam_init, .pci_ops = { .map_bus = pci_loongson_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; #endif
linux-master
drivers/pci/controller/pci-loongson.c
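/*
 * Illustrative sketch, not part of the driver above: cfg1_map() builds the
 * offset into the CFG1 window as
 *   bit  28        Type 1 access (only for buses below the root bus),
 *   bits 27:24     bits 11:8 of the config register offset (extended space),
 *   bits 23:16     bus number (likewise only for non-root buses),
 *   bits 15:8      devfn,
 *   bits 7:0       low byte of the config register offset.
 * The helper below (name made up, plain userspace code) restates that
 * packing so the layout can be checked by printing a value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t cfg1_offset(uint8_t bus, uint8_t devfn, uint16_t where, int root_bus)
{
	uint32_t off = 0;

	if (!root_bus) {
		off |= 1U << 28;                /* Type 1 access */
		off |= (uint32_t)bus << 16;
	}
	off |= ((uint32_t)devfn << 8) | (where & 0xffu) |
	       (((uint32_t)where & 0xf00) << 16);
	return off;
}

int main(void)
{
	/* bus 1, device 2 function 0, extended config offset 0x100 -> 0x11011000 */
	printf("%#x\n", (unsigned int)cfg1_offset(1, (2 << 3) | 0, 0x100, 0));
	return 0;
}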
// SPDX-License-Identifier: GPL-2.0 /* * PCIe driver for Renesas R-Car SoCs * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd * * Based on: * arch/sh/drivers/pci/pcie-sh7786.c * arch/sh/drivers/pci/ops-sh7786.c * Copyright (C) 2009 - 2011 Paul Mundt * * Author: Phil Edworthy <[email protected]> */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "pcie-rcar.h" struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; spinlock_t mask_lock; int irq1; int irq2; }; /* Structure representing the PCIe interface */ struct rcar_pcie_host { struct rcar_pcie pcie; struct phy *phy; struct clk *bus_clk; struct rcar_msi msi; int (*phy_init_fn)(struct rcar_pcie_host *host); }; static DEFINE_SPINLOCK(pmsr_lock); static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) { unsigned long flags; u32 pmsr, val; int ret = 0; spin_lock_irqsave(&pmsr_lock, flags); if (!pcie_base || pm_runtime_suspended(pcie_dev)) { ret = -EINVAL; goto unlock_exit; } pmsr = readl(pcie_base + PMSR); /* * Test if the PCIe controller received PM_ENTER_L1 DLLP and * the PCIe controller is not in L1 link state. If true, apply * fix, which will put the controller into L1 link state, from * which it can return to L0s/L0 on its own. */ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) { writel(L1IATN, pcie_base + PMCTLR); ret = readl_poll_timeout_atomic(pcie_base + PMSR, val, val & L1FAEG, 10, 1000); WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret); writel(L1FAEG | PMEL1RX, pcie_base + PMSR); } unlock_exit: spin_unlock_irqrestore(&pmsr_lock, flags); return ret; } static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi) { return container_of(msi, struct rcar_pcie_host, msi); } static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) { unsigned int shift = BITS_PER_BYTE * (where & 3); u32 val = rcar_pci_read_reg(pcie, where & ~3); return val >> shift; } #ifdef CONFIG_ARM #define __rcar_pci_rw_reg_workaround(instr) \ " .arch armv7-a\n" \ "1: " instr " %1, [%2]\n" \ "2: isb\n" \ "3: .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ "4: mov %0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \ " b 3b\n" \ " .popsection\n" \ " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4b\n" \ " .long 2b, 4b\n" \ " .popsection\n" #endif static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val, unsigned int reg) { int error = PCIBIOS_SUCCESSFUL; #ifdef CONFIG_ARM asm volatile( __rcar_pci_rw_reg_workaround("str") : "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory"); #else rcar_pci_write_reg(pcie, val, reg); #endif return error; } static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val, unsigned int reg) { int error = PCIBIOS_SUCCESSFUL; #ifdef CONFIG_ARM asm volatile( __rcar_pci_rw_reg_workaround("ldr") : "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory"); if (error != PCIBIOS_SUCCESSFUL) PCI_SET_ERROR_RESPONSE(val); #else *val = rcar_pci_read_reg(pcie, reg); #endif return error; } /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ static 
int rcar_pcie_config_access(struct rcar_pcie_host *host, unsigned char access_type, struct pci_bus *bus, unsigned int devfn, int where, u32 *data) { struct rcar_pcie *pcie = &host->pcie; unsigned int dev, func, reg, index; int ret; /* Wake the bus up in case it is in L1 state. */ ret = rcar_pcie_wakeup(pcie->dev, pcie->base); if (ret) { PCI_SET_ERROR_RESPONSE(data); return PCIBIOS_SET_FAILED; } dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); reg = where & ~3; index = reg / 4; /* * While each channel has its own memory-mapped extended config * space, it's generally only accessible when in endpoint mode. * When in root complex mode, the controller is unable to target * itself with either type 0 or type 1 accesses, and indeed, any * controller initiated target transfer to its own config space * result in a completer abort. * * Each channel effectively only supports a single device, but as * the same channel <-> device access works for any PCI_SLOT() * value, we cheat a bit here and bind the controller's config * space to devfn 0 in order to enable self-enumeration. In this * case the regular ECAR/ECDR path is sidelined and the mangled * config access itself is initiated as an internal bus transaction. */ if (pci_is_root_bus(bus)) { if (dev != 0) return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == RCAR_PCI_ACCESS_READ) *data = rcar_pci_read_reg(pcie, PCICONF(index)); else rcar_pci_write_reg(pcie, *data, PCICONF(index)); return PCIBIOS_SUCCESSFUL; } /* Clear errors */ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); /* Set the PIO address */ rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); /* Enable the configuration access */ if (pci_is_root_bus(bus->parent)) rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE0, PCIECCTLR); else rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE1, PCIECCTLR); /* Check for errors */ if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) return PCIBIOS_DEVICE_NOT_FOUND; /* Check for master and target aborts */ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == RCAR_PCI_ACCESS_READ) ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR); else ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR); /* Disable the configuration access */ rcar_pci_write_reg(pcie, 0, PCIECCTLR); return ret; } static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct rcar_pcie_host *host = bus->sysdata; int ret; ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ, bus, devfn, where, val); if (ret != PCIBIOS_SUCCESSFUL) return ret; if (size == 1) *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff; else if (size == 2) *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff; dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", bus->number, devfn, where, size, *val); return ret; } /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct rcar_pcie_host *host = bus->sysdata; unsigned int shift; u32 data; int ret; ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", bus->number, devfn, where, size, val); 
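	/*
	 * Sub-dword writes are emulated as read-modify-write: the aligned
	 * 32-bit word was read back above, the new byte(s) are merged into
	 * it below, and the whole word is then written out again.
	 */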
if (size == 1) { shift = BITS_PER_BYTE * (where & 3); data &= ~(0xff << shift); data |= ((val & 0xff) << shift); } else if (size == 2) { shift = BITS_PER_BYTE * (where & 2); data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); } else data = val; ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE, bus, devfn, where, &data); return ret; } static struct pci_ops rcar_pcie_ops = { .read = rcar_pcie_read_conf, .write = rcar_pcie_write_conf, }; static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; unsigned int timeout = 1000; u32 macsr; if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) return; if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { dev_err(dev, "Speed change already in progress\n"); return; } macsr = rcar_pci_read_reg(pcie, MACSR); if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) goto done; /* Set target link speed to 5.0 GT/s */ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, PCI_EXP_LNKSTA_CLS_5_0GB); /* Set speed change reason as intentional factor */ rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) rcar_pci_write_reg(pcie, macsr, MACSR); /* Start link speed change */ rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); while (timeout--) { macsr = rcar_pci_read_reg(pcie, MACSR); if (macsr & SPCHGFIN) { /* Clear the interrupt bits */ rcar_pci_write_reg(pcie, macsr, MACSR); if (macsr & SPCHGFAIL) dev_err(dev, "Speed change failed\n"); goto done; } msleep(1); } dev_err(dev, "Speed change timed out\n"); done: dev_info(dev, "Current link speed is %s GT/s\n", (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5"); } static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); struct resource_entry *win; LIST_HEAD(res); int i = 0; /* Try setting 5 GT/s link speed */ rcar_pcie_force_speedup(pcie); /* Setup PCI resources */ resource_list_for_each_entry(win, &bridge->windows) { struct resource *res = win->res; if (!res->flags) continue; switch (resource_type(res)) { case IORESOURCE_IO: case IORESOURCE_MEM: rcar_pcie_set_outbound(pcie, i, win); i++; break; } } } static int rcar_pcie_enable(struct rcar_pcie_host *host) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); rcar_pcie_hw_enable(host); pci_add_flags(PCI_REASSIGN_ALL_BUS); bridge->sysdata = host; bridge->ops = &rcar_pcie_ops; return pci_host_probe(bridge); } static int phy_wait_for_ack(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; unsigned int timeout = 100; while (timeout--) { if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) return 0; udelay(100); } dev_err(dev, "Access to PCIe phy timed out\n"); return -ETIMEDOUT; } static void phy_write_reg(struct rcar_pcie *pcie, unsigned int rate, u32 addr, unsigned int lane, u32 data) { u32 phyaddr; phyaddr = WRITE_CMD | ((rate & 1) << RATE_POS) | ((lane & 0xf) << LANE_POS) | ((addr & 0xff) << ADR_POS); /* Set write data */ rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); /* Ignore errors as they will be dealt with if the data link is down */ phy_wait_for_ack(pcie); /* Clear command */ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); /* Ignore errors as they will be dealt with if the data link is down */ phy_wait_for_ack(pcie); } static int rcar_pcie_hw_init(struct rcar_pcie *pcie) { int err; 
/* Begin initialization */ rcar_pci_write_reg(pcie, 0, PCIETCTLR); /* Set mode */ rcar_pci_write_reg(pcie, 1, PCIEMSR); err = rcar_pcie_wait_for_phyrdy(pcie); if (err) return err; /* * Initial header for port config space is type 1, set the device * class to match. Hardware takes care of propagating the IDSETR * settings, so there is no need to bother with a quirk. */ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1); /* * Setup Secondary Bus Number & Subordinate Bus Number, even though * they aren't used, to avoid bridge being detected as broken. */ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); /* Initialize default capabilities. */ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, PCI_HEADER_TYPE_BRIDGE); /* Enable data link layer active state reporting */ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, PCI_EXP_LNKCAP_DLLLARC); /* Write out the physical slot number = 0 */ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); /* Set the completion timer timeout to the maximum 50ms. */ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); /* Terminate list of capabilities (Next Capability Offset=0) */ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); /* Enable MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); /* Finish initialization - establish a PCI Express link */ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); /* This will timeout if we don't have a link. */ err = rcar_pcie_wait_for_dl(pcie); if (err) return err; /* Enable INTx interrupts */ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); wmb(); return 0; } static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; /* Initialize the phy */ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); return 0; } static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; /* * These settings come from the R-Car Series, 2nd Generation User's * Manual, section 50.3.1 (2) Initialization of the physical layer. 
*/ rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); /* The following value is for DC connection, no termination resistor */ rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); return 0; } static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host) { int err; err = phy_init(host->phy); if (err) return err; err = phy_power_on(host->phy); if (err) phy_exit(host->phy); return err; } static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) { struct rcar_pcie_host *host = data; struct rcar_pcie *pcie = &host->pcie; struct rcar_msi *msi = &host->msi; struct device *dev = pcie->dev; unsigned long reg; reg = rcar_pci_read_reg(pcie, PCIEMSIFR); /* MSI & INTx share an interrupt - we only handle MSI here */ if (!reg) return IRQ_NONE; while (reg) { unsigned int index = find_first_bit(&reg, 32); int ret; ret = generic_handle_domain_irq(msi->domain->parent, index); if (ret) { /* Unknown MSI, just clear it */ dev_dbg(dev, "unexpected MSI\n"); rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR); } /* see if there's any more pending in this vector */ reg = rcar_pci_read_reg(pcie, PCIEMSIFR); } return IRQ_HANDLED; } static void rcar_msi_top_irq_ack(struct irq_data *d) { irq_chip_ack_parent(d); } static void rcar_msi_top_irq_mask(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void rcar_msi_top_irq_unmask(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip rcar_msi_top_chip = { .name = "PCIe MSI", .irq_ack = rcar_msi_top_irq_ack, .irq_mask = rcar_msi_top_irq_mask, .irq_unmask = rcar_msi_top_irq_unmask, }; static void rcar_msi_irq_ack(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; /* clear the interrupt */ rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR); } static void rcar_msi_irq_mask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; unsigned long flags; u32 value; spin_lock_irqsave(&msi->mask_lock, flags); value = rcar_pci_read_reg(pcie, PCIEMSIIER); value &= ~BIT(d->hwirq); rcar_pci_write_reg(pcie, value, PCIEMSIIER); spin_unlock_irqrestore(&msi->mask_lock, flags); } static void rcar_msi_irq_unmask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; unsigned long flags; u32 value; spin_lock_irqsave(&msi->mask_lock, flags); value = rcar_pci_read_reg(pcie, PCIEMSIIER); value |= BIT(d->hwirq); rcar_pci_write_reg(pcie, value, PCIEMSIIER); spin_unlock_irqrestore(&msi->mask_lock, flags); } static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { return -EINVAL; } static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct rcar_msi *msi = irq_data_get_irq_chip_data(data); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); msg->data = data->hwirq; } static struct irq_chip rcar_msi_bottom_chip = { .name = "R-Car MSI", .irq_ack = rcar_msi_irq_ack, .irq_mask = rcar_msi_irq_mask, .irq_unmask = 
rcar_msi_irq_unmask, .irq_set_affinity = rcar_msi_set_affinity, .irq_compose_msi_msg = rcar_compose_msi_msg, }; static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct rcar_msi *msi = domain->host_data; unsigned int i; int hwirq; mutex_lock(&msi->map_lock); hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs)); mutex_unlock(&msi->map_lock); if (hwirq < 0) return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, &rcar_msi_bottom_chip, domain->host_data, handle_edge_irq, NULL, NULL); return 0; } static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct rcar_msi *msi = domain->host_data; mutex_lock(&msi->map_lock); bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&msi->map_lock); } static const struct irq_domain_ops rcar_msi_domain_ops = { .alloc = rcar_msi_domain_alloc, .free = rcar_msi_domain_free, }; static struct msi_domain_info rcar_msi_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI), .chip = &rcar_msi_top_chip, }; static int rcar_allocate_domains(struct rcar_msi *msi) { struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); struct irq_domain *parent; parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, &rcar_msi_domain_ops, msi); if (!parent) { dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent); if (!msi->domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); irq_domain_remove(parent); return -ENOMEM; } return 0; } static void rcar_free_domains(struct rcar_msi *msi) { struct irq_domain *parent = msi->domain->parent; irq_domain_remove(msi->domain); irq_domain_remove(parent); } static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; struct device *dev = pcie->dev; struct rcar_msi *msi = &host->msi; struct resource res; int err; mutex_init(&msi->map_lock); spin_lock_init(&msi->mask_lock); err = of_address_to_resource(dev->of_node, 0, &res); if (err) return err; err = rcar_allocate_domains(msi); if (err) return err; /* Two irqs are for MSI, but they are also used for non-MSI irqs */ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, IRQF_SHARED | IRQF_NO_THREAD, rcar_msi_bottom_chip.name, host); if (err < 0) { dev_err(dev, "failed to request IRQ: %d\n", err); goto err; } err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, IRQF_SHARED | IRQF_NO_THREAD, rcar_msi_bottom_chip.name, host); if (err < 0) { dev_err(dev, "failed to request IRQ: %d\n", err); goto err; } /* disable all MSIs */ rcar_pci_write_reg(pcie, 0, PCIEMSIIER); /* * Setup MSI data target using RC base address address, which * is guaranteed to be in the low 32bit range on any R-Car HW. 
*/ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR); rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR); return 0; err: rcar_free_domains(msi); return err; } static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; /* Disable all MSI interrupts */ rcar_pci_write_reg(pcie, 0, PCIEMSIIER); /* Disable address decoding of the MSI interrupt, MSIFE */ rcar_pci_write_reg(pcie, 0, PCIEMSIALR); rcar_free_domains(&host->msi); } static int rcar_pcie_get_resources(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; struct device *dev = pcie->dev; struct resource res; int err, i; host->phy = devm_phy_optional_get(dev, "pcie"); if (IS_ERR(host->phy)) return PTR_ERR(host->phy); err = of_address_to_resource(dev->of_node, 0, &res); if (err) return err; pcie->base = devm_ioremap_resource(dev, &res); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); host->bus_clk = devm_clk_get(dev, "pcie_bus"); if (IS_ERR(host->bus_clk)) { dev_err(dev, "cannot get pcie bus clock\n"); return PTR_ERR(host->bus_clk); } i = irq_of_parse_and_map(dev->of_node, 0); if (!i) { dev_err(dev, "cannot get platform resources for msi interrupt\n"); err = -ENOENT; goto err_irq1; } host->msi.irq1 = i; i = irq_of_parse_and_map(dev->of_node, 1); if (!i) { dev_err(dev, "cannot get platform resources for msi interrupt\n"); err = -ENOENT; goto err_irq2; } host->msi.irq2 = i; return 0; err_irq2: irq_dispose_mapping(host->msi.irq1); err_irq1: return err; } static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, struct resource_entry *entry, int *index) { u64 restype = entry->res->flags; u64 cpu_addr = entry->res->start; u64 cpu_end = entry->res->end; u64 pci_addr = entry->res->start - entry->offset; u32 flags = LAM_64BIT | LAR_ENABLE; u64 mask; u64 size = resource_size(entry->res); int idx = *index; if (restype & IORESOURCE_PREFETCH) flags |= LAM_PREFETCH; while (cpu_addr < cpu_end) { if (idx >= MAX_NR_INBOUND_MAPS - 1) { dev_err(pcie->dev, "Failed to map inbound regions!\n"); return -EINVAL; } /* * If the size of the range is larger than the alignment of * the start address, we have to use multiple entries to * perform the mapping. 
*/ if (cpu_addr > 0) { unsigned long nr_zeros = __ffs64(cpu_addr); u64 alignment = 1ULL << nr_zeros; size = min(size, alignment); } /* Hardware supports max 4GiB inbound region */ size = min(size, 1ULL << 32); mask = roundup_pow_of_two(size) - 1; mask &= ~0xf; rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr, lower_32_bits(mask) | flags, idx, true); pci_addr += size; cpu_addr += size; idx += 2; } *index = idx; return 0; } static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); struct resource_entry *entry; int index = 0, err = 0; resource_list_for_each_entry(entry, &bridge->dma_ranges) { err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index); if (err) break; } return err; } static const struct of_device_id rcar_pcie_of_match[] = { { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_phy_init_h1 }, { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_phy_init_gen2 }, { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_phy_init_gen2 }, { .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_phy_init_gen2 }, { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_phy_init_gen3 }, { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_phy_init_gen3 }, {}, }; static int rcar_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rcar_pcie_host *host; struct rcar_pcie *pcie; u32 data; int err; struct pci_host_bridge *bridge; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host)); if (!bridge) return -ENOMEM; host = pci_host_bridge_priv(bridge); pcie = &host->pcie; pcie->dev = dev; platform_set_drvdata(pdev, host); pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); goto err_pm_put; } err = rcar_pcie_get_resources(host); if (err < 0) { dev_err(dev, "failed to request resources: %d\n", err); goto err_pm_put; } err = clk_prepare_enable(host->bus_clk); if (err) { dev_err(dev, "failed to enable bus clock: %d\n", err); goto err_unmap_msi_irqs; } err = rcar_pcie_parse_map_dma_ranges(host); if (err) goto err_clk_disable; host->phy_init_fn = of_device_get_match_data(dev); err = host->phy_init_fn(host); if (err) { dev_err(dev, "failed to init PCIe PHY\n"); goto err_clk_disable; } /* Failure to get a link might just be that no cards are inserted */ if (rcar_pcie_hw_init(pcie)) { dev_info(dev, "PCIe link down\n"); err = -ENODEV; goto err_phy_shutdown; } data = rcar_pci_read_reg(pcie, MACSR); dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); if (IS_ENABLED(CONFIG_PCI_MSI)) { err = rcar_pcie_enable_msi(host); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); goto err_phy_shutdown; } } err = rcar_pcie_enable(host); if (err) goto err_msi_teardown; return 0; err_msi_teardown: if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pcie_teardown_msi(host); err_phy_shutdown: if (host->phy) { phy_power_off(host->phy); phy_exit(host->phy); } err_clk_disable: clk_disable_unprepare(host->bus_clk); err_unmap_msi_irqs: irq_dispose_mapping(host->msi.irq2); irq_dispose_mapping(host->msi.irq1); err_pm_put: pm_runtime_put(dev); pm_runtime_disable(dev); return err; } static int rcar_pcie_resume(struct device *dev) { struct rcar_pcie_host *host = dev_get_drvdata(dev); struct rcar_pcie *pcie = &host->pcie; unsigned int data; int err; err = rcar_pcie_parse_map_dma_ranges(host); if (err) return 0; /* Failure to get a link might just be that no cards are inserted */ err = host->phy_init_fn(host); 
if (err) { dev_info(dev, "PCIe link down\n"); return 0; } data = rcar_pci_read_reg(pcie, MACSR); dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); /* Enable MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) { struct resource res; u32 val; of_address_to_resource(dev->of_node, 0, &res); rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR); rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR); bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR); rcar_pci_write_reg(pcie, val, PCIEMSIIER); } rcar_pcie_hw_enable(host); return 0; } static int rcar_pcie_resume_noirq(struct device *dev) { struct rcar_pcie_host *host = dev_get_drvdata(dev); struct rcar_pcie *pcie = &host->pcie; if (rcar_pci_read_reg(pcie, PMSR) && !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) return 0; /* Re-establish the PCIe link */ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); return rcar_pcie_wait_for_dl(pcie); } static const struct dev_pm_ops rcar_pcie_pm_ops = { SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume) .resume_noirq = rcar_pcie_resume_noirq, }; static struct platform_driver rcar_pcie_driver = { .driver = { .name = "rcar-pcie", .of_match_table = rcar_pcie_of_match, .pm = &rcar_pcie_pm_ops, .suppress_bind_attrs = true, }, .probe = rcar_pcie_probe, }; #ifdef CONFIG_ARM static int rcar_pcie_aarch32_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { return !fixup_exception(regs); } static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = { { .compatible = "renesas,pcie-r8a7779" }, { .compatible = "renesas,pcie-r8a7790" }, { .compatible = "renesas,pcie-r8a7791" }, { .compatible = "renesas,pcie-rcar-gen2" }, {}, }; static int __init rcar_pcie_init(void) { if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) { #ifdef CONFIG_ARM_LPAE hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0, "asynchronous external abort"); #else hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0, "imprecise external abort"); #endif } return platform_driver_register(&rcar_pcie_driver); } device_initcall(rcar_pcie_init); #else builtin_platform_driver(rcar_pcie_driver); #endif
linux-master
drivers/pci/controller/pcie-rcar-host.c
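The sub-word handling in rcar_pcie_write_conf() is a plain read-modify-write on the aligned 32-bit dword: the byte or halfword lane selected by the low bits of "where" is masked out and the new value shifted in. The sketch below is a minimal standalone illustration of that merge, not driver code; the helper name merge_config_write is hypothetical and it assumes the caller already fetched the aligned dword.

#include <stdint.h>

/*
 * Hypothetical helper mirroring the byte/halfword merge performed by
 * rcar_pcie_write_conf(): mask out the addressed lane of the aligned
 * 32-bit dword, then shift the new value into place.
 */
static uint32_t merge_config_write(uint32_t dword, int where, int size,
				   uint32_t val)
{
	unsigned int shift;

	if (size == 1) {
		shift = 8 * (where & 3);		/* byte lane 0..3 */
		dword &= ~(0xffu << shift);
		dword |= (val & 0xffu) << shift;
	} else if (size == 2) {
		shift = 8 * (where & 2);		/* halfword lane 0 or 2 */
		dword &= ~(0xffffu << shift);
		dword |= (val & 0xffffu) << shift;
	} else {
		dword = val;				/* full 32-bit write */
	}
	return dword;
}

For instance, writing the byte 0xab at offset 0x41 into a dword that currently reads 0x11223344 yields 0x1122ab44, which is then written back through the PIO data register as in the driver.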
// SPDX-License-Identifier: GPL-2.0+ /* * APM X-Gene MSI Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Author: Tanmay Inamdar <[email protected]> * Duc Dang <[email protected]> */ #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/irqchip/chained_irq.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/of_pci.h> #define MSI_IR0 0x000000 #define MSI_INT0 0x800000 #define IDX_PER_GROUP 8 #define IRQS_PER_IDX 16 #define NR_HW_IRQS 16 #define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) struct xgene_msi_group { struct xgene_msi *msi; int gic_irq; u32 msi_grp; }; struct xgene_msi { struct device_node *node; struct irq_domain *inner_domain; struct irq_domain *msi_domain; u64 msi_addr; void __iomem *msi_regs; unsigned long *bitmap; struct mutex bitmap_lock; struct xgene_msi_group *msi_groups; int num_cpus; }; /* Global data */ static struct xgene_msi xgene_msi_ctrl; static struct irq_chip xgene_msi_top_irq_chip = { .name = "X-Gene1 MSI", .irq_enable = pci_msi_unmask_irq, .irq_disable = pci_msi_mask_irq, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info xgene_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX), .chip = &xgene_msi_top_irq_chip, }; /* * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where * n is group number (0..F), x is index of registers in each group (0..7) * The register layout is as follows: * MSI0IR0 base_addr * MSI0IR1 base_addr + 0x10000 * ... ... * MSI0IR6 base_addr + 0x60000 * MSI0IR7 base_addr + 0x70000 * MSI1IR0 base_addr + 0x80000 * MSI1IR1 base_addr + 0x90000 * ... ... * MSI1IR7 base_addr + 0xF0000 * MSI2IR0 base_addr + 0x100000 * ... ... * MSIFIR0 base_addr + 0x780000 * MSIFIR1 base_addr + 0x790000 * ... ... * MSIFIR7 base_addr + 0x7F0000 * MSIINT0 base_addr + 0x800000 * MSIINT1 base_addr + 0x810000 * ... ... * MSIINTF base_addr + 0x8F0000 * * Each index register supports 16 MSI vectors (0..15) to generate interrupt. * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination * registers. * * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate * the MSI pending status caused by 1 of its 8 index registers. */ /* MSInIRx read helper */ static u32 xgene_msi_ir_read(struct xgene_msi *msi, u32 msi_grp, u32 msir_idx) { return readl_relaxed(msi->msi_regs + MSI_IR0 + (msi_grp << 19) + (msir_idx << 16)); } /* MSIINTn read helper */ static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) { return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); } /* * With 2048 MSI vectors supported, the MSI message can be constructed using * following scheme: * - Divide into 8 256-vector groups * Group 0: 0-255 * Group 1: 256-511 * Group 2: 512-767 * ... * Group 7: 1792-2047 * - Each 256-vector group is divided into 16 16-vector groups * As an example: 16 16-vector groups for 256-vector group 0-255 is * Group 0: 0-15 * Group 1: 16-32 * ... 
* Group 15: 240-255 * - The termination address of MSI vector in 256-vector group n and 16-vector * group x is the address of MSIxIRn * - The data for MSI vector in 16-vector group x is x */ static u32 hwirq_to_reg_set(unsigned long hwirq) { return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); } static u32 hwirq_to_group(unsigned long hwirq) { return (hwirq % NR_HW_IRQS); } static u32 hwirq_to_msi_data(unsigned long hwirq) { return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); } static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xgene_msi *msi = irq_data_get_irq_chip_data(data); u32 reg_set = hwirq_to_reg_set(data->hwirq); u32 group = hwirq_to_group(data->hwirq); u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); msg->address_hi = upper_32_bits(target_addr); msg->address_lo = lower_32_bits(target_addr); msg->data = hwirq_to_msi_data(data->hwirq); } /* * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain * the expected behaviour of .set_affinity for each MSI interrupt, the 16 * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a * consequence, the total MSI vectors that X-Gene v1 supports will be * reduced to 256 (2048/8) vectors. */ static int hwirq_to_cpu(unsigned long hwirq) { return (hwirq % xgene_msi_ctrl.num_cpus); } static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) { return (hwirq - hwirq_to_cpu(hwirq)); } static int xgene_msi_set_affinity(struct irq_data *irqdata, const struct cpumask *mask, bool force) { int target_cpu = cpumask_first(mask); int curr_cpu; curr_cpu = hwirq_to_cpu(irqdata->hwirq); if (curr_cpu == target_cpu) return IRQ_SET_MASK_OK_DONE; /* Update MSI number to target the new CPU */ irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; return IRQ_SET_MASK_OK; } static struct irq_chip xgene_msi_bottom_irq_chip = { .name = "MSI", .irq_set_affinity = xgene_msi_set_affinity, .irq_compose_msi_msg = xgene_compose_msi_msg, }; static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct xgene_msi *msi = domain->host_data; int msi_irq; mutex_lock(&msi->bitmap_lock); msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, msi->num_cpus, 0); if (msi_irq < NR_MSI_VEC) bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); else msi_irq = -ENOSPC; mutex_unlock(&msi->bitmap_lock); if (msi_irq < 0) return msi_irq; irq_domain_set_info(domain, virq, msi_irq, &xgene_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); return 0; } static void xgene_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct xgene_msi *msi = irq_data_get_irq_chip_data(d); u32 hwirq; mutex_lock(&msi->bitmap_lock); hwirq = hwirq_to_canonical_hwirq(d->hwirq); bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); mutex_unlock(&msi->bitmap_lock); irq_domain_free_irqs_parent(domain, virq, nr_irqs); } static const struct irq_domain_ops msi_domain_ops = { .alloc = xgene_irq_domain_alloc, .free = xgene_irq_domain_free, }; static int xgene_allocate_domains(struct xgene_msi *msi) { msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, &msi_domain_ops, msi); if (!msi->inner_domain) return -ENOMEM; msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), &xgene_msi_domain_info, 
msi->inner_domain); if (!msi->msi_domain) { irq_domain_remove(msi->inner_domain); return -ENOMEM; } return 0; } static void xgene_free_domains(struct xgene_msi *msi) { if (msi->msi_domain) irq_domain_remove(msi->msi_domain); if (msi->inner_domain) irq_domain_remove(msi->inner_domain); } static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) { xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL); if (!xgene_msi->bitmap) return -ENOMEM; mutex_init(&xgene_msi->bitmap_lock); xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, sizeof(struct xgene_msi_group), GFP_KERNEL); if (!xgene_msi->msi_groups) return -ENOMEM; return 0; } static void xgene_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct xgene_msi_group *msi_groups; struct xgene_msi *xgene_msi; int msir_index, msir_val, hw_irq, ret; u32 intr_index, grp_select, msi_grp; chained_irq_enter(chip, desc); msi_groups = irq_desc_get_handler_data(desc); xgene_msi = msi_groups->msi; msi_grp = msi_groups->msi_grp; /* * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt * If bit x of this register is set (x is 0..7), one or more interrupts * corresponding to MSInIRx is set. */ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); while (grp_select) { msir_index = ffs(grp_select) - 1; /* * Calculate MSInIRx address to read to check for interrupts * (refer to termination address and data assignment * described in xgene_compose_msi_msg() ) */ msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); while (msir_val) { intr_index = ffs(msir_val) - 1; /* * Calculate MSI vector number (refer to the termination * address and data assignment described in * xgene_compose_msi_msg function) */ hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * NR_HW_IRQS) + msi_grp; /* * As we have multiple hw_irq that maps to single MSI, * always look up the virq using the hw_irq as seen from * CPU0 */ hw_irq = hwirq_to_canonical_hwirq(hw_irq); ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq); WARN_ON_ONCE(ret); msir_val &= ~(1 << intr_index); } grp_select &= ~(1 << msir_index); if (!grp_select) { /* * We handled all interrupts happened in this group, * resample this group MSI_INTx register in case * something else has been made pending in the meantime */ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); } } chained_irq_exit(chip, desc); } static enum cpuhp_state pci_xgene_online; static void xgene_msi_remove(struct platform_device *pdev) { struct xgene_msi *msi = platform_get_drvdata(pdev); if (pci_xgene_online) cpuhp_remove_state(pci_xgene_online); cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); kfree(msi->msi_groups); bitmap_free(msi->bitmap); msi->bitmap = NULL; xgene_free_domains(msi); } static int xgene_msi_hwirq_alloc(unsigned int cpu) { struct xgene_msi *msi = &xgene_msi_ctrl; struct xgene_msi_group *msi_group; cpumask_var_t mask; int i; int err; for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { msi_group = &msi->msi_groups[i]; if (!msi_group->gic_irq) continue; irq_set_chained_handler_and_data(msi_group->gic_irq, xgene_msi_isr, msi_group); /* * Statically allocate MSI GIC IRQs to each CPU core. * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated * to each core. 
*/ if (alloc_cpumask_var(&mask, GFP_KERNEL)) { cpumask_clear(mask); cpumask_set_cpu(cpu, mask); err = irq_set_affinity(msi_group->gic_irq, mask); if (err) pr_err("failed to set affinity for GIC IRQ"); free_cpumask_var(mask); } else { pr_err("failed to alloc CPU mask for affinity\n"); err = -EINVAL; } if (err) { irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, NULL); return err; } } return 0; } static int xgene_msi_hwirq_free(unsigned int cpu) { struct xgene_msi *msi = &xgene_msi_ctrl; struct xgene_msi_group *msi_group; int i; for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { msi_group = &msi->msi_groups[i]; if (!msi_group->gic_irq) continue; irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, NULL); } return 0; } static const struct of_device_id xgene_msi_match_table[] = { {.compatible = "apm,xgene1-msi"}, {}, }; static int xgene_msi_probe(struct platform_device *pdev) { struct resource *res; int rc, irq_index; struct xgene_msi *xgene_msi; int virt_msir; u32 msi_val, msi_idx; xgene_msi = &xgene_msi_ctrl; platform_set_drvdata(pdev, xgene_msi); xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(xgene_msi->msi_regs)) { rc = PTR_ERR(xgene_msi->msi_regs); goto error; } xgene_msi->msi_addr = res->start; xgene_msi->node = pdev->dev.of_node; xgene_msi->num_cpus = num_possible_cpus(); rc = xgene_msi_init_allocator(xgene_msi); if (rc) { dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); goto error; } rc = xgene_allocate_domains(xgene_msi); if (rc) { dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); goto error; } for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { virt_msir = platform_get_irq(pdev, irq_index); if (virt_msir < 0) { rc = virt_msir; goto error; } xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; xgene_msi->msi_groups[irq_index].msi_grp = irq_index; xgene_msi->msi_groups[irq_index].msi = xgene_msi; } /* * MSInIRx registers are read-to-clear; before registering * interrupt handlers, read all of them to clear spurious * interrupts that may occur before the driver is probed. */ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) xgene_msi_ir_read(xgene_msi, irq_index, msi_idx); /* Read MSIINTn to confirm */ msi_val = xgene_msi_int_read(xgene_msi, irq_index); if (msi_val) { dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); rc = -EINVAL; goto error; } } rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", xgene_msi_hwirq_alloc, NULL); if (rc < 0) goto err_cpuhp; pci_xgene_online = rc; rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, xgene_msi_hwirq_free); if (rc) goto err_cpuhp; dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); return 0; err_cpuhp: dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); error: xgene_msi_remove(pdev); return rc; } static struct platform_driver xgene_msi_driver = { .driver = { .name = "xgene-msi", .of_match_table = xgene_msi_match_table, }, .probe = xgene_msi_probe, .remove_new = xgene_msi_remove, }; static int __init xgene_pcie_msi_init(void) { return platform_driver_register(&xgene_msi_driver); } subsys_initcall(xgene_pcie_msi_init);
linux-master
drivers/pci/controller/pci-xgene-msi.c
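The hwirq-to-message arithmetic in xgene_compose_msi_msg() follows directly from the register layout described above: the group number n selects the 0x80000-sized block, the index x selects one of eight MSInIRx registers inside it, and the payload picks one of the 16 vectors behind that register. The following is a standalone sketch of that arithmetic under the driver's constants; the function name and the 0x79000000 base address are purely illustrative assumptions, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define IDX_PER_GROUP	8	/* constants as defined by the driver */
#define IRQS_PER_IDX	16
#define NR_HW_IRQS	16

/*
 * Standalone illustration of the hwirq -> MSI message mapping used by
 * xgene_compose_msi_msg(). msi_base stands in for msi->msi_addr.
 */
static void xgene_msi_msg_for_hwirq(uint64_t msi_base, unsigned long hwirq,
				    uint64_t *addr, uint32_t *data)
{
	uint32_t reg_set = hwirq / (NR_HW_IRQS * IRQS_PER_IDX); /* index x */
	uint32_t group   = hwirq % NR_HW_IRQS;                  /* group n */

	/* Each MSInIRx register sits at base + (8*n + x) * 0x10000 */
	*addr = msi_base + (((8 * group) + reg_set) << 16);
	/* The data selects one of the 16 vectors behind that register */
	*data = (hwirq / NR_HW_IRQS) % IRQS_PER_IDX;
}

int main(void)
{
	uint64_t addr;
	uint32_t data;

	/* 0x79000000 is an assumed register base, for illustration only */
	xgene_msi_msg_for_hwirq(0x79000000ULL, 300, &addr, &data);
	printf("hwirq 300 -> addr 0x%llx data %u\n",
	       (unsigned long long)addr, data);
	return 0;
}

For hwirq 300 this gives group 12, index 1 and data 2, i.e. a termination address of base + 0x610000, matching the address computation used by xgene_msi_ir_read() when the ISR later scans the same register.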
// SPDX-License-Identifier: GPL-2.0 /* * Support for V3 Semiconductor PCI Local Bus to PCI Bridge * Copyright (C) 2017 Linus Walleij <[email protected]> * * Based on the code from arch/arm/mach-integrator/pci_v3.c * Copyright (C) 1999 ARM Limited * Copyright (C) 2000-2001 Deep Blue Solutions Ltd * * Contributors to the old driver include: * Russell King <[email protected]> * David A. Rusling <[email protected]> (uHAL, ARM Firmware suite) * Rob Herring <[email protected]> * Liviu Dudau <[email protected]> * Grant Likely <[email protected]> * Arnd Bergmann <[email protected]> * Bjorn Helgaas <[email protected]> */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/clk.h> #include "../pci.h" #define V3_PCI_VENDOR 0x00000000 #define V3_PCI_DEVICE 0x00000002 #define V3_PCI_CMD 0x00000004 #define V3_PCI_STAT 0x00000006 #define V3_PCI_CC_REV 0x00000008 #define V3_PCI_HDR_CFG 0x0000000C #define V3_PCI_IO_BASE 0x00000010 #define V3_PCI_BASE0 0x00000014 #define V3_PCI_BASE1 0x00000018 #define V3_PCI_SUB_VENDOR 0x0000002C #define V3_PCI_SUB_ID 0x0000002E #define V3_PCI_ROM 0x00000030 #define V3_PCI_BPARAM 0x0000003C #define V3_PCI_MAP0 0x00000040 #define V3_PCI_MAP1 0x00000044 #define V3_PCI_INT_STAT 0x00000048 #define V3_PCI_INT_CFG 0x0000004C #define V3_LB_BASE0 0x00000054 #define V3_LB_BASE1 0x00000058 #define V3_LB_MAP0 0x0000005E #define V3_LB_MAP1 0x00000062 #define V3_LB_BASE2 0x00000064 #define V3_LB_MAP2 0x00000066 #define V3_LB_SIZE 0x00000068 #define V3_LB_IO_BASE 0x0000006E #define V3_FIFO_CFG 0x00000070 #define V3_FIFO_PRIORITY 0x00000072 #define V3_FIFO_STAT 0x00000074 #define V3_LB_ISTAT 0x00000076 #define V3_LB_IMASK 0x00000077 #define V3_SYSTEM 0x00000078 #define V3_LB_CFG 0x0000007A #define V3_PCI_CFG 0x0000007C #define V3_DMA_PCI_ADR0 0x00000080 #define V3_DMA_PCI_ADR1 0x00000090 #define V3_DMA_LOCAL_ADR0 0x00000084 #define V3_DMA_LOCAL_ADR1 0x00000094 #define V3_DMA_LENGTH0 0x00000088 #define V3_DMA_LENGTH1 0x00000098 #define V3_DMA_CSR0 0x0000008B #define V3_DMA_CSR1 0x0000009B #define V3_DMA_CTLB_ADR0 0x0000008C #define V3_DMA_CTLB_ADR1 0x0000009C #define V3_DMA_DELAY 0x000000E0 #define V3_MAIL_DATA 0x000000C0 #define V3_PCI_MAIL_IEWR 0x000000D0 #define V3_PCI_MAIL_IERD 0x000000D2 #define V3_LB_MAIL_IEWR 0x000000D4 #define V3_LB_MAIL_IERD 0x000000D6 #define V3_MAIL_WR_STAT 0x000000D8 #define V3_MAIL_RD_STAT 0x000000DA #define V3_QBA_MAP 0x000000DC /* PCI STATUS bits */ #define V3_PCI_STAT_PAR_ERR BIT(15) #define V3_PCI_STAT_SYS_ERR BIT(14) #define V3_PCI_STAT_M_ABORT_ERR BIT(13) #define V3_PCI_STAT_T_ABORT_ERR BIT(12) /* LB ISTAT bits */ #define V3_LB_ISTAT_MAILBOX BIT(7) #define V3_LB_ISTAT_PCI_RD BIT(6) #define V3_LB_ISTAT_PCI_WR BIT(5) #define V3_LB_ISTAT_PCI_INT BIT(4) #define V3_LB_ISTAT_PCI_PERR BIT(3) #define V3_LB_ISTAT_I2O_QWR BIT(2) #define V3_LB_ISTAT_DMA1 BIT(1) #define V3_LB_ISTAT_DMA0 BIT(0) /* PCI COMMAND bits */ #define V3_COMMAND_M_FBB_EN BIT(9) #define V3_COMMAND_M_SERR_EN BIT(8) #define V3_COMMAND_M_PAR_EN BIT(6) #define V3_COMMAND_M_MASTER_EN BIT(2) #define V3_COMMAND_M_MEM_EN BIT(1) #define V3_COMMAND_M_IO_EN BIT(0) /* SYSTEM bits */ #define V3_SYSTEM_M_RST_OUT BIT(15) #define V3_SYSTEM_M_LOCK BIT(14) #define V3_SYSTEM_UNLOCK 0xa05f /* PCI CFG bits */ #define 
V3_PCI_CFG_M_I2O_EN BIT(15) #define V3_PCI_CFG_M_IO_REG_DIS BIT(14) #define V3_PCI_CFG_M_IO_DIS BIT(13) #define V3_PCI_CFG_M_EN3V BIT(12) #define V3_PCI_CFG_M_RETRY_EN BIT(10) #define V3_PCI_CFG_M_AD_LOW1 BIT(9) #define V3_PCI_CFG_M_AD_LOW0 BIT(8) /* * This is the value applied to C/BE[3:1], with bit 0 always held 0 * during DMA access. */ #define V3_PCI_CFG_M_RTYPE_SHIFT 5 #define V3_PCI_CFG_M_WTYPE_SHIFT 1 #define V3_PCI_CFG_TYPE_DEFAULT 0x3 /* PCI BASE bits (PCI -> Local Bus) */ #define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U #define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U #define V3_PCI_BASE_M_PREFETCH BIT(3) #define V3_PCI_BASE_M_TYPE (3 << 1) #define V3_PCI_BASE_M_IO BIT(0) /* PCI MAP bits (PCI -> Local bus) */ #define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U #define V3_PCI_MAP_M_RD_POST_INH BIT(15) #define V3_PCI_MAP_M_ROM_SIZE (3 << 10) #define V3_PCI_MAP_M_SWAP (3 << 8) #define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U #define V3_PCI_MAP_M_REG_EN BIT(1) #define V3_PCI_MAP_M_ENABLE BIT(0) /* LB_BASE0,1 bits (Local bus -> PCI) */ #define V3_LB_BASE_ADR_BASE 0xfff00000U #define V3_LB_BASE_SWAP (3 << 8) #define V3_LB_BASE_ADR_SIZE (15 << 4) #define V3_LB_BASE_PREFETCH BIT(3) #define V3_LB_BASE_ENABLE BIT(0) #define V3_LB_BASE_ADR_SIZE_1MB (0 << 4) #define V3_LB_BASE_ADR_SIZE_2MB (1 << 4) #define V3_LB_BASE_ADR_SIZE_4MB (2 << 4) #define V3_LB_BASE_ADR_SIZE_8MB (3 << 4) #define V3_LB_BASE_ADR_SIZE_16MB (4 << 4) #define V3_LB_BASE_ADR_SIZE_32MB (5 << 4) #define V3_LB_BASE_ADR_SIZE_64MB (6 << 4) #define V3_LB_BASE_ADR_SIZE_128MB (7 << 4) #define V3_LB_BASE_ADR_SIZE_256MB (8 << 4) #define V3_LB_BASE_ADR_SIZE_512MB (9 << 4) #define V3_LB_BASE_ADR_SIZE_1GB (10 << 4) #define V3_LB_BASE_ADR_SIZE_2GB (11 << 4) #define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE) /* LB_MAP0,1 bits (Local bus -> PCI) */ #define V3_LB_MAP_MAP_ADR 0xfff0U #define V3_LB_MAP_TYPE (7 << 1) #define V3_LB_MAP_AD_LOW_EN BIT(0) #define V3_LB_MAP_TYPE_IACK (0 << 1) #define V3_LB_MAP_TYPE_IO (1 << 1) #define V3_LB_MAP_TYPE_MEM (3 << 1) #define V3_LB_MAP_TYPE_CONFIG (5 << 1) #define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1) #define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR) /* LB_BASE2 bits (Local bus -> PCI IO) */ #define V3_LB_BASE2_ADR_BASE 0xff00U #define V3_LB_BASE2_SWAP_AUTO (3 << 6) #define V3_LB_BASE2_ENABLE BIT(0) #define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE) /* LB_MAP2 bits (Local bus -> PCI IO) */ #define V3_LB_MAP2_MAP_ADR 0xff00U #define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR) /* FIFO priority bits */ #define V3_FIFO_PRIO_LOCAL BIT(12) #define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10) #define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11) #define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11)) #define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8) #define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9) #define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9)) #define V3_FIFO_PRIO_PCI BIT(4) #define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2) #define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3) #define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3)) #define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0) #define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1) #define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1)) /* Local bus configuration bits */ #define V3_LB_CFG_LB_TO_64_CYCLES 0x0000 #define V3_LB_CFG_LB_TO_256_CYCLES BIT(13) #define V3_LB_CFG_LB_TO_512_CYCLES BIT(14) #define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14)) #define V3_LB_CFG_LB_RST BIT(12) #define V3_LB_CFG_LB_PPC_RDY BIT(11) #define V3_LB_CFG_LB_LB_INT BIT(10) #define V3_LB_CFG_LB_ERR_EN 
BIT(9) #define V3_LB_CFG_LB_RDY_EN BIT(8) #define V3_LB_CFG_LB_BE_IMODE BIT(7) #define V3_LB_CFG_LB_BE_OMODE BIT(6) #define V3_LB_CFG_LB_ENDIAN BIT(5) #define V3_LB_CFG_LB_PARK_EN BIT(4) #define V3_LB_CFG_LB_FBB_DIS BIT(2) /* ARM Integrator-specific extended control registers */ #define INTEGRATOR_SC_PCI_OFFSET 0x18 #define INTEGRATOR_SC_PCI_ENABLE BIT(0) #define INTEGRATOR_SC_PCI_INTCLR BIT(1) #define INTEGRATOR_SC_LBFADDR_OFFSET 0x20 #define INTEGRATOR_SC_LBFCODE_OFFSET 0x24 struct v3_pci { struct device *dev; void __iomem *base; void __iomem *config_base; u32 config_mem; u32 non_pre_mem; u32 pre_mem; phys_addr_t non_pre_bus_addr; phys_addr_t pre_bus_addr; struct regmap *map; }; /* * The V3 PCI interface chip in Integrator provides several windows from * local bus memory into the PCI memory areas. Unfortunately, there * are not really enough windows for our usage, therefore we reuse * one of the windows for access to PCI configuration space. On the * Integrator/AP, the memory map is as follows: * * Local Bus Memory Usage * * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable * 60000000 - 60FFFFFF PCI IO. 16M * 61000000 - 61FFFFFF PCI Configuration. 16M * * There are three V3 windows, each described by a pair of V3 registers. * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2. * Base0 and Base1 can be used for any type of PCI memory access. Base2 * can be used either for PCI I/O or for I20 accesses. By default, uHAL * uses this only for PCI IO space. * * Normally these spaces are mapped using the following base registers: * * Usage Local Bus Memory Base/Map registers used * * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1 * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 * Cfg 61000000 - 61FFFFFF * * This means that I20 and PCI configuration space accesses will fail. * When PCI configuration accesses are needed (via the uHAL PCI * configuration space primitives) we must remap the spaces as follows: * * Usage Local Bus Memory Base/Map registers used * * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0 * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1 * * To make this work, the code depends on overlapping windows working. * The V3 chip translates an address by checking its range within * each of the BASE/MAP pairs in turn (in ascending register number * order). It will use the first matching pair. So, for example, * if the same address is mapped by both LB_BASE0/LB_MAP0 and * LB_BASE1/LB_MAP1, the V3 will use the translation from * LB_BASE0/LB_MAP0. * * To allow PCI Configuration space access, the code enlarges the * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes * the windows currently mapped by LB_BASE1/LB_MAP1 so that it can * be remapped for use by configuration cycles. * * At the end of the PCI Configuration space accesses, * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to * reveal the now restored LB_BASE1/LB_MAP1 window. * * NOTE: We do not set up I2O mapping. I suspect that this is only * for an intelligent (target) device. Using I2O disables most of * the mappings into PCI memory. 
*/ static void __iomem *v3_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { struct v3_pci *v3 = bus->sysdata; unsigned int address, mapaddress, busnr; busnr = bus->number; if (busnr == 0) { int slot = PCI_SLOT(devfn); /* * local bus segment so need a type 0 config cycle * * build the PCI configuration "address" with one-hot in * A31-A11 * * mapaddress: * 3:1 = config cycle (101) * 0 = PCI A1 & A0 are 0 (0) */ address = PCI_FUNC(devfn) << 8; mapaddress = V3_LB_MAP_TYPE_CONFIG; if (slot > 12) /* * high order bits are handled by the MAP register */ mapaddress |= BIT(slot - 5); else /* * low order bits handled directly in the address */ address |= BIT(slot + 11); } else { /* * not the local bus segment so need a type 1 config cycle * * address: * 23:16 = bus number * 15:11 = slot number (7:3 of devfn) * 10:8 = func number (2:0 of devfn) * * mapaddress: * 3:1 = config cycle (101) * 0 = PCI A1 & A0 from host bus (1) */ mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN; address = (busnr << 16) | (devfn << 8); } /* * Set up base0 to see all 512Mbytes of memory space (not * prefetchable), this frees up base1 for re-use by * configuration memory */ writel(v3_addr_to_lb_base(v3->non_pre_mem) | V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE, v3->base + V3_LB_BASE0); /* * Set up base1/map1 to point into configuration space. * The config mem is always 16MB. */ writel(v3_addr_to_lb_base(v3->config_mem) | V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE, v3->base + V3_LB_BASE1); writew(mapaddress, v3->base + V3_LB_MAP1); return v3->config_base + address + offset; } static void v3_unmap_bus(struct v3_pci *v3) { /* * Reassign base1 for use by prefetchable PCI memory */ writel(v3_addr_to_lb_base(v3->pre_mem) | V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH | V3_LB_BASE_ENABLE, v3->base + V3_LB_BASE1); writew(v3_addr_to_lb_map(v3->pre_bus_addr) | V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */ v3->base + V3_LB_MAP1); /* * And shrink base0 back to a 256M window (NOTE: MAP0 already correct) */ writel(v3_addr_to_lb_base(v3->non_pre_mem) | V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE, v3->base + V3_LB_BASE0); } static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn, int config, int size, u32 *value) { struct v3_pci *v3 = bus->sysdata; int ret; dev_dbg(&bus->dev, "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); ret = pci_generic_config_read(bus, fn, config, size, value); v3_unmap_bus(v3); return ret; } static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn, int config, int size, u32 value) { struct v3_pci *v3 = bus->sysdata; int ret; dev_dbg(&bus->dev, "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); ret = pci_generic_config_write(bus, fn, config, size, value); v3_unmap_bus(v3); return ret; } static struct pci_ops v3_pci_ops = { .map_bus = v3_map_bus, .read = v3_pci_read_config, .write = v3_pci_write_config, }; static irqreturn_t v3_irq(int irq, void *data) { struct v3_pci *v3 = data; struct device *dev = v3->dev; u32 status; status = readw(v3->base + V3_PCI_STAT); if (status & V3_PCI_STAT_PAR_ERR) dev_err(dev, "parity error interrupt\n"); if (status & V3_PCI_STAT_SYS_ERR) dev_err(dev, "system error interrupt\n"); if (status & V3_PCI_STAT_M_ABORT_ERR) dev_err(dev, "master abort error interrupt\n"); if (status & V3_PCI_STAT_T_ABORT_ERR) dev_err(dev, "target abort error interrupt\n"); writew(status, v3->base + V3_PCI_STAT); 
status = readb(v3->base + V3_LB_ISTAT); if (status & V3_LB_ISTAT_MAILBOX) dev_info(dev, "PCI mailbox interrupt\n"); if (status & V3_LB_ISTAT_PCI_RD) dev_err(dev, "PCI target LB->PCI READ abort interrupt\n"); if (status & V3_LB_ISTAT_PCI_WR) dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n"); if (status & V3_LB_ISTAT_PCI_INT) dev_info(dev, "PCI pin interrupt\n"); if (status & V3_LB_ISTAT_PCI_PERR) dev_err(dev, "PCI parity error interrupt\n"); if (status & V3_LB_ISTAT_I2O_QWR) dev_info(dev, "I2O inbound post queue interrupt\n"); if (status & V3_LB_ISTAT_DMA1) dev_info(dev, "DMA channel 1 interrupt\n"); if (status & V3_LB_ISTAT_DMA0) dev_info(dev, "DMA channel 0 interrupt\n"); /* Clear all possible interrupts on the local bus */ writeb(0, v3->base + V3_LB_ISTAT); if (v3->map) regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, INTEGRATOR_SC_PCI_ENABLE | INTEGRATOR_SC_PCI_INTCLR); return IRQ_HANDLED; } static int v3_integrator_init(struct v3_pci *v3) { unsigned int val; v3->map = syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon"); if (IS_ERR(v3->map)) { dev_err(v3->dev, "no syscon\n"); return -ENODEV; } regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val); /* Take the PCI bridge out of reset, clear IRQs */ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, INTEGRATOR_SC_PCI_ENABLE | INTEGRATOR_SC_PCI_INTCLR); if (!(val & INTEGRATOR_SC_PCI_ENABLE)) { /* If we were in reset we need to sleep a bit */ msleep(230); /* Set the physical base for the controller itself */ writel(0x6200, v3->base + V3_LB_IO_BASE); /* Wait for the mailbox to settle after reset */ do { writeb(0xaa, v3->base + V3_MAIL_DATA); writeb(0x55, v3->base + V3_MAIL_DATA + 4); } while (readb(v3->base + V3_MAIL_DATA) != 0xaa && readb(v3->base + V3_MAIL_DATA) != 0x55); } dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n"); return 0; } static int v3_pci_setup_resource(struct v3_pci *v3, struct pci_host_bridge *host, struct resource_entry *win) { struct device *dev = v3->dev; struct resource *mem; struct resource *io; switch (resource_type(win->res)) { case IORESOURCE_IO: io = win->res; /* Setup window 2 - PCI I/O */ writel(v3_addr_to_lb_base2(pci_pio_to_address(io->start)) | V3_LB_BASE2_ENABLE, v3->base + V3_LB_BASE2); writew(v3_addr_to_lb_map2(io->start - win->offset), v3->base + V3_LB_MAP2); break; case IORESOURCE_MEM: mem = win->res; if (mem->flags & IORESOURCE_PREFETCH) { mem->name = "V3 PCI PRE-MEM"; v3->pre_mem = mem->start; v3->pre_bus_addr = mem->start - win->offset; dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n", mem, &v3->pre_bus_addr); if (resource_size(mem) != SZ_256M) { dev_err(dev, "prefetchable memory range is not 256MB\n"); return -EINVAL; } if (v3->non_pre_mem && (mem->start != v3->non_pre_mem + SZ_256M)) { dev_err(dev, "prefetchable memory is not adjacent to non-prefetchable memory\n"); return -EINVAL; } /* Setup window 1 - PCI prefetchable memory */ writel(v3_addr_to_lb_base(v3->pre_mem) | V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH | V3_LB_BASE_ENABLE, v3->base + V3_LB_BASE1); writew(v3_addr_to_lb_map(v3->pre_bus_addr) | V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */ v3->base + V3_LB_MAP1); } else { mem->name = "V3 PCI NON-PRE-MEM"; v3->non_pre_mem = mem->start; v3->non_pre_bus_addr = mem->start - win->offset; dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n", mem, &v3->non_pre_bus_addr); if (resource_size(mem) != SZ_256M) { dev_err(dev, "non-prefetchable memory range is not 256MB\n"); return -EINVAL; } /* Setup window 0 - PCI non-prefetchable 
memory */ writel(v3_addr_to_lb_base(v3->non_pre_mem) | V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE, v3->base + V3_LB_BASE0); writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) | V3_LB_MAP_TYPE_MEM, v3->base + V3_LB_MAP0); } break; case IORESOURCE_BUS: break; default: dev_info(dev, "Unknown resource type %lu\n", resource_type(win->res)); break; } return 0; } static int v3_get_dma_range_config(struct v3_pci *v3, struct resource_entry *entry, u32 *pci_base, u32 *pci_map) { struct device *dev = v3->dev; u64 cpu_addr = entry->res->start; u64 cpu_end = entry->res->end; u64 pci_end = cpu_end - entry->offset; u64 pci_addr = entry->res->start - entry->offset; u32 val; if (pci_addr & ~V3_PCI_BASE_M_ADR_BASE) { dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n"); return -EINVAL; } val = ((u32)pci_addr) & V3_PCI_BASE_M_ADR_BASE; *pci_base = val; if (cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) { dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n"); return -EINVAL; } val = ((u32)cpu_addr) & V3_PCI_MAP_M_MAP_ADR; switch (resource_size(entry->res)) { case SZ_1M: val |= V3_LB_BASE_ADR_SIZE_1MB; break; case SZ_2M: val |= V3_LB_BASE_ADR_SIZE_2MB; break; case SZ_4M: val |= V3_LB_BASE_ADR_SIZE_4MB; break; case SZ_8M: val |= V3_LB_BASE_ADR_SIZE_8MB; break; case SZ_16M: val |= V3_LB_BASE_ADR_SIZE_16MB; break; case SZ_32M: val |= V3_LB_BASE_ADR_SIZE_32MB; break; case SZ_64M: val |= V3_LB_BASE_ADR_SIZE_64MB; break; case SZ_128M: val |= V3_LB_BASE_ADR_SIZE_128MB; break; case SZ_256M: val |= V3_LB_BASE_ADR_SIZE_256MB; break; case SZ_512M: val |= V3_LB_BASE_ADR_SIZE_512MB; break; case SZ_1G: val |= V3_LB_BASE_ADR_SIZE_1GB; break; case SZ_2G: val |= V3_LB_BASE_ADR_SIZE_2GB; break; default: dev_err(v3->dev, "illegal dma memory chunk size\n"); return -EINVAL; } val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE; *pci_map = val; dev_dbg(dev, "DMA MEM CPU: 0x%016llx -> 0x%016llx => " "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n", cpu_addr, cpu_end, pci_addr, pci_end, *pci_base, *pci_map); return 0; } static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3, struct device_node *np) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(v3); struct device *dev = v3->dev; struct resource_entry *entry; int i = 0; resource_list_for_each_entry(entry, &bridge->dma_ranges) { int ret; u32 pci_base, pci_map; ret = v3_get_dma_range_config(v3, entry, &pci_base, &pci_map); if (ret) return ret; if (i == 0) { writel(pci_base, v3->base + V3_PCI_BASE0); writel(pci_map, v3->base + V3_PCI_MAP0); } else if (i == 1) { writel(pci_base, v3->base + V3_PCI_BASE1); writel(pci_map, v3->base + V3_PCI_MAP1); } else { dev_err(dev, "too many ranges, only two supported\n"); dev_err(dev, "range %d ignored\n", i); } i++; } return 0; } static int v3_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct resource *regs; struct resource_entry *win; struct v3_pci *v3; struct pci_host_bridge *host; struct clk *clk; u16 val; int irq; int ret; host = devm_pci_alloc_host_bridge(dev, sizeof(*v3)); if (!host) return -ENOMEM; host->ops = &v3_pci_ops; v3 = pci_host_bridge_priv(host); host->sysdata = v3; v3->dev = dev; /* Get and enable host clock */ clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "clock not found\n"); return PTR_ERR(clk); } ret = clk_prepare_enable(clk); if (ret) { dev_err(dev, "unable to enable clock\n"); return ret; } v3->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs); if (IS_ERR(v3->base)) return PTR_ERR(v3->base); /* * The hardware has 
a register with the physical base address * of the V3 controller itself, verify that this is the same * as the physical memory we've remapped it from. */ if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16)) dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n", readl(v3->base + V3_LB_IO_BASE), regs); /* Configuration space is 16MB directly mapped */ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (resource_size(regs) != SZ_16M) { dev_err(dev, "config mem is not 16MB!\n"); return -EINVAL; } v3->config_mem = regs->start; v3->config_base = devm_ioremap_resource(dev, regs); if (IS_ERR(v3->config_base)) return PTR_ERR(v3->config_base); /* Get and request error IRQ resource */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(dev, irq, v3_irq, 0, "PCIv3 error", v3); if (ret < 0) { dev_err(dev, "unable to request PCIv3 error IRQ %d (%d)\n", irq, ret); return ret; } /* * Unlock V3 registers, but only if they were previously locked. */ if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK) writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM); /* Disable all slave access while we set up the windows */ val = readw(v3->base + V3_PCI_CMD); val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); writew(val, v3->base + V3_PCI_CMD); /* Put the PCI bus into reset */ val = readw(v3->base + V3_SYSTEM); val &= ~V3_SYSTEM_M_RST_OUT; writew(val, v3->base + V3_SYSTEM); /* Retry until we're ready */ val = readw(v3->base + V3_PCI_CFG); val |= V3_PCI_CFG_M_RETRY_EN; writew(val, v3->base + V3_PCI_CFG); /* Set up the local bus protocol */ val = readw(v3->base + V3_LB_CFG); val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */ val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */ val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */ val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */ writew(val, v3->base + V3_LB_CFG); /* Enable the PCI bus master */ val = readw(v3->base + V3_PCI_CMD); val |= PCI_COMMAND_MASTER; writew(val, v3->base + V3_PCI_CMD); /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &host->windows) { ret = v3_pci_setup_resource(v3, host, win); if (ret) { dev_err(dev, "error setting up resources\n"); return ret; } } ret = v3_pci_parse_map_dma_ranges(v3, np); if (ret) return ret; /* * Disable PCI to host IO cycles, enable I/O buffers @3.3V, * set AD_LOW0 to 1 if one of the LB_MAP registers choose * to use this (should be unused). */ writel(0x00000000, v3->base + V3_PCI_IO_BASE); val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS | V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0; /* * DMA read and write from PCI bus commands types */ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT; val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT; writew(val, v3->base + V3_PCI_CFG); /* * Set the V3 FIFO such that writes have higher priority than * reads, and local bus write causes local bus read fifo flush * on aperture 1. Same for PCI. 
*/ writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 | V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 | V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 | V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1, v3->base + V3_FIFO_PRIORITY); /* * Clear any error interrupts, and enable parity and write error * interrupts */ writeb(0, v3->base + V3_LB_ISTAT); val = readw(v3->base + V3_LB_CFG); val |= V3_LB_CFG_LB_LB_INT; writew(val, v3->base + V3_LB_CFG); writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, v3->base + V3_LB_IMASK); /* Special Integrator initialization */ if (of_device_is_compatible(np, "arm,integrator-ap-pci")) { ret = v3_integrator_init(v3); if (ret) return ret; } /* Post-init: enable PCI memory and invalidate (master already on) */ val = readw(v3->base + V3_PCI_CMD); val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE; writew(val, v3->base + V3_PCI_CMD); /* Clear pending interrupts */ writeb(0, v3->base + V3_LB_ISTAT); /* Read or write errors and parity errors cause interrupts */ writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, v3->base + V3_LB_IMASK); /* Take the PCI bus out of reset so devices can initialize */ val = readw(v3->base + V3_SYSTEM); val |= V3_SYSTEM_M_RST_OUT; writew(val, v3->base + V3_SYSTEM); /* * Re-lock the system register. */ val = readw(v3->base + V3_SYSTEM); val |= V3_SYSTEM_M_LOCK; writew(val, v3->base + V3_SYSTEM); return pci_host_probe(host); } static const struct of_device_id v3_pci_of_match[] = { { .compatible = "v3,v360epc-pci", }, {}, }; static struct platform_driver v3_pci_driver = { .driver = { .name = "pci-v3-semi", .of_match_table = v3_pci_of_match, .suppress_bind_attrs = true, }, .probe = v3_pci_probe, }; builtin_platform_driver(v3_pci_driver);
linux-master
drivers/pci/controller/pci-v3-semi.c
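/*
 * Illustration only: a minimal user-space sketch of the window checks made by
 * v3_get_dma_range_config() above.  The 0xfff00000 mask and the 1 MB..2 GB
 * size limits are assumptions taken from the driver's "only bits 31..20
 * allowed" messages and its SZ_1M..SZ_2G switch; the real register encodings
 * live in the V3 header, not here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define V3_ADDR_MASK 0xfff00000u	/* bits 31..20, assumed */

static bool v3_window_ok(uint64_t cpu_addr, uint64_t pci_addr, uint64_t size)
{
	/* Base addresses may only use bits 31..20 */
	if ((cpu_addr & ~(uint64_t)V3_ADDR_MASK) || (pci_addr & ~(uint64_t)V3_ADDR_MASK))
		return false;
	/* Size must be a power of two between 1 MB and 2 GB */
	if (size < (1ULL << 20) || size > (1ULL << 31))
		return false;
	return (size & (size - 1)) == 0;
}

int main(void)
{
	printf("%d\n", v3_window_ok(0x80000000, 0x00000000, 1ULL << 28)); /* 256 MB window: ok */
	printf("%d\n", v3_window_ok(0x80080000, 0x00000000, 1ULL << 28)); /* misaligned base: rejected */
	return 0;
}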
// SPDX-License-Identifier: GPL-2.0+ /* * PCIe host controller driver for NWL PCIe Bridge * Based on pcie-xilinx.c, pci-tegra.c * * (C) Copyright 2014 - 2015, Xilinx, Inc. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> #include <linux/irqchip/chained_irq.h> #include "../pci.h" /* Bridge core config registers */ #define BRCFG_PCIE_RX0 0x00000000 #define BRCFG_PCIE_RX1 0x00000004 #define BRCFG_INTERRUPT 0x00000010 #define BRCFG_PCIE_RX_MSG_FILTER 0x00000020 /* Egress - Bridge translation registers */ #define E_BREG_CAPABILITIES 0x00000200 #define E_BREG_CONTROL 0x00000208 #define E_BREG_BASE_LO 0x00000210 #define E_BREG_BASE_HI 0x00000214 #define E_ECAM_CAPABILITIES 0x00000220 #define E_ECAM_CONTROL 0x00000228 #define E_ECAM_BASE_LO 0x00000230 #define E_ECAM_BASE_HI 0x00000234 /* Ingress - address translations */ #define I_MSII_CAPABILITIES 0x00000300 #define I_MSII_CONTROL 0x00000308 #define I_MSII_BASE_LO 0x00000310 #define I_MSII_BASE_HI 0x00000314 #define I_ISUB_CONTROL 0x000003E8 #define SET_ISUB_CONTROL BIT(0) /* Rxed msg fifo - Interrupt status registers */ #define MSGF_MISC_STATUS 0x00000400 #define MSGF_MISC_MASK 0x00000404 #define MSGF_LEG_STATUS 0x00000420 #define MSGF_LEG_MASK 0x00000424 #define MSGF_MSI_STATUS_LO 0x00000440 #define MSGF_MSI_STATUS_HI 0x00000444 #define MSGF_MSI_MASK_LO 0x00000448 #define MSGF_MSI_MASK_HI 0x0000044C /* Msg filter mask bits */ #define CFG_ENABLE_PM_MSG_FWD BIT(1) #define CFG_ENABLE_INT_MSG_FWD BIT(2) #define CFG_ENABLE_ERR_MSG_FWD BIT(3) #define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \ CFG_ENABLE_INT_MSG_FWD | \ CFG_ENABLE_ERR_MSG_FWD) /* Misc interrupt status mask bits */ #define MSGF_MISC_SR_RXMSG_AVAIL BIT(0) #define MSGF_MISC_SR_RXMSG_OVER BIT(1) #define MSGF_MISC_SR_SLAVE_ERR BIT(4) #define MSGF_MISC_SR_MASTER_ERR BIT(5) #define MSGF_MISC_SR_I_ADDR_ERR BIT(6) #define MSGF_MISC_SR_E_ADDR_ERR BIT(7) #define MSGF_MISC_SR_FATAL_AER BIT(16) #define MSGF_MISC_SR_NON_FATAL_AER BIT(17) #define MSGF_MISC_SR_CORR_AER BIT(18) #define MSGF_MISC_SR_UR_DETECT BIT(20) #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) #define MSGF_MISC_SR_FATAL_DEV BIT(23) #define MSGF_MISC_SR_LINK_DOWN BIT(24) #define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) #define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ MSGF_MISC_SR_RXMSG_OVER | \ MSGF_MISC_SR_SLAVE_ERR | \ MSGF_MISC_SR_MASTER_ERR | \ MSGF_MISC_SR_I_ADDR_ERR | \ MSGF_MISC_SR_E_ADDR_ERR | \ MSGF_MISC_SR_FATAL_AER | \ MSGF_MISC_SR_NON_FATAL_AER | \ MSGF_MISC_SR_CORR_AER | \ MSGF_MISC_SR_UR_DETECT | \ MSGF_MISC_SR_NON_FATAL_DEV | \ MSGF_MISC_SR_FATAL_DEV | \ MSGF_MISC_SR_LINK_DOWN | \ MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ MSGF_MSIC_SR_LINK_BWIDTH) /* Legacy interrupt status mask bits */ #define MSGF_LEG_SR_INTA BIT(0) #define MSGF_LEG_SR_INTB BIT(1) #define MSGF_LEG_SR_INTC BIT(2) #define MSGF_LEG_SR_INTD BIT(3) #define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \ MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD) /* MSI interrupt status mask bits */ #define MSGF_MSI_SR_LO_MASK GENMASK(31, 0) #define MSGF_MSI_SR_HI_MASK GENMASK(31, 0) #define MSII_PRESENT BIT(0) #define MSII_ENABLE BIT(0) #define MSII_STATUS_ENABLE BIT(15) /* Bridge config 
interrupt mask */ #define BRCFG_INTERRUPT_MASK BIT(0) #define BREG_PRESENT BIT(0) #define BREG_ENABLE BIT(0) #define BREG_ENABLE_FORCE BIT(1) /* E_ECAM status mask bits */ #define E_ECAM_PRESENT BIT(0) #define E_ECAM_CR_ENABLE BIT(0) #define E_ECAM_SIZE_LOC GENMASK(20, 16) #define E_ECAM_SIZE_SHIFT 16 #define NWL_ECAM_VALUE_DEFAULT 12 #define CFG_DMA_REG_BAR GENMASK(2, 0) #define CFG_PCIE_CACHE GENMASK(7, 0) #define INT_PCI_MSI_NR (2 * 32) /* Readin the PS_LINKUP */ #define PS_LINKUP_OFFSET 0x00000238 #define PCIE_PHY_LINKUP_BIT BIT(0) #define PHY_RDY_LINKUP_BIT BIT(1) /* Parameters for the waiting for link up routine */ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 #define LINK_WAIT_USLEEP_MAX 100000 struct nwl_msi { /* MSI information */ struct irq_domain *msi_domain; DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR); struct irq_domain *dev_domain; struct mutex lock; /* protect bitmap variable */ int irq_msi0; int irq_msi1; }; struct nwl_pcie { struct device *dev; void __iomem *breg_base; void __iomem *pcireg_base; void __iomem *ecam_base; phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */ phys_addr_t phys_ecam_base; /* Physical Configuration Base */ u32 breg_size; u32 pcie_reg_size; u32 ecam_size; int irq_intx; int irq_misc; u32 ecam_value; u8 last_busno; struct nwl_msi msi; struct irq_domain *legacy_irq_domain; struct clk *clk; raw_spinlock_t leg_mask_lock; }; static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) { return readl(pcie->breg_base + off); } static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off) { writel(val, pcie->breg_base + off); } static bool nwl_pcie_link_up(struct nwl_pcie *pcie) { if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT) return true; return false; } static bool nwl_phy_link_up(struct nwl_pcie *pcie) { if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT) return true; return false; } static int nwl_wait_for_link(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; int retries; /* check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (nwl_phy_link_up(pcie)) return 0; usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } dev_err(dev, "PHY link never came up\n"); return -ETIMEDOUT; } static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { struct nwl_pcie *pcie = bus->sysdata; /* Check link before accessing downstream ports */ if (!pci_is_root_bus(bus)) { if (!nwl_pcie_link_up(pcie)) return false; } else if (devfn > 0) /* Only one device down on each root port */ return false; return true; } /** * nwl_pcie_map_bus - Get configuration base * * @bus: Bus structure of current bus * @devfn: Device/function * @where: Offset from base * * Return: Base address of the configuration space needed to be * accessed. 
*/ static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct nwl_pcie *pcie = bus->sysdata; if (!nwl_pcie_valid_device(bus, devfn)) return NULL; return pcie->ecam_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); } /* PCIe operations */ static struct pci_ops nwl_pcie_ops = { .map_bus = nwl_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) { struct nwl_pcie *pcie = data; struct device *dev = pcie->dev; u32 misc_stat; /* Checking for misc interrupts */ misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & MSGF_MISC_SR_MASKALL; if (!misc_stat) return IRQ_NONE; if (misc_stat & MSGF_MISC_SR_RXMSG_OVER) dev_err(dev, "Received Message FIFO Overflow\n"); if (misc_stat & MSGF_MISC_SR_SLAVE_ERR) dev_err(dev, "Slave error\n"); if (misc_stat & MSGF_MISC_SR_MASTER_ERR) dev_err(dev, "Master error\n"); if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) dev_err(dev, "In Misc Ingress address translation error\n"); if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) dev_err(dev, "In Misc Egress address translation error\n"); if (misc_stat & MSGF_MISC_SR_FATAL_AER) dev_err(dev, "Fatal Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) dev_err(dev, "Non-Fatal Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_CORR_AER) dev_err(dev, "Correctable Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_UR_DETECT) dev_err(dev, "Unsupported request Detected\n"); if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) dev_err(dev, "Non-Fatal Error Detected\n"); if (misc_stat & MSGF_MISC_SR_FATAL_DEV) dev_err(dev, "Fatal Error Detected\n"); if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) dev_info(dev, "Link Bandwidth Management Status bit set\n"); /* Clear misc interrupt status */ nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS); return IRQ_HANDLED; } static void nwl_pcie_leg_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct nwl_pcie *pcie; unsigned long status; u32 bit; chained_irq_enter(chip, desc); pcie = irq_desc_get_handler_data(desc); while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL) != 0) { for_each_set_bit(bit, &status, PCI_NUM_INTX) generic_handle_domain_irq(pcie->legacy_irq_domain, bit); } chained_irq_exit(chip, desc); } static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg) { struct nwl_msi *msi = &pcie->msi; unsigned long status; u32 bit; while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) { for_each_set_bit(bit, &status, 32) { nwl_bridge_writel(pcie, 1 << bit, status_reg); generic_handle_domain_irq(msi->dev_domain, bit); } } } static void nwl_pcie_msi_handler_high(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); chained_irq_enter(chip, desc); nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI); chained_irq_exit(chip, desc); } static void nwl_pcie_msi_handler_low(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); chained_irq_enter(chip, desc); nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO); chained_irq_exit(chip, desc); } static void nwl_mask_leg_irq(struct irq_data *data) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; u32 mask; u32 val; mask = 1 << (data->hwirq - 
1); raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); } static void nwl_unmask_leg_irq(struct irq_data *data) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; u32 mask; u32 val; mask = 1 << (data->hwirq - 1); raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); } static struct irq_chip nwl_leg_irq_chip = { .name = "nwl_pcie:legacy", .irq_enable = nwl_unmask_leg_irq, .irq_disable = nwl_mask_leg_irq, .irq_mask = nwl_mask_leg_irq, .irq_unmask = nwl_unmask_leg_irq, }; static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); irq_set_status_flags(irq, IRQ_LEVEL); return 0; } static const struct irq_domain_ops legacy_domain_ops = { .map = nwl_legacy_map, .xlate = pci_irqd_intx_xlate, }; #ifdef CONFIG_PCI_MSI static struct irq_chip nwl_msi_irq_chip = { .name = "nwl_pcie:msi", .irq_enable = pci_msi_unmask_irq, .irq_disable = pci_msi_mask_irq, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info nwl_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI), .chip = &nwl_msi_irq_chip, }; #endif static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); phys_addr_t msi_addr = pcie->phys_pcie_reg_base; msg->address_lo = lower_32_bits(msi_addr); msg->address_hi = upper_32_bits(msi_addr); msg->data = data->hwirq; } static int nwl_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static struct irq_chip nwl_irq_chip = { .name = "Xilinx MSI", .irq_compose_msi_msg = nwl_compose_msi_msg, .irq_set_affinity = nwl_msi_set_affinity, }; static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct nwl_pcie *pcie = domain->host_data; struct nwl_msi *msi = &pcie->msi; int bit; int i; mutex_lock(&msi->lock); bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, get_count_order(nr_irqs)); if (bit < 0) { mutex_unlock(&msi->lock); return -ENOSPC; } for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); } mutex_unlock(&msi->lock); return 0; } static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *data = irq_domain_get_irq_data(domain, virq); struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); struct nwl_msi *msi = &pcie->msi; mutex_lock(&msi->lock); bitmap_release_region(msi->bitmap, data->hwirq, get_count_order(nr_irqs)); mutex_unlock(&msi->lock); } static const struct irq_domain_ops dev_msi_domain_ops = { .alloc = nwl_irq_domain_alloc, .free = nwl_irq_domain_free, }; static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) { #ifdef CONFIG_PCI_MSI struct device *dev = pcie->dev; struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct nwl_msi *msi = &pcie->msi; msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie); if (!msi->dev_domain) 
{ dev_err(dev, "failed to create dev IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &nwl_msi_domain_info, msi->dev_domain); if (!msi->msi_domain) { dev_err(dev, "failed to create msi IRQ domain\n"); irq_domain_remove(msi->dev_domain); return -ENOMEM; } #endif return 0; } static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *node = dev->of_node; struct device_node *legacy_intc_node; legacy_intc_node = of_get_next_child(node, NULL); if (!legacy_intc_node) { dev_err(dev, "No legacy intc node found\n"); return -EINVAL; } pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, PCI_NUM_INTX, &legacy_domain_ops, pcie); of_node_put(legacy_intc_node); if (!pcie->legacy_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } raw_spin_lock_init(&pcie->leg_mask_lock); nwl_pcie_init_msi_irq_domain(pcie); return 0; } static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct nwl_msi *msi = &pcie->msi; unsigned long base; int ret; mutex_init(&msi->lock); /* Get msi_1 IRQ number */ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); if (msi->irq_msi1 < 0) return -EINVAL; irq_set_chained_handler_and_data(msi->irq_msi1, nwl_pcie_msi_handler_high, pcie); /* Get msi_0 IRQ number */ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); if (msi->irq_msi0 < 0) return -EINVAL; irq_set_chained_handler_and_data(msi->irq_msi0, nwl_pcie_msi_handler_low, pcie); /* Check for msii_present bit */ ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT; if (!ret) { dev_err(dev, "MSI not present\n"); return -EIO; } /* Enable MSII */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | MSII_ENABLE, I_MSII_CONTROL); /* Enable MSII status */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | MSII_STATUS_ENABLE, I_MSII_CONTROL); /* setup AFI/FPCI range */ base = pcie->phys_pcie_reg_base; nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO); nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI); /* * For high range MSI interrupts: disable, clear any pending, * and enable */ nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI); nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) & MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI); nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI); /* * For low range MSI interrupts: disable, clear any pending, * and enable */ nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO); nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) & MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO); nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO); return 0; } static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); u32 breg_val, ecam_val, first_busno = 0; int err; breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT; if (!breg_val) { dev_err(dev, "BREG is not present\n"); return breg_val; } /* Write bridge_off to breg base */ nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base), E_BREG_BASE_LO); nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base), E_BREG_BASE_HI); /* Enable BREG */ nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE, E_BREG_CONTROL); /* Disable DMA channel registers */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) | CFG_DMA_REG_BAR, BRCFG_PCIE_RX0); /* Enable 
Ingress subtractive decode translation */ nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL); /* Enable msg filtering details */ nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK, BRCFG_PCIE_RX_MSG_FILTER); /* This routes the PCIe DMA traffic to go through CCI path */ if (of_dma_is_coherent(dev->of_node)) nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX1) | CFG_PCIE_CACHE, BRCFG_PCIE_RX1); err = nwl_wait_for_link(pcie); if (err) return err; ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT; if (!ecam_val) { dev_err(dev, "ECAM is not present\n"); return ecam_val; } /* Enable ECAM */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | E_ECAM_CR_ENABLE, E_ECAM_CONTROL); nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | (pcie->ecam_value << E_ECAM_SIZE_SHIFT), E_ECAM_CONTROL); nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), E_ECAM_BASE_LO); nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base), E_ECAM_BASE_HI); /* Get bus range */ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL); pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT; /* Write primary, secondary and subordinate bus numbers */ ecam_val = first_busno; ecam_val |= (first_busno + 1) << 8; ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT); writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS)); if (nwl_pcie_link_up(pcie)) dev_info(dev, "Link is UP\n"); else dev_info(dev, "Link is DOWN\n"); /* Get misc IRQ number */ pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); if (pcie->irq_misc < 0) return -EINVAL; err = devm_request_irq(dev, pcie->irq_misc, nwl_pcie_misc_handler, IRQF_SHARED, "nwl_pcie:misc", pcie); if (err) { dev_err(dev, "fail to register misc IRQ#%d\n", pcie->irq_misc); return err; } /* Disable all misc interrupts */ nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); /* Clear pending misc interrupts */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS); /* Enable all misc interrupts */ nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); /* Disable all legacy interrupts */ nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); /* Clear pending legacy interrupts */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); /* Enable all legacy interrupts */ nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); /* Enable the bridge config interrupt */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) | BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT); return 0; } static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, struct platform_device *pdev) { struct device *dev = pcie->dev; struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); pcie->breg_base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->breg_base)) return PTR_ERR(pcie->breg_base); pcie->phys_breg_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg"); pcie->pcireg_base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->pcireg_base)) return PTR_ERR(pcie->pcireg_base); pcie->phys_pcie_reg_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->ecam_base)) return PTR_ERR(pcie->ecam_base); pcie->phys_ecam_base = res->start; /* Get intx IRQ number */ pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); if (pcie->irq_intx < 0) return 
pcie->irq_intx; irq_set_chained_handler_and_data(pcie->irq_intx, nwl_pcie_leg_handler, pcie); return 0; } static const struct of_device_id nwl_pcie_of_match[] = { { .compatible = "xlnx,nwl-pcie-2.11", }, {} }; static int nwl_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct nwl_pcie *pcie; struct pci_host_bridge *bridge; int err; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENODEV; pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; err = nwl_pcie_parse_dt(pcie, pdev); if (err) { dev_err(dev, "Parsing DT failed\n"); return err; } pcie->clk = devm_clk_get(dev, NULL); if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); err = clk_prepare_enable(pcie->clk); if (err) { dev_err(dev, "can't enable PCIe ref clock\n"); return err; } err = nwl_pcie_bridge_init(pcie); if (err) { dev_err(dev, "HW Initialization failed\n"); return err; } err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); return err; } bridge->sysdata = pcie; bridge->ops = &nwl_pcie_ops; if (IS_ENABLED(CONFIG_PCI_MSI)) { err = nwl_pcie_enable_msi(pcie); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); return err; } } return pci_host_probe(bridge); } static struct platform_driver nwl_pcie_driver = { .driver = { .name = "nwl-pcie", .suppress_bind_attrs = true, .of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, }; builtin_platform_driver(nwl_pcie_driver);
linux-master
drivers/pci/controller/pcie-xilinx-nwl.c
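/*
 * Illustration only: how an ECAM-style config offset is formed.  The driver
 * above relies on PCIE_ECAM_OFFSET() from <linux/pci-ecam.h>; this user-space
 * sketch just reproduces the conventional PCIe ECAM layout
 * (bus << 20 | device << 15 | function << 12 | register) so the map_bus()
 * arithmetic is easier to follow.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t where)
{
	return ((uint32_t)bus << 20) | ((uint32_t)(dev & 0x1f) << 15) |
	       ((uint32_t)(fn & 0x7) << 12) | (where & 0xfff);
}

int main(void)
{
	/* bus 1, device 0, function 0, vendor ID register at offset 0 */
	printf("0x%06x\n", ecam_offset(1, 0, 0, 0x00));	/* 0x100000 */
	/* bus 2, device 3, function 1, offset 0x10 (BAR0) */
	printf("0x%06x\n", ecam_offset(2, 3, 1, 0x10));	/* 0x219010 */
	return 0;
}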
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) Microsoft Corporation. * * Author: * Haiyang Zhang <[email protected]> * * This small module is a helper driver that allows other drivers to * have a common interface with the Hyper-V PCI frontend driver. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/hyperv.h> struct hyperv_pci_block_ops hvpci_block_ops; EXPORT_SYMBOL_GPL(hvpci_block_ops); int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, unsigned int block_id, unsigned int *bytes_returned) { if (!hvpci_block_ops.read_block) return -EOPNOTSUPP; return hvpci_block_ops.read_block(dev, buf, buf_len, block_id, bytes_returned); } EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk); int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, unsigned int block_id) { if (!hvpci_block_ops.write_block) return -EOPNOTSUPP; return hvpci_block_ops.write_block(dev, buf, len, block_id); } EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk); int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, void (*block_invalidate)(void *context, u64 block_mask)) { if (!hvpci_block_ops.reg_blk_invalidate) return -EOPNOTSUPP; return hvpci_block_ops.reg_blk_invalidate(dev, context, block_invalidate); } EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate); static void __exit exit_hv_pci_intf(void) { } static int __init init_hv_pci_intf(void) { return 0; } module_init(init_hv_pci_intf); module_exit(exit_hv_pci_intf); MODULE_DESCRIPTION("Hyper-V PCI Interface"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/pci-hyperv-intf.c
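/*
 * Illustration only: the indirection pattern this helper implements.  A
 * globally visible ops structure starts out empty; the wrappers return
 * -EOPNOTSUPP until some other component fills in the callbacks.  The names
 * below (blk_ops, demo_read_blk, backend_read) are invented for the sketch
 * and are not part of the Hyper-V interface.
 */
#include <errno.h>
#include <stdio.h>

struct blk_ops {
	int (*read_block)(unsigned int block_id, void *buf, unsigned int len);
};

static struct blk_ops blk_ops;	/* zero-initialized: no backend registered yet */

static int demo_read_blk(unsigned int block_id, void *buf, unsigned int len)
{
	if (!blk_ops.read_block)
		return -EOPNOTSUPP;
	return blk_ops.read_block(block_id, buf, len);
}

static int backend_read(unsigned int block_id, void *buf, unsigned int len)
{
	(void)block_id; (void)buf; (void)len;
	return 0;	/* pretend the frontend answered */
}

int main(void)
{
	char buf[16];

	printf("%d\n", demo_read_blk(1, buf, sizeof(buf)));	/* -EOPNOTSUPP */
	blk_ops.read_block = backend_read;			/* backend registers itself */
	printf("%d\n", demo_read_blk(1, buf, sizeof(buf)));	/* 0 */
	return 0;
}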
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Mobiveil PCIe Host controller * * Copyright (c) 2018 Mobiveil Inc. * Copyright 2019 NXP * * Author: Subrahmanya Lingappa <[email protected]> * Hou Zhiqiang <[email protected]> */ #include <linux/delay.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/platform_device.h> #include "pcie-mobiveil.h" /* * mobiveil_pcie_sel_page - routine to access paged register * * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged, * for this scheme to work extracted higher 6 bits of the offset will be * written to pg_sel field of PAB_CTRL register and rest of the lower 10 * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register. */ static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx) { u32 val; val = readl(pcie->csr_axi_slave_base + PAB_CTRL); val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT); val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT; writel(val, pcie->csr_axi_slave_base + PAB_CTRL); } static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off) { if (off < PAGED_ADDR_BNDRY) { /* For directly accessed registers, clear the pg_sel field */ mobiveil_pcie_sel_page(pcie, 0); return pcie->csr_axi_slave_base + off; } mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off)); return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off); } static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val) { if ((uintptr_t)addr & (size - 1)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } switch (size) { case 4: *val = readl(addr); break; case 2: *val = readw(addr); break; case 1: *val = readb(addr); break; default: *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val) { if ((uintptr_t)addr & (size - 1)) return PCIBIOS_BAD_REGISTER_NUMBER; switch (size) { case 4: writel(val, addr); break; case 2: writew(val, addr); break; case 1: writeb(val, addr); break; default: return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size) { void __iomem *addr; u32 val; int ret; addr = mobiveil_pcie_comp_addr(pcie, off); ret = mobiveil_pcie_read(addr, size, &val); if (ret) dev_err(&pcie->pdev->dev, "read CSR address failed\n"); return val; } void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size) { void __iomem *addr; int ret; addr = mobiveil_pcie_comp_addr(pcie, off); ret = mobiveil_pcie_write(addr, size, val); if (ret) dev_err(&pcie->pdev->dev, "write CSR address failed\n"); } bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) { if (pcie->ops->link_up) return pcie->ops->link_up(pcie); return (mobiveil_csr_readl(pcie, LTSSM_STATUS) & LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; } void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr, u64 pci_addr, u32 type, u64 size) { u32 value; u64 size64 = ~(size - 1); if (win_num >= pcie->ppio_wins) { dev_err(&pcie->pdev->dev, "ERROR: max inbound windows reached !\n"); return; } value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num)); value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK); value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT | (lower_32_bits(size64) & WIN_SIZE_MASK); mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num)); mobiveil_csr_writel(pcie, upper_32_bits(size64), PAB_EXT_PEX_AMAP_SIZEN(win_num)); mobiveil_csr_writel(pcie, 
lower_32_bits(cpu_addr), PAB_PEX_AMAP_AXI_WIN(win_num)); mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), PAB_EXT_PEX_AMAP_AXI_WIN(win_num)); mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), PAB_PEX_AMAP_PEX_WIN_L(win_num)); mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), PAB_PEX_AMAP_PEX_WIN_H(win_num)); pcie->ib_wins_configured++; } /* * routine to program the outbound windows */ void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr, u64 pci_addr, u32 type, u64 size) { u32 value; u64 size64 = ~(size - 1); if (win_num >= pcie->apio_wins) { dev_err(&pcie->pdev->dev, "ERROR: max outbound windows reached !\n"); return; } /* * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit * to 4 KB in PAB_AXI_AMAP_CTRL register */ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK); value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | (lower_32_bits(size64) & WIN_SIZE_MASK); mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num)); mobiveil_csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num)); /* * program AXI window base with appropriate value in * PAB_AXI_AMAP_AXI_WIN0 register */ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK), PAB_AXI_AMAP_AXI_WIN(win_num)); mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr), PAB_EXT_AXI_AMAP_AXI_WIN(win_num)); mobiveil_csr_writel(pcie, lower_32_bits(pci_addr), PAB_AXI_AMAP_PEX_WIN_L(win_num)); mobiveil_csr_writel(pcie, upper_32_bits(pci_addr), PAB_AXI_AMAP_PEX_WIN_H(win_num)); pcie->ob_wins_configured++; } int mobiveil_bringup_link(struct mobiveil_pcie *pcie) { int retries; /* check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (mobiveil_pcie_link_up(pcie)) return 0; usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); } dev_err(&pcie->pdev->dev, "link never came up\n"); return -ETIMEDOUT; }
linux-master
drivers/pci/controller/mobiveil/pcie-mobiveil.c
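/*
 * Illustration only: the paged CSR addressing described above
 * mobiveil_pcie_sel_page().  Offsets below 0xc00 are reached directly; for
 * anything higher the upper six bits select a page and the low ten bits are
 * used inside a window that starts at 0xc00.  The exact mask values are an
 * assumption based on that comment; the authoritative macros live in
 * pcie-mobiveil.h.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGED_ADDR_BNDRY 0xc00u

static void paged_addr(uint32_t off, uint32_t *pg_idx, uint32_t *pg_off)
{
	if (off < PAGED_ADDR_BNDRY) {
		*pg_idx = 0;		/* direct access, page select cleared */
		*pg_off = off;
		return;
	}
	*pg_idx = (off >> 10) & 0x3f;			/* upper 6 bits -> pg_sel */
	*pg_off = (off & 0x3ff) | PAGED_ADDR_BNDRY;	/* low 10 bits inside the window */
}

int main(void)
{
	uint32_t idx, off;

	paged_addr(0x0404, &idx, &off);
	printf("page %u offset 0x%03x\n", idx, off);	/* page 0, 0x404 (direct) */
	paged_addr(0x10a8, &idx, &off);
	printf("page %u offset 0x%03x\n", idx, off);	/* page 4, 0xca8 */
	return 0;
}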
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Mobiveil PCIe Host controller * * Copyright (c) 2018 Mobiveil Inc. * Copyright 2019 NXP * * Author: Subrahmanya Lingappa <[email protected]> * Hou Zhiqiang <[email protected]> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "pcie-mobiveil.h" static int mobiveil_pcie_probe(struct platform_device *pdev) { struct mobiveil_pcie *pcie; struct pci_host_bridge *bridge; struct device *dev = &pdev->dev; /* allocate the PCIe port */ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); pcie->rp.bridge = bridge; pcie->pdev = pdev; return mobiveil_pcie_host_probe(pcie); } static const struct of_device_id mobiveil_pcie_of_match[] = { {.compatible = "mbvl,gpex40-pcie",}, {}, }; MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); static struct platform_driver mobiveil_pcie_driver = { .probe = mobiveil_pcie_probe, .driver = { .name = "mobiveil-pcie", .of_match_table = mobiveil_pcie_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(mobiveil_pcie_driver); MODULE_DESCRIPTION("Mobiveil PCIe host controller driver"); MODULE_AUTHOR("Subrahmanya Lingappa <[email protected]>");
linux-master
drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Mobiveil PCIe Host controller * * Copyright (c) 2018 Mobiveil Inc. * Copyright 2019-2020 NXP * * Author: Subrahmanya Lingappa <[email protected]> * Hou Zhiqiang <[email protected]> */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "pcie-mobiveil.h" static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { /* Only one device down on each root port */ if (pci_is_root_bus(bus) && (devfn > 0)) return false; /* * Do not read more than one device on the bus directly * attached to RC */ if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) && (PCI_SLOT(devfn) > 0)) return false; return true; } /* * mobiveil_pcie_map_bus - routine to get the configuration base of either * root port or endpoint */ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mobiveil_pcie *pcie = bus->sysdata; struct mobiveil_root_port *rp = &pcie->rp; u32 value; if (!mobiveil_pcie_valid_device(bus, devfn)) return NULL; /* RC config access */ if (pci_is_root_bus(bus)) return pcie->csr_axi_slave_base + where; /* * EP config access (in Config/APIO space) * Program PEX Address base (31..16 bits) with appropriate value * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register. * Relies on pci_lock serialization */ value = bus->number << PAB_BUS_SHIFT | PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT; mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); return rp->config_axi_slave_base + where; } static struct pci_ops mobiveil_pcie_ops = { .map_bus = mobiveil_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static void mobiveil_pcie_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc); struct device *dev = &pcie->pdev->dev; struct mobiveil_root_port *rp = &pcie->rp; struct mobiveil_msi *msi = &rp->msi; u32 msi_data, msi_addr_lo, msi_addr_hi; u32 intr_status, msi_status; unsigned long shifted_status; u32 bit, val, mask; /* * The core provides a single interrupt for both INTx/MSI messages. 
* So we'll read both INTx and MSI status */ chained_irq_enter(chip, desc); /* read INTx status */ val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); intr_status = val & mask; /* Handle INTx */ if (intr_status & PAB_INTP_INTX_MASK) { shifted_status = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); shifted_status &= PAB_INTP_INTX_MASK; shifted_status >>= PAB_INTX_START; do { for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { int ret; ret = generic_handle_domain_irq(rp->intx_domain, bit + 1); if (ret) dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit); /* clear interrupt handled */ mobiveil_csr_writel(pcie, 1 << (PAB_INTX_START + bit), PAB_INTP_AMBA_MISC_STAT); } shifted_status = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); shifted_status &= PAB_INTP_INTX_MASK; shifted_status >>= PAB_INTX_START; } while (shifted_status != 0); } /* read extra MSI status register */ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET); /* handle MSI interrupts */ while (msi_status & 1) { msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET); /* * MSI_STATUS_OFFSET register gets updated to zero * once we pop not only the MSI data but also address * from MSI hardware FIFO. So keeping these following * two dummy reads. */ msi_addr_lo = readl_relaxed(pcie->apb_csr_base + MSI_ADDR_L_OFFSET); msi_addr_hi = readl_relaxed(pcie->apb_csr_base + MSI_ADDR_H_OFFSET); dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n", msi_data, msi_addr_hi, msi_addr_lo); generic_handle_domain_irq(msi->dev_domain, msi_data); msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET); } /* Clear the interrupt status */ mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT); chained_irq_exit(chip, desc); } static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct platform_device *pdev = pcie->pdev; struct device_node *node = dev->of_node; struct mobiveil_root_port *rp = &pcie->rp; struct resource *res; /* map config resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config_axi_slave"); rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(rp->config_axi_slave_base)) return PTR_ERR(rp->config_axi_slave_base); rp->ob_io_res = res; /* map csr resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr_axi_slave"); pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->csr_axi_slave_base)) return PTR_ERR(pcie->csr_axi_slave_base); pcie->pcie_reg_base = res->start; /* read the number of windows requested */ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins)) pcie->apio_wins = MAX_PIO_WINDOWS; if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins)) pcie->ppio_wins = MAX_PIO_WINDOWS; return 0; } static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) { phys_addr_t msg_addr = pcie->pcie_reg_base; struct mobiveil_msi *msi = &pcie->rp.msi; msi->num_of_vectors = PCI_NUM_MSI; msi->msi_pages_phys = (phys_addr_t)msg_addr; writel_relaxed(lower_32_bits(msg_addr), pcie->apb_csr_base + MSI_BASE_LO_OFFSET); writel_relaxed(upper_32_bits(msg_addr), pcie->apb_csr_base + MSI_BASE_HI_OFFSET); writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET); writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); } int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit) { struct mobiveil_root_port *rp = &pcie->rp; struct pci_host_bridge *bridge = rp->bridge; u32 value, 
pab_ctrl, type; struct resource_entry *win; pcie->ib_wins_configured = 0; pcie->ob_wins_configured = 0; if (!reinit) { /* setup bus numbers */ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS); value &= 0xff000000; value |= 0x00ff0100; mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS); } /* * program Bus Master Enable Bit in Command Register in PAB Config * Space */ value = mobiveil_csr_readl(pcie, PCI_COMMAND); value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; mobiveil_csr_writel(pcie, value, PCI_COMMAND); /* * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL * register */ pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL); pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT); mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL); /* * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in * PAB_AXI_PIO_CTRL Register */ value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL); value |= APIO_EN_MASK; mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL); /* Enable PCIe PIO master */ value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL); value |= 1 << PIO_ENABLE_SHIFT; mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL); /* * we'll program one outbound window for config reads and * another default inbound window for all the upstream traffic * rest of the outbound windows will be configured according to * the "ranges" field defined in device tree */ /* config outbound translation window */ program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0, CFG_WINDOW_TYPE, resource_size(rp->ob_io_res)); /* memory inbound translation window */ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &bridge->windows) { if (resource_type(win->res) == IORESOURCE_MEM) type = MEM_WINDOW_TYPE; else if (resource_type(win->res) == IORESOURCE_IO) type = IO_WINDOW_TYPE; else continue; /* configure outbound translation window */ program_ob_windows(pcie, pcie->ob_wins_configured, win->res->start, win->res->start - win->offset, type, resource_size(win->res)); } /* fixup for PCIe class register */ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); value &= 0xff; value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); return 0; } static void mobiveil_mask_intx_irq(struct irq_data *data) { struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data); struct mobiveil_root_port *rp; unsigned long flags; u32 mask, shifted_val; rp = &pcie->rp; mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&rp->intx_mask_lock, flags); shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); shifted_val &= ~mask; mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags); } static void mobiveil_unmask_intx_irq(struct irq_data *data) { struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data); struct mobiveil_root_port *rp; unsigned long flags; u32 shifted_val, mask; rp = &pcie->rp; mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); raw_spin_lock_irqsave(&rp->intx_mask_lock, flags); shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); shifted_val |= mask; mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB); raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags); } static struct irq_chip intx_irq_chip = { .name = "mobiveil_pcie:intx", .irq_enable = mobiveil_unmask_intx_irq, .irq_disable = mobiveil_mask_intx_irq, .irq_mask = 
mobiveil_mask_intx_irq, .irq_unmask = mobiveil_unmask_intx_irq, }; /* routine to setup the INTx related data */ static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); return 0; } /* INTx domain operations structure */ static const struct irq_domain_ops intx_domain_ops = { .map = mobiveil_pcie_intx_map, }; static struct irq_chip mobiveil_msi_irq_chip = { .name = "Mobiveil PCIe MSI", .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info mobiveil_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX), .chip = &mobiveil_msi_irq_chip, }; static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data); phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int)); msg->address_lo = lower_32_bits(addr); msg->address_hi = upper_32_bits(addr); msg->data = data->hwirq; dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)data->hwirq, msg->address_hi, msg->address_lo); } static int mobiveil_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static struct irq_chip mobiveil_msi_bottom_irq_chip = { .name = "Mobiveil MSI", .irq_compose_msi_msg = mobiveil_compose_msi_msg, .irq_set_affinity = mobiveil_msi_set_affinity, }; static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct mobiveil_pcie *pcie = domain->host_data; struct mobiveil_msi *msi = &pcie->rp.msi; unsigned long bit; WARN_ON(nr_irqs != 1); mutex_lock(&msi->lock); bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors); if (bit >= msi->num_of_vectors) { mutex_unlock(&msi->lock); return -ENOSPC; } set_bit(bit, msi->msi_irq_in_use); mutex_unlock(&msi->lock); irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip, domain->host_data, handle_level_irq, NULL, NULL); return 0; } static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); struct mobiveil_msi *msi = &pcie->rp.msi; mutex_lock(&msi->lock); if (!test_bit(d->hwirq, msi->msi_irq_in_use)) dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n", d->hwirq); else __clear_bit(d->hwirq, msi->msi_irq_in_use); mutex_unlock(&msi->lock); } static const struct irq_domain_ops msi_domain_ops = { .alloc = mobiveil_irq_msi_domain_alloc, .free = mobiveil_irq_msi_domain_free, }; static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct mobiveil_msi *msi = &pcie->rp.msi; mutex_init(&msi->lock); msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, &msi_domain_ops, pcie); if (!msi->dev_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mobiveil_msi_domain_info, msi->dev_domain); if (!msi->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); irq_domain_remove(msi->dev_domain); return -ENOMEM; } return 0; } static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct 
device_node *node = dev->of_node; struct mobiveil_root_port *rp = &pcie->rp; /* setup INTx */ rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, &intx_domain_ops, pcie); if (!rp->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; } raw_spin_lock_init(&rp->intx_mask_lock); /* setup MSI */ return mobiveil_allocate_msi_domains(pcie); } static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie) { struct platform_device *pdev = pcie->pdev; struct device *dev = &pdev->dev; struct mobiveil_root_port *rp = &pcie->rp; struct resource *res; int ret; /* map MSI config resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr"); pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie->apb_csr_base)) return PTR_ERR(pcie->apb_csr_base); /* setup MSI hardware registers */ mobiveil_pcie_enable_msi(pcie); rp->irq = platform_get_irq(pdev, 0); if (rp->irq < 0) return rp->irq; /* initialize the IRQ domains */ ret = mobiveil_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed creating IRQ Domain\n"); return ret; } irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie); /* Enable interrupts */ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), PAB_INTP_AMBA_MISC_ENB); return 0; } static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie) { struct mobiveil_root_port *rp = &pcie->rp; if (rp->ops->interrupt_init) return rp->ops->interrupt_init(pcie); return mobiveil_pcie_integrated_interrupt_init(pcie); } static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie) { u32 header_type; header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE); header_type &= 0x7f; return header_type == PCI_HEADER_TYPE_BRIDGE; } int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) { struct mobiveil_root_port *rp = &pcie->rp; struct pci_host_bridge *bridge = rp->bridge; struct device *dev = &pcie->pdev->dev; int ret; ret = mobiveil_pcie_parse_dt(pcie); if (ret) { dev_err(dev, "Parsing DT failed, ret: %x\n", ret); return ret; } if (!mobiveil_pcie_is_bridge(pcie)) return -ENODEV; /* * configure all inbound and outbound windows and prepare the RC for * config access */ ret = mobiveil_host_init(pcie, false); if (ret) { dev_err(dev, "Failed to initialize host\n"); return ret; } ret = mobiveil_pcie_interrupt_init(pcie); if (ret) { dev_err(dev, "Interrupt init failed\n"); return ret; } /* Initialize bridge */ bridge->sysdata = pcie; bridge->ops = &mobiveil_pcie_ops; ret = mobiveil_bringup_link(pcie); if (ret) { dev_info(dev, "link bring-up failed\n"); return ret; } return pci_host_probe(bridge); }
linux-master
drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
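/*
 * Illustration only: how mobiveil_compose_msi_msg() above splits the doorbell
 * address and encodes the vector.  Each hwirq gets a distinct address
 * (base + hwirq * sizeof(int)) and the hwirq number itself is used as the MSI
 * data payload.  Plain user-space arithmetic, no kernel structures involved.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_msi_msg {
	uint32_t address_lo;
	uint32_t address_hi;
	uint32_t data;
};

static void compose_msi(uint64_t doorbell_base, unsigned int hwirq,
			struct demo_msi_msg *msg)
{
	uint64_t addr = doorbell_base + hwirq * sizeof(int);

	msg->address_lo = (uint32_t)(addr & 0xffffffffu);
	msg->address_hi = (uint32_t)(addr >> 32);
	msg->data = hwirq;
}

int main(void)
{
	struct demo_msi_msg msg;

	compose_msi(0x10f0000000ull, 5, &msg);
	printf("addr %08x:%08x data %u\n", msg.address_hi, msg.address_lo, msg.data);
	return 0;
}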
// SPDX-License-Identifier: GPL-2.0 /* * PCIe Gen4 host controller driver for NXP Layerscape SoCs * * Copyright 2019-2020 NXP * * Author: Zhiqiang Hou <[email protected]> */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include "pcie-mobiveil.h" /* LUT and PF control registers */ #define PCIE_LUT_OFF 0x80000 #define PCIE_PF_OFF 0xc0000 #define PCIE_PF_INT_STAT 0x18 #define PF_INT_STAT_PABRST BIT(31) #define PCIE_PF_DBG 0x7fc #define PF_DBG_LTSSM_MASK 0x3f #define PF_DBG_LTSSM_L0 0x2d /* L0 state */ #define PF_DBG_WE BIT(31) #define PF_DBG_PABR BIT(27) #define to_ls_g4_pcie(x) platform_get_drvdata((x)->pdev) struct ls_g4_pcie { struct mobiveil_pcie pci; struct delayed_work dwork; int irq; }; static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off) { return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); } static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie, u32 off, u32 val) { iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); } static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci) { struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci); u32 state; state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); state = state & PF_DBG_LTSSM_MASK; if (state == PF_DBG_LTSSM_L0) return 1; return 0; } static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie) { struct mobiveil_pcie *mv_pci = &pcie->pci; mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB); } static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie) { struct mobiveil_pcie *mv_pci = &pcie->pci; u32 val; /* Clear the interrupt status */ mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT); val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET | PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC; mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB); } static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie) { struct mobiveil_pcie *mv_pci = &pcie->pci; struct device *dev = &mv_pci->pdev->dev; u32 val, act_stat; int to = 100; /* Poll for pab_csb_reset to set and PAB activity to clear */ do { usleep_range(10, 15); val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT); act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT); } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--); if (to < 0) { dev_err(dev, "Poll PABRST&PABACT timeout\n"); return -EIO; } /* clear PEX_RESET bit in PEX_PF0_DBG register */ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); val |= PF_DBG_WE; ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val); val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); val |= PF_DBG_PABR; ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val); val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); val &= ~PF_DBG_WE; ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val); mobiveil_host_init(mv_pci, true); to = 100; while (!ls_g4_pcie_link_up(mv_pci) && to--) usleep_range(200, 250); if (to < 0) { dev_err(dev, "PCIe link training timeout\n"); return -EIO; } return 0; } static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id) { struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id; struct mobiveil_pcie *mv_pci = &pcie->pci; u32 val; val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT); if (!val) return IRQ_NONE; if (val & PAB_INTP_RESET) { ls_g4_pcie_disable_interrupt(pcie); schedule_delayed_work(&pcie->dwork, 
msecs_to_jiffies(1)); } mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT); return IRQ_HANDLED; } static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci) { struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci); struct platform_device *pdev = mv_pci->pdev; struct device *dev = &pdev->dev; int ret; pcie->irq = platform_get_irq_byname(pdev, "intr"); if (pcie->irq < 0) return pcie->irq; ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr, IRQF_SHARED, pdev->name, pcie); if (ret) { dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret); return ret; } return 0; } static void ls_g4_pcie_reset(struct work_struct *work) { struct delayed_work *dwork = container_of(work, struct delayed_work, work); struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork); struct mobiveil_pcie *mv_pci = &pcie->pci; u16 ctrl; ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL); ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL); if (!ls_g4_pcie_reinit_hw(pcie)) return; ls_g4_pcie_enable_interrupt(pcie); } static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = { .interrupt_init = ls_g4_pcie_interrupt_init, }; static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = { .link_up = ls_g4_pcie_link_up, }; static int __init ls_g4_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct mobiveil_pcie *mv_pci; struct ls_g4_pcie *pcie; struct device_node *np = dev->of_node; int ret; if (!of_parse_phandle(np, "msi-parent", 0)) { dev_err(dev, "Failed to find msi-parent\n"); return -EINVAL; } bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; pcie = pci_host_bridge_priv(bridge); mv_pci = &pcie->pci; mv_pci->pdev = pdev; mv_pci->ops = &ls_g4_pcie_pab_ops; mv_pci->rp.ops = &ls_g4_pcie_rp_ops; mv_pci->rp.bridge = bridge; platform_set_drvdata(pdev, pcie); INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset); ret = mobiveil_pcie_host_probe(mv_pci); if (ret) { dev_err(dev, "Fail to probe\n"); return ret; } ls_g4_pcie_enable_interrupt(pcie); return 0; } static const struct of_device_id ls_g4_pcie_of_match[] = { { .compatible = "fsl,lx2160a-pcie", }, { }, }; static struct platform_driver ls_g4_pcie_driver = { .driver = { .name = "layerscape-pcie-gen4", .of_match_table = ls_g4_pcie_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe);
linux-master
drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
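/*
 * Illustration only: the bounded-polling idiom used twice in
 * ls_g4_pcie_reinit_hw() above (wait for the PAB reset status, then wait for
 * link-up).  demo_poll() and demo_condition() are invented names; a real
 * driver would read hardware registers and usleep_range() between attempts
 * instead of counting calls.
 */
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static bool demo_condition(void)
{
	/* pretend the hardware becomes ready on the fourth read */
	return ++attempts >= 4;
}

static int demo_poll(int max_tries)
{
	while (max_tries--) {
		if (demo_condition())
			return 0;	/* condition met */
		/* in a driver: usleep_range(min, max) here */
	}
	return -1;		/* timed out; the caller reports -EIO */
}

int main(void)
{
	printf("%d\n", demo_poll(100));	/* 0: became ready */
	attempts = -1000;
	printf("%d\n", demo_poll(3));	/* -1: gave up */
	return 0;
}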
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for HiSilicon SoCs * * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com * * Authors: Zhou Wang <[email protected]> * Dacai Zhu <[email protected]> * Gabriele Paoloni <[email protected]> */ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include "../../pci.h" #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) struct hisi_pcie { void __iomem *reg_base; }; static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct pci_config_window *cfg = bus->sysdata; int dev = PCI_SLOT(devfn); if (bus->number == cfg->busr.start) { /* access only one slot on each root port */ if (dev > 0) return PCIBIOS_DEVICE_NOT_FOUND; else return pci_generic_config_read32(bus, devfn, where, size, val); } return pci_generic_config_read(bus, devfn, where, size, val); } static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct pci_config_window *cfg = bus->sysdata; int dev = PCI_SLOT(devfn); if (bus->number == cfg->busr.start) { /* access only one slot on each root port */ if (dev > 0) return PCIBIOS_DEVICE_NOT_FOUND; else return pci_generic_config_write32(bus, devfn, where, size, val); } return pci_generic_config_write(bus, devfn, where, size, val); } static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct pci_config_window *cfg = bus->sysdata; struct hisi_pcie *pcie = cfg->priv; if (bus->number == cfg->busr.start) return pcie->reg_base + where; else return pci_ecam_map_bus(bus, devfn, where); } #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) static int hisi_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct hisi_pcie *pcie; struct acpi_device *adev = to_acpi_device(dev); struct acpi_pci_root *root = acpi_driver_data(adev); struct resource *res; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; /* * Retrieve RC base and size from a HISI0081 device with _UID * matching our segment. 
*/ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); if (ret) { dev_err(dev, "can't get rc base address\n"); return -ENOMEM; } pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); if (!pcie->reg_base) return -ENOMEM; cfg->priv = pcie; return 0; } const struct pci_ecam_ops hisi_pcie_ops = { .init = hisi_pcie_init, .pci_ops = { .map_bus = hisi_pcie_map_bus, .read = hisi_pcie_rd_conf, .write = hisi_pcie_wr_conf, } }; #endif #ifdef CONFIG_PCI_HISI static int hisi_pcie_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct hisi_pcie *pcie; struct platform_device *pdev = to_platform_device(dev); struct resource *res; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(dev, "missing \"reg[1]\"property\n"); return -EINVAL; } pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); if (!pcie->reg_base) return -ENOMEM; cfg->priv = pcie; return 0; } static const struct pci_ecam_ops hisi_pcie_platform_ops = { .init = hisi_pcie_platform_init, .pci_ops = { .map_bus = hisi_pcie_map_bus, .read = hisi_pcie_rd_conf, .write = hisi_pcie_wr_conf, } }; static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { { .compatible = "hisilicon,hip06-pcie-ecam", .data = &hisi_pcie_platform_ops, }, { .compatible = "hisilicon,hip07-pcie-ecam", .data = &hisi_pcie_platform_ops, }, {}, }; static struct platform_driver hisi_pcie_almost_ecam_driver = { .probe = pci_host_common_probe, .driver = { .name = "hisi-pcie-almost-ecam", .of_match_table = hisi_pcie_almost_ecam_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(hisi_pcie_almost_ecam_driver); #endif #endif
linux-master
drivers/pci/controller/dwc/pcie-hisi.c
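The HiSilicon quirk above splits configuration accesses two ways: requests on the root bus are served from a separately mapped RC register window (with only device 0 visible), while everything downstream goes through the generic ECAM mapping. Below is a minimal userspace sketch of that routing decision, assuming the standard ECAM offset layout (bus << 20 | devfn << 12 | register); the fake_rc structure, the pick_window() helper and the window base addresses are illustrative only, not kernel API.

/*
 * Sketch of the config-space routing in pcie-hisi.c: root-bus accesses
 * hit the RC's own register window, everything else uses generic ECAM.
 * Userspace demo only; the window bases are made-up numbers.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)

/* Standard ECAM offset: bus[27:20] | device[19:15] | function[14:12] | reg[11:0] */
static uint64_t ecam_offset(uint8_t bus, uint8_t devfn, uint16_t where)
{
	return ((uint64_t)bus << 20) | ((uint64_t)devfn << 12) | (where & 0xfff);
}

struct fake_rc {
	uint64_t ecam_base;	/* base of the ECAM window (illustrative) */
	uint64_t rc_reg_base;	/* base of the RC's own config registers (illustrative) */
	uint8_t  root_bus;	/* bus number of the root port */
};

/* Mirrors the decision in hisi_pcie_map_bus()/hisi_pcie_rd_conf(). */
static int pick_window(const struct fake_rc *rc, uint8_t bus, uint8_t devfn,
		       uint16_t where, uint64_t *addr)
{
	if (bus == rc->root_bus) {
		if (PCI_SLOT(devfn) > 0)
			return -1;	/* only one slot on each root port */
		*addr = rc->rc_reg_base + where;
		return 0;
	}
	*addr = rc->ecam_base + ecam_offset(bus, devfn, where);
	return 0;
}

int main(void)
{
	struct fake_rc rc = { 0xb0000000ull, 0xa0090000ull, 0 };
	uint64_t addr;

	if (!pick_window(&rc, 0, 0, 0x70, &addr))
		printf("root bus, dev 0: 0x%llx\n", (unsigned long long)addr);
	if (pick_window(&rc, 0, 1 << 3, 0x70, &addr))
		printf("root bus, dev 1: not found\n");
	if (!pick_window(&rc, 1, 0, 0x70, &addr))
		printf("bus 1,    dev 0: 0x%llx\n", (unsigned long long)addr);
	return 0;
}

Compiled with any C compiler this prints the two computed addresses and rejects device 1 on the root bus, mirroring the PCIBIOS_DEVICE_NOT_FOUND path in the driver.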
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs * * SPEAr13xx PCIe Glue Layer Source Code * * Copyright (C) 2010-2014 ST Microelectronics * Pratyush Anand <[email protected]> * Mohit Kumar <[email protected]> */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/resource.h> #include "pcie-designware.h" struct spear13xx_pcie { struct dw_pcie *pci; void __iomem *app_base; struct phy *phy; struct clk *clk; }; struct pcie_app_reg { u32 app_ctrl_0; /* cr0 */ u32 app_ctrl_1; /* cr1 */ u32 app_status_0; /* cr2 */ u32 app_status_1; /* cr3 */ u32 msg_status; /* cr4 */ u32 msg_payload; /* cr5 */ u32 int_sts; /* cr6 */ u32 int_clr; /* cr7 */ u32 int_mask; /* cr8 */ u32 mst_bmisc; /* cr9 */ u32 phy_ctrl; /* cr10 */ u32 phy_status; /* cr11 */ u32 cxpl_debug_info_0; /* cr12 */ u32 cxpl_debug_info_1; /* cr13 */ u32 ven_msg_ctrl_0; /* cr14 */ u32 ven_msg_ctrl_1; /* cr15 */ u32 ven_msg_data_0; /* cr16 */ u32 ven_msg_data_1; /* cr17 */ u32 ven_msi_0; /* cr18 */ u32 ven_msi_1; /* cr19 */ u32 mst_rmisc; /* cr20 */ }; /* CR0 ID */ #define APP_LTSSM_ENABLE_ID 3 #define DEVICE_TYPE_RC (4 << 25) #define MISCTRL_EN_ID 30 #define REG_TRANSLATION_ENABLE 31 /* CR3 ID */ #define XMLH_LINK_UP (1 << 6) /* CR6 */ #define MSI_CTRL_INT (1 << 26) #define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev) static int spear13xx_pcie_start_link(struct dw_pcie *pci) { struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; /* enable ltssm */ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) | (1 << APP_LTSSM_ENABLE_ID) | ((u32)1 << REG_TRANSLATION_ENABLE), &app_reg->app_ctrl_0); return 0; } static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) { struct spear13xx_pcie *spear13xx_pcie = arg; struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; struct dw_pcie *pci = spear13xx_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; unsigned int status; status = readl(&app_reg->int_sts); if (status & MSI_CTRL_INT) { BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI)); dw_handle_msi_irq(pp); } writel(status, &app_reg->int_clr); return IRQ_HANDLED; } static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie) { struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; /* Enable MSI interrupt */ if (IS_ENABLED(CONFIG_PCI_MSI)) writel(readl(&app_reg->int_mask) | MSI_CTRL_INT, &app_reg->int_mask); } static int spear13xx_pcie_link_up(struct dw_pcie *pci) { struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) return 1; return 0; } static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; spear13xx_pcie->app_base = pci->dbi_base + 0x2000; /* * this controller support only 128 bytes read size, however its * default value in capability register is 512 bytes. So force * it to 128 here. 
*/ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL); val &= ~PCI_EXP_DEVCTL_READRQ; dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val); dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A); dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80); spear13xx_pcie_enable_interrupts(spear13xx_pcie); return 0; } static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { .host_init = spear13xx_pcie_host_init, }; static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, struct platform_device *pdev) { struct dw_pcie *pci = spear13xx_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; int ret; pp->irq = platform_get_irq(pdev, 0); if (pp->irq < 0) return pp->irq; ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "spear1340-pcie", spear13xx_pcie); if (ret) { dev_err(dev, "failed to request irq %d\n", pp->irq); return ret; } pp->ops = &spear13xx_pcie_host_ops; pp->msi_irq[0] = -ENODEV; ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); return ret; } return 0; } static const struct dw_pcie_ops dw_pcie_ops = { .link_up = spear13xx_pcie_link_up, .start_link = spear13xx_pcie_start_link, }; static int spear13xx_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; struct spear13xx_pcie *spear13xx_pcie; struct device_node *np = dev->of_node; int ret; spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); if (!spear13xx_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->ops = &dw_pcie_ops; spear13xx_pcie->pci = pci; spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(spear13xx_pcie->phy)) { ret = PTR_ERR(spear13xx_pcie->phy); if (ret == -EPROBE_DEFER) dev_info(dev, "probe deferred\n"); else dev_err(dev, "couldn't get pcie-phy\n"); return ret; } phy_init(spear13xx_pcie->phy); spear13xx_pcie->clk = devm_clk_get(dev, NULL); if (IS_ERR(spear13xx_pcie->clk)) { dev_err(dev, "couldn't get clk for pcie\n"); return PTR_ERR(spear13xx_pcie->clk); } ret = clk_prepare_enable(spear13xx_pcie->clk); if (ret) { dev_err(dev, "couldn't enable clk for pcie\n"); return ret; } if (of_property_read_bool(np, "st,pcie-is-gen1")) pci->link_gen = 1; platform_set_drvdata(pdev, spear13xx_pcie); ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); if (ret < 0) goto fail_clk; return 0; fail_clk: clk_disable_unprepare(spear13xx_pcie->clk); return ret; } static const struct of_device_id spear13xx_pcie_of_match[] = { { .compatible = "st,spear1340-pcie", }, {}, }; static struct platform_driver spear13xx_pcie_driver = { .probe = spear13xx_pcie_probe, .driver = { .name = "spear-pcie", .of_match_table = spear13xx_pcie_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(spear13xx_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-spear13xx.c
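The spear13xx host_init above masks out PCI_EXP_DEVCTL_READRQ because the controller supports only 128-byte reads while the capability register defaults to 512 bytes. The sketch below demonstrates just the field arithmetic, assuming the standard PCIe encoding in which the size is 128 << field (bits 14:12 of Device Control); the sample register value is made up.

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ		0x7000	/* Max_Read_Request_Size, bits 14:12 */
#define PCI_EXP_DEVCTL_READRQ_SHIFT	12

/* PCIe encodes the size as 128 << field, so field 0 = 128B ... field 5 = 4096B. */
static unsigned int readrq_bytes(uint16_t devctl)
{
	return 128u << ((devctl & PCI_EXP_DEVCTL_READRQ) >> PCI_EXP_DEVCTL_READRQ_SHIFT);
}

int main(void)
{
	uint16_t devctl = 0x2810;	/* made-up value with MRRS field = 2 (512 bytes) */

	printf("before: MRRS = %u bytes\n", readrq_bytes(devctl));
	devctl &= ~PCI_EXP_DEVCTL_READRQ;	/* same masking as the driver */
	printf("after:  MRRS = %u bytes\n", readrq_bytes(devctl));
	return 0;
}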
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for UniPhier SoCs * Copyright 2018 Socionext Inc. * Author: Kunihiko Hayashi <[email protected]> */ #include <linux/bitops.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include "pcie-designware.h" #define PCL_PINCTRL0 0x002c #define PCL_PERST_PLDN_REGEN BIT(12) #define PCL_PERST_NOE_REGEN BIT(11) #define PCL_PERST_OUT_REGEN BIT(8) #define PCL_PERST_PLDN_REGVAL BIT(4) #define PCL_PERST_NOE_REGVAL BIT(3) #define PCL_PERST_OUT_REGVAL BIT(0) #define PCL_PIPEMON 0x0044 #define PCL_PCLK_ALIVE BIT(15) #define PCL_MODE 0x8000 #define PCL_MODE_REGEN BIT(8) #define PCL_MODE_REGVAL BIT(0) #define PCL_APP_READY_CTRL 0x8008 #define PCL_APP_LTSSM_ENABLE BIT(0) #define PCL_APP_PM0 0x8078 #define PCL_SYS_AUX_PWR_DET BIT(8) #define PCL_RCV_INT 0x8108 #define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17) #define PCL_CFG_BW_MGT_STATUS BIT(4) #define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3) #define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2) #define PCL_CFG_PME_MSI_STATUS BIT(1) #define PCL_RCV_INTX 0x810c #define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16) #define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8) #define PCL_RCV_INTX_MASK_SHIFT 8 #define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0) #define PCL_RCV_INTX_STATUS_SHIFT 0 #define PCL_STATUS_LINK 0x8140 #define PCL_RDLH_LINK_UP BIT(1) #define PCL_XMLH_LINK_UP BIT(0) struct uniphier_pcie { struct dw_pcie pci; void __iomem *base; struct clk *clk; struct reset_control *rst; struct phy *phy; struct irq_domain *legacy_irq_domain; }; #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie, bool enable) { u32 val; val = readl(pcie->base + PCL_APP_READY_CTRL); if (enable) val |= PCL_APP_LTSSM_ENABLE; else val &= ~PCL_APP_LTSSM_ENABLE; writel(val, pcie->base + PCL_APP_READY_CTRL); } static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie) { u32 val; /* set RC MODE */ val = readl(pcie->base + PCL_MODE); val |= PCL_MODE_REGEN; val &= ~PCL_MODE_REGVAL; writel(val, pcie->base + PCL_MODE); /* use auxiliary power detection */ val = readl(pcie->base + PCL_APP_PM0); val |= PCL_SYS_AUX_PWR_DET; writel(val, pcie->base + PCL_APP_PM0); /* assert PERST# */ val = readl(pcie->base + PCL_PINCTRL0); val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL | PCL_PERST_PLDN_REGVAL); val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN | PCL_PERST_PLDN_REGEN; writel(val, pcie->base + PCL_PINCTRL0); uniphier_pcie_ltssm_enable(pcie, false); usleep_range(100000, 200000); /* deassert PERST# */ val = readl(pcie->base + PCL_PINCTRL0); val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN; writel(val, pcie->base + PCL_PINCTRL0); } static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie) { u32 status; int ret; /* wait PIPE clock */ ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status, status & PCL_PCLK_ALIVE, 100000, 1000000); if (ret) { dev_err(pcie->pci.dev, "Failed to initialize controller in RC mode\n"); return ret; } return 0; } static int uniphier_pcie_link_up(struct dw_pcie *pci) { struct uniphier_pcie *pcie = to_uniphier_pcie(pci); u32 val, mask; val = readl(pcie->base + PCL_STATUS_LINK); mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP; return (val & mask) == mask; } static int 
uniphier_pcie_start_link(struct dw_pcie *pci) { struct uniphier_pcie *pcie = to_uniphier_pcie(pci); uniphier_pcie_ltssm_enable(pcie, true); return 0; } static void uniphier_pcie_stop_link(struct dw_pcie *pci) { struct uniphier_pcie *pcie = to_uniphier_pcie(pci); uniphier_pcie_ltssm_enable(pcie, false); } static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie) { writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT); writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX); } static void uniphier_pcie_irq_mask(struct irq_data *d) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); unsigned long flags; u32 val; raw_spin_lock_irqsave(&pp->lock, flags); val = readl(pcie->base + PCL_RCV_INTX); val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT); writel(val, pcie->base + PCL_RCV_INTX); raw_spin_unlock_irqrestore(&pp->lock, flags); } static void uniphier_pcie_irq_unmask(struct irq_data *d) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); unsigned long flags; u32 val; raw_spin_lock_irqsave(&pp->lock, flags); val = readl(pcie->base + PCL_RCV_INTX); val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT); writel(val, pcie->base + PCL_RCV_INTX); raw_spin_unlock_irqrestore(&pp->lock, flags); } static struct irq_chip uniphier_pcie_irq_chip = { .name = "PCI", .irq_mask = uniphier_pcie_irq_mask, .irq_unmask = uniphier_pcie_irq_unmask, }; static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops uniphier_intx_domain_ops = { .map = uniphier_pcie_intx_map, }; static void uniphier_pcie_irq_handler(struct irq_desc *desc) { struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long reg; u32 val, bit; /* INT for debug */ val = readl(pcie->base + PCL_RCV_INT); if (val & PCL_CFG_BW_MGT_STATUS) dev_dbg(pci->dev, "Link Bandwidth Management Event\n"); if (val & PCL_CFG_LINK_AUTO_BW_STATUS) dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n"); if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS) dev_dbg(pci->dev, "Root Error\n"); if (val & PCL_CFG_PME_MSI_STATUS) dev_dbg(pci->dev, "PME Interrupt\n"); writel(val, pcie->base + PCL_RCV_INT); /* INTx */ chained_irq_enter(chip, desc); val = readl(pcie->base + PCL_RCV_INTX); reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val); for_each_set_bit(bit, &reg, PCI_NUM_INTX) generic_handle_domain_irq(pcie->legacy_irq_domain, bit); chained_irq_exit(chip, desc); } static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); struct device_node *np = pci->dev->of_node; struct device_node *np_intc; int ret = 0; np_intc = of_get_child_by_name(np, "legacy-interrupt-controller"); if (!np_intc) { dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n"); return -EINVAL; } pp->irq = irq_of_parse_and_map(np_intc, 0); if (!pp->irq) { dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n"); ret = -EINVAL; goto out_put_node; } pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, 
PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); if (!pcie->legacy_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); ret = -ENODEV; goto out_put_node; } irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler, pp); out_put_node: of_node_put(np_intc); return ret; } static int uniphier_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); int ret; ret = uniphier_pcie_config_legacy_irq(pp); if (ret) return ret; uniphier_pcie_irq_enable(pcie); return 0; } static const struct dw_pcie_host_ops uniphier_pcie_host_ops = { .host_init = uniphier_pcie_host_init, }; static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie) { int ret; ret = clk_prepare_enable(pcie->clk); if (ret) return ret; ret = reset_control_deassert(pcie->rst); if (ret) goto out_clk_disable; uniphier_pcie_init_rc(pcie); ret = phy_init(pcie->phy); if (ret) goto out_rst_assert; ret = uniphier_pcie_wait_rc(pcie); if (ret) goto out_phy_exit; return 0; out_phy_exit: phy_exit(pcie->phy); out_rst_assert: reset_control_assert(pcie->rst); out_clk_disable: clk_disable_unprepare(pcie->clk); return ret; } static const struct dw_pcie_ops dw_pcie_ops = { .start_link = uniphier_pcie_start_link, .stop_link = uniphier_pcie_stop_link, .link_up = uniphier_pcie_link_up, }; static int uniphier_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_pcie *pcie; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->pci.dev = dev; pcie->pci.ops = &dw_pcie_ops; pcie->base = devm_platform_ioremap_resource_byname(pdev, "link"); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); pcie->clk = devm_clk_get(dev, NULL); if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); pcie->rst = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(pcie->rst)) return PTR_ERR(pcie->rst); pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) return PTR_ERR(pcie->phy); platform_set_drvdata(pdev, pcie); ret = uniphier_pcie_host_enable(pcie); if (ret) return ret; pcie->pci.pp.ops = &uniphier_pcie_host_ops; return dw_pcie_host_init(&pcie->pci.pp); } static const struct of_device_id uniphier_pcie_match[] = { { .compatible = "socionext,uniphier-pcie", }, { /* sentinel */ }, }; static struct platform_driver uniphier_pcie_driver = { .probe = uniphier_pcie_probe, .driver = { .name = "uniphier-pcie", .of_match_table = uniphier_pcie_match, }, }; builtin_platform_driver(uniphier_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-uniphier.c
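The UniPhier driver above multiplexes the four INTx lines through a single register, PCL_RCV_INTX, with enables at bits 19:16, mask bits at 11:8 and pending status at 3:0; the chained handler extracts the status nibble and dispatches one virtual IRQ per set bit. A small standalone sketch of that bit layout follows, with a made-up register value; intx_mask() mirrors what uniphier_pcie_irq_mask() does to the 11:8 field.

#include <stdint.h>
#include <stdio.h>

#define PCL_RCV_INTX_ALL_STATUS	0x0000000fu	/* GENMASK(3, 0)  */
#define PCL_RCV_INTX_MASK_SHIFT	8		/* mask field at 11:8 */
#define PCI_NUM_INTX		4

/* Mask one INTx line: set its bit in the 11:8 mask field. */
static uint32_t intx_mask(uint32_t reg, unsigned int hwirq)
{
	return reg | (1u << (hwirq + PCL_RCV_INTX_MASK_SHIFT));
}

int main(void)
{
	uint32_t reg = 0x000f0005;	/* made-up: all enables set, INTA and INTC pending */
	uint32_t status = reg & PCL_RCV_INTX_ALL_STATUS;
	unsigned int bit;

	for (bit = 0; bit < PCI_NUM_INTX; bit++)
		if (status & (1u << bit))
			printf("dispatch INT%c\n", 'A' + bit);

	printf("mask INTB -> 0x%08x\n", intx_mask(reg, 1));
	return 0;
}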
// SPDX-License-Identifier: GPL-2.0 /* * Synopsys DesignWare PCIe Endpoint controller driver * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/of.h> #include <linux/platform_device.h> #include "pcie-designware.h" #include <linux/pci-epc.h> #include <linux/pci-epf.h> void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) { struct pci_epc *epc = ep->epc; pci_epc_linkup(epc); } EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup); void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) { struct pci_epc *epc = ep->epc; pci_epc_init_notify(epc); } EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify); struct dw_pcie_ep_func * dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie_ep_func *ep_func; list_for_each_entry(ep_func, &ep->func_list, list) { if (ep_func->func_no == func_no) return ep_func; } return NULL; } static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no) { unsigned int func_offset = 0; if (ep->ops->func_conf_select) func_offset = ep->ops->func_conf_select(ep, func_no); return func_offset; } static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no, enum pci_barno bar, int flags) { u32 reg; unsigned int func_offset = 0; struct dw_pcie_ep *ep = &pci->ep; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar); dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writel_dbi2(pci, reg, 0x0); dw_pcie_writel_dbi(pci, reg, 0x0); if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { dw_pcie_writel_dbi2(pci, reg + 4, 0x0); dw_pcie_writel_dbi(pci, reg + 4, 0x0); } dw_pcie_dbi_ro_wr_dis(pci); } void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) { u8 func_no, funcs; funcs = pci->ep.epc->max_functions; for (func_no = 0; func_no < funcs; func_no++) __dw_pcie_ep_reset_bar(pci, func_no, bar, 0); } EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar); static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, u8 cap_ptr, u8 cap) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); unsigned int func_offset = 0; u8 cap_id, next_cap_ptr; u16 reg; if (!cap_ptr) return 0; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr); cap_id = (reg & 0x00ff); if (cap_id > PCI_CAP_ID_MAX) return 0; if (cap_id == cap) return cap_ptr; next_cap_ptr = (reg & 0xff00) >> 8; return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); } static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); unsigned int func_offset = 0; u8 next_cap_ptr; u16 reg; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST); next_cap_ptr = (reg & 0x00ff); return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); } static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); unsigned int func_offset = 0; func_offset = dw_pcie_ep_func_select(ep, func_no); dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid); dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid); dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid); dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code); dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE, hdr->subclass_code | hdr->baseclass_code << 8); dw_pcie_writeb_dbi(pci, func_offset + 
PCI_CACHE_LINE_SIZE, hdr->cache_line_size); dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID, hdr->subsys_vendor_id); dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id); dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN, hdr->interrupt_pin); dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, dma_addr_t cpu_addr, enum pci_barno bar) { int ret; u32 free_win; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); if (!ep->bar_to_atu[bar]) free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows); else free_win = ep->bar_to_atu[bar]; if (free_win >= pci->num_ib_windows) { dev_err(pci->dev, "No free inbound window\n"); return -EINVAL; } ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type, cpu_addr, bar); if (ret < 0) { dev_err(pci->dev, "Failed to program IB window\n"); return ret; } ep->bar_to_atu[bar] = free_win; set_bit(free_win, ep->ib_window_map); return 0; } static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no, phys_addr_t phys_addr, u64 pci_addr, size_t size) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); u32 free_win; int ret; free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows); if (free_win >= pci->num_ob_windows) { dev_err(pci->dev, "No free outbound window\n"); return -EINVAL; } ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM, phys_addr, pci_addr, size); if (ret) return ret; set_bit(free_win, ep->ob_window_map); ep->outbound_addr[free_win] = phys_addr; return 0; } static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar = epf_bar->barno; u32 atu_index = ep->bar_to_atu[bar]; __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags); dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index); clear_bit(atu_index, ep->ib_window_map); ep->epf_bar[bar] = NULL; ep->bar_to_atu[bar] = 0; } static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar = epf_bar->barno; size_t size = epf_bar->size; int flags = epf_bar->flags; unsigned int func_offset = 0; int ret, type; u32 reg; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset; if (!(flags & PCI_BASE_ADDRESS_SPACE)) type = PCIE_ATU_TYPE_MEM; else type = PCIE_ATU_TYPE_IO; ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar); if (ret) return ret; if (ep->epf_bar[bar]) return 0; dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1)); dw_pcie_writel_dbi(pci, reg, flags); if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1)); dw_pcie_writel_dbi(pci, reg + 4, 0); } ep->epf_bar[bar] = epf_bar; dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, u32 *atu_index) { u32 index; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); for (index = 0; index < pci->num_ob_windows; index++) { if (ep->outbound_addr[index] != addr) continue; *atu_index = index; return 0; } return -EINVAL; } static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr) { int ret; u32 atu_index; struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct 
dw_pcie *pci = to_dw_pcie_from_ep(ep); ret = dw_pcie_find_index(ep, addr, &atu_index); if (ret < 0) return; dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index); clear_bit(atu_index, ep->ob_window_map); } static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr, u64 pci_addr, size_t size) { int ret; struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size); if (ret) { dev_err(pci->dev, "Failed to enable address\n"); return ret; } return 0; } static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); u32 val, reg; unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; val = dw_pcie_readw_dbi(pci, reg); if (!(val & PCI_MSI_FLAGS_ENABLE)) return -EINVAL; val = (val & PCI_MSI_FLAGS_QSIZE) >> 4; return val; } static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); u32 val, reg; unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; val = dw_pcie_readw_dbi(pci, reg); val &= ~PCI_MSI_FLAGS_QMASK; val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK; dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, reg, val); dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); u32 val, reg; unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) return -EINVAL; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS; val = dw_pcie_readw_dbi(pci, reg); if (!(val & PCI_MSIX_FLAGS_ENABLE)) return -EINVAL; val &= PCI_MSIX_FLAGS_QSIZE; return val; } static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 interrupts, enum pci_barno bir, u32 offset) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); u32 val, reg; unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) return -EINVAL; dw_pcie_dbi_ro_wr_en(pci); func_offset = dw_pcie_ep_func_select(ep, func_no); reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS; val = dw_pcie_readw_dbi(pci, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; dw_pcie_writew_dbi(pci, reg, val); reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE; val = offset | bir; dw_pcie_writel_dbi(pci, reg, val); reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA; val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; dw_pcie_writel_dbi(pci, reg, val); dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie_ep 
*ep = epc_get_drvdata(epc); if (!ep->ops->raise_irq) return -EINVAL; return ep->ops->raise_irq(ep, func_no, type, interrupt_num); } static void dw_pcie_ep_stop(struct pci_epc *epc) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); dw_pcie_stop_link(pci); } static int dw_pcie_ep_start(struct pci_epc *epc) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); return dw_pcie_start_link(pci); } static const struct pci_epc_features* dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); if (!ep->ops->get_features) return NULL; return ep->ops->get_features(ep); } static const struct pci_epc_ops epc_ops = { .write_header = dw_pcie_ep_write_header, .set_bar = dw_pcie_ep_set_bar, .clear_bar = dw_pcie_ep_clear_bar, .map_addr = dw_pcie_ep_map_addr, .unmap_addr = dw_pcie_ep_unmap_addr, .set_msi = dw_pcie_ep_set_msi, .get_msi = dw_pcie_ep_get_msi, .set_msix = dw_pcie_ep_set_msix, .get_msix = dw_pcie_ep_get_msix, .raise_irq = dw_pcie_ep_raise_irq, .start = dw_pcie_ep_start, .stop = dw_pcie_ep_stop, .get_features = dw_pcie_ep_get_features, }; int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; dev_err(dev, "EP cannot trigger legacy IRQs\n"); return -EINVAL; } EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; unsigned int aligned_offset; unsigned int func_offset = 0; u16 msg_ctrl, msg_data; u32 msg_addr_lower, msg_addr_upper, reg; u64 msg_addr; bool has_upper; int ret; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; func_offset = dw_pcie_ep_func_select(ep, func_no); /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. 
*/ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; msg_ctrl = dw_pcie_readw_dbi(pci, reg); has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO; msg_addr_lower = dw_pcie_readl_dbi(pci, reg); if (has_upper) { reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI; msg_addr_upper = dw_pcie_readl_dbi(pci, reg); reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64; msg_data = dw_pcie_readw_dbi(pci, reg); } else { msg_addr_upper = 0; reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32; msg_data = dw_pcie_readw_dbi(pci, reg); } aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); msg_addr = ((u64)msg_addr_upper) << 32 | (msg_addr_lower & ~aligned_offset); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) return ret; writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); return 0; } EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq); int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dw_pcie_ep_func *ep_func; u32 msg_data; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) return -EINVAL; msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) | (interrupt_num - 1); dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data); return 0; } int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dw_pcie_ep_func *ep_func; struct pci_epf_msix_tbl *msix_tbl; struct pci_epc *epc = ep->epc; unsigned int func_offset = 0; u32 reg, msg_data, vec_ctrl; unsigned int aligned_offset; u32 tbl_offset; u64 msg_addr; int ret; u8 bir; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) return -EINVAL; func_offset = dw_pcie_ep_func_select(ep, func_no); reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE; tbl_offset = dw_pcie_readl_dbi(pci, reg); bir = (tbl_offset & PCI_MSIX_TABLE_BIR); tbl_offset &= PCI_MSIX_TABLE_OFFSET; msix_tbl = ep->epf_bar[bir]->addr + tbl_offset; msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; msg_data = msix_tbl[(interrupt_num - 1)].msg_data; vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl; if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); return -EPERM; } aligned_offset = msg_addr & (epc->mem->window.page_size - 1); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) return ret; writel(msg_data, ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); return 0; } void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; dw_pcie_edma_remove(pci); pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, epc->mem->window.page_size); pci_epc_mem_exit(epc); } static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) { u32 header; int pos = PCI_CFG_SPACE_SIZE; while (pos) { header = dw_pcie_readl_dbi(pci, pos); if (PCI_EXT_CAP_ID(header) == cap) return pos; pos = PCI_EXT_CAP_NEXT(header); if (!pos) break; } return 0; } int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); unsigned int offset, ptm_cap_base; unsigned int nbars; u8 hdr_type; u32 reg; int i; hdr_type = 
dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) & PCI_HEADER_TYPE_MASK; if (hdr_type != PCI_HEADER_TYPE_NORMAL) { dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", hdr_type); return -EIO; } offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); dw_pcie_dbi_ro_wr_en(pci); if (offset) { reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); } /* * PTM responder capability can be disabled only after disabling * PTM root capability. */ if (ptm_cap_base) { dw_pcie_dbi_ro_wr_en(pci); reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP); reg &= ~PCI_PTM_CAP_ROOT; dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg); reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP); reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK); dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg); dw_pcie_dbi_ro_wr_dis(pci); } dw_pcie_setup(pci); dw_pcie_dbi_ro_wr_dis(pci); return 0; } EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete); int dw_pcie_ep_init(struct dw_pcie_ep *ep) { int ret; void *addr; u8 func_no; struct resource *res; struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; const struct pci_epc_features *epc_features; struct dw_pcie_ep_func *ep_func; INIT_LIST_HEAD(&ep->func_list); ret = dw_pcie_get_resources(pci); if (ret) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) return -EINVAL; ep->phys_base = res->start; ep->addr_size = resource_size(res); dw_pcie_version_detect(pci); dw_pcie_iatu_detect(pci); ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, GFP_KERNEL); if (!ep->ib_window_map) return -ENOMEM; ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows, GFP_KERNEL); if (!ep->ob_window_map) return -ENOMEM; addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t), GFP_KERNEL); if (!addr) return -ENOMEM; ep->outbound_addr = addr; epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { dev_err(dev, "Failed to create epc device\n"); return PTR_ERR(epc); } ep->epc = epc; epc_set_drvdata(epc, ep); ret = of_property_read_u8(np, "max-functions", &epc->max_functions); if (ret < 0) epc->max_functions = 1; for (func_no = 0; func_no < epc->max_functions; func_no++) { ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL); if (!ep_func) return -ENOMEM; ep_func->func_no = func_no; ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no, PCI_CAP_ID_MSI); ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no, PCI_CAP_ID_MSIX); list_add_tail(&ep_func->list, &ep->func_list); } if (ep->ops->ep_init) ep->ops->ep_init(ep); ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, ep->page_size); if (ret < 0) { dev_err(dev, "Failed to initialize address space\n"); return ret; } ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, epc->mem->window.page_size); if (!ep->msi_mem) { ret = -ENOMEM; dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); goto err_exit_epc_mem; } ret = dw_pcie_edma_detect(pci); if (ret) goto err_free_epc_mem; if (ep->ops->get_features) { epc_features = ep->ops->get_features(ep); if (epc_features->core_init_notifier) return 0; } ret = dw_pcie_ep_init_complete(ep); if 
(ret) goto err_remove_edma; return 0; err_remove_edma: dw_pcie_edma_remove(pci); err_free_epc_mem: pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, epc->mem->window.page_size); err_exit_epc_mem: pci_epc_mem_exit(epc); return ret; } EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
linux-master
drivers/pci/controller/dwc/pcie-designware-ep.c
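dw_pcie_ep_raise_msi_irq() in the endpoint core above raises an MSI by reading the message address and data from the function's MSI capability, mapping a page-aligned window at that address through an outbound ATU region, and writing msg_data | (vector - 1) at the in-page offset. The sketch below reproduces only the address-splitting arithmetic in userspace; the sample address, data and page size are invented for the demo.

#include <stdint.h>
#include <stdio.h>

/*
 * Split an MSI message address into the page-aligned base that would be
 * mapped through the outbound ATU and the offset written to inside that
 * window -- in effect the same arithmetic as dw_pcie_ep_raise_msi_irq().
 */
static void split_msi_addr(uint64_t msg_addr, uint32_t page_size,
			   uint64_t *base, uint32_t *offset)
{
	*offset = (uint32_t)(msg_addr & (page_size - 1));
	*base = msg_addr & ~(uint64_t)(page_size - 1);
}

int main(void)
{
	uint64_t msg_addr = 0x00000000fee01004ull;	/* illustrative MSI target */
	uint32_t page_size = 0x1000;			/* EPC window page size (example) */
	uint32_t msg_data = 0x4180, vector = 3;
	uint64_t base;
	uint32_t offset;

	split_msi_addr(msg_addr, page_size, &base, &offset);
	printf("map base 0x%llx, write 0x%x at offset 0x%x\n",
	       (unsigned long long)base, msg_data | (vector - 1), offset);
	return 0;
}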
// SPDX-License-Identifier: GPL-2.0 /* * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * https://www.samsung.com * * Author: Jingoo Han <[email protected]> */ #include <linux/align.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma/edma.h> #include <linux/gpio/consumer.h> #include <linux/ioport.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <linux/types.h> #include "../../pci.h" #include "pcie-designware.h" static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = { [DW_PCIE_DBI_CLK] = "dbi", [DW_PCIE_MSTR_CLK] = "mstr", [DW_PCIE_SLV_CLK] = "slv", }; static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = { [DW_PCIE_PIPE_CLK] = "pipe", [DW_PCIE_CORE_CLK] = "core", [DW_PCIE_AUX_CLK] = "aux", [DW_PCIE_REF_CLK] = "ref", }; static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = { [DW_PCIE_DBI_RST] = "dbi", [DW_PCIE_MSTR_RST] = "mstr", [DW_PCIE_SLV_RST] = "slv", }; static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = { [DW_PCIE_NON_STICKY_RST] = "non-sticky", [DW_PCIE_STICKY_RST] = "sticky", [DW_PCIE_CORE_RST] = "core", [DW_PCIE_PIPE_RST] = "pipe", [DW_PCIE_PHY_RST] = "phy", [DW_PCIE_HOT_RST] = "hot", [DW_PCIE_PWR_RST] = "pwr", }; static int dw_pcie_get_clocks(struct dw_pcie *pci) { int i, ret; for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++) pci->app_clks[i].id = dw_pcie_app_clks[i]; for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++) pci->core_clks[i].id = dw_pcie_core_clks[i]; ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS, pci->app_clks); if (ret) return ret; return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS, pci->core_clks); } static int dw_pcie_get_resets(struct dw_pcie *pci) { int i, ret; for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++) pci->app_rsts[i].id = dw_pcie_app_rsts[i]; for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++) pci->core_rsts[i].id = dw_pcie_core_rsts[i]; ret = devm_reset_control_bulk_get_optional_shared(pci->dev, DW_PCIE_NUM_APP_RSTS, pci->app_rsts); if (ret) return ret; ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev, DW_PCIE_NUM_CORE_RSTS, pci->core_rsts); if (ret) return ret; pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pci->pe_rst)) return PTR_ERR(pci->pe_rst); return 0; } int dw_pcie_get_resources(struct dw_pcie *pci) { struct platform_device *pdev = to_platform_device(pci->dev); struct device_node *np = dev_of_node(pci->dev); struct resource *res; int ret; if (!pci->dbi_base) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); } /* DBI2 is mainly useful for the endpoint controller */ if (!pci->dbi_base2) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); if (res) { pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); } else { pci->dbi_base2 = pci->dbi_base + SZ_4K; } } /* For non-unrolled iATU/eDMA platforms this range will be ignored */ if (!pci->atu_base) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); if (res) { pci->atu_size = resource_size(res); pci->atu_base = devm_ioremap_resource(pci->dev, res); if (IS_ERR(pci->atu_base)) return PTR_ERR(pci->atu_base); } else { pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; } } /* Set a default value suitable for at most 8 in 
and 8 out windows */ if (!pci->atu_size) pci->atu_size = SZ_4K; /* eDMA region can be mapped to a custom base address */ if (!pci->edma.reg_base) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma"); if (res) { pci->edma.reg_base = devm_ioremap_resource(pci->dev, res); if (IS_ERR(pci->edma.reg_base)) return PTR_ERR(pci->edma.reg_base); } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) { pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET; } } /* LLDD is supposed to manually switch the clocks and resets state */ if (dw_pcie_cap_is(pci, REQ_RES)) { ret = dw_pcie_get_clocks(pci); if (ret) return ret; ret = dw_pcie_get_resets(pci); if (ret) return ret; } if (pci->link_gen < 1) pci->link_gen = of_pci_get_max_link_speed(np); of_property_read_u32(np, "num-lanes", &pci->num_lanes); if (of_property_read_bool(np, "snps,enable-cdm-check")) dw_pcie_cap_set(pci, CDM_CHECK); return 0; } void dw_pcie_version_detect(struct dw_pcie *pci) { u32 ver; /* The content of the CSR is zero on DWC PCIe older than v4.70a */ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER); if (!ver) return; if (pci->version && pci->version != ver) dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n", pci->version, ver); else pci->version = ver; ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE); if (pci->type && pci->type != ver) dev_warn(pci->dev, "Types don't match (%08x != %08x)\n", pci->type, ver); else pci->type = ver; } /* * These interfaces resemble the pci_find_*capability() interfaces, but these * are for configuring host controllers, which are bridges *to* PCI devices but * are not PCI devices themselves. */ static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, u8 cap) { u8 cap_id, next_cap_ptr; u16 reg; if (!cap_ptr) return 0; reg = dw_pcie_readw_dbi(pci, cap_ptr); cap_id = (reg & 0x00ff); if (cap_id > PCI_CAP_ID_MAX) return 0; if (cap_id == cap) return cap_ptr; next_cap_ptr = (reg & 0xff00) >> 8; return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); } u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap) { u8 next_cap_ptr; u16 reg; reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); next_cap_ptr = (reg & 0x00ff); return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); } EXPORT_SYMBOL_GPL(dw_pcie_find_capability); static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start, u8 cap) { u32 header; int ttl; int pos = PCI_CFG_SPACE_SIZE; /* minimum 8 bytes per capability */ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; if (start) pos = start; header = dw_pcie_readl_dbi(pci, pos); /* * If we have no capabilities, this is indicated by cap ID, * cap version and next pointer all being 0. 
*/ if (header == 0) return 0; while (ttl-- > 0) { if (PCI_EXT_CAP_ID(header) == cap && pos != start) return pos; pos = PCI_EXT_CAP_NEXT(header); if (pos < PCI_CFG_SPACE_SIZE) break; header = dw_pcie_readl_dbi(pci, pos); } return 0; } u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap) { return dw_pcie_find_next_ext_capability(pci, 0, cap); } EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); int dw_pcie_read(void __iomem *addr, int size, u32 *val) { if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } if (size == 4) { *val = readl(addr); } else if (size == 2) { *val = readw(addr); } else if (size == 1) { *val = readb(addr); } else { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(dw_pcie_read); int dw_pcie_write(void __iomem *addr, int size, u32 val) { if (!IS_ALIGNED((uintptr_t)addr, size)) return PCIBIOS_BAD_REGISTER_NUMBER; if (size == 4) writel(val, addr); else if (size == 2) writew(val, addr); else if (size == 1) writeb(val, addr); else return PCIBIOS_BAD_REGISTER_NUMBER; return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(dw_pcie_write); u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size) { int ret; u32 val; if (pci->ops && pci->ops->read_dbi) return pci->ops->read_dbi(pci, pci->dbi_base, reg, size); ret = dw_pcie_read(pci->dbi_base + reg, size, &val); if (ret) dev_err(pci->dev, "Read DBI address failed\n"); return val; } EXPORT_SYMBOL_GPL(dw_pcie_read_dbi); void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val) { int ret; if (pci->ops && pci->ops->write_dbi) { pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val); return; } ret = dw_pcie_write(pci->dbi_base + reg, size, val); if (ret) dev_err(pci->dev, "Write DBI address failed\n"); } EXPORT_SYMBOL_GPL(dw_pcie_write_dbi); void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) { int ret; if (pci->ops && pci->ops->write_dbi2) { pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val); return; } ret = dw_pcie_write(pci->dbi_base2 + reg, size, val); if (ret) dev_err(pci->dev, "write DBI address failed\n"); } static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir, u32 index) { if (dw_pcie_cap_is(pci, IATU_UNROLL)) return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index); dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index); return pci->atu_base; } static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg) { void __iomem *base; int ret; u32 val; base = dw_pcie_select_atu(pci, dir, index); if (pci->ops && pci->ops->read_dbi) return pci->ops->read_dbi(pci, base, reg, 4); ret = dw_pcie_read(base + reg, 4, &val); if (ret) dev_err(pci->dev, "Read ATU address failed\n"); return val; } static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg, u32 val) { void __iomem *base; int ret; base = dw_pcie_select_atu(pci, dir, index); if (pci->ops && pci->ops->write_dbi) { pci->ops->write_dbi(pci, base, reg, 4, val); return; } ret = dw_pcie_write(base + reg, 4, val); if (ret) dev_err(pci->dev, "Write ATU address failed\n"); } static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg) { return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg); } static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg, u32 val) { dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val); } static inline u32 dw_pcie_enable_ecrc(u32 val) { /* * DesignWare core version 4.90A has a design issue where the 'TD' * 
bit in the Control register-1 of the ATU outbound region acts * like an override for the ECRC setting, i.e., the presence of TLP * Digest (ECRC) in the outgoing TLPs is solely determined by this * bit. This is contrary to the PCIe spec which says that the * enablement of the ECRC is solely determined by the AER * registers. * * Because of this, even when the ECRC is enabled through AER * registers, the transactions going through ATU won't have TLP * Digest as there is no way the PCI core AER code could program * the TD bit which is specific to the DesignWare core. * * The best way to handle this scenario is to program the TD bit * always. It affects only the traffic from root port to downstream * devices. * * At this point, * When ECRC is enabled in AER registers, everything works normally * When ECRC is NOT enabled in AER registers, then, * on Root Port:- TLP Digest (DWord size) gets appended to each packet * even through it is not required. Since downstream * TLPs are mostly for configuration accesses and BAR * accesses, they are not in critical path and won't * have much negative effect on the performance. * on End Point:- TLP Digest is received for some/all the packets coming * from the root port. TLP Digest is ignored because, * as per the PCIe Spec r5.0 v1.0 section 2.2.3 * "TLP Digest Rules", when an endpoint receives TLP * Digest when its ECRC check functionality is disabled * in AER registers, received TLP Digest is just ignored. * Since there is no issue or error reported either side, best way to * handle the scenario is to program TD bit by default. */ return val | PCIE_ATU_TD; } static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size) { u32 retries, val; u64 limit_addr; if (pci->ops && pci->ops->cpu_addr_fixup) cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); limit_addr = cpu_addr + size - 1; if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) || !IS_ALIGNED(cpu_addr, pci->region_align) || !IS_ALIGNED(pci_addr, pci->region_align) || !size) { return -EINVAL; } dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE, lower_32_bits(cpu_addr)); dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE, upper_32_bits(cpu_addr)); dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT, lower_32_bits(limit_addr)); if (dw_pcie_ver_is_ge(pci, 460A)) dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT, upper_32_bits(limit_addr)); dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET, lower_32_bits(pci_addr)); dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET, upper_32_bits(pci_addr)); val = type | PCIE_ATU_FUNC_NUM(func_no); if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && dw_pcie_ver_is_ge(pci, 460A)) val |= PCIE_ATU_INCREASE_REGION_SIZE; if (dw_pcie_ver_is(pci, 490A)) val = dw_pcie_enable_ecrc(val); dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val); dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); /* * Make sure ATU enable takes effect before any subsequent config * and I/O accesses. 
*/ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2); if (val & PCIE_ATU_ENABLE) return 0; mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Outbound iATU is not being enabled\n"); return -ETIMEDOUT; } int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size) { return __dw_pcie_prog_outbound_atu(pci, 0, index, type, cpu_addr, pci_addr, size); } int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size) { return __dw_pcie_prog_outbound_atu(pci, func_no, index, type, cpu_addr, pci_addr, size); } static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg) { return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg); } static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg, u32 val) { dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val); } int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, u64 cpu_addr, u64 pci_addr, u64 size) { u64 limit_addr = pci_addr + size - 1; u32 retries, val; if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) || !IS_ALIGNED(cpu_addr, pci->region_align) || !IS_ALIGNED(pci_addr, pci->region_align) || !size) { return -EINVAL; } dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE, lower_32_bits(pci_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE, upper_32_bits(pci_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT, lower_32_bits(limit_addr)); if (dw_pcie_ver_is_ge(pci, 460A)) dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT, upper_32_bits(limit_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); val = type; if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) && dw_pcie_ver_is_ge(pci, 460A)) val |= PCIE_ATU_INCREASE_REGION_SIZE; dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); /* * Make sure ATU enable takes effect before any subsequent config * and I/O accesses. */ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2); if (val & PCIE_ATU_ENABLE) return 0; mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Inbound iATU is not being enabled\n"); return -ETIMEDOUT; } int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type, u64 cpu_addr, u8 bar) { u32 retries, val; if (!IS_ALIGNED(cpu_addr, pci->region_align)) return -EINVAL; dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type | PCIE_ATU_FUNC_NUM(func_no)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); /* * Make sure ATU enable takes effect before any subsequent config * and I/O accesses. 
*/ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2); if (val & PCIE_ATU_ENABLE) return 0; mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Inbound iATU is not being enabled\n"); return -ETIMEDOUT; } void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index) { dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0); } int dw_pcie_wait_for_link(struct dw_pcie *pci) { u32 offset, val; int retries; /* Check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (dw_pcie_link_up(pci)) break; usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } if (retries >= LINK_WAIT_MAX_RETRIES) { dev_info(pci->dev, "Phy link never came up\n"); return -ETIMEDOUT; } offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", FIELD_GET(PCI_EXP_LNKSTA_CLS, val), FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); return 0; } EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link); int dw_pcie_link_up(struct dw_pcie *pci) { u32 val; if (pci->ops && pci->ops->link_up) return pci->ops->link_up(pci); val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1); return ((val & PCIE_PORT_DEBUG1_LINK_UP) && (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); } EXPORT_SYMBOL_GPL(dw_pcie_link_up); void dw_pcie_upconfig_setup(struct dw_pcie *pci) { u32 val; val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL); val |= PORT_MLTI_UPCFG_SUPPORT; dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val); } EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup); static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen) { u32 cap, ctrl2, link_speed; u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2); ctrl2 &= ~PCI_EXP_LNKCTL2_TLS; switch (pcie_link_speed[link_gen]) { case PCIE_SPEED_2_5GT: link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT; break; case PCIE_SPEED_5_0GT: link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT; break; case PCIE_SPEED_8_0GT: link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT; break; case PCIE_SPEED_16_0GT: link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT; break; default: /* Use hardware capability */ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap); ctrl2 &= ~PCI_EXP_LNKCTL2_HASD; break; } dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed); cap &= ~((u32)PCI_EXP_LNKCAP_SLS); dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed); } void dw_pcie_iatu_detect(struct dw_pcie *pci) { int max_region, ob, ib; u32 val, min, dir; u64 max; val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); if (val == 0xFFFFFFFF) { dw_pcie_cap_set(pci, IATU_UNROLL); max_region = min((int)pci->atu_size / 512, 256); } else { pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; } for (ob = 0; ob < max_region; ob++) { dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000); val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET); if (val != 0x11110000) break; } for (ib = 0; ib < max_region; ib++) { dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000); val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET); if (val != 0x11110000) break; } if (ob) { dir = PCIE_ATU_REGION_DIR_OB; } else if (ib) { dir = PCIE_ATU_REGION_DIR_IB; } else { dev_err(pci->dev, "No iATU regions 
found\n"); return; } dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0); min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT); if (dw_pcie_ver_is_ge(pci, 460A)) { dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF); max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT); } else { max = 0; } pci->num_ob_windows = ob; pci->num_ib_windows = ib; pci->region_align = 1 << fls(min); pci->region_limit = (max << 32) | (SZ_4G - 1); dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n", dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F", pci->num_ob_windows, pci->num_ib_windows, pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G); } static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg) { u32 val = 0; int ret; if (pci->ops && pci->ops->read_dbi) return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4); ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val); if (ret) dev_err(pci->dev, "Read DMA address failed\n"); return val; } static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr) { struct platform_device *pdev = to_platform_device(dev); char name[6]; int ret; if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH) return -EINVAL; ret = platform_get_irq_byname_optional(pdev, "dma"); if (ret > 0) return ret; snprintf(name, sizeof(name), "dma%u", nr); return platform_get_irq_byname_optional(pdev, name); } static struct dw_edma_plat_ops dw_pcie_edma_ops = { .irq_vector = dw_pcie_edma_irq_vector, }; static int dw_pcie_edma_find_chip(struct dw_pcie *pci) { u32 val; /* * Indirect eDMA CSRs access has been completely removed since v5.40a * thus no space is now reserved for the eDMA channels viewport and * former DMA CTRL register is no longer fixed to FFs. */ if (dw_pcie_ver_is_ge(pci, 540A)) val = 0xFFFFFFFF; else val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL); if (val == 0xFFFFFFFF && pci->edma.reg_base) { pci->edma.mf = EDMA_MF_EDMA_UNROLL; val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL); } else if (val != 0xFFFFFFFF) { pci->edma.mf = EDMA_MF_EDMA_LEGACY; pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE; } else { return -ENODEV; } pci->edma.dev = pci->dev; if (!pci->edma.ops) pci->edma.ops = &dw_pcie_edma_ops; pci->edma.flags |= DW_EDMA_CHIP_LOCAL; pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val); pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val); /* Sanity check the channels count if the mapping was incorrect */ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH || !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH) return -EINVAL; return 0; } static int dw_pcie_edma_irq_verify(struct dw_pcie *pci) { struct platform_device *pdev = to_platform_device(pci->dev); u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt; char name[6]; int ret; if (pci->edma.nr_irqs == 1) return 0; else if (pci->edma.nr_irqs > 1) return pci->edma.nr_irqs != ch_cnt ? 
-EINVAL : 0; ret = platform_get_irq_byname_optional(pdev, "dma"); if (ret > 0) { pci->edma.nr_irqs = 1; return 0; } for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) { snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs); ret = platform_get_irq_byname_optional(pdev, name); if (ret <= 0) return -EINVAL; } return 0; } static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci) { struct dw_edma_region *ll; dma_addr_t paddr; int i; for (i = 0; i < pci->edma.ll_wr_cnt; i++) { ll = &pci->edma.ll_region_wr[i]; ll->sz = DMA_LLP_MEM_SIZE; ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz, &paddr, GFP_KERNEL); if (!ll->vaddr.mem) return -ENOMEM; ll->paddr = paddr; } for (i = 0; i < pci->edma.ll_rd_cnt; i++) { ll = &pci->edma.ll_region_rd[i]; ll->sz = DMA_LLP_MEM_SIZE; ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz, &paddr, GFP_KERNEL); if (!ll->vaddr.mem) return -ENOMEM; ll->paddr = paddr; } return 0; } int dw_pcie_edma_detect(struct dw_pcie *pci) { int ret; /* Don't fail if no eDMA was found (for the backward compatibility) */ ret = dw_pcie_edma_find_chip(pci); if (ret) return 0; /* Don't fail on the IRQs verification (for the backward compatibility) */ ret = dw_pcie_edma_irq_verify(pci); if (ret) { dev_err(pci->dev, "Invalid eDMA IRQs found\n"); return 0; } ret = dw_pcie_edma_ll_alloc(pci); if (ret) { dev_err(pci->dev, "Couldn't allocate LLP memory\n"); return ret; } /* Don't fail if the DW eDMA driver can't find the device */ ret = dw_edma_probe(&pci->edma); if (ret && ret != -ENODEV) { dev_err(pci->dev, "Couldn't register eDMA device\n"); return ret; } dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n", pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F", pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt); return 0; } void dw_pcie_edma_remove(struct dw_pcie *pci) { dw_edma_remove(&pci->edma); } void dw_pcie_setup(struct dw_pcie *pci) { u32 val; if (pci->link_gen > 0) dw_pcie_link_set_max_speed(pci, pci->link_gen); /* Configure Gen1 N_FTS */ if (pci->n_fts[0]) { val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR); val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK); val |= PORT_AFR_N_FTS(pci->n_fts[0]); val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]); dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val); } /* Configure Gen2+ N_FTS */ if (pci->n_fts[1]) { val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val &= ~PORT_LOGIC_N_FTS_MASK; val |= pci->n_fts[1]; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); } if (dw_pcie_cap_is(pci, CDM_CHECK)) { val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START; dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); } val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); val &= ~PORT_LINK_FAST_LINK_MODE; val |= PORT_LINK_DLL_LINK_EN; dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); if (!pci->num_lanes) { dev_dbg(pci->dev, "Using h/w default number of lanes\n"); return; } /* Set the number of lanes */ val &= ~PORT_LINK_FAST_LINK_MODE; val &= ~PORT_LINK_MODE_MASK; switch (pci->num_lanes) { case 1: val |= PORT_LINK_MODE_1_LANES; break; case 2: val |= PORT_LINK_MODE_2_LANES; break; case 4: val |= PORT_LINK_MODE_4_LANES; break; case 8: val |= PORT_LINK_MODE_8_LANES; break; default: dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes); return; } dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); /* Set link width speed control register */ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val &= ~PORT_LOGIC_LINK_WIDTH_MASK; switch 
(pci->num_lanes) { case 1: val |= PORT_LOGIC_LINK_WIDTH_1_LANES; break; case 2: val |= PORT_LOGIC_LINK_WIDTH_2_LANES; break; case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break; case 8: val |= PORT_LOGIC_LINK_WIDTH_8_LANES; break; } dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); }
linux-master
drivers/pci/controller/dwc/pcie-designware.c
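The dw_pcie_iatu_detect() routine in the file above derives the minimum iATU window alignment by writing 0 to a region's LIMIT register and computing region_align = 1 << fls(min) from the value that reads back, under the assumption that the unwritable low bits of the LIMIT register read back set. A minimal standalone C model of that derivation, with a hypothetical read-back value corresponding to 64 KiB granularity and a hand-rolled fls() in place of the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* fls(): 1-based index of the most significant set bit, 0 if none. */
static int fls_u32(uint32_t v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	/*
	 * Pretend the controller returned this after 0 was written to
	 * PCIE_ATU_LIMIT: the low 16 bits are hard-wired, so the smallest
	 * programmable window is 64 KiB (hypothetical value).
	 */
	uint32_t limit_readback = 0x0000FFFF;
	uint32_t region_align = 1u << fls_u32(limit_readback);

	printf("region_align = %u KiB\n", region_align / 1024);
	return 0;
}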
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Intel Gateway SoCs * * Copyright (c) 2019 Intel Corporation. */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/iopoll.h> #include <linux/mod_devicetable.h> #include <linux/pci_regs.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/reset.h> #include "../../pci.h" #include "pcie-designware.h" #define PORT_AFR_N_FTS_GEN12_DFT (SZ_128 - 1) #define PORT_AFR_N_FTS_GEN3 180 #define PORT_AFR_N_FTS_GEN4 196 /* PCIe Application logic Registers */ #define PCIE_APP_CCR 0x10 #define PCIE_APP_CCR_LTSSM_ENABLE BIT(0) #define PCIE_APP_MSG_CR 0x30 #define PCIE_APP_MSG_XMT_PM_TURNOFF BIT(0) #define PCIE_APP_PMC 0x44 #define PCIE_APP_PMC_IN_L2 BIT(20) #define PCIE_APP_IRNEN 0xF4 #define PCIE_APP_IRNCR 0xF8 #define PCIE_APP_IRN_AER_REPORT BIT(0) #define PCIE_APP_IRN_PME BIT(2) #define PCIE_APP_IRN_RX_VDM_MSG BIT(4) #define PCIE_APP_IRN_PM_TO_ACK BIT(9) #define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11) #define PCIE_APP_IRN_BW_MGT BIT(12) #define PCIE_APP_IRN_INTA BIT(13) #define PCIE_APP_IRN_INTB BIT(14) #define PCIE_APP_IRN_INTC BIT(15) #define PCIE_APP_IRN_INTD BIT(16) #define PCIE_APP_IRN_MSG_LTR BIT(18) #define PCIE_APP_IRN_SYS_ERR_RC BIT(29) #define PCIE_APP_INTX_OFST 12 #define PCIE_APP_IRN_INT \ (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \ PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \ PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \ PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD) #define BUS_IATU_OFFSET SZ_256M #define RESET_INTERVAL_MS 100 struct intel_pcie { struct dw_pcie pci; void __iomem *app_base; struct gpio_desc *reset_gpio; u32 rst_intrvl; struct clk *core_clk; struct reset_control *core_rst; struct phy *phy; }; static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val) { u32 old; old = readl(base + ofs); val = (old & ~mask) | (val & mask); if (val != old) writel(val, base + ofs); } static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val) { writel(val, pcie->app_base + ofs); } static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs, u32 mask, u32 val) { pcie_update_bits(pcie->app_base, ofs, mask, val); } static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs) { return dw_pcie_readl_dbi(&pcie->pci, ofs); } static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val) { dw_pcie_writel_dbi(&pcie->pci, ofs, val); } static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs, u32 mask, u32 val) { pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val); } static void intel_pcie_ltssm_enable(struct intel_pcie *pcie) { pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, PCIE_APP_CCR_LTSSM_ENABLE); } static void intel_pcie_ltssm_disable(struct intel_pcie *pcie) { pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0); } static void intel_pcie_link_setup(struct intel_pcie *pcie) { u32 val; u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP); val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL); val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC); pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val); } static void intel_pcie_init_n_fts(struct dw_pcie *pci) { switch (pci->link_gen) { case 3: pci->n_fts[1] = PORT_AFR_N_FTS_GEN3; break; case 4: pci->n_fts[1] = PORT_AFR_N_FTS_GEN4; break; default: pci->n_fts[1] = 
PORT_AFR_N_FTS_GEN12_DFT; break; } pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT; } static int intel_pcie_ep_rst_init(struct intel_pcie *pcie) { struct device *dev = pcie->pci.dev; int ret; pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(pcie->reset_gpio)) { ret = PTR_ERR(pcie->reset_gpio); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret); return ret; } /* Make initial reset last for 100us */ usleep_range(100, 200); return 0; } static void intel_pcie_core_rst_assert(struct intel_pcie *pcie) { reset_control_assert(pcie->core_rst); } static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie) { /* * One micro-second delay to make sure the reset pulse * wide enough so that core reset is clean. */ udelay(1); reset_control_deassert(pcie->core_rst); /* * Some SoC core reset also reset PHY, more delay needed * to make sure the reset process is done. */ usleep_range(1000, 2000); } static void intel_pcie_device_rst_assert(struct intel_pcie *pcie) { gpiod_set_value_cansleep(pcie->reset_gpio, 1); } static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie) { msleep(pcie->rst_intrvl); gpiod_set_value_cansleep(pcie->reset_gpio, 0); } static void intel_pcie_core_irq_disable(struct intel_pcie *pcie) { pcie_app_wr(pcie, PCIE_APP_IRNEN, 0); pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT); } static int intel_pcie_get_resources(struct platform_device *pdev) { struct intel_pcie *pcie = platform_get_drvdata(pdev); struct dw_pcie *pci = &pcie->pci; struct device *dev = pci->dev; int ret; pcie->core_clk = devm_clk_get(dev, NULL); if (IS_ERR(pcie->core_clk)) { ret = PTR_ERR(pcie->core_clk); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get clks: %d\n", ret); return ret; } pcie->core_rst = devm_reset_control_get(dev, NULL); if (IS_ERR(pcie->core_rst)) { ret = PTR_ERR(pcie->core_rst); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get resets: %d\n", ret); return ret; } ret = device_property_read_u32(dev, "reset-assert-ms", &pcie->rst_intrvl); if (ret) pcie->rst_intrvl = RESET_INTERVAL_MS; pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); if (IS_ERR(pcie->app_base)) return PTR_ERR(pcie->app_base); pcie->phy = devm_phy_get(dev, "pcie"); if (IS_ERR(pcie->phy)) { ret = PTR_ERR(pcie->phy); if (ret != -EPROBE_DEFER) dev_err(dev, "Couldn't get pcie-phy: %d\n", ret); return ret; } return 0; } static int intel_pcie_wait_l2(struct intel_pcie *pcie) { u32 value; int ret; struct dw_pcie *pci = &pcie->pci; if (pci->link_gen < 3) return 0; /* Send PME_TURN_OFF message */ pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF, PCIE_APP_MSG_XMT_PM_TURNOFF); /* Read PMC status and wait for falling into L2 link state */ ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value, value & PCIE_APP_PMC_IN_L2, 20, jiffies_to_usecs(5 * HZ)); if (ret) dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n"); return ret; } static void intel_pcie_turn_off(struct intel_pcie *pcie) { if (dw_pcie_link_up(&pcie->pci)) intel_pcie_wait_l2(pcie); /* Put endpoint device in reset state */ intel_pcie_device_rst_assert(pcie); pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0); } static int intel_pcie_host_setup(struct intel_pcie *pcie) { int ret; struct dw_pcie *pci = &pcie->pci; intel_pcie_core_rst_assert(pcie); intel_pcie_device_rst_assert(pcie); ret = phy_init(pcie->phy); if (ret) return ret; intel_pcie_core_rst_deassert(pcie); ret = clk_prepare_enable(pcie->core_clk); if (ret) { dev_err(pcie->pci.dev, "Core clock enable 
failed: %d\n", ret); goto clk_err; } pci->atu_base = pci->dbi_base + 0xC0000; intel_pcie_ltssm_disable(pcie); intel_pcie_link_setup(pcie); intel_pcie_init_n_fts(pci); ret = dw_pcie_setup_rc(&pci->pp); if (ret) goto app_init_err; dw_pcie_upconfig_setup(pci); intel_pcie_device_rst_deassert(pcie); intel_pcie_ltssm_enable(pcie); ret = dw_pcie_wait_for_link(pci); if (ret) goto app_init_err; /* Enable integrated interrupts */ pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT, PCIE_APP_IRN_INT); return 0; app_init_err: clk_disable_unprepare(pcie->core_clk); clk_err: intel_pcie_core_rst_assert(pcie); phy_exit(pcie->phy); return ret; } static void __intel_pcie_remove(struct intel_pcie *pcie) { intel_pcie_core_irq_disable(pcie); intel_pcie_turn_off(pcie); clk_disable_unprepare(pcie->core_clk); intel_pcie_core_rst_assert(pcie); phy_exit(pcie->phy); } static void intel_pcie_remove(struct platform_device *pdev) { struct intel_pcie *pcie = platform_get_drvdata(pdev); struct dw_pcie_rp *pp = &pcie->pci.pp; dw_pcie_host_deinit(pp); __intel_pcie_remove(pcie); } static int intel_pcie_suspend_noirq(struct device *dev) { struct intel_pcie *pcie = dev_get_drvdata(dev); int ret; intel_pcie_core_irq_disable(pcie); ret = intel_pcie_wait_l2(pcie); if (ret) return ret; phy_exit(pcie->phy); clk_disable_unprepare(pcie->core_clk); return ret; } static int intel_pcie_resume_noirq(struct device *dev) { struct intel_pcie *pcie = dev_get_drvdata(dev); return intel_pcie_host_setup(pcie); } static int intel_pcie_rc_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct intel_pcie *pcie = dev_get_drvdata(pci->dev); return intel_pcie_host_setup(pcie); } static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr) { return cpu_addr + BUS_IATU_OFFSET; } static const struct dw_pcie_ops intel_pcie_ops = { .cpu_addr_fixup = intel_pcie_cpu_addr, }; static const struct dw_pcie_host_ops intel_pcie_dw_ops = { .host_init = intel_pcie_rc_init, }; static int intel_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct intel_pcie *pcie; struct dw_pcie_rp *pp; struct dw_pcie *pci; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; platform_set_drvdata(pdev, pcie); pci = &pcie->pci; pci->dev = dev; pp = &pci->pp; ret = intel_pcie_get_resources(pdev); if (ret) return ret; ret = intel_pcie_ep_rst_init(pcie); if (ret) return ret; pci->ops = &intel_pcie_ops; pp->ops = &intel_pcie_dw_ops; ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "Cannot initialize host\n"); return ret; } return 0; } static const struct dev_pm_ops intel_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq, intel_pcie_resume_noirq) }; static const struct of_device_id of_intel_pcie_match[] = { { .compatible = "intel,lgm-pcie" }, {} }; static struct platform_driver intel_pcie_driver = { .probe = intel_pcie_probe, .remove_new = intel_pcie_remove, .driver = { .name = "intel-gw-pcie", .of_match_table = of_intel_pcie_match, .pm = &intel_pcie_pm_ops, }, }; builtin_platform_driver(intel_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-intel-gw.c
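pcie_update_bits() in the Intel gateway driver above is a masked read-modify-write that skips the write when nothing would change. A small userspace sketch of the same pattern, using an ordinary variable as a stand-in for the memory-mapped register:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 32-bit memory-mapped register. */
static uint32_t fake_reg = 0x0000F0F0;

/*
 * Only the bits covered by 'mask' are replaced, and the store is skipped
 * when the result equals the current value, avoiding needless bus writes.
 */
static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t old = *reg;
	uint32_t tmp = (old & ~mask) | (val & mask);

	if (tmp != old)
		*reg = tmp;
}

int main(void)
{
	update_bits(&fake_reg, 0x000000FF, 0x000000AA);
	printf("reg = 0x%08X\n", fake_reg);	/* 0x0000F0AA */
	return 0;
}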
// SPDX-License-Identifier: GPL-2.0 /* * PCIe controller EP driver for Freescale Layerscape SoCs * * Copyright (C) 2018 NXP Semiconductor. * * Author: Xiaowei Bao <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include "pcie-designware.h" #define PEX_PF0_CONFIG 0xC0014 #define PEX_PF0_CFG_READY BIT(0) /* PEX PFa PCIE PME and message interrupt registers*/ #define PEX_PF0_PME_MES_DR 0xC0020 #define PEX_PF0_PME_MES_DR_LUD BIT(7) #define PEX_PF0_PME_MES_DR_LDD BIT(9) #define PEX_PF0_PME_MES_DR_HRD BIT(10) #define PEX_PF0_PME_MES_IER 0xC0028 #define PEX_PF0_PME_MES_IER_LUDIE BIT(7) #define PEX_PF0_PME_MES_IER_LDDIE BIT(9) #define PEX_PF0_PME_MES_IER_HRDIE BIT(10) #define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev) struct ls_pcie_ep_drvdata { u32 func_offset; const struct dw_pcie_ep_ops *ops; const struct dw_pcie_ops *dw_pcie_ops; }; struct ls_pcie_ep { struct dw_pcie *pci; struct pci_epc_features *ls_epc; const struct ls_pcie_ep_drvdata *drvdata; int irq; u32 lnkcap; bool big_endian; }; static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset) { struct dw_pcie *pci = pcie->pci; if (pcie->big_endian) return ioread32be(pci->dbi_base + offset); else return ioread32(pci->dbi_base + offset); } static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value) { struct dw_pcie *pci = pcie->pci; if (pcie->big_endian) iowrite32be(value, pci->dbi_base + offset); else iowrite32(value, pci->dbi_base + offset); } static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id) { struct ls_pcie_ep *pcie = dev_id; struct dw_pcie *pci = pcie->pci; u32 val, cfg; u8 offset; val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR); ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val); if (!val) return IRQ_NONE; if (val & PEX_PF0_PME_MES_DR_LUD) { offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); /* * The values of the Maximum Link Width and Supported Link * Speed from the Link Capabilities Register will be lost * during link down or hot reset. Restore initial value * that configured by the Reset Configuration Word (RCW). 
*/ dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap); dw_pcie_dbi_ro_wr_dis(pci); cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG); cfg |= PEX_PF0_CFG_READY; ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg); dw_pcie_ep_linkup(&pci->ep); dev_dbg(pci->dev, "Link up\n"); } else if (val & PEX_PF0_PME_MES_DR_LDD) { dev_dbg(pci->dev, "Link down\n"); pci_epc_linkdown(pci->ep.epc); } else if (val & PEX_PF0_PME_MES_DR_HRD) { dev_dbg(pci->dev, "Hot reset\n"); } return IRQ_HANDLED; } static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie, struct platform_device *pdev) { u32 val; int ret; pcie->irq = platform_get_irq_byname(pdev, "pme"); if (pcie->irq < 0) return pcie->irq; ret = devm_request_irq(&pdev->dev, pcie->irq, ls_pcie_ep_event_handler, IRQF_SHARED, pdev->name, pcie); if (ret) { dev_err(&pdev->dev, "Can't register PCIe IRQ\n"); return ret; } /* Enable interrupts */ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER); val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE | PEX_PF0_PME_MES_IER_LUDIE; ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val); return 0; } static const struct pci_epc_features* ls_pcie_ep_get_features(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); return pcie->ls_epc; } static void ls_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); struct dw_pcie_ep_func *ep_func; enum pci_barno bar; ep_func = dw_pcie_ep_get_func_from_ep(ep, 0); if (!ep_func) return; for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) dw_pcie_ep_reset_bar(pci, bar); pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false; pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false; } static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: return dw_pcie_ep_raise_legacy_irq(ep, func_no); case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); case PCI_EPC_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); return -EINVAL; } } static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); WARN_ON(func_no && !pcie->drvdata->func_offset); return pcie->drvdata->func_offset * func_no; } static const struct dw_pcie_ep_ops ls_pcie_ep_ops = { .ep_init = ls_pcie_ep_init, .raise_irq = ls_pcie_ep_raise_irq, .get_features = ls_pcie_ep_get_features, .func_conf_select = ls_pcie_ep_func_conf_select, }; static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = { .ops = &ls_pcie_ep_ops, }; static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = { .func_offset = 0x20000, .ops = &ls_pcie_ep_ops, }; static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = { .func_offset = 0x8000, .ops = &ls_pcie_ep_ops, }; static const struct of_device_id ls_pcie_ep_of_match[] = { { .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata }, { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata }, { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata }, { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata }, { .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata }, { }, }; static int __init ls_pcie_ep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; 
struct dw_pcie *pci; struct ls_pcie_ep *pcie; struct pci_epc_features *ls_epc; struct resource *dbi_base; u8 offset; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL); if (!ls_epc) return -ENOMEM; pcie->drvdata = of_device_get_match_data(dev); pci->dev = dev; pci->ops = pcie->drvdata->dw_pcie_ops; ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4); ls_epc->linkup_notifier = true; pcie->pci = pci; pcie->ls_epc = ls_epc; dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); pci->ep.ops = &ls_pcie_ep_ops; pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian"); platform_set_drvdata(pdev, pcie); offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); ret = dw_pcie_ep_init(&pci->ep); if (ret) return ret; return ls_pcie_ep_interrupt_init(pcie, pdev); } static struct platform_driver ls_pcie_ep_driver = { .driver = { .name = "layerscape-pcie-ep", .of_match_table = ls_pcie_ep_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
linux-master
drivers/pci/controller/dwc/pci-layerscape-ep.c
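ls_pcie_ep_event_handler() above latches PEX_PF0_PME_MES_DR, writes the value straight back to acknowledge it, and then dispatches on the link-up, link-down and hot-reset bits. The following is only a loose standalone model of that write-1-to-clear dispatch flow: the bit positions mirror the LUD/LDD/HRD flags, but the "register" is a plain variable here, so the acknowledging write-back has no clearing effect.

#include <stdint.h>
#include <stdio.h>

#define EVT_LINK_UP	(1u << 7)	/* mirrors PEX_PF0_PME_MES_DR_LUD */
#define EVT_LINK_DOWN	(1u << 9)	/* mirrors PEX_PF0_PME_MES_DR_LDD */
#define EVT_HOT_RESET	(1u << 10)	/* mirrors PEX_PF0_PME_MES_DR_HRD */

static void handle_events(uint32_t *status_reg)
{
	uint32_t val = *status_reg;

	/* On real hardware this write-back clears the latched bits (W1C). */
	*status_reg = val;

	if (val & EVT_LINK_UP)
		puts("link up");
	else if (val & EVT_LINK_DOWN)
		puts("link down");
	else if (val & EVT_HOT_RESET)
		puts("hot reset");
}

int main(void)
{
	uint32_t reg = EVT_LINK_UP;

	handle_events(&reg);
	return 0;
}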
// SPDX-License-Identifier: GPL-2.0 /* * DWC PCIe RC driver for Toshiba Visconti ARM SoC * * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation * Copyright (C) 2021 TOSHIBA CORPORATION * * Nobuhiro Iwamatsu <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/types.h> #include "pcie-designware.h" #include "../../pci.h" struct visconti_pcie { struct dw_pcie pci; void __iomem *ulreg_base; void __iomem *smu_base; void __iomem *mpu_base; struct clk *refclk; struct clk *coreclk; struct clk *auxclk; }; #define PCIE_UL_REG_S_PCIE_MODE 0x00F4 #define PCIE_UL_REG_S_PCIE_MODE_EP 0x00 #define PCIE_UL_REG_S_PCIE_MODE_RC 0x04 #define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8 #define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3) #define PCIE_UL_DIRECT_PERSTN_EN BIT(2) #define PCIE_UL_PERSTN_OUT BIT(1) #define PCIE_UL_DIRECT_PERSTN BIT(0) #define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \ PCIE_UL_DIRECT_PERSTN_EN | \ PCIE_UL_DIRECT_PERSTN) #define PCIE_UL_REG_S_PHY_INIT_02 0x0104 #define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0) #define PCIE_UL_REG_S_PHY_INIT_03 0x0108 #define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0) #define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138 #define PCIE_UL_CFG_PME_INT BIT(0) #define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1) #define PCIE_UL_EDMA_INT0 BIT(2) #define PCIE_UL_EDMA_INT1 BIT(3) #define PCIE_UL_EDMA_INT2 BIT(4) #define PCIE_UL_EDMA_INT3 BIT(5) #define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \ PCIE_UL_CFG_LINK_EQ_REQ_INT | \ PCIE_UL_EDMA_INT0 | \ PCIE_UL_EDMA_INT1 | \ PCIE_UL_EDMA_INT2 | \ PCIE_UL_EDMA_INT3) #define PCIE_UL_REG_S_SB_MON 0x0198 #define PCIE_UL_REG_S_SIG_MON 0x019C #define PCIE_UL_CORE_RST_N_MON BIT(0) #define PCIE_UL_REG_V_SII_DBG_00 0x0844 #define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860 #define PCIE_UL_APP_LTSSM_ENABLE BIT(0) #define PCIE_UL_REG_V_PHY_ST_00 0x0864 #define PCIE_UL_SMLH_LINK_UP BIT(0) #define PCIE_UL_REG_V_PHY_ST_02 0x0868 #define PCIE_UL_S_DETECT_ACT 0x01 #define PCIE_UL_S_L0 0x11 #define PISMU_CKON_PCIE 0x0038 #define PISMU_CKON_PCIE_AUX_CLK BIT(1) #define PISMU_CKON_PCIE_MSTR_ACLK BIT(0) #define PISMU_RSOFF_PCIE 0x0538 #define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1) #define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0) #define PCIE_MPU_REG_MP_EN 0x0 #define MPU_MP_EN_DISABLE BIT(0) /* Access registers in PCIe ulreg */ static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg) { writel_relaxed(val, pcie->ulreg_base + reg); } static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg) { return readl_relaxed(pcie->ulreg_base + reg); } /* Access registers in PCIe smu */ static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg) { writel_relaxed(val, pcie->smu_base + reg); } /* Access registers in PCIe mpu */ static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg) { writel_relaxed(val, pcie->mpu_base + reg); } static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg) { return readl_relaxed(pcie->mpu_base + reg); } static int visconti_pcie_link_up(struct dw_pcie *pci) { struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); void __iomem *addr = pcie->ulreg_base; u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02); return !!(val & PCIE_UL_S_L0); } static int visconti_pcie_start_link(struct 
dw_pcie *pci) { struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); void __iomem *addr = pcie->ulreg_base; u32 val; int ret; visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE, PCIE_UL_REG_V_SII_GEN_CTRL_01); ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02, val, (val & PCIE_UL_S_L0), 90000, 100000); if (ret) return ret; visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL, PCIE_UL_REG_S_INT_EVENT_MASK1); if (dw_pcie_link_up(pci)) { val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN); visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN); } return 0; } static void visconti_pcie_stop_link(struct dw_pcie *pci) { struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); u32 val; val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01); val &= ~PCIE_UL_APP_LTSSM_ENABLE; visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01); val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN); visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN); } /* * In this SoC specification, the CPU bus outputs the offset value from * 0x40000000 to the PCIe bus, so 0x40000000 is subtracted from the CPU * bus address. This 0x40000000 is also based on io_base from DT. */ static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr) { struct dw_pcie_rp *pp = &pci->pp; return cpu_addr & ~pp->io_base; } static const struct dw_pcie_ops dw_pcie_ops = { .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup, .link_up = visconti_pcie_link_up, .start_link = visconti_pcie_start_link, .stop_link = visconti_pcie_stop_link, }; static int visconti_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); void __iomem *addr; int err; u32 val; visconti_smu_writel(pcie, PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK, PISMU_CKON_PCIE); ndelay(250); visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N, PISMU_RSOFF_PCIE); visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC, PCIE_UL_REG_S_PCIE_MODE); val = PCIE_UL_REG_S_PERSTN_CTRL_INIT; visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL); udelay(100); val |= PCIE_UL_PERSTN_OUT; visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL); udelay(100); visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N, PISMU_RSOFF_PCIE); addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03; err = readl_relaxed_poll_timeout(addr, val, (val & PCIE_UL_PHY0_SRAM_INIT_DONE), 100, 1000); if (err) return err; visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE, PCIE_UL_REG_S_PHY_INIT_02); addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON; return readl_relaxed_poll_timeout(addr, val, (val & PCIE_UL_CORE_RST_N_MON), 100, 1000); } static const struct dw_pcie_host_ops visconti_pcie_host_ops = { .host_init = visconti_pcie_host_init, }; static int visconti_get_resources(struct platform_device *pdev, struct visconti_pcie *pcie) { struct device *dev = &pdev->dev; pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg"); if (IS_ERR(pcie->ulreg_base)) return PTR_ERR(pcie->ulreg_base); pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu"); if (IS_ERR(pcie->smu_base)) return PTR_ERR(pcie->smu_base); pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu"); if (IS_ERR(pcie->mpu_base)) return PTR_ERR(pcie->mpu_base); pcie->refclk = devm_clk_get(dev, "ref"); if (IS_ERR(pcie->refclk)) return dev_err_probe(dev, PTR_ERR(pcie->refclk), "Failed to get ref clock\n"); pcie->coreclk = devm_clk_get(dev, "core"); if 
(IS_ERR(pcie->coreclk)) return dev_err_probe(dev, PTR_ERR(pcie->coreclk), "Failed to get core clock\n"); pcie->auxclk = devm_clk_get(dev, "aux"); if (IS_ERR(pcie->auxclk)) return dev_err_probe(dev, PTR_ERR(pcie->auxclk), "Failed to get aux clock\n"); return 0; } static int visconti_add_pcie_port(struct visconti_pcie *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; struct dw_pcie_rp *pp = &pci->pp; pp->irq = platform_get_irq_byname(pdev, "intr"); if (pp->irq < 0) return pp->irq; pp->ops = &visconti_pcie_host_ops; return dw_pcie_host_init(pp); } static int visconti_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct visconti_pcie *pcie; struct dw_pcie *pci; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = &pcie->pci; pci->dev = dev; pci->ops = &dw_pcie_ops; ret = visconti_get_resources(pdev, pcie); if (ret) return ret; platform_set_drvdata(pdev, pcie); return visconti_add_pcie_port(pcie, pdev); } static const struct of_device_id visconti_pcie_match[] = { { .compatible = "toshiba,visconti-pcie" }, {}, }; static struct platform_driver visconti_pcie_driver = { .probe = visconti_pcie_probe, .driver = { .name = "visconti-pcie", .of_match_table = visconti_pcie_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(visconti_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-visconti.c
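The comment in visconti_pcie_cpu_addr_fixup() above explains that the interconnect drops a fixed 0x40000000 offset before transactions reach the PCIe bus, so the iATU must be programmed with the bus-side address. A short sketch of that translation written as a subtraction; the driver itself masks with ~pp->io_base, which is equivalent for addresses inside a power-of-two-aligned window, and the base value below is only an example:

#include <stdint.h>
#include <stdio.h>

/* Example CPU-side PCIe window base, matching the 0x40000000 case above. */
#define CPU_PCIE_WINDOW_BASE	0x40000000ull

/* Convert a CPU address inside the window to the address seen on the bus. */
static uint64_t cpu_addr_to_bus_addr(uint64_t cpu_addr)
{
	return cpu_addr - CPU_PCIE_WINDOW_BASE;
}

int main(void)
{
	printf("bus addr = 0x%llx\n",
	       (unsigned long long)cpu_addr_to_bus_addr(0x40100000ull));
	return 0;
}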
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC * * Authors: * Vadim Vlasov <[email protected]> * Serge Semin <[email protected]> * * Baikal-T1 PCIe controller driver */ #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/types.h> #include "pcie-designware.h" /* Baikal-T1 System CCU control registers */ #define BT1_CCU_PCIE_CLKC 0x140 #define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16) #define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17) #define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18) #define BT1_CCU_PCIE_RSTC 0x144 #define BT1_CCU_PCIE_REQ_LINK_RST BIT(13) #define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14) #define BT1_CCU_PCIE_REQ_PHY_RST BIT(16) #define BT1_CCU_PCIE_REQ_CORE_RST BIT(24) #define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26) #define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27) #define BT1_CCU_PCIE_PMSC 0x148 #define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0) #define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00 #define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01 #define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02 #define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03 #define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04 #define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05 #define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06 #define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07 #define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08 #define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09 #define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a #define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b #define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c #define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d #define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e #define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f #define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10 #define BT1_CCU_PCIE_LTSSM_L0 0x11 #define BT1_CCU_PCIE_LTSSM_L0S 0x12 #define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13 #define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14 #define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15 #define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16 #define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17 #define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18 #define BT1_CCU_PCIE_LTSSM_DISABLE 0x19 #define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a #define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b #define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c #define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d #define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e #define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f #define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20 #define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21 #define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22 #define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23 #define BT1_CCU_PCIE_SMLH_LINKUP BIT(6) #define BT1_CCU_PCIE_RDLH_LINKUP BIT(7) #define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8) #define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9) #define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10) #define BT1_CCU_PCIE_L1_PENDING BIT(12) #define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14) #define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15) #define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16) #define BT1_CCU_PCIE_PM_PME_EN BIT(20) #define BT1_CCU_PCIE_PM_PME_STATUS BIT(21) #define BT1_CCU_PCIE_AUX_PM_EN BIT(22) #define BT1_CCU_PCIE_AUX_PWR_DET BIT(23) #define BT1_CCU_PCIE_WAKE_DET BIT(24) #define BT1_CCU_PCIE_TURNOFF_REQ BIT(30) #define BT1_CCU_PCIE_TURNOFF_ACK BIT(31) #define BT1_CCU_PCIE_GENC 0x14c #define BT1_CCU_PCIE_LTSSM_EN BIT(1) #define BT1_CCU_PCIE_DBI2_MODE BIT(2) #define BT1_CCU_PCIE_MGMT_EN BIT(3) #define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16) 
#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17) #define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24) #define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25) #define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26) #define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27) #define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \ ({ \ int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \ __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \ }) /* Baikal-T1 PCIe specific control registers */ #define BT1_PCIE_AXI2MGM_LANENUM 0xd04 #define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0) #define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08 #define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0) #define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29) #define BT1_PCIE_AXI2MGM_DONE BIT(30) #define BT1_PCIE_AXI2MGM_BUSY BIT(31) #define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c #define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0) #define BT1_PCIE_AXI2MGM_READDATA 0xd10 #define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0) /* Generic Baikal-T1 PCIe interface resources */ #define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks) #define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks) #define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts) #define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts) /* PCIe bus setup delays and timeouts */ #define BT1_PCIE_RST_DELAY_MS 100 #define BT1_PCIE_RUN_DELAY_US 100 #define BT1_PCIE_REQ_DELAY_US 1 #define BT1_PCIE_REQ_TIMEOUT_US 1000 #define BT1_PCIE_LNK_DELAY_US 1000 #define BT1_PCIE_LNK_TIMEOUT_US 1000000 static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = { DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK, }; static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = { DW_PCIE_REF_CLK, }; static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = { DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST, }; static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = { DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST, DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST, }; struct bt1_pcie { struct dw_pcie dw; struct platform_device *pdev; struct regmap *sys_regs; }; #define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw) /* * Baikal-T1 MMIO space must be read/written by the dword-aligned * instructions. Note the methods are optimized to have the dword operations * performed with minimum overhead as the most frequently used ones. 
*/ static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val) { unsigned int ofs = (uintptr_t)addr & 0x3; if (!IS_ALIGNED((uintptr_t)addr, size)) return -EINVAL; *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE; if (size == 4) { return 0; } else if (size == 2) { *val &= 0xffff; return 0; } else if (size == 1) { *val &= 0xff; return 0; } return -EINVAL; } static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val) { unsigned int ofs = (uintptr_t)addr & 0x3; u32 tmp, mask; if (!IS_ALIGNED((uintptr_t)addr, size)) return -EINVAL; if (size == 4) { writel(val, addr); return 0; } else if (size == 2 || size == 1) { mask = GENMASK(size * BITS_PER_BYTE - 1, 0); tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE); tmp |= (val & mask) << ofs * BITS_PER_BYTE; writel(tmp, addr - ofs); return 0; } return -EINVAL; } static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size) { int ret; u32 val; ret = bt1_pcie_read_mmio(base + reg, size, &val); if (ret) { dev_err(pci->dev, "Read DBI address failed\n"); return ~0U; } return val; } static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val) { int ret; ret = bt1_pcie_write_mmio(base + reg, size, val); if (ret) dev_err(pci->dev, "Write DBI address failed\n"); } static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val) { struct bt1_pcie *btpci = to_bt1_pcie(pci); int ret; regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE); ret = bt1_pcie_write_mmio(base + reg, size, val); if (ret) dev_err(pci->dev, "Write DBI2 address failed\n"); regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, BT1_CCU_PCIE_DBI2_MODE, 0); } static int bt1_pcie_start_link(struct dw_pcie *pci) { struct bt1_pcie *btpci = to_bt1_pcie(pci); u32 val; int ret; /* * Enable LTSSM and make sure it was able to establish both PHY and * data links. This procedure shall work fine to reach 2.5 GT/s speed. */ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN); ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, (val & BT1_CCU_PCIE_SMLH_LINKUP), BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); if (ret) { dev_err(pci->dev, "LTSSM failed to set PHY link up\n"); return ret; } ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, (val & BT1_CCU_PCIE_RDLH_LINKUP), BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); if (ret) { dev_err(pci->dev, "LTSSM failed to set data link up\n"); return ret; } /* * Activate direct speed change after the link is established in an * attempt to reach a higher bus performance (up to Gen.3 - 8.0 GT/s). * This is required at least to get 8.0 GT/s speed. 
*/ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val, BT1_CCU_PCIE_LTSSM_LINKUP(val), BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US); if (ret) dev_err(pci->dev, "LTSSM failed to get into L0 state\n"); return ret; } static void bt1_pcie_stop_link(struct dw_pcie *pci) { struct bt1_pcie *btpci = to_bt1_pcie(pci); regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, BT1_CCU_PCIE_LTSSM_EN, 0); } static const struct dw_pcie_ops bt1_pcie_ops = { .read_dbi = bt1_pcie_read_dbi, .write_dbi = bt1_pcie_write_dbi, .write_dbi2 = bt1_pcie_write_dbi2, .start_link = bt1_pcie_start_link, .stop_link = bt1_pcie_stop_link, }; static struct pci_ops bt1_pci_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write32, }; static int bt1_pcie_get_resources(struct bt1_pcie *btpci) { struct device *dev = btpci->dw.dev; int i; /* DBI access is supposed to be performed by the dword-aligned IOs */ btpci->dw.pp.bridge->ops = &bt1_pci_ops; /* These CSRs are in MMIO so we won't check the regmap-methods status */ btpci->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon"); if (IS_ERR(btpci->sys_regs)) return dev_err_probe(dev, PTR_ERR(btpci->sys_regs), "Failed to get syscon\n"); /* Make sure all the required resources have been specified */ for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) { if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) { dev_err(dev, "App clocks set is incomplete\n"); return -ENOENT; } } for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) { if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) { dev_err(dev, "Core clocks set is incomplete\n"); return -ENOENT; } } for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) { if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) { dev_err(dev, "App resets set is incomplete\n"); return -ENOENT; } } for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) { if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) { dev_err(dev, "Core resets set is incomplete\n"); return -ENOENT; } } return 0; } static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init) { struct device *dev = btpci->dw.dev; struct dw_pcie *pci = &btpci->dw; int ret; /* Disable LTSSM for sure */ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC, BT1_CCU_PCIE_LTSSM_EN, 0); /* * Application reset controls are trigger-based so assert the core * resets only. */ ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts); if (ret) dev_err(dev, "Failed to assert core resets\n"); /* * Clocks are disabled by default at least in accordance with the clk * enable counter value on init stage. */ if (!init) { clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks); } /* The peripheral devices are unavailable anyway so reset them too */ gpiod_set_value_cansleep(pci->pe_rst, 1); /* Make sure all the resets are settled */ msleep(BT1_PCIE_RST_DELAY_MS); } /* * Implements the cold reset procedure in accordance with the reference manual * and available PM signals. 
*/ static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci) { struct device *dev = btpci->dw.dev; struct dw_pcie *pci = &btpci->dw; u32 val; int ret; /* First get out of the Power/Hot reset state */ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert PHY reset\n"); return ret; } ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert hot reset\n"); goto err_assert_pwr_rst; } /* Wait for the PM-core to stop requesting the PHY reset */ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val, !(val & BT1_CCU_PCIE_REQ_PHY_RST), BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US); if (ret) { dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n"); goto err_assert_hot_rst; } ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert PHY reset\n"); goto err_assert_hot_rst; } /* Clocks can be now enabled, but the ref one is crucial at this stage */ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks); if (ret) { dev_err(dev, "Failed to enable app clocks\n"); goto err_assert_phy_rst; } ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); if (ret) { dev_err(dev, "Failed to enable ref clocks\n"); goto err_disable_app_clk; } /* Wait for the PM to stop requesting the controller core reset */ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val, !(val & BT1_CCU_PCIE_REQ_CORE_RST), BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US); if (ret) { dev_err(dev, "Timed out waiting for PM to stop core resetting\n"); goto err_disable_core_clk; } /* PCS-PIPE interface and controller core can be now activated */ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert PIPE reset\n"); goto err_disable_core_clk; } ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert core reset\n"); goto err_assert_pipe_rst; } /* It's recommended to reset the core and application logic together */ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts); if (ret) { dev_err(dev, "Failed to reset app domain\n"); goto err_assert_core_rst; } /* Sticky/Non-sticky CSR flags can be now unreset too */ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert sticky reset\n"); goto err_assert_core_rst; } ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc); if (ret) { dev_err(dev, "Failed to deassert non-sticky reset\n"); goto err_assert_sticky_rst; } /* Activate the PCIe bus peripheral devices */ gpiod_set_value_cansleep(pci->pe_rst, 0); /* Make sure the state is settled (LTSSM is still disabled though) */ usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100); return 0; err_assert_sticky_rst: reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc); err_assert_core_rst: reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc); err_assert_pipe_rst: reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc); err_disable_core_clk: clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks); err_disable_app_clk: clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks); err_assert_phy_rst: reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc); err_assert_hot_rst: reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc); err_assert_pwr_rst: 
reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc); return ret; } static int bt1_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct bt1_pcie *btpci = to_bt1_pcie(pci); int ret; ret = bt1_pcie_get_resources(btpci); if (ret) return ret; bt1_pcie_full_stop_bus(btpci, true); return bt1_pcie_cold_start_bus(btpci); } static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct bt1_pcie *btpci = to_bt1_pcie(pci); bt1_pcie_full_stop_bus(btpci, false); } static const struct dw_pcie_host_ops bt1_pcie_host_ops = { .host_init = bt1_pcie_host_init, .host_deinit = bt1_pcie_host_deinit, }; static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev) { struct bt1_pcie *btpci; btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL); if (!btpci) return ERR_PTR(-ENOMEM); btpci->pdev = pdev; platform_set_drvdata(pdev, btpci); return btpci; } static int bt1_pcie_add_port(struct bt1_pcie *btpci) { struct device *dev = &btpci->pdev->dev; int ret; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) return ret; btpci->dw.version = DW_PCIE_VER_460A; btpci->dw.dev = dev; btpci->dw.ops = &bt1_pcie_ops; btpci->dw.pp.num_vectors = MAX_MSI_IRQS; btpci->dw.pp.ops = &bt1_pcie_host_ops; dw_pcie_cap_set(&btpci->dw, REQ_RES); ret = dw_pcie_host_init(&btpci->dw.pp); return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n"); } static void bt1_pcie_del_port(struct bt1_pcie *btpci) { dw_pcie_host_deinit(&btpci->dw.pp); } static int bt1_pcie_probe(struct platform_device *pdev) { struct bt1_pcie *btpci; btpci = bt1_pcie_create_data(pdev); if (IS_ERR(btpci)) return PTR_ERR(btpci); return bt1_pcie_add_port(btpci); } static void bt1_pcie_remove(struct platform_device *pdev) { struct bt1_pcie *btpci = platform_get_drvdata(pdev); bt1_pcie_del_port(btpci); } static const struct of_device_id bt1_pcie_of_match[] = { { .compatible = "baikal,bt1-pcie" }, {}, }; MODULE_DEVICE_TABLE(of, bt1_pcie_of_match); static struct platform_driver bt1_pcie_driver = { .probe = bt1_pcie_probe, .remove_new = bt1_pcie_remove, .driver = { .name = "bt1-pcie", .of_match_table = bt1_pcie_of_match, }, }; module_platform_driver(bt1_pcie_driver); MODULE_AUTHOR("Serge Semin <[email protected]>"); MODULE_DESCRIPTION("Baikal-T1 PCIe driver"); MODULE_LICENSE("GPL");
linux-master
drivers/pci/controller/dwc/pcie-bt1.c
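bt1_pcie_read_mmio()/bt1_pcie_write_mmio() above emulate byte and halfword DBI accesses on a bus that only accepts aligned 32-bit transactions, as the comment in the file explains. A self-contained userspace model of that read-modify-write technique, operating on an ordinary dword instead of an ioremapped register (the caller is assumed to pass an access that does not cross the dword boundary):

#include <stdint.h>
#include <stdio.h>

/* Read 'size' bytes (1, 2 or 4) starting at byte offset 'byte_ofs'. */
static uint32_t read_sub_word(const uint32_t *dword, unsigned int byte_ofs,
			      int size)
{
	uint32_t val = *dword >> (byte_ofs * 8);

	if (size == 2)
		return val & 0xffff;
	if (size == 1)
		return val & 0xff;
	return val;
}

/* Write 'size' bytes by patching only the affected bits of the dword. */
static void write_sub_word(uint32_t *dword, unsigned int byte_ofs, int size,
			   uint32_t val)
{
	uint32_t mask = (size == 4) ? 0xffffffffu :
			((1u << (size * 8)) - 1) << (byte_ofs * 8);

	*dword = (*dword & ~mask) | ((val << (byte_ofs * 8)) & mask);
}

int main(void)
{
	uint32_t reg = 0x11223344;

	write_sub_word(&reg, 1, 1, 0xAA);	/* patch byte 1 only */
	printf("reg = 0x%08X byte1 = 0x%02X\n",
	       reg, read_sub_word(&reg, 1, 1));	/* 0x1122AA44, 0xAA */
	return 0;
}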
// SPDX-License-Identifier: GPL-2.0 /* * PCIe endpoint controller driver for UniPhier SoCs * Copyright 2018 Socionext Inc. * Author: Kunihiko Hayashi <[email protected]> */ #include <linux/bitops.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include "pcie-designware.h" /* Link Glue registers */ #define PCL_RSTCTRL0 0x0010 #define PCL_RSTCTRL_AXI_REG BIT(3) #define PCL_RSTCTRL_AXI_SLAVE BIT(2) #define PCL_RSTCTRL_AXI_MASTER BIT(1) #define PCL_RSTCTRL_PIPE3 BIT(0) #define PCL_RSTCTRL1 0x0020 #define PCL_RSTCTRL_PERST BIT(0) #define PCL_RSTCTRL2 0x0024 #define PCL_RSTCTRL_PHY_RESET BIT(0) #define PCL_PINCTRL0 0x002c #define PCL_PERST_PLDN_REGEN BIT(12) #define PCL_PERST_NOE_REGEN BIT(11) #define PCL_PERST_OUT_REGEN BIT(8) #define PCL_PERST_PLDN_REGVAL BIT(4) #define PCL_PERST_NOE_REGVAL BIT(3) #define PCL_PERST_OUT_REGVAL BIT(0) #define PCL_PIPEMON 0x0044 #define PCL_PCLK_ALIVE BIT(15) #define PCL_MODE 0x8000 #define PCL_MODE_REGEN BIT(8) #define PCL_MODE_REGVAL BIT(0) #define PCL_APP_CLK_CTRL 0x8004 #define PCL_APP_CLK_REQ BIT(0) #define PCL_APP_READY_CTRL 0x8008 #define PCL_APP_LTSSM_ENABLE BIT(0) #define PCL_APP_MSI0 0x8040 #define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8) #define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0) #define PCL_APP_MSI1 0x8044 #define PCL_APP_MSI_REQ BIT(0) #define PCL_APP_INTX 0x8074 #define PCL_APP_INTX_SYS_INT BIT(0) #define PCL_APP_PM0 0x8078 #define PCL_SYS_AUX_PWR_DET BIT(8) /* assertion time of INTx in usec */ #define PCL_INTX_WIDTH_USEC 30 struct uniphier_pcie_ep_priv { void __iomem *base; struct dw_pcie pci; struct clk *clk, *clk_gio; struct reset_control *rst, *rst_gio; struct phy *phy; const struct uniphier_pcie_ep_soc_data *data; }; struct uniphier_pcie_ep_soc_data { bool has_gio; void (*init)(struct uniphier_pcie_ep_priv *priv); int (*wait)(struct uniphier_pcie_ep_priv *priv); const struct pci_epc_features features; }; #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv, bool enable) { u32 val; val = readl(priv->base + PCL_APP_READY_CTRL); if (enable) val |= PCL_APP_LTSSM_ENABLE; else val &= ~PCL_APP_LTSSM_ENABLE; writel(val, priv->base + PCL_APP_READY_CTRL); } static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv, bool assert) { u32 val; val = readl(priv->base + PCL_RSTCTRL2); if (assert) val |= PCL_RSTCTRL_PHY_RESET; else val &= ~PCL_RSTCTRL_PHY_RESET; writel(val, priv->base + PCL_RSTCTRL2); } static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv) { u32 val; /* set EP mode */ val = readl(priv->base + PCL_MODE); val |= PCL_MODE_REGEN | PCL_MODE_REGVAL; writel(val, priv->base + PCL_MODE); /* clock request */ val = readl(priv->base + PCL_APP_CLK_CTRL); val &= ~PCL_APP_CLK_REQ; writel(val, priv->base + PCL_APP_CLK_CTRL); /* deassert PIPE3 and AXI reset */ val = readl(priv->base + PCL_RSTCTRL0); val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3; writel(val, priv->base + PCL_RSTCTRL0); uniphier_pcie_ltssm_enable(priv, false); msleep(100); } static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv) { u32 val; /* set EP mode */ val = readl(priv->base + PCL_MODE); val |= PCL_MODE_REGEN | PCL_MODE_REGVAL; writel(val, priv->base + PCL_MODE); /* use auxiliary power detection */ val = 
readl(priv->base + PCL_APP_PM0); val |= PCL_SYS_AUX_PWR_DET; writel(val, priv->base + PCL_APP_PM0); /* assert PERST# */ val = readl(priv->base + PCL_PINCTRL0); val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL | PCL_PERST_PLDN_REGVAL); val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN | PCL_PERST_PLDN_REGEN; writel(val, priv->base + PCL_PINCTRL0); uniphier_pcie_ltssm_enable(priv, false); usleep_range(100000, 200000); /* deassert PERST# */ val = readl(priv->base + PCL_PINCTRL0); val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN; writel(val, priv->base + PCL_PINCTRL0); } static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv) { u32 status; int ret; /* wait PIPE clock */ ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status, status & PCL_PCLK_ALIVE, 100000, 1000000); if (ret) { dev_err(priv->pci.dev, "Failed to initialize controller in EP mode\n"); return ret; } return 0; } static int uniphier_pcie_start_link(struct dw_pcie *pci) { struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); uniphier_pcie_ltssm_enable(priv, true); return 0; } static void uniphier_pcie_stop_link(struct dw_pcie *pci) { struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); uniphier_pcie_ltssm_enable(priv, false); } static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar; for (bar = BAR_0; bar <= BAR_5; bar++) dw_pcie_ep_reset_bar(pci, bar); } static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); u32 val; /* * This makes pulse signal to send INTx to the RC, so this should * be cleared as soon as possible. This sequence is covered with * mutex in pci_epc_raise_irq(). 
*/ /* assert INTx */ val = readl(priv->base + PCL_APP_INTX); val |= PCL_APP_INTX_SYS_INT; writel(val, priv->base + PCL_APP_INTX); udelay(PCL_INTX_WIDTH_USEC); /* deassert INTx */ val &= ~PCL_APP_INTX_SYS_INT; writel(val, priv->base + PCL_APP_INTX); return 0; } static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); u32 val; val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no) | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1); writel(val, priv->base + PCL_APP_MSI0); val = readl(priv->base + PCL_APP_MSI1); val |= PCL_APP_MSI_REQ; writel(val, priv->base + PCL_APP_MSI1); return 0; } static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: return uniphier_pcie_ep_raise_legacy_irq(ep); case PCI_EPC_IRQ_MSI: return uniphier_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type); } return 0; } static const struct pci_epc_features* uniphier_pcie_get_features(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); return &priv->data->features; } static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = { .ep_init = uniphier_pcie_ep_init, .raise_irq = uniphier_pcie_ep_raise_irq, .get_features = uniphier_pcie_get_features, }; static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv) { int ret; ret = clk_prepare_enable(priv->clk); if (ret) return ret; ret = clk_prepare_enable(priv->clk_gio); if (ret) goto out_clk_disable; ret = reset_control_deassert(priv->rst); if (ret) goto out_clk_gio_disable; ret = reset_control_deassert(priv->rst_gio); if (ret) goto out_rst_assert; if (priv->data->init) priv->data->init(priv); uniphier_pcie_phy_reset(priv, true); ret = phy_init(priv->phy); if (ret) goto out_rst_gio_assert; uniphier_pcie_phy_reset(priv, false); if (priv->data->wait) { ret = priv->data->wait(priv); if (ret) goto out_phy_exit; } return 0; out_phy_exit: phy_exit(priv->phy); out_rst_gio_assert: reset_control_assert(priv->rst_gio); out_rst_assert: reset_control_assert(priv->rst); out_clk_gio_disable: clk_disable_unprepare(priv->clk_gio); out_clk_disable: clk_disable_unprepare(priv->clk); return ret; } static const struct dw_pcie_ops dw_pcie_ops = { .start_link = uniphier_pcie_start_link, .stop_link = uniphier_pcie_stop_link, }; static int uniphier_pcie_ep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_pcie_ep_priv *priv; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->data = of_device_get_match_data(dev); if (WARN_ON(!priv->data)) return -EINVAL; priv->pci.dev = dev; priv->pci.ops = &dw_pcie_ops; priv->base = devm_platform_ioremap_resource_byname(pdev, "link"); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); if (priv->data->has_gio) { priv->clk_gio = devm_clk_get(dev, "gio"); if (IS_ERR(priv->clk_gio)) return PTR_ERR(priv->clk_gio); priv->rst_gio = devm_reset_control_get_shared(dev, "gio"); if (IS_ERR(priv->rst_gio)) return PTR_ERR(priv->rst_gio); } priv->clk = devm_clk_get(dev, "link"); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); priv->rst = devm_reset_control_get_shared(dev, "link"); if (IS_ERR(priv->rst)) return PTR_ERR(priv->rst); priv->phy = 
devm_phy_optional_get(dev, "pcie-phy"); if (IS_ERR(priv->phy)) { ret = PTR_ERR(priv->phy); dev_err(dev, "Failed to get phy (%d)\n", ret); return ret; } platform_set_drvdata(pdev, priv); ret = uniphier_pcie_ep_enable(priv); if (ret) return ret; priv->pci.ep.ops = &uniphier_pcie_ep_ops; return dw_pcie_ep_init(&priv->pci.ep); } static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = { .has_gio = true, .init = uniphier_pcie_pro5_init_ep, .wait = NULL, .features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, .align = 1 << 16, .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), .reserved_bar = BIT(BAR_4), }, }; static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = { .has_gio = false, .init = uniphier_pcie_nx1_init_ep, .wait = uniphier_pcie_nx1_wait_ep, .features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, .align = 1 << 12, .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), }, }; static const struct of_device_id uniphier_pcie_ep_match[] = { { .compatible = "socionext,uniphier-pro5-pcie-ep", .data = &uniphier_pro5_data, }, { .compatible = "socionext,uniphier-nx1-pcie-ep", .data = &uniphier_nx1_data, }, { /* sentinel */ }, }; static struct platform_driver uniphier_pcie_ep_driver = { .probe = uniphier_pcie_ep_probe, .driver = { .name = "uniphier-pcie-ep", .of_match_table = uniphier_pcie_ep_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(uniphier_pcie_ep_driver);
linux-master
drivers/pci/controller/dwc/pcie-uniphier-ep.c
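The uniphier_pcie_ep_enable() function in the file above follows the usual reverse-order unwind idiom: every clock, reset and PHY step that succeeds gains a goto label that releases it, and a later failure jumps to the label matching the last completed step. Below is a minimal two-step sketch of that idiom, assuming only the standard clk and reset_control APIs; example_enable() is an illustrative name, not part of the driver.

#include <linux/clk.h>
#include <linux/reset.h>

/* Two-step bring-up with reverse-order unwind, mirroring the driver's style. */
static int example_enable(struct clk *clk, struct reset_control *rst)
{
        int ret;

        ret = clk_prepare_enable(clk);          /* step 1 */
        if (ret)
                return ret;                     /* nothing enabled yet */

        ret = reset_control_deassert(rst);      /* step 2 */
        if (ret)
                goto out_clk_disable;           /* undo step 1 only */

        return 0;

out_clk_disable:
        clk_disable_unprepare(clk);
        return ret;
}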
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Freescale Layerscape SoCs * * Copyright (C) 2014 Freescale Semiconductor. * Copyright 2021 NXP * * Author: Minghuan Lian <[email protected]> */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include "../../pci.h" #include "pcie-designware.h" /* PEX Internal Configuration Registers */ #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ #define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ #define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ /* PF Message Command Register */ #define LS_PCIE_PF_MCR 0x2c #define PF_MCR_PTOMR BIT(0) #define PF_MCR_EXL2S BIT(1) #define PCIE_IATU_NUM 6 struct ls_pcie_drvdata { const u32 pf_off; bool pm_support; }; struct ls_pcie { struct dw_pcie *pci; const struct ls_pcie_drvdata *drvdata; void __iomem *pf_base; bool big_endian; }; #define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr) #define to_ls_pcie(x) dev_get_drvdata((x)->dev) static bool ls_pcie_is_bridge(struct ls_pcie *pcie) { struct dw_pcie *pci = pcie->pci; u32 header_type; header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE); header_type &= 0x7f; return header_type == PCI_HEADER_TYPE_BRIDGE; } /* Clear multi-function bit */ static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) { struct dw_pcie *pci = pcie->pci; iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE); } /* Drop MSG TLP except for Vendor MSG */ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) { u32 val; struct dw_pcie *pci = pcie->pci; val = ioread32(pci->dbi_base + PCIE_STRFMR1); val &= 0xDFFFFFFF; iowrite32(val, pci->dbi_base + PCIE_STRFMR1); } /* Forward error response of outbound non-posted requests */ static void ls_pcie_fix_error_response(struct ls_pcie *pcie) { struct dw_pcie *pci = pcie->pci; iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); } static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off) { if (pcie->big_endian) return ioread32be(pcie->pf_base + off); return ioread32(pcie->pf_base + off); } static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val) { if (pcie->big_endian) iowrite32be(val, pcie->pf_base + off); else iowrite32(val, pcie->pf_base + off); } static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); u32 val; int ret; val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR); val |= PF_MCR_PTOMR; ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val); ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR, val, !(val & PF_MCR_PTOMR), PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); if (ret) dev_err(pcie->pci->dev, "PME_Turn_off timeout\n"); } static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); u32 val; int ret; /* * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link * to exit L2 state. */ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR); val |= PF_MCR_EXL2S; ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val); /* * L2 exit timeout of 10ms is not defined in the specifications, * it was chosen based on empirical observations. 
*/ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR, val, !(val & PF_MCR_EXL2S), 1000, 10000); if (ret) dev_err(pcie->pci->dev, "L2 exit timeout\n"); } static int ls_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); ls_pcie_fix_error_response(pcie); dw_pcie_dbi_ro_wr_en(pci); ls_pcie_clear_multifunction(pcie); dw_pcie_dbi_ro_wr_dis(pci); ls_pcie_drop_msg_tlp(pcie); return 0; } static const struct dw_pcie_host_ops ls_pcie_host_ops = { .host_init = ls_pcie_host_init, .pme_turn_off = ls_pcie_send_turnoff_msg, }; static const struct ls_pcie_drvdata ls1021a_drvdata = { .pm_support = false, }; static const struct ls_pcie_drvdata layerscape_drvdata = { .pf_off = 0xc0000, .pm_support = true, }; static const struct of_device_id ls_pcie_of_match[] = { { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata }, { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata }, { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata }, { }, }; static int ls_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; struct ls_pcie *pcie; struct resource *dbi_base; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pcie->drvdata = of_device_get_match_data(dev); pci->dev = dev; pci->pp.ops = &ls_pcie_host_ops; pcie->pci = pci; dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian"); pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off; if (!ls_pcie_is_bridge(pcie)) return -ENODEV; platform_set_drvdata(pdev, pcie); return dw_pcie_host_init(&pci->pp); } static int ls_pcie_suspend_noirq(struct device *dev) { struct ls_pcie *pcie = dev_get_drvdata(dev); if (!pcie->drvdata->pm_support) return 0; return dw_pcie_suspend_noirq(pcie->pci); } static int ls_pcie_resume_noirq(struct device *dev) { struct ls_pcie *pcie = dev_get_drvdata(dev); if (!pcie->drvdata->pm_support) return 0; ls_pcie_exit_from_l2(&pcie->pci->pp); return dw_pcie_resume_noirq(pcie->pci); } static const struct dev_pm_ops ls_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq) }; static struct platform_driver ls_pcie_driver = { .probe = ls_pcie_probe, .driver = { .name = "layerscape-pcie", .of_match_table = ls_pcie_of_match, .suppress_bind_attrs = true, .pm = &ls_pcie_pm_ops, }, }; builtin_platform_driver(ls_pcie_driver);
linux-master
drivers/pci/controller/dwc/pci-layerscape.c
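ls_pcie_send_turnoff_msg() and ls_pcie_exit_from_l2() in the file above both rely on the set-request-bit-then-poll idiom built on readx_poll_timeout(): write a self-clearing request bit, then poll the same register until the hardware acknowledges it or a timeout expires. A hedged sketch of that idiom follows; example_readl(), example_base and EXAMPLE_REQ are illustrative stand-ins, not driver symbols.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_REQ     BIT(0)          /* illustrative self-clearing request bit */

static void __iomem *example_base;      /* illustrative register block base */

static u32 example_readl(u32 off)
{
        return readl(example_base + off);
}

/* Set the request bit, then poll until the hardware clears it. */
static int example_send_request(u32 off)
{
        u32 val;

        writel(example_readl(off) | EXAMPLE_REQ, example_base + off);

        /* poll every 1ms, give up after 10ms */
        return readx_poll_timeout(example_readl, off, val,
                                  !(val & EXAMPLE_REQ), 1000, 10000);
}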
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips * such as Graviton and Alpine) * * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Author: Jonathan Chocron <[email protected]> */ #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/pci-acpi.h> #include "../../pci.h" #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) struct al_pcie_acpi { void __iomem *dbi_base; }; static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct pci_config_window *cfg = bus->sysdata; struct al_pcie_acpi *pcie = cfg->priv; void __iomem *dbi_base = pcie->dbi_base; if (bus->number == cfg->busr.start) { /* * The DW PCIe core doesn't filter out transactions to other * devices/functions on the root bus num, so we do this here. */ if (PCI_SLOT(devfn) > 0) return NULL; else return dbi_base + where; } return pci_ecam_map_bus(bus, devfn, where); } static int al_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct acpi_device *adev = to_acpi_device(dev); struct acpi_pci_root *root = acpi_driver_data(adev); struct al_pcie_acpi *al_pcie; struct resource *res; int ret; al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL); if (!al_pcie) return -ENOMEM; res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res); if (ret) { dev_err(dev, "can't get rc dbi base address for SEG %d\n", root->segment); return ret; } dev_dbg(dev, "Root port dbi res: %pR\n", res); al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(al_pcie->dbi_base)) return PTR_ERR(al_pcie->dbi_base); cfg->priv = al_pcie; return 0; } const struct pci_ecam_ops al_pcie_ops = { .init = al_pcie_init, .pci_ops = { .map_bus = al_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; #endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ #ifdef CONFIG_PCIE_AL #include <linux/of_pci.h> #include "pcie-designware.h" #define AL_PCIE_REV_ID_2 2 #define AL_PCIE_REV_ID_3 3 #define AL_PCIE_REV_ID_4 4 #define AXI_BASE_OFFSET 0x0 #define DEVICE_ID_OFFSET 0x16c #define DEVICE_REV_ID 0x0 #define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16) #define DEVICE_REV_ID_DEV_ID_X4 0 #define DEVICE_REV_ID_DEV_ID_X8 2 #define DEVICE_REV_ID_DEV_ID_X16 4 #define OB_CTRL_REV1_2_OFFSET 0x0040 #define OB_CTRL_REV3_5_OFFSET 0x0030 #define CFG_TARGET_BUS 0x0 #define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0) #define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8) #define CFG_CONTROL 0x4 #define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8) #define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16) struct al_pcie_reg_offsets { unsigned int ob_ctrl; }; struct al_pcie_target_bus_cfg { u8 reg_val; u8 reg_mask; u8 ecam_mask; }; struct al_pcie { struct dw_pcie *pci; void __iomem *controller_base; /* base of PCIe unit (not DW core) */ struct device *dev; resource_size_t ecam_size; unsigned int controller_rev_id; struct al_pcie_reg_offsets reg_offsets; struct al_pcie_target_bus_cfg target_bus_cfg; }; #define to_al_pcie(x) dev_get_drvdata((x)->dev) static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset) { return readl_relaxed(pcie->controller_base + offset); } static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset, u32 val) { writel_relaxed(val, pcie->controller_base + offset); } static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id) { u32 dev_rev_id_val; u32 
dev_id_val; dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET + DEVICE_ID_OFFSET + DEVICE_REV_ID); dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val); switch (dev_id_val) { case DEVICE_REV_ID_DEV_ID_X4: *rev_id = AL_PCIE_REV_ID_2; break; case DEVICE_REV_ID_DEV_ID_X8: *rev_id = AL_PCIE_REV_ID_3; break; case DEVICE_REV_ID_DEV_ID_X16: *rev_id = AL_PCIE_REV_ID_4; break; default: dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n", dev_id_val); return -EINVAL; } dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val); return 0; } static int al_pcie_reg_offsets_set(struct al_pcie *pcie) { switch (pcie->controller_rev_id) { case AL_PCIE_REV_ID_2: pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET; break; case AL_PCIE_REV_ID_3: case AL_PCIE_REV_ID_4: pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET; break; default: dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n", pcie->controller_rev_id); return -EINVAL; } return 0; } static inline void al_pcie_target_bus_set(struct al_pcie *pcie, u8 target_bus, u8 mask_target_bus) { u32 reg; reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) | FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus); al_pcie_controller_writel(pcie, AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS, reg); } static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct dw_pcie_rp *pp = bus->sysdata; struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp)); unsigned int busnr = bus->number; struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg; unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask; unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask; if (busnr_reg != target_bus_cfg->reg_val) { dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n", target_bus_cfg->reg_val, busnr_reg); target_bus_cfg->reg_val = busnr_reg; al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val, target_bus_cfg->reg_mask); } return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where); } static struct pci_ops al_child_pci_ops = { .map_bus = al_pcie_conf_addr_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static void al_pcie_config_prepare(struct al_pcie *pcie) { struct al_pcie_target_bus_cfg *target_bus_cfg; struct dw_pcie_rp *pp = &pcie->pci->pp; unsigned int ecam_bus_mask; u32 cfg_control_offset; u8 subordinate_bus; u8 secondary_bus; u32 cfg_control; u32 reg; struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res; target_bus_cfg = &pcie->target_bus_cfg; ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1; if (ecam_bus_mask > 255) { dev_warn(pcie->dev, "ECAM window size is larger than 256MB. 
Cutting off at 256\n"); ecam_bus_mask = 255; } /* This portion is taken from the transaction address */ target_bus_cfg->ecam_mask = ecam_bus_mask; /* This portion is taken from the cfg_target_bus reg */ target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask; target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask; al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val, target_bus_cfg->reg_mask); secondary_bus = bus->start + 1; subordinate_bus = bus->end; /* Set the valid values of secondary and subordinate buses */ cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl + CFG_CONTROL; cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset); reg = cfg_control & ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK); reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) | FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus); al_pcie_controller_writel(pcie, cfg_control_offset, reg); } static int al_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct al_pcie *pcie = to_al_pcie(pci); int rc; pp->bridge->child_ops = &al_child_pci_ops; rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id); if (rc) return rc; rc = al_pcie_reg_offsets_set(pcie); if (rc) return rc; al_pcie_config_prepare(pcie); return 0; } static const struct dw_pcie_host_ops al_pcie_host_ops = { .host_init = al_pcie_host_init, }; static int al_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *controller_res; struct resource *ecam_res; struct al_pcie *al_pcie; struct dw_pcie *pci; al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL); if (!al_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->pp.ops = &al_pcie_host_ops; al_pcie->pci = pci; al_pcie->dev = dev; ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (!ecam_res) { dev_err(dev, "couldn't find 'config' reg in DT\n"); return -ENOENT; } al_pcie->ecam_size = resource_size(ecam_res); controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "controller"); al_pcie->controller_base = devm_ioremap_resource(dev, controller_res); if (IS_ERR(al_pcie->controller_base)) { dev_err(dev, "couldn't remap controller base %pR\n", controller_res); return PTR_ERR(al_pcie->controller_base); } dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res); platform_set_drvdata(pdev, al_pcie); return dw_pcie_host_init(&pci->pp); } static const struct of_device_id al_pcie_of_match[] = { { .compatible = "amazon,al-alpine-v2-pcie", }, { .compatible = "amazon,al-alpine-v3-pcie", }, {}, }; static struct platform_driver al_pcie_driver = { .driver = { .name = "al-pcie", .of_match_table = al_pcie_of_match, .suppress_bind_attrs = true, }, .probe = al_pcie_probe, }; builtin_platform_driver(al_pcie_driver); #endif /* CONFIG_PCIE_AL*/
linux-master
drivers/pci/controller/dwc/pcie-al.c
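al_pcie_config_prepare() in the file above splits each bus number between the ECAM window and the cfg_target_bus register: the bits that fit inside the ECAM aperture come straight from the transaction address, while the remaining high bits are written to the register whenever they change. A small illustrative sketch of that split is shown here, assuming the generic PCIE_ECAM_BUS_SHIFT definition; example_split_busnr() is a hypothetical helper, not part of the driver.

#include <linux/pci-ecam.h>
#include <linux/types.h>

/* Split a bus number into the ECAM-addressed part and the register part. */
static void example_split_busnr(resource_size_t ecam_size, u8 busnr,
                                u8 *ecam_part, u8 *reg_part)
{
        u32 ecam_mask = (ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;

        /* the driver clamps the mask at 255 when the window covers more buses */
        if (ecam_mask > 255)
                ecam_mask = 255;

        *ecam_part = busnr & ecam_mask;  /* low bits: taken from the ECAM address */
        *reg_part  = busnr & ~ecam_mask; /* high bits: go into cfg_target_bus */
}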
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Rockchip SoCs. * * Copyright (C) 2021 Rockchip Electronics Co., Ltd. * http://www.rock-chips.com * * Author: Simon Xue <[email protected]> */ #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> #include "pcie-designware.h" /* * The upper 16 bits of PCIE_CLIENT_CONFIG are a write * mask for the lower 16 bits. */ #define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) #define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) #define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val) #define to_rockchip_pcie(x) dev_get_drvdata((x)->dev) #define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40) #define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc) #define PCIE_SMLH_LINKUP BIT(16) #define PCIE_RDLH_LINKUP BIT(17) #define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP) #define PCIE_L0S_ENTRY 0x11 #define PCIE_CLIENT_GENERAL_CONTROL 0x0 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c #define PCIE_CLIENT_GENERAL_DEBUG 0x104 #define PCIE_CLIENT_HOT_RESET_CTRL 0x180 #define PCIE_CLIENT_LTSSM_STATUS 0x300 #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) #define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0) struct rockchip_pcie { struct dw_pcie pci; void __iomem *apb_base; struct phy *phy; struct clk_bulk_data *clks; unsigned int clk_cnt; struct reset_control *rst; struct gpio_desc *rst_gpio; struct regulator *vpcie3v3; struct irq_domain *irq_domain; }; static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg) { return readl_relaxed(rockchip->apb_base + reg); } static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val, u32 reg) { writel_relaxed(val, rockchip->apb_base + reg); } static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); unsigned long reg, hwirq; chained_irq_enter(chip, desc); reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY); for_each_set_bit(hwirq, &reg, 4) generic_handle_domain_irq(rockchip->irq_domain, hwirq); chained_irq_exit(chip, desc); } static void rockchip_intx_mask(struct irq_data *data) { rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data), HIWORD_UPDATE_BIT(BIT(data->hwirq)), PCIE_CLIENT_INTR_MASK_LEGACY); }; static void rockchip_intx_unmask(struct irq_data *data) { rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data), HIWORD_DISABLE_BIT(BIT(data->hwirq)), PCIE_CLIENT_INTR_MASK_LEGACY); }; static struct irq_chip rockchip_intx_irq_chip = { .name = "INTx", .irq_mask = rockchip_intx_mask, .irq_unmask = rockchip_intx_unmask, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, }; static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = rockchip_pcie_intx_map, }; static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->pci.dev; struct device_node *intc; intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller"); if 
(!intc) { dev_err(dev, "missing child interrupt-controller node\n"); return -EINVAL; } rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, &intx_domain_ops, rockchip); of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); return -EINVAL; } return 0; } static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip) { rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM, PCIE_CLIENT_GENERAL_CONTROL); } static int rockchip_pcie_link_up(struct dw_pcie *pci) { struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS); if ((val & PCIE_LINKUP) == PCIE_LINKUP && (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY) return 1; return 0; } static int rockchip_pcie_start_link(struct dw_pcie *pci) { struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); /* Reset device */ gpiod_set_value_cansleep(rockchip->rst_gpio, 0); rockchip_pcie_enable_ltssm(rockchip); /* * PCIe requires the refclk to be stable for 100µs prior to releasing * PERST. See table 2-4 in section 2.6.2 AC Specifications of the PCI * Express Card Electromechanical Specification, 1.1. However, we don't * know if the refclk is coming from RC's PHY or external OSC. If it's * from RC, so enabling LTSSM is the just right place to release #PERST. * We need more extra time as before, rather than setting just * 100us as we don't know how long should the device need to reset. */ msleep(100); gpiod_set_value_cansleep(rockchip->rst_gpio, 1); return 0; } static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); struct device *dev = rockchip->pci.dev; u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); int irq, ret; irq = of_irq_get_byname(dev->of_node, "legacy"); if (irq < 0) return irq; ret = rockchip_pcie_init_irq_domain(rockchip); if (ret < 0) dev_err(dev, "failed to init irq domain\n"); irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, rockchip); /* LTSSM enable control mode */ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE, PCIE_CLIENT_GENERAL_CONTROL); return 0; } static const struct dw_pcie_host_ops rockchip_pcie_host_ops = { .host_init = rockchip_pcie_host_init, }; static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->pci.dev; int ret; ret = devm_clk_bulk_get_all(dev, &rockchip->clks); if (ret < 0) return ret; rockchip->clk_cnt = ret; return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks); } static int rockchip_pcie_resource_get(struct platform_device *pdev, struct rockchip_pcie *rockchip) { rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb"); if (IS_ERR(rockchip->apb_base)) return PTR_ERR(rockchip->apb_base); rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(rockchip->rst_gpio)) return PTR_ERR(rockchip->rst_gpio); rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev); if (IS_ERR(rockchip->rst)) return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst), "failed to get reset lines\n"); return 0; } static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->pci.dev; int ret; rockchip->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(rockchip->phy)) return dev_err_probe(dev, PTR_ERR(rockchip->phy), "missing PHY\n"); ret = 
phy_init(rockchip->phy); if (ret < 0) return ret; ret = phy_power_on(rockchip->phy); if (ret) phy_exit(rockchip->phy); return ret; } static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip) { phy_exit(rockchip->phy); phy_power_off(rockchip->phy); } static const struct dw_pcie_ops dw_pcie_ops = { .link_up = rockchip_pcie_link_up, .start_link = rockchip_pcie_start_link, }; static int rockchip_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie *rockchip; struct dw_pcie_rp *pp; int ret; rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL); if (!rockchip) return -ENOMEM; platform_set_drvdata(pdev, rockchip); rockchip->pci.dev = dev; rockchip->pci.ops = &dw_pcie_ops; pp = &rockchip->pci.pp; pp->ops = &rockchip_pcie_host_ops; ret = rockchip_pcie_resource_get(pdev, rockchip); if (ret) return ret; ret = reset_control_assert(rockchip->rst); if (ret) return ret; /* DON'T MOVE ME: must be enable before PHY init */ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); if (IS_ERR(rockchip->vpcie3v3)) { if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3), "failed to get vpcie3v3 regulator\n"); rockchip->vpcie3v3 = NULL; } else { ret = regulator_enable(rockchip->vpcie3v3); if (ret) { dev_err(dev, "failed to enable vpcie3v3 regulator\n"); return ret; } } ret = rockchip_pcie_phy_init(rockchip); if (ret) goto disable_regulator; ret = reset_control_deassert(rockchip->rst); if (ret) goto deinit_phy; ret = rockchip_pcie_clk_init(rockchip); if (ret) goto deinit_phy; ret = dw_pcie_host_init(pp); if (!ret) return 0; clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks); deinit_phy: rockchip_pcie_phy_deinit(rockchip); disable_regulator: if (rockchip->vpcie3v3) regulator_disable(rockchip->vpcie3v3); return ret; } static const struct of_device_id rockchip_pcie_of_match[] = { { .compatible = "rockchip,rk3568-pcie", }, {}, }; static struct platform_driver rockchip_pcie_driver = { .driver = { .name = "rockchip-dw-pcie", .of_match_table = rockchip_pcie_of_match, .suppress_bind_attrs = true, }, .probe = rockchip_pcie_probe, }; builtin_platform_driver(rockchip_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-dw-rockchip.c
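The comment at the top of pcie-dw-rockchip.c above notes that the upper 16 bits of a PCIE_CLIENT_CONFIG write act as a write mask for the lower 16 bits, which is what its HIWORD_UPDATE*() macros encode: a single writel() can set or clear chosen bits with no read-modify-write. A minimal sketch of that convention follows, assuming only readl()/writel() semantics; EXAMPLE_HIWORD_UPDATE(), example_set_bits() and example_clear_bits() are illustrative names.

#include <linux/io.h>
#include <linux/types.h>

/* Upper halfword selects which lower-halfword bits the write may change. */
#define EXAMPLE_HIWORD_UPDATE(mask, val)        (((mask) << 16) | (val))

static void example_set_bits(void __iomem *reg, u16 bits)
{
        /* write-enable 'bits' and drive them to 1 */
        writel(EXAMPLE_HIWORD_UPDATE(bits, bits), reg);
}

static void example_clear_bits(void __iomem *reg, u16 bits)
{
        /* write-enable 'bits' and drive them to 0 */
        writel(EXAMPLE_HIWORD_UPDATE(bits, 0), reg);
}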
// SPDX-License-Identifier: GPL-2.0+ /* * PCIe host controller driver for the following SoCs * Tegra194 * Tegra234 * * Copyright (C) 2019-2022 NVIDIA Corporation. * * Author: Vidya Sagar <[email protected]> */ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/interconnect.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/random.h> #include <linux/reset.h> #include <linux/resource.h> #include <linux/types.h> #include "pcie-designware.h" #include <soc/tegra/bpmp.h> #include <soc/tegra/bpmp-abi.h> #include "../../pci.h" #define TEGRA194_DWC_IP_VER 0x490A #define TEGRA234_DWC_IP_VER 0x562A #define APPL_PINMUX 0x0 #define APPL_PINMUX_PEX_RST BIT(0) #define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2) #define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3) #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4) #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5) #define APPL_CTRL 0x4 #define APPL_CTRL_SYS_PRE_DET_STATE BIT(6) #define APPL_CTRL_LTSSM_EN BIT(7) #define APPL_CTRL_HW_HOT_RST_EN BIT(20) #define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0) #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2 #define APPL_INTR_EN_L0_0 0x8 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) #define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15) #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) #define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) #define APPL_INTR_STATUS_L0 0xC #define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) #define APPL_INTR_STATUS_L0_INT_INT BIT(8) #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15) #define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16) #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) #define APPL_INTR_EN_L1_0_0 0x1C #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3) #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30) #define APPL_INTR_STATUS_L1_0_0 0x20 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3) #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30) #define APPL_INTR_STATUS_L1_1 0x2C #define APPL_INTR_STATUS_L1_2 0x30 #define APPL_INTR_STATUS_L1_3 0x34 #define APPL_INTR_STATUS_L1_6 0x3C #define APPL_INTR_STATUS_L1_7 0x40 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1) #define APPL_INTR_EN_L1_8_0 0x44 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3) #define APPL_INTR_EN_L1_8_INTX_EN BIT(11) #define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15) #define APPL_INTR_STATUS_L1_8_0 0x4C #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6) #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2) #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3) #define APPL_INTR_STATUS_L1_9 0x54 #define APPL_INTR_STATUS_L1_10 0x58 #define APPL_INTR_STATUS_L1_11 0x64 #define APPL_INTR_STATUS_L1_13 0x74 #define APPL_INTR_STATUS_L1_14 0x78 #define APPL_INTR_STATUS_L1_15 0x7C #define APPL_INTR_STATUS_L1_17 0x88 
#define APPL_INTR_EN_L1_18 0x90 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2) #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) #define APPL_INTR_STATUS_L1_18 0x94 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2) #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) #define APPL_MSI_CTRL_1 0xAC #define APPL_MSI_CTRL_2 0xB0 #define APPL_LEGACY_INTX 0xB8 #define APPL_LTR_MSG_1 0xC4 #define LTR_MSG_REQ BIT(15) #define LTR_MST_NO_SNOOP_SHIFT 16 #define APPL_LTR_MSG_2 0xC8 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3) #define APPL_LINK_STATUS 0xCC #define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0) #define APPL_DEBUG 0xD0 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21) #define APPL_DEBUG_PM_LINKST_IN_L0 0x11 #define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3) #define APPL_DEBUG_LTSSM_STATE_SHIFT 3 #define LTSSM_STATE_PRE_DETECT 5 #define APPL_RADM_STATUS 0xE4 #define APPL_PM_XMT_TURNOFF_STATE BIT(0) #define APPL_DM_TYPE 0x100 #define APPL_DM_TYPE_MASK GENMASK(3, 0) #define APPL_DM_TYPE_RP 0x4 #define APPL_DM_TYPE_EP 0x0 #define APPL_CFG_BASE_ADDR 0x104 #define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12) #define APPL_CFG_IATU_DMA_BASE_ADDR 0x108 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18) #define APPL_CFG_MISC 0x110 #define APPL_CFG_MISC_SLV_EP_MODE BIT(14) #define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10) #define APPL_CFG_MISC_ARCACHE_SHIFT 10 #define APPL_CFG_MISC_ARCACHE_VAL 3 #define APPL_CFG_SLCG_OVERRIDE 0x114 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0) #define APPL_CAR_RESET_OVRD 0x12C #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0) #define IO_BASE_IO_DECODE BIT(0) #define IO_BASE_IO_DECODE_BIT8 BIT(8) #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0) #define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16) #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19) #define N_FTS_VAL 52 #define FTS_VAL 52 #define GEN3_EQ_CONTROL_OFF 0x8a8 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 #define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 #define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) #define AMBA_ERROR_RESPONSE_CRS_OKAY 0 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 #define MSIX_ADDR_MATCH_LOW_OFF 0x940 #define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0) #define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2) #define MSIX_ADDR_MATCH_HIGH_OFF 0x944 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0) #define PORT_LOGIC_MSIX_DOORBELL 0x948 #define CAP_SPCIE_CAP_OFF 0x154 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0) #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8) #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8 #define PME_ACK_TIMEOUT 10000 #define LTSSM_TIMEOUT 50000 /* 50ms */ #define GEN3_GEN4_EQ_PRESET_INIT 5 #define GEN1_CORE_CLK_FREQ 62500000 #define GEN2_CORE_CLK_FREQ 125000000 #define GEN3_CORE_CLK_FREQ 250000000 #define GEN4_CORE_CLK_FREQ 500000000 #define LTR_MSG_TIMEOUT (100 * 1000) #define PERST_DEBOUNCE_TIME (5 * 1000) #define EP_STATE_DISABLED 0 #define EP_STATE_ENABLED 1 static const unsigned int pcie_gen_freq[] = { GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */ GEN1_CORE_CLK_FREQ, GEN2_CORE_CLK_FREQ, GEN3_CORE_CLK_FREQ, GEN4_CORE_CLK_FREQ }; struct 
tegra_pcie_dw_of_data { u32 version; enum dw_pcie_device_mode mode; bool has_msix_doorbell_access_fix; bool has_sbr_reset_fix; bool has_l1ss_exit_fix; bool has_ltr_req_fix; u32 cdm_chk_int_en_bit; u32 gen4_preset_vec; u8 n_fts[2]; }; struct tegra_pcie_dw { struct device *dev; struct resource *appl_res; struct resource *dbi_res; struct resource *atu_dma_res; void __iomem *appl_base; struct clk *core_clk; struct reset_control *core_apb_rst; struct reset_control *core_rst; struct dw_pcie pci; struct tegra_bpmp *bpmp; struct tegra_pcie_dw_of_data *of_data; bool supports_clkreq; bool enable_cdm_check; bool enable_srns; bool link_state; bool update_fc_fixup; bool enable_ext_refclk; u8 init_link_width; u32 msi_ctrl_int; u32 num_lanes; u32 cid; u32 cfg_link_cap_l1sub; u32 ras_des_cap; u32 pcie_cap_base; u32 aspm_cmrt; u32 aspm_pwr_on_t; u32 aspm_l0s_enter_lat; struct regulator *pex_ctl_supply; struct regulator *slot_ctl_3v3; struct regulator *slot_ctl_12v; unsigned int phy_count; struct phy **phys; struct dentry *debugfs; /* Endpoint mode specific */ struct gpio_desc *pex_rst_gpiod; struct gpio_desc *pex_refclk_sel_gpiod; unsigned int pex_rst_irq; int ep_state; long link_status; struct icc_path *icc_path; }; static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) { return container_of(pci, struct tegra_pcie_dw, pci); } static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value, const u32 reg) { writel_relaxed(value, pcie->appl_base + reg); } static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg) { return readl_relaxed(pcie->appl_base + reg); } struct tegra_pcie_soc { enum dw_pcie_device_mode mode; }; static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val, speed, width; val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val); width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val); val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE); if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0)) dev_err(pcie->dev, "can't set bw[%u]\n", val); if (speed >= ARRAY_SIZE(pcie_gen_freq)) speed = 0; clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); } static void apply_bad_link_workaround(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 current_link_width; u16 val; /* * NOTE:- Since this scenario is uncommon and link as such is not * stable anyway, not waiting to confirm if link is really * transitioning to Gen-2 speed */ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); if (val & PCI_EXP_LNKSTA_LBMS) { current_link_width = (val & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; if (pcie->init_link_width > current_link_width) { dev_warn(pci->dev, "PCIe link is bad, width reduced\n"); val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL2); val &= ~PCI_EXP_LNKCTL2_TLS; val |= PCI_EXP_LNKCTL2_TLS_2_5GT; dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL2, val); val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL); val |= PCI_EXP_LNKCTL_RL; dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL, val); } } } static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) { struct tegra_pcie_dw *pcie = arg; struct dw_pcie *pci = &pcie->pci; struct dw_pcie_rp *pp = &pci->pp; u32 val, status_l0, status_l1; u16 val_w; status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { status_l1 
= appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); if (!pcie->of_data->has_sbr_reset_fix && status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { /* SBR & Surprise Link Down WAR */ val = appl_readl(pcie, APPL_CAR_RESET_OVRD); val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; appl_writel(pcie, val, APPL_CAR_RESET_OVRD); udelay(1); val = appl_readl(pcie, APPL_CAR_RESET_OVRD); val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; appl_writel(pcie, val, APPL_CAR_RESET_OVRD); val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); } } if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) { status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { appl_writel(pcie, APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS, APPL_INTR_STATUS_L1_8_0); apply_bad_link_workaround(pp); } if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); val_w |= PCI_EXP_LNKSTA_LBMS; dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, val_w); appl_writel(pcie, APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS, APPL_INTR_STATUS_L1_8_0); val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w & PCI_EXP_LNKSTA_CLS); } } if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18); val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { dev_info(pci->dev, "CDM check complete\n"); val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; } if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { dev_err(pci->dev, "CDM comparison mismatch\n"); val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; } if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { dev_err(pci->dev, "CDM Logic error\n"); val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; } dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val); } return IRQ_HANDLED; } static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) { u32 val; appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2); val = appl_readl(pcie, APPL_CTRL); val |= APPL_CTRL_LTSSM_EN; appl_writel(pcie, val, APPL_CTRL); } static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) { struct tegra_pcie_dw *pcie = arg; struct dw_pcie_ep *ep = &pcie->pci.ep; struct dw_pcie *pci = &pcie->pci; u32 val; if (test_and_clear_bit(0, &pcie->link_status)) 
dw_pcie_ep_linkup(ep); tegra_pcie_icc_set(pcie); if (pcie->of_data->has_ltr_req_fix) return IRQ_HANDLED; /* If EP doesn't advertise L1SS, just return */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) return IRQ_HANDLED; /* Check if BME is set to '1' */ val = dw_pcie_readl_dbi(pci, PCI_COMMAND); if (val & PCI_COMMAND_MASTER) { ktime_t timeout; /* 110us for both snoop and no-snoop */ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ; val |= (val << LTR_MST_NO_SNOOP_SHIFT); appl_writel(pcie, val, APPL_LTR_MSG_1); /* Send LTR upstream */ val = appl_readl(pcie, APPL_LTR_MSG_2); val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; appl_writel(pcie, val, APPL_LTR_MSG_2); timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT); for (;;) { val = appl_readl(pcie, APPL_LTR_MSG_2); if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)) break; if (ktime_after(ktime_get(), timeout)) break; usleep_range(1000, 1100); } if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE) dev_err(pcie->dev, "Failed to send LTR message\n"); } return IRQ_HANDLED; } static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) { struct tegra_pcie_dw *pcie = arg; int spurious = 1; u32 status_l0, status_l1, link_status; status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE) pex_ep_event_hot_rst_done(pcie); if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) { link_status = appl_readl(pcie, APPL_LINK_STATUS); if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) { dev_dbg(pcie->dev, "Link is up with Host\n"); set_bit(0, &pcie->link_status); return IRQ_WAKE_THREAD; } } spurious = 0; } if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) { status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15); appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15); if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED) return IRQ_WAKE_THREAD; spurious = 0; } if (spurious) { dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n", status_l0); appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0); } return IRQ_HANDLED; } static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); /* * This is an endpoint mode specific register happen to appear even * when controller is operating in root port mode and system hangs * when it is accessed with link being in ASPM-L1 state. * So skip accessing it altogether */ if (!pcie->of_data->has_msix_doorbell_access_fix && !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) { *val = 0x00000000; return PCIBIOS_SUCCESSFUL; } return pci_generic_config_read(bus, devfn, where, size, val); } static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); /* * This is an endpoint mode specific register happen to appear even * when controller is operating in root port mode and system hangs * when it is accessed with link being in ASPM-L1 state. 
* So skip accessing it altogether */ if (!pcie->of_data->has_msix_doorbell_access_fix && !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) return PCIBIOS_SUCCESSFUL; return pci_generic_config_write(bus, devfn, where, size, val); } static struct pci_ops tegra_pci_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = tegra_pcie_dw_rd_own_conf, .write = tegra_pcie_dw_wr_own_conf, }; #if defined(CONFIG_PCIEASPM) static void disable_aspm_l11(struct tegra_pcie_dw *pcie) { u32 val; val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); val &= ~PCI_L1SS_CAP_ASPM_L1_1; dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); } static void disable_aspm_l12(struct tegra_pcie_dw *pcie) { u32 val; val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); val &= ~PCI_L1SS_CAP_ASPM_L1_2; dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); } static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) { u32 val; val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + PCIE_RAS_DES_EVENT_COUNTER_CONTROL); val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT); val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT; val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + PCIE_RAS_DES_EVENT_COUNTER_DATA); return val; } static int aspm_state_cnt(struct seq_file *s, void *data) { struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *) dev_get_drvdata(s->private); u32 val; seq_printf(s, "Tx L0s entry count : %u\n", event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S)); seq_printf(s, "Rx L0s entry count : %u\n", event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S)); seq_printf(s, "Link L1 entry count : %u\n", event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1)); seq_printf(s, "Link L1.1 entry count : %u\n", event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1)); seq_printf(s, "Link L1.2 entry count : %u\n", event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2)); /* Clear all counters */ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, EVENT_COUNTER_ALL_CLEAR); /* Re-enable counting */ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); return 0; } static void init_host_aspm(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val; val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci, PCI_EXT_CAP_ID_VNDR); /* Enable ASPM counters */ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; dw_pcie_writel_dbi(pci, pcie->ras_des_cap + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); /* Program T_cmrt and T_pwr_on values */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE); val |= (pcie->aspm_cmrt << 8); val |= (pcie->aspm_pwr_on_t << 19); dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val); /* Program L0s and L1 entrance latencies */ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR); val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK; val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT); val |= 
PORT_AFR_ENTER_ASPM; dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val); } static void init_debugfs(struct tegra_pcie_dw *pcie) { debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs, aspm_state_cnt); } #else static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; } #endif static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; u16 val_w; val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L0_0); if (!pcie->of_data->has_sbr_reset_fix) { val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); } if (pcie->enable_cdm_check) { val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= pcie->of_data->cdm_chk_int_en_bit; appl_writel(pcie, val, APPL_INTR_EN_L0_0); val = appl_readl(pcie, APPL_INTR_EN_L1_18); val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR; val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR; appl_writel(pcie, val, APPL_INTR_EN_L1_18); } val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL); val_w |= PCI_EXP_LNKCTL_LBMIE; dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL, val_w); } static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; /* Enable legacy interrupt generation */ val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; val |= APPL_INTR_EN_L0_0_INT_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L0_0); val = appl_readl(pcie, APPL_INTR_EN_L1_8_0); val |= APPL_INTR_EN_L1_8_INTX_EN; val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN; val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN; if (IS_ENABLED(CONFIG_PCIEAER)) val |= APPL_INTR_EN_L1_8_AER_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L1_8_0); } static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; /* Enable MSI interrupt generation */ val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN; val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L0_0); } static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); /* Clear interrupt statuses before enabling interrupts */ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); appl_writel(pcie, 0xFFFFFFFF, 
APPL_INTR_STATUS_L1_11); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); tegra_pcie_enable_system_interrupts(pp); tegra_pcie_enable_legacy_interrupts(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_enable_msi_interrupts(pp); } static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val, offset, i; /* Program init preset */ for (i = 0; i < pcie->num_lanes; i++) { val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2)); val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK; val |= GEN3_GEN4_EQ_PRESET_INIT; val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK; val |= (GEN3_GEN4_EQ_PRESET_INIT << CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT); dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val); offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PL_16GT) + PCI_PL_16GT_LE_CTRL; val = dw_pcie_readb_dbi(pci, offset + i); val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK; val |= GEN3_GEN4_EQ_PRESET_INIT; val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK; val |= (GEN3_GEN4_EQ_PRESET_INIT << PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT); dw_pcie_writeb_dbi(pci, offset + i, val); } val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT); dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; val |= (pcie->of_data->gen4_preset_vec << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); } static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; u16 val_16; pp->bridge->ops = &tegra_pci_ops; if (!pcie->pcie_cap_base) pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP); val = dw_pcie_readl_dbi(pci, PCI_IO_BASE); val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8); dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE); val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE; val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE; dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); /* Enable as 0xFFFF0001 response for CRS */ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT); val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 << AMBA_ERROR_RESPONSE_CRS_SHIFT); dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); /* Configure Max lane width from DT */ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP); val &= ~PCI_EXP_LNKCAP_MLW; val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT); dw_pcie_writel_dbi(pci, 
pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); /* Clear Slot Clock Configuration bit if SRNS configuration */ if (pcie->enable_srns) { val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); val_16 &= ~PCI_EXP_LNKSTA_SLC; dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, val_16); } config_gen3_gen4_eq_presets(pcie); init_host_aspm(pcie); /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ if (!pcie->supports_clkreq) { disable_aspm_l11(pcie); disable_aspm_l12(pcie); } if (!pcie->of_data->has_l1ss_exit_fix) { val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); } if (pcie->update_fc_fixup) { val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); } clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); return 0; } static int tegra_pcie_dw_start_link(struct dw_pcie *pci) { struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); struct dw_pcie_rp *pp = &pci->pp; u32 val, offset, tmp; bool retry = true; if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { enable_irq(pcie->pex_rst_irq); return 0; } retry_link: /* Assert RST */ val = appl_readl(pcie, APPL_PINMUX); val &= ~APPL_PINMUX_PEX_RST; appl_writel(pcie, val, APPL_PINMUX); usleep_range(100, 200); /* Enable LTSSM */ val = appl_readl(pcie, APPL_CTRL); val |= APPL_CTRL_LTSSM_EN; appl_writel(pcie, val, APPL_CTRL); /* De-assert RST */ val = appl_readl(pcie, APPL_PINMUX); val |= APPL_PINMUX_PEX_RST; appl_writel(pcie, val, APPL_PINMUX); msleep(100); if (dw_pcie_wait_for_link(pci)) { if (!retry) return 0; /* * There are some endpoints which can't get the link up if * root port has Data Link Feature (DLF) enabled. * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info * on Scaled Flow Control and DLF. * So, need to confirm that is indeed the case here and attempt * link up once again with DLF disabled. 
*/ val = appl_readl(pcie, APPL_DEBUG); val &= APPL_DEBUG_LTSSM_STATE_MASK; val >>= APPL_DEBUG_LTSSM_STATE_SHIFT; tmp = appl_readl(pcie, APPL_LINK_STATUS); tmp &= APPL_LINK_STATUS_RDLH_LINK_UP; if (!(val == 0x11 && !tmp)) { /* Link is down for all good reasons */ return 0; } dev_info(pci->dev, "Link is down in DLL"); dev_info(pci->dev, "Trying again with DLFE disabled\n"); /* Disable LTSSM */ val = appl_readl(pcie, APPL_CTRL); val &= ~APPL_CTRL_LTSSM_EN; appl_writel(pcie, val, APPL_CTRL); reset_control_assert(pcie->core_rst); reset_control_deassert(pcie->core_rst); offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF); val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP); val &= ~PCI_DLF_EXCHANGE_ENABLE; dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val); tegra_pcie_dw_host_init(pp); dw_pcie_setup_rc(pp); retry = false; goto retry_link; } tegra_pcie_icc_set(pcie); tegra_pcie_enable_interrupts(pp); return 0; } static int tegra_pcie_dw_link_up(struct dw_pcie *pci) { struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); return !!(val & PCI_EXP_LNKSTA_DLLLA); } static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) { struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); disable_irq(pcie->pex_rst_irq); } static const struct dw_pcie_ops tegra_dw_pcie_ops = { .link_up = tegra_pcie_dw_link_up, .start_link = tegra_pcie_dw_start_link, .stop_link = tegra_pcie_dw_stop_link, }; static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { .host_init = tegra_pcie_dw_host_init, }; static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) { unsigned int phy_count = pcie->phy_count; while (phy_count--) { phy_power_off(pcie->phys[phy_count]); phy_exit(pcie->phys[phy_count]); } } static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie) { unsigned int i; int ret; for (i = 0; i < pcie->phy_count; i++) { ret = phy_init(pcie->phys[i]); if (ret < 0) goto phy_power_off; ret = phy_power_on(pcie->phys[i]); if (ret < 0) goto phy_exit; } return 0; phy_power_off: while (i--) { phy_power_off(pcie->phys[i]); phy_exit: phy_exit(pcie->phys[i]); } return ret; } static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); struct device_node *np = pcie->dev->of_node; int ret; pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); if (!pcie->dbi_res) { dev_err(pcie->dev, "Failed to find \"dbi\" region\n"); return -ENODEV; } ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt); if (ret < 0) { dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret); return ret; } ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us", &pcie->aspm_pwr_on_t); if (ret < 0) dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n", ret); ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us", &pcie->aspm_l0s_enter_lat); if (ret < 0) dev_info(pcie->dev, "Failed to read ASPM L0s Entrance latency: %d\n", ret); ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes); if (ret < 0) { dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret); return ret; } ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid); if (ret) { dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); return ret; } ret = of_property_count_strings(np, "phy-names"); if (ret < 0) { dev_err(pcie->dev, "Failed to find PHY entries: %d\n", ret); return ret; } pcie->phy_count = ret; if (of_property_read_bool(np, "nvidia,update-fc-fixup")) 
pcie->update_fc_fixup = true; /* RP using an external REFCLK is supported only in Tegra234 */ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) { if (pcie->of_data->mode == DW_PCIE_EP_TYPE) pcie->enable_ext_refclk = true; } else { pcie->enable_ext_refclk = of_property_read_bool(pcie->dev->of_node, "nvidia,enable-ext-refclk"); } pcie->supports_clkreq = of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); pcie->enable_cdm_check = of_property_read_bool(np, "snps,enable-cdm-check"); if (pcie->of_data->version == TEGRA234_DWC_IP_VER) pcie->enable_srns = of_property_read_bool(np, "nvidia,enable-srns"); if (pcie->of_data->mode == DW_PCIE_RC_TYPE) return 0; /* Endpoint mode specific DT entries */ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN); if (IS_ERR(pcie->pex_rst_gpiod)) { int err = PTR_ERR(pcie->pex_rst_gpiod); const char *level = KERN_ERR; if (err == -EPROBE_DEFER) level = KERN_DEBUG; dev_printk(level, pcie->dev, dev_fmt("Failed to get PERST GPIO: %d\n"), err); return err; } pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev, "nvidia,refclk-select", GPIOD_OUT_HIGH); if (IS_ERR(pcie->pex_refclk_sel_gpiod)) { int err = PTR_ERR(pcie->pex_refclk_sel_gpiod); const char *level = KERN_ERR; if (err == -EPROBE_DEFER) level = KERN_DEBUG; dev_printk(level, pcie->dev, dev_fmt("Failed to get REFCLK select GPIOs: %d\n"), err); pcie->pex_refclk_sel_gpiod = NULL; } return 0; } static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, bool enable) { struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; /* * Controller-5 doesn't need to have its state set by BPMP-FW in * Tegra194 */ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5) return 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; req.controller_state.pcie_controller = pcie->cid; req.controller_state.enable = enable; memset(&msg, 0, sizeof(msg)); msg.mrq = MRQ_UPHY; msg.tx.data = &req; msg.tx.size = sizeof(req); msg.rx.data = &resp; msg.rx.size = sizeof(resp); return tegra_bpmp_transfer(pcie->bpmp, &msg); } static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, bool enable) { struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); if (enable) { req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT; req.ep_ctrlr_pll_init.ep_controller = pcie->cid; } else { req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF; req.ep_ctrlr_pll_off.ep_controller = pcie->cid; } memset(&msg, 0, sizeof(msg)); msg.mrq = MRQ_UPHY; msg.tx.data = &req; msg.tx.size = sizeof(req); msg.rx.data = &resp; msg.rx.size = sizeof(resp); return tegra_bpmp_transfer(pcie->bpmp, &msg); } static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) { struct dw_pcie_rp *pp = &pcie->pci.pp; struct pci_bus *child, *root_bus = NULL; struct pci_dev *pdev; /* * link doesn't go into L2 state with some of the endpoints with Tegra * if they are not in D0 state. So, need to make sure that immediate * downstream devices are in D0 state before sending PME_TurnOff to put * link into L2 state. * This is as per PCI Express Base r4.0 v1.0 September 27-2017, * 5.2 Link State Power Management (Page #428). 
*/ list_for_each_entry(child, &pp->bridge->bus->children, node) { /* Bring downstream devices to D0 if they are not already in */ if (child->parent == pp->bridge->bus) { root_bus = child; break; } } if (!root_bus) { dev_err(pcie->dev, "Failed to find downstream devices\n"); return; } list_for_each_entry(pdev, &root_bus->devices, bus_list) { if (PCI_SLOT(pdev->devfn) == 0) { if (pci_set_power_state(pdev, PCI_D0)) dev_err(pcie->dev, "Failed to transition %s to D0 state\n", dev_name(&pdev->dev)); } } } static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) { pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); if (IS_ERR(pcie->slot_ctl_3v3)) { if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV) return PTR_ERR(pcie->slot_ctl_3v3); pcie->slot_ctl_3v3 = NULL; } pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v"); if (IS_ERR(pcie->slot_ctl_12v)) { if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV) return PTR_ERR(pcie->slot_ctl_12v); pcie->slot_ctl_12v = NULL; } return 0; } static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) { int ret; if (pcie->slot_ctl_3v3) { ret = regulator_enable(pcie->slot_ctl_3v3); if (ret < 0) { dev_err(pcie->dev, "Failed to enable 3.3V slot supply: %d\n", ret); return ret; } } if (pcie->slot_ctl_12v) { ret = regulator_enable(pcie->slot_ctl_12v); if (ret < 0) { dev_err(pcie->dev, "Failed to enable 12V slot supply: %d\n", ret); goto fail_12v_enable; } } /* * According to PCI Express Card Electromechanical Specification * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive) * should be a minimum of 100ms. */ if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v) msleep(100); return 0; fail_12v_enable: if (pcie->slot_ctl_3v3) regulator_disable(pcie->slot_ctl_3v3); return ret; } static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) { if (pcie->slot_ctl_12v) regulator_disable(pcie->slot_ctl_12v); if (pcie->slot_ctl_3v3) regulator_disable(pcie->slot_ctl_3v3); } static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, bool en_hw_hot_rst) { int ret; u32 val; ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); if (ret) { dev_err(pcie->dev, "Failed to enable controller %u: %d\n", pcie->cid, ret); return ret; } if (pcie->enable_ext_refclk) { ret = tegra_pcie_bpmp_set_pll_state(pcie, true); if (ret) { dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret); goto fail_pll_init; } } ret = tegra_pcie_enable_slot_regulators(pcie); if (ret < 0) goto fail_slot_reg_en; ret = regulator_enable(pcie->pex_ctl_supply); if (ret < 0) { dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret); goto fail_reg_en; } ret = clk_prepare_enable(pcie->core_clk); if (ret) { dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret); goto fail_core_clk; } ret = reset_control_deassert(pcie->core_apb_rst); if (ret) { dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n", ret); goto fail_core_apb_rst; } if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) { /* Enable HW_HOT_RST mode */ val = appl_readl(pcie, APPL_CTRL); val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); val |= APPL_CTRL_HW_HOT_RST_EN; appl_writel(pcie, val, APPL_CTRL); } ret = tegra_pcie_enable_phy(pcie); if (ret) { dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret); goto fail_phy; } /* Update CFG base address */ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, APPL_CFG_BASE_ADDR); /* Configure this core for 
RP mode operation */ appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE); appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); val = appl_readl(pcie, APPL_CTRL); appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL); val = appl_readl(pcie, APPL_CFG_MISC); val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); appl_writel(pcie, val, APPL_CFG_MISC); if (pcie->enable_srns || pcie->enable_ext_refclk) { /* * When Tegra PCIe RP is using external clock, it cannot supply * same clock to its downstream hierarchy. Hence, gate PCIe RP * REFCLK out pads when RP & EP are using separate clocks or RP * is using an external REFCLK. */ val = appl_readl(pcie, APPL_PINMUX); val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; appl_writel(pcie, val, APPL_PINMUX); } if (!pcie->supports_clkreq) { val = appl_readl(pcie, APPL_PINMUX); val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN; val &= ~APPL_PINMUX_CLKREQ_OVERRIDE; appl_writel(pcie, val, APPL_PINMUX); } /* Update iATU_DMA base address */ appl_writel(pcie, pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, APPL_CFG_IATU_DMA_BASE_ADDR); reset_control_deassert(pcie->core_rst); return ret; fail_phy: reset_control_assert(pcie->core_apb_rst); fail_core_apb_rst: clk_disable_unprepare(pcie->core_clk); fail_core_clk: regulator_disable(pcie->pex_ctl_supply); fail_reg_en: tegra_pcie_disable_slot_regulators(pcie); fail_slot_reg_en: if (pcie->enable_ext_refclk) tegra_pcie_bpmp_set_pll_state(pcie, false); fail_pll_init: tegra_pcie_bpmp_set_ctrl_state(pcie, false); return ret; } static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie) { int ret; ret = reset_control_assert(pcie->core_rst); if (ret) dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret); tegra_pcie_disable_phy(pcie); ret = reset_control_assert(pcie->core_apb_rst); if (ret) dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret); clk_disable_unprepare(pcie->core_clk); ret = regulator_disable(pcie->pex_ctl_supply); if (ret) dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret); tegra_pcie_disable_slot_regulators(pcie); if (pcie->enable_ext_refclk) { ret = tegra_pcie_bpmp_set_pll_state(pcie, false); if (ret) dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret); } ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); if (ret) dev_err(pcie->dev, "Failed to disable controller %d: %d\n", pcie->cid, ret); } static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; struct dw_pcie_rp *pp = &pci->pp; int ret; ret = tegra_pcie_config_controller(pcie, false); if (ret < 0) return ret; pp->ops = &tegra_pcie_dw_host_ops; ret = dw_pcie_host_init(pp); if (ret < 0) { dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret); goto fail_host_init; } return 0; fail_host_init: tegra_pcie_unconfig_controller(pcie); return ret; } static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) { u32 val; if (!tegra_pcie_dw_link_up(&pcie->pci)) return 0; val = appl_readl(pcie, APPL_RADM_STATUS); val |= APPL_PM_XMT_TURNOFF_STATE; appl_writel(pcie, val, APPL_RADM_STATUS); return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val, val & APPL_DEBUG_PM_LINKST_IN_L2_LAT, 1, PME_ACK_TIMEOUT); } static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) { u32 data; int err; if (!tegra_pcie_dw_link_up(&pcie->pci)) { dev_dbg(pcie->dev, "PCIe link is not up...!\n"); return; } /* * PCIe controller exits from L2 only if reset is applied, so * controller doesn't handle interrupts. 
But in cases where * L2 entry fails, PERST# is asserted which can trigger surprise * link down AER. However this function call happens in * suspend_noirq(), so AER interrupt will not be processed. * Disable all interrupts to avoid such a scenario. */ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0); if (tegra_pcie_try_link_l2(pcie)) { dev_info(pcie->dev, "Link didn't transition to L2 state\n"); /* * TX lane clock freq will reset to Gen1 only if link is in L2 * or detect state. * So apply pex_rst to end point to force RP to go into detect * state */ data = appl_readl(pcie, APPL_PINMUX); data &= ~APPL_PINMUX_PEX_RST; appl_writel(pcie, data, APPL_PINMUX); /* * Some cards do not go to detect state even after de-asserting * PERST#. So, de-assert LTSSM to bring link to detect state. */ data = readl(pcie->appl_base + APPL_CTRL); data &= ~APPL_CTRL_LTSSM_EN; writel(data, pcie->appl_base + APPL_CTRL); err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, data, ((data & APPL_DEBUG_LTSSM_STATE_MASK) >> APPL_DEBUG_LTSSM_STATE_SHIFT) == LTSSM_STATE_PRE_DETECT, 1, LTSSM_TIMEOUT); if (err) dev_info(pcie->dev, "Link didn't go to detect state\n"); } /* * DBI registers may not be accessible after this as PLL-E would be * down depending on how CLKREQ is pulled by end point */ data = appl_readl(pcie, APPL_PINMUX); data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE); /* Cut REFCLK to slot */ data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; appl_writel(pcie, data, APPL_PINMUX); } static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) { tegra_pcie_downstream_dev_to_D0(pcie); dw_pcie_host_deinit(&pcie->pci.pp); tegra_pcie_dw_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); } static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) { struct device *dev = pcie->dev; char *name; int ret; pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", ret); goto fail_pm_get_sync; } ret = pinctrl_pm_select_default_state(dev); if (ret < 0) { dev_err(dev, "Failed to configure sideband pins: %d\n", ret); goto fail_pm_get_sync; } ret = tegra_pcie_init_controller(pcie); if (ret < 0) { dev_err(dev, "Failed to initialize controller: %d\n", ret); goto fail_pm_get_sync; } pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); if (!pcie->link_state) { ret = -ENOMEDIUM; goto fail_host_init; } name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); if (!name) { ret = -ENOMEM; goto fail_host_init; } pcie->debugfs = debugfs_create_dir(name, NULL); init_debugfs(pcie); return ret; fail_host_init: tegra_pcie_deinit_controller(pcie); fail_pm_get_sync: pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; } static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) { u32 val; int ret; if (pcie->ep_state == EP_STATE_DISABLED) return; /* Disable LTSSM */ val = appl_readl(pcie, APPL_CTRL); val &= ~APPL_CTRL_LTSSM_EN; appl_writel(pcie, val, APPL_CTRL); ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> APPL_DEBUG_LTSSM_STATE_SHIFT) == LTSSM_STATE_PRE_DETECT, 1, LTSSM_TIMEOUT); if (ret) dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); reset_control_assert(pcie->core_rst); tegra_pcie_disable_phy(pcie); reset_control_assert(pcie->core_apb_rst); clk_disable_unprepare(pcie->core_clk); pm_runtime_put_sync(pcie->dev); if (pcie->enable_ext_refclk) { ret = tegra_pcie_bpmp_set_pll_state(pcie, false); if (ret) 
dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); } ret = tegra_pcie_bpmp_set_pll_state(pcie, false); if (ret) dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); pcie->ep_state = EP_STATE_DISABLED; dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); } static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; struct dw_pcie_ep *ep = &pci->ep; struct device *dev = pcie->dev; u32 val; int ret; u16 val_16; if (pcie->ep_state == EP_STATE_ENABLED) return; ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", ret); return; } ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); if (ret) { dev_err(pcie->dev, "Failed to enable controller %u: %d\n", pcie->cid, ret); goto fail_set_ctrl_state; } if (pcie->enable_ext_refclk) { ret = tegra_pcie_bpmp_set_pll_state(pcie, true); if (ret) { dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret); goto fail_pll_init; } } ret = clk_prepare_enable(pcie->core_clk); if (ret) { dev_err(dev, "Failed to enable core clock: %d\n", ret); goto fail_core_clk_enable; } ret = reset_control_deassert(pcie->core_apb_rst); if (ret) { dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); goto fail_core_apb_rst; } ret = tegra_pcie_enable_phy(pcie); if (ret) { dev_err(dev, "Failed to enable PHY: %d\n", ret); goto fail_phy; } /* Clear any stale interrupt statuses */ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); /* configure this core for EP mode operation */ val = appl_readl(pcie, APPL_DM_TYPE); val &= ~APPL_DM_TYPE_MASK; val |= APPL_DM_TYPE_EP; appl_writel(pcie, val, APPL_DM_TYPE); appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); val = appl_readl(pcie, APPL_CTRL); val |= APPL_CTRL_SYS_PRE_DET_STATE; val |= APPL_CTRL_HW_HOT_RST_EN; appl_writel(pcie, val, APPL_CTRL); val = appl_readl(pcie, APPL_CFG_MISC); val |= APPL_CFG_MISC_SLV_EP_MODE; val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); appl_writel(pcie, val, APPL_CFG_MISC); val = appl_readl(pcie, APPL_PINMUX); val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; appl_writel(pcie, val, APPL_PINMUX); appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, APPL_CFG_BASE_ADDR); appl_writel(pcie, pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, APPL_CFG_IATU_DMA_BASE_ADDR); val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L0_0); val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN; val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN; appl_writel(pcie, val, 
APPL_INTR_EN_L1_0_0); reset_control_deassert(pcie->core_rst); if (pcie->update_fc_fixup) { val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); } config_gen3_gen4_eq_presets(pcie); init_host_aspm(pcie); /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ if (!pcie->supports_clkreq) { disable_aspm_l11(pcie); disable_aspm_l12(pcie); } if (!pcie->of_data->has_l1ss_exit_fix) { val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); } pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP); /* Clear Slot Clock Configuration bit if SRNS configuration */ if (pcie->enable_srns) { val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); val_16 &= ~PCI_EXP_LNKSTA_SLC; dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, val_16); } clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); val |= MSIX_ADDR_MATCH_LOW_OFF_EN; dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val); val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK); dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val); ret = dw_pcie_ep_init_complete(ep); if (ret) { dev_err(dev, "Failed to complete initialization: %d\n", ret); goto fail_init_complete; } dw_pcie_ep_init_notify(ep); /* Program the private control to allow sending LTR upstream */ if (pcie->of_data->has_ltr_req_fix) { val = appl_readl(pcie, APPL_LTR_MSG_2); val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; appl_writel(pcie, val, APPL_LTR_MSG_2); } /* Enable LTSSM */ val = appl_readl(pcie, APPL_CTRL); val |= APPL_CTRL_LTSSM_EN; appl_writel(pcie, val, APPL_CTRL); pcie->ep_state = EP_STATE_ENABLED; dev_dbg(dev, "Initialization of endpoint is completed\n"); return; fail_init_complete: reset_control_assert(pcie->core_rst); tegra_pcie_disable_phy(pcie); fail_phy: reset_control_assert(pcie->core_apb_rst); fail_core_apb_rst: clk_disable_unprepare(pcie->core_clk); fail_core_clk_enable: tegra_pcie_bpmp_set_pll_state(pcie, false); fail_pll_init: tegra_pcie_bpmp_set_ctrl_state(pcie, false); fail_set_ctrl_state: pm_runtime_put_sync(dev); } static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) { struct tegra_pcie_dw *pcie = arg; if (gpiod_get_value(pcie->pex_rst_gpiod)) pex_ep_event_pex_rst_assert(pcie); else pex_ep_event_pex_rst_deassert(pcie); return IRQ_HANDLED; } static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) { /* Tegra194 supports only INTA */ if (irq > 1) return -EINVAL; appl_writel(pcie, 1, APPL_LEGACY_INTX); usleep_range(1000, 2000); appl_writel(pcie, 0, APPL_LEGACY_INTX); return 0; } static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) { if (unlikely(irq > 31)) return -EINVAL; appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1); return 0; } static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) { struct dw_pcie_ep *ep = &pcie->pci.ep; writel(irq, ep->msi_mem); return 0; } static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); switch (type) { case PCI_EPC_IRQ_LEGACY: return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); case PCI_EPC_IRQ_MSI: return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); case PCI_EPC_IRQ_MSIX: 
return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type\n"); return -EPERM; } return 0; } static const struct pci_epc_features tegra_pcie_epc_features = { .linkup_notifier = true, .core_init_notifier = true, .msi_capable = false, .msix_capable = false, .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5, .bar_fixed_64bit = 1 << BAR_0, .bar_fixed_size[0] = SZ_1M, }; static const struct pci_epc_features* tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) { return &tegra_pcie_epc_features; } static const struct dw_pcie_ep_ops pcie_ep_ops = { .raise_irq = tegra_pcie_ep_raise_irq, .get_features = tegra_pcie_ep_get_features, }; static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; struct device *dev = pcie->dev; struct dw_pcie_ep *ep; char *name; int ret; ep = &pci->ep; ep->ops = &pcie_ep_ops; ep->page_size = SZ_64K; ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME); if (ret < 0) { dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n", ret); return ret; } ret = gpiod_to_irq(pcie->pex_rst_gpiod); if (ret < 0) { dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret); return ret; } pcie->pex_rst_irq = (unsigned int)ret; name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq", pcie->cid); if (!name) { dev_err(dev, "Failed to create PERST IRQ string\n"); return -ENOMEM; } irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN); pcie->ep_state = EP_STATE_DISABLED; ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL, tegra_pcie_ep_pex_rst_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, (void *)pcie); if (ret < 0) { dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret); return ret; } pm_runtime_enable(dev); ret = dw_pcie_ep_init(ep); if (ret) { dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", ret); pm_runtime_disable(dev); return ret; } return 0; } static int tegra_pcie_dw_probe(struct platform_device *pdev) { const struct tegra_pcie_dw_of_data *data; struct device *dev = &pdev->dev; struct resource *atu_dma_res; struct tegra_pcie_dw *pcie; struct dw_pcie_rp *pp; struct dw_pcie *pci; struct phy **phys; char *name; int ret; u32 i; data = of_device_get_match_data(dev); pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = &pcie->pci; pci->dev = &pdev->dev; pci->ops = &tegra_dw_pcie_ops; pcie->dev = &pdev->dev; pcie->of_data = (struct tegra_pcie_dw_of_data *)data; pci->n_fts[0] = pcie->of_data->n_fts[0]; pci->n_fts[1] = pcie->of_data->n_fts[1]; pp = &pci->pp; pp->num_vectors = MAX_MSI_IRQS; ret = tegra_pcie_dw_parse_dt(pcie); if (ret < 0) { const char *level = KERN_ERR; if (ret == -EPROBE_DEFER) level = KERN_DEBUG; dev_printk(level, dev, dev_fmt("Failed to parse device tree: %d\n"), ret); return ret; } ret = tegra_pcie_get_slot_regulators(pcie); if (ret < 0) { const char *level = KERN_ERR; if (ret == -EPROBE_DEFER) level = KERN_DEBUG; dev_printk(level, dev, dev_fmt("Failed to get slot regulators: %d\n"), ret); return ret; } if (pcie->pex_refclk_sel_gpiod) gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1); pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); if (IS_ERR(pcie->pex_ctl_supply)) { ret = PTR_ERR(pcie->pex_ctl_supply); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get regulator: %ld\n", PTR_ERR(pcie->pex_ctl_supply)); return ret; } pcie->core_clk = devm_clk_get(dev, "core"); if (IS_ERR(pcie->core_clk)) { dev_err(dev, "Failed to get 
core clock: %ld\n", PTR_ERR(pcie->core_clk)); return PTR_ERR(pcie->core_clk); } pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "appl"); if (!pcie->appl_res) { dev_err(dev, "Failed to find \"appl\" region\n"); return -ENODEV; } pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res); if (IS_ERR(pcie->appl_base)) return PTR_ERR(pcie->appl_base); pcie->core_apb_rst = devm_reset_control_get(dev, "apb"); if (IS_ERR(pcie->core_apb_rst)) { dev_err(dev, "Failed to get APB reset: %ld\n", PTR_ERR(pcie->core_apb_rst)); return PTR_ERR(pcie->core_apb_rst); } phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL); if (!phys) return -ENOMEM; for (i = 0; i < pcie->phy_count; i++) { name = kasprintf(GFP_KERNEL, "p2u-%u", i); if (!name) { dev_err(dev, "Failed to create P2U string\n"); return -ENOMEM; } phys[i] = devm_phy_get(dev, name); kfree(name); if (IS_ERR(phys[i])) { ret = PTR_ERR(phys[i]); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get PHY: %d\n", ret); return ret; } } pcie->phys = phys; atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu_dma"); if (!atu_dma_res) { dev_err(dev, "Failed to find \"atu_dma\" region\n"); return -ENODEV; } pcie->atu_dma_res = atu_dma_res; pci->atu_size = resource_size(atu_dma_res); pci->atu_base = devm_ioremap_resource(dev, atu_dma_res); if (IS_ERR(pci->atu_base)) return PTR_ERR(pci->atu_base); pcie->core_rst = devm_reset_control_get(dev, "core"); if (IS_ERR(pcie->core_rst)) { dev_err(dev, "Failed to get core reset: %ld\n", PTR_ERR(pcie->core_rst)); return PTR_ERR(pcie->core_rst); } pp->irq = platform_get_irq_byname(pdev, "intr"); if (pp->irq < 0) return pp->irq; pcie->bpmp = tegra_bpmp_get(dev); if (IS_ERR(pcie->bpmp)) return PTR_ERR(pcie->bpmp); platform_set_drvdata(pdev, pcie); pcie->icc_path = devm_of_icc_get(&pdev->dev, "write"); ret = PTR_ERR_OR_ZERO(pcie->icc_path); if (ret) { tegra_bpmp_put(pcie->bpmp); dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n"); return ret; } switch (pcie->of_data->mode) { case DW_PCIE_RC_TYPE: ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, IRQF_SHARED, "tegra-pcie-intr", pcie); if (ret) { dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret); goto fail; } ret = tegra_pcie_config_rp(pcie); if (ret && ret != -ENOMEDIUM) goto fail; else return 0; break; case DW_PCIE_EP_TYPE: ret = devm_request_threaded_irq(dev, pp->irq, tegra_pcie_ep_hard_irq, tegra_pcie_ep_irq_thread, IRQF_SHARED | IRQF_ONESHOT, "tegra-pcie-ep-intr", pcie); if (ret) { dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret); goto fail; } ret = tegra_pcie_config_ep(pcie, pdev); if (ret < 0) goto fail; break; default: dev_err(dev, "Invalid PCIe device type %d\n", pcie->of_data->mode); } fail: tegra_bpmp_put(pcie->bpmp); return ret; } static void tegra_pcie_dw_remove(struct platform_device *pdev) { struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { if (!pcie->link_state) return; debugfs_remove_recursive(pcie->debugfs); tegra_pcie_deinit_controller(pcie); pm_runtime_put_sync(pcie->dev); } else { disable_irq(pcie->pex_rst_irq); pex_ep_event_pex_rst_assert(pcie); } pm_runtime_disable(pcie->dev); tegra_bpmp_put(pcie->bpmp); if (pcie->pex_refclk_sel_gpiod) gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0); } static int tegra_pcie_dw_suspend_late(struct device *dev) { struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); u32 val; if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP 
mode\n"); return -EPERM; } if (!pcie->link_state) return 0; /* Enable HW_HOT_RST mode */ if (!pcie->of_data->has_sbr_reset_fix) { val = appl_readl(pcie, APPL_CTRL); val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); val |= APPL_CTRL_HW_HOT_RST_EN; appl_writel(pcie, val, APPL_CTRL); } return 0; } static int tegra_pcie_dw_suspend_noirq(struct device *dev) { struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); if (!pcie->link_state) return 0; tegra_pcie_downstream_dev_to_D0(pcie); tegra_pcie_dw_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); return 0; } static int tegra_pcie_dw_resume_noirq(struct device *dev) { struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); int ret; if (!pcie->link_state) return 0; ret = tegra_pcie_config_controller(pcie, true); if (ret < 0) return ret; ret = tegra_pcie_dw_host_init(&pcie->pci.pp); if (ret < 0) { dev_err(dev, "Failed to init host: %d\n", ret); goto fail_host_init; } dw_pcie_setup_rc(&pcie->pci.pp); ret = tegra_pcie_dw_start_link(&pcie->pci); if (ret < 0) goto fail_host_init; return 0; fail_host_init: tegra_pcie_unconfig_controller(pcie); return ret; } static int tegra_pcie_dw_resume_early(struct device *dev) { struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); u32 val; if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { dev_err(dev, "Suspend is not supported in EP mode"); return -ENOTSUPP; } if (!pcie->link_state) return 0; /* Disable HW_HOT_RST mode */ if (!pcie->of_data->has_sbr_reset_fix) { val = appl_readl(pcie, APPL_CTRL); val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << APPL_CTRL_HW_HOT_RST_MODE_SHIFT; val &= ~APPL_CTRL_HW_HOT_RST_EN; appl_writel(pcie, val, APPL_CTRL); } return 0; } static void tegra_pcie_dw_shutdown(struct platform_device *pdev) { struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { if (!pcie->link_state) return; debugfs_remove_recursive(pcie->debugfs); tegra_pcie_downstream_dev_to_D0(pcie); disable_irq(pcie->pci.pp.irq); if (IS_ENABLED(CONFIG_PCI_MSI)) disable_irq(pcie->pci.pp.msi_irq[0]); tegra_pcie_dw_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); pm_runtime_put_sync(pcie->dev); } else { disable_irq(pcie->pex_rst_irq); pex_ep_event_pex_rst_assert(pcie); } } static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = { .version = TEGRA194_DWC_IP_VER, .mode = DW_PCIE_RC_TYPE, .cdm_chk_int_en_bit = BIT(19), /* Gen4 - 5, 6, 8 and 9 presets enabled */ .gen4_preset_vec = 0x360, .n_fts = { 52, 52 }, }; static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = { .version = TEGRA194_DWC_IP_VER, .mode = DW_PCIE_EP_TYPE, .cdm_chk_int_en_bit = BIT(19), /* Gen4 - 5, 6, 8 and 9 presets enabled */ .gen4_preset_vec = 0x360, .n_fts = { 52, 52 }, }; static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = { .version = TEGRA234_DWC_IP_VER, .mode = DW_PCIE_RC_TYPE, .has_msix_doorbell_access_fix = true, .has_sbr_reset_fix = true, .has_l1ss_exit_fix = true, .cdm_chk_int_en_bit = BIT(18), /* Gen4 - 6, 8 and 9 presets enabled */ .gen4_preset_vec = 0x340, .n_fts = { 52, 80 }, }; static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = { .version = TEGRA234_DWC_IP_VER, .mode = DW_PCIE_EP_TYPE, .has_l1ss_exit_fix = true, .has_ltr_req_fix = true, .cdm_chk_int_en_bit = BIT(18), /* Gen4 - 6, 8 and 9 presets enabled */ .gen4_preset_vec = 0x340, .n_fts = { 52, 80 }, }; static const struct of_device_id tegra_pcie_dw_of_match[] = { { .compatible = 
"nvidia,tegra194-pcie", .data = &tegra194_pcie_dw_rc_of_data, }, { .compatible = "nvidia,tegra194-pcie-ep", .data = &tegra194_pcie_dw_ep_of_data, }, { .compatible = "nvidia,tegra234-pcie", .data = &tegra234_pcie_dw_rc_of_data, }, { .compatible = "nvidia,tegra234-pcie-ep", .data = &tegra234_pcie_dw_ep_of_data, }, {} }; static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { .suspend_late = tegra_pcie_dw_suspend_late, .suspend_noirq = tegra_pcie_dw_suspend_noirq, .resume_noirq = tegra_pcie_dw_resume_noirq, .resume_early = tegra_pcie_dw_resume_early, }; static struct platform_driver tegra_pcie_dw_driver = { .probe = tegra_pcie_dw_probe, .remove_new = tegra_pcie_dw_remove, .shutdown = tegra_pcie_dw_shutdown, .driver = { .name = "tegra194-pcie", .pm = &tegra_pcie_dw_pm_ops, .of_match_table = tegra_pcie_dw_of_match, }, }; module_platform_driver(tegra_pcie_dw_driver); MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); MODULE_AUTHOR("Vidya Sagar <[email protected]>"); MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/dwc/pcie-tegra194.c
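/*
 * Illustrative sketch, standalone model, not part of pcie-tegra194.c above:
 * tegra_pcie_dw_start_link() retries link training exactly once with the
 * Data Link Feature (DLF) exchange disabled when the first attempt stalls
 * in LTSSM state 0x11 without RDLH link-up, working around endpoints that
 * cannot train against a DLF-capable root port. The program below captures
 * only that "try, inspect the failure, disable the feature, retry once"
 * control flow; all helpers and the link simulation are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool dlf_enabled = true;

/* Pretend the endpoint only links up once DLF has been turned off. */
static bool try_train_link(void)
{
	return !dlf_enabled;
}

/* Stand-in for the LTSSM-state check the driver does before retrying. */
static bool failure_looks_like_dlf_issue(void)
{
	return true;
}

static int start_link(void)
{
	bool retry = true;

retry_link:
	if (try_train_link()) {
		printf("link up (DLF %s)\n", dlf_enabled ? "enabled" : "disabled");
		return 0;
	}

	if (!retry || !failure_looks_like_dlf_issue())
		return -1;

	printf("link training stalled, retrying with DLF disabled\n");
	dlf_enabled = false;
	retry = false;
	goto retry_link;
}

int main(void)
{
	return start_link() ? 1 : 0;
}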
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for HiSilicon STB SoCs * * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com * * Authors: Ruqiang Ju <[email protected]> * Jianguo Sun <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/reset.h> #include "pcie-designware.h" #define to_histb_pcie(x) dev_get_drvdata((x)->dev) #define PCIE_SYS_CTRL0 0x0000 #define PCIE_SYS_CTRL1 0x0004 #define PCIE_SYS_CTRL7 0x001C #define PCIE_SYS_CTRL13 0x0034 #define PCIE_SYS_CTRL15 0x003C #define PCIE_SYS_CTRL16 0x0040 #define PCIE_SYS_CTRL17 0x0044 #define PCIE_SYS_STAT0 0x0100 #define PCIE_SYS_STAT4 0x0110 #define PCIE_RDLH_LINK_UP BIT(5) #define PCIE_XMLH_LINK_UP BIT(15) #define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) #define PCIE_APP_LTSSM_ENABLE BIT(11) #define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28) #define PCIE_WM_EP 0 #define PCIE_WM_LEGACY BIT(1) #define PCIE_WM_RC BIT(30) #define PCIE_LTSSM_STATE_MASK GENMASK(5, 0) #define PCIE_LTSSM_STATE_ACTIVE 0x11 struct histb_pcie { struct dw_pcie *pci; struct clk *aux_clk; struct clk *pipe_clk; struct clk *sys_clk; struct clk *bus_clk; struct phy *phy; struct reset_control *soft_reset; struct reset_control *sys_reset; struct reset_control *bus_reset; void __iomem *ctrl; struct gpio_desc *reset_gpio; struct regulator *vpcie; }; static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg) { return readl(histb_pcie->ctrl + reg); } static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) { writel(val, histb_pcie->ctrl + reg); } static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); u32 val; val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); if (enable) val |= PCIE_ELBI_SLV_DBI_ENABLE; else val &= ~PCIE_ELBI_SLV_DBI_ENABLE; histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); } static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); u32 val; val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1); if (enable) val |= PCIE_ELBI_SLV_DBI_ENABLE; else val &= ~PCIE_ELBI_SLV_DBI_ENABLE; histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val); } static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size) { u32 val; histb_pcie_dbi_r_mode(&pci->pp, true); dw_pcie_read(base + reg, size, &val); histb_pcie_dbi_r_mode(&pci->pp, false); return val; } static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val) { histb_pcie_dbi_w_mode(&pci->pp, true); dw_pcie_write(base + reg, size, val); histb_pcie_dbi_w_mode(&pci->pp, false); } static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; *val = dw_pcie_read_dbi(pci, where, size); return PCIBIOS_SUCCESSFUL; } static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; dw_pcie_write_dbi(pci, where, size, val); 
return PCIBIOS_SUCCESSFUL; } static struct pci_ops histb_pci_ops = { .read = histb_pcie_rd_own_conf, .write = histb_pcie_wr_own_conf, }; static int histb_pcie_link_up(struct dw_pcie *pci) { struct histb_pcie *hipcie = to_histb_pcie(pci); u32 regval; u32 status; regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0); status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4); status &= PCIE_LTSSM_STATE_MASK; if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && (status == PCIE_LTSSM_STATE_ACTIVE)) return 1; return 0; } static int histb_pcie_start_link(struct dw_pcie *pci) { struct histb_pcie *hipcie = to_histb_pcie(pci); u32 regval; /* assert LTSSM enable */ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7); regval |= PCIE_APP_LTSSM_ENABLE; histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval); return 0; } static int histb_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); u32 regval; pp->bridge->ops = &histb_pci_ops; /* PCIe RC work mode */ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); regval &= ~PCIE_DEVICE_TYPE_MASK; regval |= PCIE_WM_RC; histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval); return 0; } static const struct dw_pcie_host_ops histb_pcie_host_ops = { .host_init = histb_pcie_host_init, }; static void histb_pcie_host_disable(struct histb_pcie *hipcie) { reset_control_assert(hipcie->soft_reset); reset_control_assert(hipcie->sys_reset); reset_control_assert(hipcie->bus_reset); clk_disable_unprepare(hipcie->aux_clk); clk_disable_unprepare(hipcie->pipe_clk); clk_disable_unprepare(hipcie->sys_clk); clk_disable_unprepare(hipcie->bus_clk); if (hipcie->reset_gpio) gpiod_set_value_cansleep(hipcie->reset_gpio, 1); if (hipcie->vpcie) regulator_disable(hipcie->vpcie); } static int histb_pcie_host_enable(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); struct device *dev = pci->dev; int ret; /* power on PCIe device if have */ if (hipcie->vpcie) { ret = regulator_enable(hipcie->vpcie); if (ret) { dev_err(dev, "failed to enable regulator: %d\n", ret); return ret; } } if (hipcie->reset_gpio) gpiod_set_value_cansleep(hipcie->reset_gpio, 0); ret = clk_prepare_enable(hipcie->bus_clk); if (ret) { dev_err(dev, "cannot prepare/enable bus clk\n"); goto err_bus_clk; } ret = clk_prepare_enable(hipcie->sys_clk); if (ret) { dev_err(dev, "cannot prepare/enable sys clk\n"); goto err_sys_clk; } ret = clk_prepare_enable(hipcie->pipe_clk); if (ret) { dev_err(dev, "cannot prepare/enable pipe clk\n"); goto err_pipe_clk; } ret = clk_prepare_enable(hipcie->aux_clk); if (ret) { dev_err(dev, "cannot prepare/enable aux clk\n"); goto err_aux_clk; } reset_control_assert(hipcie->soft_reset); reset_control_deassert(hipcie->soft_reset); reset_control_assert(hipcie->sys_reset); reset_control_deassert(hipcie->sys_reset); reset_control_assert(hipcie->bus_reset); reset_control_deassert(hipcie->bus_reset); return 0; err_aux_clk: clk_disable_unprepare(hipcie->pipe_clk); err_pipe_clk: clk_disable_unprepare(hipcie->sys_clk); err_sys_clk: clk_disable_unprepare(hipcie->bus_clk); err_bus_clk: if (hipcie->vpcie) regulator_disable(hipcie->vpcie); return ret; } static const struct dw_pcie_ops dw_pcie_ops = { .read_dbi = histb_pcie_read_dbi, .write_dbi = histb_pcie_write_dbi, .link_up = histb_pcie_link_up, .start_link = histb_pcie_start_link, }; static int histb_pcie_probe(struct platform_device *pdev) { struct histb_pcie *hipcie; struct dw_pcie *pci; struct dw_pcie_rp *pp; struct 
device *dev = &pdev->dev; int ret; hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL); if (!hipcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; hipcie->pci = pci; pp = &pci->pp; pci->dev = dev; pci->ops = &dw_pcie_ops; hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control"); if (IS_ERR(hipcie->ctrl)) { dev_err(dev, "cannot get control reg base\n"); return PTR_ERR(hipcie->ctrl); } pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi"); if (IS_ERR(pci->dbi_base)) { dev_err(dev, "cannot get rc-dbi base\n"); return PTR_ERR(pci->dbi_base); } hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); if (IS_ERR(hipcie->vpcie)) { if (PTR_ERR(hipcie->vpcie) != -ENODEV) return PTR_ERR(hipcie->vpcie); hipcie->vpcie = NULL; } hipcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); ret = PTR_ERR_OR_ZERO(hipcie->reset_gpio); if (ret) { dev_err(dev, "unable to request reset gpio: %d\n", ret); return ret; } ret = gpiod_set_consumer_name(hipcie->reset_gpio, "PCIe device power control"); if (ret) { dev_err(dev, "unable to set reset gpio name: %d\n", ret); return ret; } hipcie->aux_clk = devm_clk_get(dev, "aux"); if (IS_ERR(hipcie->aux_clk)) { dev_err(dev, "Failed to get PCIe aux clk\n"); return PTR_ERR(hipcie->aux_clk); } hipcie->pipe_clk = devm_clk_get(dev, "pipe"); if (IS_ERR(hipcie->pipe_clk)) { dev_err(dev, "Failed to get PCIe pipe clk\n"); return PTR_ERR(hipcie->pipe_clk); } hipcie->sys_clk = devm_clk_get(dev, "sys"); if (IS_ERR(hipcie->sys_clk)) { dev_err(dev, "Failed to get PCIEe sys clk\n"); return PTR_ERR(hipcie->sys_clk); } hipcie->bus_clk = devm_clk_get(dev, "bus"); if (IS_ERR(hipcie->bus_clk)) { dev_err(dev, "Failed to get PCIe bus clk\n"); return PTR_ERR(hipcie->bus_clk); } hipcie->soft_reset = devm_reset_control_get(dev, "soft"); if (IS_ERR(hipcie->soft_reset)) { dev_err(dev, "couldn't get soft reset\n"); return PTR_ERR(hipcie->soft_reset); } hipcie->sys_reset = devm_reset_control_get(dev, "sys"); if (IS_ERR(hipcie->sys_reset)) { dev_err(dev, "couldn't get sys reset\n"); return PTR_ERR(hipcie->sys_reset); } hipcie->bus_reset = devm_reset_control_get(dev, "bus"); if (IS_ERR(hipcie->bus_reset)) { dev_err(dev, "couldn't get bus reset\n"); return PTR_ERR(hipcie->bus_reset); } hipcie->phy = devm_phy_get(dev, "phy"); if (IS_ERR(hipcie->phy)) { dev_info(dev, "no pcie-phy found\n"); hipcie->phy = NULL; /* fall through here! * if no pcie-phy found, phy init * should be done under boot! */ } else { phy_init(hipcie->phy); } pp->ops = &histb_pcie_host_ops; platform_set_drvdata(pdev, hipcie); ret = histb_pcie_host_enable(pp); if (ret) { dev_err(dev, "failed to enable host\n"); return ret; } ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); return ret; } return 0; } static void histb_pcie_remove(struct platform_device *pdev) { struct histb_pcie *hipcie = platform_get_drvdata(pdev); histb_pcie_host_disable(hipcie); if (hipcie->phy) phy_exit(hipcie->phy); } static const struct of_device_id histb_pcie_of_match[] = { { .compatible = "hisilicon,hi3798cv200-pcie", }, {}, }; MODULE_DEVICE_TABLE(of, histb_pcie_of_match); static struct platform_driver histb_pcie_platform_driver = { .probe = histb_pcie_probe, .remove_new = histb_pcie_remove, .driver = { .name = "histb-pcie", .of_match_table = histb_pcie_of_match, }, }; module_platform_driver(histb_pcie_platform_driver); MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
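/*
 * Illustrative sketch, standalone model, not part of pcie-histb.c above:
 * histb_pcie_host_enable() enables four clocks in order and, on failure,
 * unwinds only what was already enabled through a ladder of goto labels.
 * The self-contained program below shows that unwind pattern with stub
 * enable/disable helpers; the clock names match the driver, but the
 * helper functions here are hypothetical, not the clk API.
 */
#include <stdio.h>

static int enable_clk(const char *name, int fail)
{
	if (fail) {
		printf("failed to enable %s clk\n", name);
		return -1;
	}
	printf("enabled %s clk\n", name);
	return 0;
}

static void disable_clk(const char *name)
{
	printf("disabled %s clk\n", name);
}

static int host_enable_model(int fail_at)
{
	int ret;

	ret = enable_clk("bus", fail_at == 1);
	if (ret)
		goto err_bus_clk;
	ret = enable_clk("sys", fail_at == 2);
	if (ret)
		goto err_sys_clk;
	ret = enable_clk("pipe", fail_at == 3);
	if (ret)
		goto err_pipe_clk;
	ret = enable_clk("aux", fail_at == 4);
	if (ret)
		goto err_aux_clk;
	return 0;

	/* Unwind strictly in reverse order of what succeeded. */
err_aux_clk:
	disable_clk("pipe");
err_pipe_clk:
	disable_clk("sys");
err_sys_clk:
	disable_clk("bus");
err_bus_clk:
	return ret;
}

int main(void)
{
	/* Simulate a failure while enabling the third ("pipe") clock. */
	return host_enable_model(3) ? 1 : 0;
}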
linux-master
drivers/pci/controller/dwc/pcie-histb.c
// SPDX-License-Identifier: GPL-2.0-only /* * PCIe controller driver for Intel Keem Bay * Copyright (C) 2020 Intel Corporation */ #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/property.h> #include "pcie-designware.h" /* PCIE_REGS_APB_SLV Registers */ #define PCIE_REGS_PCIE_CFG 0x0004 #define PCIE_DEVICE_TYPE BIT(8) #define PCIE_RSTN BIT(0) #define PCIE_REGS_PCIE_APP_CNTRL 0x0008 #define APP_LTSSM_ENABLE BIT(0) #define PCIE_REGS_INTERRUPT_ENABLE 0x0028 #define MSI_CTRL_INT_EN BIT(8) #define EDMA_INT_EN GENMASK(7, 0) #define PCIE_REGS_INTERRUPT_STATUS 0x002c #define MSI_CTRL_INT BIT(8) #define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0 #define SMLH_LINK_UP BIT(19) #define RDLH_LINK_UP BIT(8) #define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP) #define PCIE_REGS_PCIE_PHY_CNTL 0x0164 #define PHY0_SRAM_BYPASS BIT(8) #define PCIE_REGS_PCIE_PHY_STAT 0x0168 #define PHY0_MPLLA_STATE BIT(1) #define PCIE_REGS_LJPLL_STA 0x016c #define LJPLL_LOCK BIT(0) #define PCIE_REGS_LJPLL_CNTRL_0 0x0170 #define LJPLL_EN BIT(29) #define LJPLL_FOUT_EN GENMASK(24, 21) #define PCIE_REGS_LJPLL_CNTRL_2 0x0178 #define LJPLL_REF_DIV GENMASK(17, 12) #define LJPLL_FB_DIV GENMASK(11, 0) #define PCIE_REGS_LJPLL_CNTRL_3 0x017c #define LJPLL_POST_DIV3A GENMASK(24, 22) #define LJPLL_POST_DIV2A GENMASK(18, 16) #define PERST_DELAY_US 1000 #define AUX_CLK_RATE_HZ 24000000 struct keembay_pcie { struct dw_pcie pci; void __iomem *apb_base; enum dw_pcie_device_mode mode; struct clk *clk_master; struct clk *clk_aux; struct gpio_desc *reset; }; struct keembay_pcie_of_data { enum dw_pcie_device_mode mode; }; static void keembay_ep_reset_assert(struct keembay_pcie *pcie) { gpiod_set_value_cansleep(pcie->reset, 1); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } static void keembay_ep_reset_deassert(struct keembay_pcie *pcie) { /* * Ensure that PERST# is asserted for a minimum of 100ms. * * For more details, refer to PCI Express Card Electromechanical * Specification Revision 1.1, Table-2.4. 
*/ msleep(100); gpiod_set_value_cansleep(pcie->reset, 0); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable) { u32 val; val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL); if (enable) val |= APP_LTSSM_ENABLE; else val &= ~APP_LTSSM_ENABLE; writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL); } static int keembay_pcie_link_up(struct dw_pcie *pci) { struct keembay_pcie *pcie = dev_get_drvdata(pci->dev); u32 val; val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE); return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP; } static int keembay_pcie_start_link(struct dw_pcie *pci) { struct keembay_pcie *pcie = dev_get_drvdata(pci->dev); u32 val; int ret; if (pcie->mode == DW_PCIE_EP_TYPE) return 0; keembay_pcie_ltssm_set(pcie, false); ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT, val, val & PHY0_MPLLA_STATE, 20, 500 * USEC_PER_MSEC); if (ret) { dev_err(pci->dev, "MPLLA is not locked\n"); return ret; } keembay_pcie_ltssm_set(pcie, true); return 0; } static void keembay_pcie_stop_link(struct dw_pcie *pci) { struct keembay_pcie *pcie = dev_get_drvdata(pci->dev); keembay_pcie_ltssm_set(pcie, false); } static const struct dw_pcie_ops keembay_pcie_ops = { .link_up = keembay_pcie_link_up, .start_link = keembay_pcie_start_link, .stop_link = keembay_pcie_stop_link, }; static inline void keembay_pcie_disable_clock(void *data) { struct clk *clk = data; clk_disable_unprepare(clk); } static inline struct clk *keembay_pcie_probe_clock(struct device *dev, const char *id, u64 rate) { struct clk *clk; int ret; clk = devm_clk_get(dev, id); if (IS_ERR(clk)) return clk; if (rate) { ret = clk_set_rate(clk, rate); if (ret) return ERR_PTR(ret); } ret = clk_prepare_enable(clk); if (ret) return ERR_PTR(ret); ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk); if (ret) return ERR_PTR(ret); return clk; } static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; struct device *dev = pci->dev; pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0); if (IS_ERR(pcie->clk_master)) return dev_err_probe(dev, PTR_ERR(pcie->clk_master), "Failed to enable master clock"); pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ); if (IS_ERR(pcie->clk_aux)) return dev_err_probe(dev, PTR_ERR(pcie->clk_aux), "Failed to enable auxiliary clock"); return 0; } /* * Initialize the internal PCIe PLL in Host mode. * See the following sections in Keem Bay data book, * (1) 6.4.6.1 PCIe Subsystem Example Initialization, * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation. 
*/ static int keembay_pcie_pll_init(struct keembay_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val; int ret; val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32); writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2); val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) | FIELD_PREP(LJPLL_POST_DIV2A, 0x2); writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3); val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc); writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0); ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA, val, val & LJPLL_LOCK, 20, 500 * USEC_PER_MSEC); if (ret) dev_err(pci->dev, "Low jitter PLL is not locked\n"); return ret; } static void keembay_pcie_msi_irq_handler(struct irq_desc *desc) { struct keembay_pcie *pcie = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 val, mask, status; struct dw_pcie_rp *pp; /* * Keem Bay PCIe Controller provides an additional IP logic on top of * standard DWC IP to clear MSI IRQ by writing '1' to the respective * bit of the status register. * * So, a chained irq handler is defined to handle this additional * IP logic. */ chained_irq_enter(chip, desc); pp = &pcie->pci.pp; val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS); mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE); status = val & mask; if (status & MSI_CTRL_INT) { dw_handle_msi_irq(pp); writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS); } chained_irq_exit(chip, desc); } static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie) { struct dw_pcie *pci = &pcie->pci; struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); int irq; irq = platform_get_irq_byname(pdev, "pcie"); if (irq < 0) return irq; irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler, pcie); return 0; } static void keembay_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct keembay_pcie *pcie = dev_get_drvdata(pci->dev); writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE); } static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: /* Legacy interrupts are not supported in Keem Bay */ dev_err(pci->dev, "Legacy IRQ is not supported\n"); return -EINVAL; case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); case PCI_EPC_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type %d\n", type); return -EINVAL; } } static const struct pci_epc_features keembay_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5), .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), .align = SZ_16K, }; static const struct pci_epc_features * keembay_pcie_get_features(struct dw_pcie_ep *ep) { return &keembay_pcie_epc_features; } static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = { .ep_init = keembay_pcie_ep_init, .raise_irq = keembay_pcie_ep_raise_irq, .get_features = keembay_pcie_get_features, }; static const struct dw_pcie_host_ops keembay_pcie_host_ops = { }; static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; u32 val; int ret; pp->ops = &keembay_pcie_host_ops; pp->msi_irq[0] = 
-ENODEV; ret = keembay_pcie_setup_msi_irq(pcie); if (ret) return ret; pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pcie->reset)) return PTR_ERR(pcie->reset); ret = keembay_pcie_probe_clocks(pcie); if (ret) return ret; val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL); val |= PHY0_SRAM_BYPASS; writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL); writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG); ret = keembay_pcie_pll_init(pcie); if (ret) return ret; val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG); writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG); keembay_ep_reset_deassert(pcie); ret = dw_pcie_host_init(pp); if (ret) { keembay_ep_reset_assert(pcie); dev_err(dev, "Failed to initialize host: %d\n", ret); return ret; } val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE); if (IS_ENABLED(CONFIG_PCI_MSI)) val |= MSI_CTRL_INT_EN; writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE); return 0; } static int keembay_pcie_probe(struct platform_device *pdev) { const struct keembay_pcie_of_data *data; struct device *dev = &pdev->dev; struct keembay_pcie *pcie; struct dw_pcie *pci; enum dw_pcie_device_mode mode; data = device_get_match_data(dev); if (!data) return -ENODEV; mode = (enum dw_pcie_device_mode)data->mode; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = &pcie->pci; pci->dev = dev; pci->ops = &keembay_pcie_ops; pcie->mode = mode; pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb"); if (IS_ERR(pcie->apb_base)) return PTR_ERR(pcie->apb_base); platform_set_drvdata(pdev, pcie); switch (pcie->mode) { case DW_PCIE_RC_TYPE: if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST)) return -ENODEV; return keembay_pcie_add_pcie_port(pcie, pdev); case DW_PCIE_EP_TYPE: if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP)) return -ENODEV; pci->ep.ops = &keembay_pcie_ep_ops; return dw_pcie_ep_init(&pci->ep); default: dev_err(dev, "Invalid device type %d\n", pcie->mode); return -ENODEV; } } static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = { .mode = DW_PCIE_RC_TYPE, }; static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = { .mode = DW_PCIE_EP_TYPE, }; static const struct of_device_id keembay_pcie_of_match[] = { { .compatible = "intel,keembay-pcie", .data = &keembay_pcie_rc_of_data, }, { .compatible = "intel,keembay-pcie-ep", .data = &keembay_pcie_ep_of_data, }, {} }; static struct platform_driver keembay_pcie_driver = { .driver = { .name = "keembay-pcie", .of_match_table = keembay_pcie_of_match, .suppress_bind_attrs = true, }, .probe = keembay_pcie_probe, }; builtin_platform_driver(keembay_pcie_driver);
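/*
 * Illustrative sketch, standalone model, not part of pcie-keembay.c above:
 * keembay_pcie_pll_init() builds the LJPLL control register values with the
 * kernel's GENMASK()/FIELD_PREP() helpers. The program below re-derives the
 * same idea with plain shifts and masks so the register layout is easier to
 * follow; MASK32() and field_prep() are simplified stand-ins, not the
 * <linux/bitfield.h> implementations.
 */
#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit GENMASK(h, l): bits h..l set, everything else clear. */
#define MASK32(h, l)	((0xffffffffu >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Simplified FIELD_PREP(): shift a value into the field described by mask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	/* __builtin_ctz() gives the field's lowest bit position. */
	return (val << __builtin_ctz(mask)) & mask;
}

/* Field layout of PCIE_REGS_LJPLL_CNTRL_2 as used by the driver above. */
#define LJPLL_REF_DIV	MASK32(17, 12)
#define LJPLL_FB_DIV	MASK32(11, 0)

int main(void)
{
	/* Same values the driver writes: ref divider 0, feedback divider 0x32. */
	uint32_t val = field_prep(LJPLL_REF_DIV, 0) |
		       field_prep(LJPLL_FB_DIV, 0x32);

	printf("LJPLL_CNTRL_2 = 0x%08x\n", val);	/* expect 0x00000032 */
	return 0;
}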
linux-master
drivers/pci/controller/dwc/pcie-keembay.c
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Axis ARTPEC-6 SoC * * Author: Niklas Cassel <[email protected]> * * Based on work done by Phil Edworthy <[email protected]> */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/signal.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include "pcie-designware.h" #define to_artpec6_pcie(x) dev_get_drvdata((x)->dev) enum artpec_pcie_variants { ARTPEC6, ARTPEC7, }; struct artpec6_pcie { struct dw_pcie *pci; struct regmap *regmap; /* DT axis,syscon-pcie */ void __iomem *phy_base; /* DT phy */ enum artpec_pcie_variants variant; enum dw_pcie_device_mode mode; }; struct artpec_pcie_of_data { enum artpec_pcie_variants variant; enum dw_pcie_device_mode mode; }; static const struct of_device_id artpec6_pcie_of_match[]; /* ARTPEC-6 specific registers */ #define PCIECFG 0x18 #define PCIECFG_DBG_OEN BIT(24) #define PCIECFG_CORE_RESET_REQ BIT(21) #define PCIECFG_LTSSM_ENABLE BIT(20) #define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16) #define PCIECFG_CLKREQ_B BIT(11) #define PCIECFG_REFCLK_ENABLE BIT(10) #define PCIECFG_PLL_ENABLE BIT(9) #define PCIECFG_PCLK_ENABLE BIT(8) #define PCIECFG_RISRCREN BIT(4) #define PCIECFG_MODE_TX_DRV_EN BIT(3) #define PCIECFG_CISRREN BIT(2) #define PCIECFG_MACRO_ENABLE BIT(0) /* ARTPEC-7 specific fields */ #define PCIECFG_REFCLKSEL BIT(23) #define PCIECFG_NOC_RESET BIT(3) #define PCIESTAT 0x1c /* ARTPEC-7 specific fields */ #define PCIESTAT_EXTREFCLK BIT(3) #define NOCCFG 0x40 #define NOCCFG_ENABLE_CLK_PCIE BIT(4) #define NOCCFG_POWER_PCIE_IDLEACK BIT(3) #define NOCCFG_POWER_PCIE_IDLE BIT(2) #define NOCCFG_POWER_PCIE_IDLEREQ BIT(1) #define PHY_STATUS 0x118 #define PHY_COSPLLLOCK BIT(0) #define PHY_TX_ASIC_OUT 0x4040 #define PHY_TX_ASIC_OUT_TX_ACK BIT(0) #define PHY_RX_ASIC_OUT 0x405c #define PHY_RX_ASIC_OUT_ACK BIT(0) static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) { u32 val; regmap_read(artpec6_pcie->regmap, offset, &val); return val; } static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val) { regmap_write(artpec6_pcie->regmap, offset, val); } static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) { struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); struct dw_pcie_rp *pp = &pci->pp; struct dw_pcie_ep *ep = &pci->ep; switch (artpec6_pcie->mode) { case DW_PCIE_RC_TYPE: return pci_addr - pp->cfg0_base; case DW_PCIE_EP_TYPE: return pci_addr - ep->phys_base; default: dev_err(pci->dev, "UNKNOWN device type\n"); } return pci_addr; } static int artpec6_pcie_establish_link(struct dw_pcie *pci) { struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); u32 val; val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_LTSSM_ENABLE; artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); return 0; } static void artpec6_pcie_stop_link(struct dw_pcie *pci) { struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); u32 val; val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val &= ~PCIECFG_LTSSM_ENABLE; artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); } static const struct dw_pcie_ops dw_pcie_ops = { .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, .start_link = artpec6_pcie_establish_link, .stop_link = artpec6_pcie_stop_link, }; static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie) { struct dw_pcie *pci = 
artpec6_pcie->pci; struct device *dev = pci->dev; u32 val; unsigned int retries; retries = 50; do { usleep_range(1000, 2000); val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); retries--; } while (retries && (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); if (!retries) dev_err(dev, "PCIe clock manager did not leave idle state\n"); retries = 50; do { usleep_range(1000, 2000); val = readl(artpec6_pcie->phy_base + PHY_STATUS); retries--; } while (retries && !(val & PHY_COSPLLLOCK)); if (!retries) dev_err(dev, "PHY PLL did not lock\n"); } static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie) { struct dw_pcie *pci = artpec6_pcie->pci; struct device *dev = pci->dev; u32 val; u16 phy_status_tx, phy_status_rx; unsigned int retries; retries = 50; do { usleep_range(1000, 2000); val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); retries--; } while (retries && (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); if (!retries) dev_err(dev, "PCIe clock manager did not leave idle state\n"); retries = 50; do { usleep_range(1000, 2000); phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT); phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT); retries--; } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) || (phy_status_rx & PHY_RX_ASIC_OUT_ACK))); if (!retries) dev_err(dev, "PHY did not enter Pn state\n"); } static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie) { switch (artpec6_pcie->variant) { case ARTPEC6: artpec6_pcie_wait_for_phy_a6(artpec6_pcie); break; case ARTPEC7: artpec6_pcie_wait_for_phy_a7(artpec6_pcie); break; } } static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie) { u32 val; val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ PCIECFG_MODE_TX_DRV_EN | PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */ PCIECFG_MACRO_ENABLE; val |= PCIECFG_REFCLK_ENABLE; val &= ~PCIECFG_DBG_OEN; val &= ~PCIECFG_CLKREQ_B; artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(5000, 6000); val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); val |= NOCCFG_ENABLE_CLK_PCIE; artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); usleep_range(20, 30); val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(6000, 7000); val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); val &= ~NOCCFG_POWER_PCIE_IDLEREQ; artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); } static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie) { struct dw_pcie *pci = artpec6_pcie->pci; u32 val; bool extrefclk; /* Check if external reference clock is connected */ val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT); extrefclk = !!(val & PCIESTAT_EXTREFCLK); dev_dbg(pci->dev, "Using reference clock: %s\n", extrefclk ? "external" : "internal"); val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val |= PCIECFG_RISRCREN | /* Receiver term. 
50 Ohm */ PCIECFG_PCLK_ENABLE; if (extrefclk) val |= PCIECFG_REFCLKSEL; else val &= ~PCIECFG_REFCLKSEL; artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(10, 20); val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); val |= NOCCFG_ENABLE_CLK_PCIE; artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); usleep_range(20, 30); val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); val &= ~NOCCFG_POWER_PCIE_IDLEREQ; artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); } static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie) { switch (artpec6_pcie->variant) { case ARTPEC6: artpec6_pcie_init_phy_a6(artpec6_pcie); break; case ARTPEC7: artpec6_pcie_init_phy_a7(artpec6_pcie); break; } } static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie) { u32 val; val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); switch (artpec6_pcie->variant) { case ARTPEC6: val |= PCIECFG_CORE_RESET_REQ; break; case ARTPEC7: val &= ~PCIECFG_NOC_RESET; break; } artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); } static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie) { u32 val; val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); switch (artpec6_pcie->variant) { case ARTPEC6: val &= ~PCIECFG_CORE_RESET_REQ; break; case ARTPEC7: val |= PCIECFG_NOC_RESET; break; } artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); usleep_range(100, 200); } static int artpec6_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); if (artpec6_pcie->variant == ARTPEC7) { pci->n_fts[0] = 180; pci->n_fts[1] = 180; } artpec6_pcie_assert_core_reset(artpec6_pcie); artpec6_pcie_init_phy(artpec6_pcie); artpec6_pcie_deassert_core_reset(artpec6_pcie); artpec6_pcie_wait_for_phy(artpec6_pcie); return 0; } static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { .host_init = artpec6_pcie_host_init, }; static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); enum pci_barno bar; artpec6_pcie_assert_core_reset(artpec6_pcie); artpec6_pcie_init_phy(artpec6_pcie); artpec6_pcie_deassert_core_reset(artpec6_pcie); artpec6_pcie_wait_for_phy(artpec6_pcie); for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) dw_pcie_ep_reset_bar(pci, bar); } static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); return -EINVAL; case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); } return 0; } static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = artpec6_pcie_ep_init, .raise_irq = artpec6_pcie_raise_irq, }; static int artpec6_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; struct artpec6_pcie *artpec6_pcie; int ret; const struct artpec_pcie_of_data *data; enum artpec_pcie_variants variant; enum dw_pcie_device_mode mode; u32 val; data = of_device_get_match_data(dev); if (!data) return -EINVAL; variant = (enum artpec_pcie_variants)data->variant; mode = (enum dw_pcie_device_mode)data->mode; artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); if (!artpec6_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->ops = &dw_pcie_ops; artpec6_pcie->pci = 
pci; artpec6_pcie->variant = variant; artpec6_pcie->mode = mode; artpec6_pcie->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(artpec6_pcie->phy_base)) return PTR_ERR(artpec6_pcie->phy_base); artpec6_pcie->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "axis,syscon-pcie"); if (IS_ERR(artpec6_pcie->regmap)) return PTR_ERR(artpec6_pcie->regmap); platform_set_drvdata(pdev, artpec6_pcie); switch (artpec6_pcie->mode) { case DW_PCIE_RC_TYPE: if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST)) return -ENODEV; pci->pp.ops = &artpec6_pcie_host_ops; ret = dw_pcie_host_init(&pci->pp); if (ret < 0) return ret; break; case DW_PCIE_EP_TYPE: if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP)) return -ENODEV; val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); val &= ~PCIECFG_DEVICE_TYPE_MASK; artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); pci->ep.ops = &pcie_ep_ops; return dw_pcie_ep_init(&pci->ep); default: dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); } return 0; } static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = { .variant = ARTPEC6, .mode = DW_PCIE_RC_TYPE, }; static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = { .variant = ARTPEC6, .mode = DW_PCIE_EP_TYPE, }; static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = { .variant = ARTPEC7, .mode = DW_PCIE_RC_TYPE, }; static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = { .variant = ARTPEC7, .mode = DW_PCIE_EP_TYPE, }; static const struct of_device_id artpec6_pcie_of_match[] = { { .compatible = "axis,artpec6-pcie", .data = &artpec6_pcie_rc_of_data, }, { .compatible = "axis,artpec6-pcie-ep", .data = &artpec6_pcie_ep_of_data, }, { .compatible = "axis,artpec7-pcie", .data = &artpec7_pcie_rc_of_data, }, { .compatible = "axis,artpec7-pcie-ep", .data = &artpec7_pcie_ep_of_data, }, {}, }; static struct platform_driver artpec6_pcie_driver = { .probe = artpec6_pcie_probe, .driver = { .name = "artpec6-pcie", .of_match_table = artpec6_pcie_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(artpec6_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-artpec6.c
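The ARTPEC-6 init path above repeatedly polls syscon and PHY status bits with a bounded retry count (50 iterations of usleep_range()) rather than a timeout helper. Below is a minimal user-space sketch of that poll-until-bit-set pattern, not kernel code: read_phy_status() is a hypothetical stand-in for the driver's readl() of PHY_STATUS, and the sleep is omitted.

/* Standalone sketch (assumption: this is only an illustration of the retry
 * loop in artpec6_pcie_wait_for_phy_a6(), not the driver itself). */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PHY_COSPLLLOCK (1u << 0)

static unsigned int fake_cycles;

/* Hypothetical register read: pretend the PLL locks after a few polls. */
static uint32_t read_phy_status(void)
{
	return (++fake_cycles >= 3) ? PHY_COSPLLLOCK : 0;
}

static bool wait_for_pll_lock(void)
{
	unsigned int retries = 50;
	uint32_t val;

	do {
		/* the driver sleeps 1-2 ms per iteration via usleep_range() */
		val = read_phy_status();
		retries--;
	} while (retries && !(val & PHY_COSPLLLOCK));

	/* zero retries left mirrors the driver's "PHY PLL did not lock" path */
	return retries != 0;
}

int main(void)
{
	printf("PLL %s\n", wait_for_pll_lock() ? "locked" : "did not lock");
	return 0;
}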
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Texas Instruments Keystone SoCs * * Copyright (C) 2013-2014 Texas Instruments., Ltd. * https://www.ti.com * * Author: Murali Karicheri <[email protected]> * Implementation based on pci-exynos.c and pcie-designware.c */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mfd/syscon.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/resource.h> #include <linux/signal.h> #include "../../pci.h" #include "pcie-designware.h" #define PCIE_VENDORID_MASK 0xffff #define PCIE_DEVICEID_SHIFT 16 /* Application registers */ #define CMD_STATUS 0x004 #define LTSSM_EN_VAL BIT(0) #define OB_XLAT_EN_VAL BIT(1) #define DBI_CS2 BIT(5) #define CFG_SETUP 0x008 #define CFG_BUS(x) (((x) & 0xff) << 16) #define CFG_DEVICE(x) (((x) & 0x1f) << 8) #define CFG_FUNC(x) ((x) & 0x7) #define CFG_TYPE1 BIT(24) #define OB_SIZE 0x030 #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) #define OB_ENABLEN BIT(0) #define OB_WIN_SIZE 8 /* 8MB */ #define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1))) #define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1))) #define PCIE_EP_IRQ_SET 0x64 #define PCIE_EP_IRQ_CLR 0x68 #define INT_ENABLE BIT(0) /* IRQ register defines */ #define IRQ_EOI 0x050 #define MSI_IRQ 0x054 #define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4)) #define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4)) #define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4)) #define MSI_IRQ_OFFSET 4 #define IRQ_STATUS(n) (0x184 + ((n) << 4)) #define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4)) #define INTx_EN BIT(0) #define ERR_IRQ_STATUS 0x1c4 #define ERR_IRQ_ENABLE_SET 0x1c8 #define ERR_AER BIT(5) /* ECRC error */ #define AM6_ERR_AER BIT(4) /* AM6 ECRC error */ #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ #define ERR_CORR BIT(3) /* Correctable error */ #define ERR_NONFATAL BIT(2) /* Non-fatal error */ #define ERR_FATAL BIT(1) /* Fatal error */ #define ERR_SYS BIT(0) /* System error */ #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ ERR_NONFATAL | ERR_FATAL | ERR_SYS) /* PCIE controller device IDs */ #define PCIE_RC_K2HK 0xb008 #define PCIE_RC_K2E 0xb009 #define PCIE_RC_K2L 0xb00a #define PCIE_RC_K2G 0xb00b #define KS_PCIE_DEV_TYPE_MASK (0x3 << 1) #define KS_PCIE_DEV_TYPE(mode) ((mode) << 1) #define EP 0x0 #define LEG_EP 0x1 #define RC 0x2 #define KS_PCIE_SYSCLOCKOUTEN BIT(0) #define AM654_PCIE_DEV_TYPE_MASK 0x3 #define AM654_WIN_SIZE SZ_64K #define APP_ADDR_SPACE_0 (16 * SZ_1K) #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) struct ks_pcie_of_data { enum dw_pcie_device_mode mode; const struct dw_pcie_host_ops *host_ops; const struct dw_pcie_ep_ops *ep_ops; u32 version; }; struct keystone_pcie { struct dw_pcie *pci; /* PCI Device ID */ u32 device_id; int legacy_host_irqs[PCI_NUM_INTX]; struct device_node *legacy_intc_np; int msi_host_irq; int num_lanes; u32 num_viewport; struct phy **phy; struct device_link **link; struct device_node *msi_intc_np; struct irq_domain *legacy_irq_domain; struct device_node *np; /* Application register space */ void __iomem *va_app_base; /* DT 1st resource */ struct resource app; bool is_am6; }; static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 
offset) { return readl(ks_pcie->va_app_base + offset); } static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) { writel(val, ks_pcie->va_app_base + offset); } static void ks_pcie_msi_irq_ack(struct irq_data *data) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data); struct keystone_pcie *ks_pcie; u32 irq = data->hwirq; struct dw_pcie *pci; u32 reg_offset; u32 bit_pos; pci = to_dw_pcie_from_pp(pp); ks_pcie = to_keystone_pcie(pci); reg_offset = irq % 8; bit_pos = irq >> 3; ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset), BIT(bit_pos)); ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); } static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data); struct keystone_pcie *ks_pcie; struct dw_pcie *pci; u64 msi_target; pci = to_dw_pcie_from_pp(pp); ks_pcie = to_keystone_pcie(pci); msi_target = ks_pcie->app.start + MSI_IRQ; msg->address_lo = lower_32_bits(msi_target); msg->address_hi = upper_32_bits(msi_target); msg->data = data->hwirq; dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)data->hwirq, msg->address_hi, msg->address_lo); } static int ks_pcie_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static void ks_pcie_msi_mask(struct irq_data *data) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data); struct keystone_pcie *ks_pcie; u32 irq = data->hwirq; struct dw_pcie *pci; unsigned long flags; u32 reg_offset; u32 bit_pos; raw_spin_lock_irqsave(&pp->lock, flags); pci = to_dw_pcie_from_pp(pp); ks_pcie = to_keystone_pcie(pci); reg_offset = irq % 8; bit_pos = irq >> 3; ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset), BIT(bit_pos)); raw_spin_unlock_irqrestore(&pp->lock, flags); } static void ks_pcie_msi_unmask(struct irq_data *data) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data); struct keystone_pcie *ks_pcie; u32 irq = data->hwirq; struct dw_pcie *pci; unsigned long flags; u32 reg_offset; u32 bit_pos; raw_spin_lock_irqsave(&pp->lock, flags); pci = to_dw_pcie_from_pp(pp); ks_pcie = to_keystone_pcie(pci); reg_offset = irq % 8; bit_pos = irq >> 3; ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset), BIT(bit_pos)); raw_spin_unlock_irqrestore(&pp->lock, flags); } static struct irq_chip ks_pcie_msi_irq_chip = { .name = "KEYSTONE-PCI-MSI", .irq_ack = ks_pcie_msi_irq_ack, .irq_compose_msi_msg = ks_pcie_compose_msi_msg, .irq_set_affinity = ks_pcie_msi_set_affinity, .irq_mask = ks_pcie_msi_mask, .irq_unmask = ks_pcie_msi_unmask, }; static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp) { pp->msi_irq_chip = &ks_pcie_msi_irq_chip; return dw_pcie_allocate_domains(pp); } static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) { struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; u32 pending; pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset)); if (BIT(0) & pending) { dev_dbg(dev, ": irq: irq_offset %d", offset); generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset); } /* EOI the INTx interrupt */ ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); } static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) { ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); } static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) { u32 reg; struct device *dev = ks_pcie->pci->dev; reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS); if (!reg) return IRQ_NONE; if (reg & ERR_SYS) dev_err(dev, "System 
Error\n"); if (reg & ERR_FATAL) dev_err(dev, "Fatal Error\n"); if (reg & ERR_NONFATAL) dev_dbg(dev, "Non Fatal Error\n"); if (reg & ERR_CORR) dev_dbg(dev, "Correctable Error\n"); if (!ks_pcie->is_am6 && (reg & ERR_AXI)) dev_err(dev, "AXI tag lookup fatal Error\n"); if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER))) dev_err(dev, "ECRC Error\n"); ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); return IRQ_HANDLED; } static void ks_pcie_ack_legacy_irq(struct irq_data *d) { } static void ks_pcie_mask_legacy_irq(struct irq_data *d) { } static void ks_pcie_unmask_legacy_irq(struct irq_data *d) { } static struct irq_chip ks_pcie_legacy_irq_chip = { .name = "Keystone-PCI-Legacy-IRQ", .irq_ack = ks_pcie_ack_legacy_irq, .irq_mask = ks_pcie_mask_legacy_irq, .irq_unmask = ks_pcie_unmask_legacy_irq, }; static int ks_pcie_init_legacy_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw_irq) { irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip, handle_level_irq); irq_set_chip_data(irq, d->host_data); return 0; } static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = { .map = ks_pcie_init_legacy_irq_map, .xlate = irq_domain_xlate_onetwocell, }; /** * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone * PCIe host controller driver information. * * Since modification of dbi_cs2 involves different clock domain, read the * status back to ensure the transition is complete. */ static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) { u32 val; val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); val |= DBI_CS2; ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); do { val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); } while (!(val & DBI_CS2)); } /** * ks_pcie_clear_dbi_mode() - Disable DBI mode * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone * PCIe host controller driver information. * * Since modification of dbi_cs2 involves different clock domain, read the * status back to ensure the transition is complete. 
*/ static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) { u32 val; val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); val &= ~DBI_CS2; ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); do { val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); } while (val & DBI_CS2); } static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) { u32 val; u32 num_viewport = ks_pcie->num_viewport; struct dw_pcie *pci = ks_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; u64 start, end; struct resource *mem; int i; mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res; start = mem->start; end = mem->end; /* Disable BARs for inbound access */ ks_pcie_set_dbi_mode(ks_pcie); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); ks_pcie_clear_dbi_mode(ks_pcie); if (ks_pcie->is_am6) return; val = ilog2(OB_WIN_SIZE); ks_pcie_app_writel(ks_pcie, OB_SIZE, val); /* Using Direct 1:1 mapping of RC <-> PCI memory space */ for (i = 0; i < num_viewport && (start < end); i++) { ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i), lower_32_bits(start) | OB_ENABLEN); ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i), upper_32_bits(start)); start += OB_WIN_SIZE * SZ_1M; } val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); val |= OB_XLAT_EN_VAL; ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); } static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); u32 reg; reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); return pp->va_cfg0_base + where; } static struct pci_ops ks_child_pcie_ops = { .map_bus = ks_pcie_other_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; /** * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization * @bus: A pointer to the PCI bus structure. * * This sets BAR0 to enable inbound access for MSI_IRQ register */ static int ks_pcie_v3_65_add_bus(struct pci_bus *bus) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); if (!pci_is_root_bus(bus)) return 0; /* Configure and set up BAR0 */ ks_pcie_set_dbi_mode(ks_pcie); /* Enable BAR0 */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); ks_pcie_clear_dbi_mode(ks_pcie); /* * For BAR0, just setting bus address for inbound writes (MSI) should * be sufficient. Use physical address to avoid any conflicts. */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); return 0; } static struct pci_ops ks_pcie_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, .add_bus = ks_pcie_v3_65_add_bus, }; /** * ks_pcie_link_up() - Check if link up * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host * controller driver information. 
*/ static int ks_pcie_link_up(struct dw_pcie *pci) { u32 val; val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0); val &= PORT_LOGIC_LTSSM_STATE_MASK; return (val == PORT_LOGIC_LTSSM_STATE_L0); } static void ks_pcie_stop_link(struct dw_pcie *pci) { struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); u32 val; /* Disable Link training */ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); val &= ~LTSSM_EN_VAL; ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); } static int ks_pcie_start_link(struct dw_pcie *pci) { struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); u32 val; /* Initiate Link Training */ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); return 0; } static void ks_pcie_quirk(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; struct pci_dev *bridge; static const struct pci_device_id rc_pci_devids[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { 0, }, }; if (pci_is_root_bus(bus)) bridge = dev; /* look for the host bridge */ while (!pci_is_root_bus(bus)) { bridge = bus->self; bus = bus->parent; } if (!bridge) return; /* * Keystone PCI controller has a h/w limitation of * 256 bytes maximum read request size. It can't handle * anything higher than this. So force this limit on * all downstream devices. */ if (pci_match_id(rc_pci_devids, bridge)) { if (pcie_get_readrq(dev) > 256) { dev_info(&dev->dev, "limiting MRRS to 256\n"); pcie_set_readrq(dev, 256); } } } DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); static void ks_pcie_msi_irq_handler(struct irq_desc *desc) { unsigned int irq = desc->irq_data.hwirq; struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); u32 offset = irq - ks_pcie->msi_host_irq; struct dw_pcie *pci = ks_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = pci->dev; struct irq_chip *chip = irq_desc_get_chip(desc); u32 vector, reg, pos; dev_dbg(dev, "%s, irq %d\n", __func__, irq); /* * The chained irq handler installation would have replaced normal * interrupt driver handler so we need to take care of mask/unmask and * ack operation. */ chained_irq_enter(chip, desc); reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset)); /* * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit * shows 1, 9, 17, 25 and so forth */ for (pos = 0; pos < 4; pos++) { if (!(reg & BIT(pos))) continue; vector = offset + (pos << 3); dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector); generic_handle_domain_irq(pp->irq_domain, vector); } chained_irq_exit(chip, desc); } /** * ks_pcie_legacy_irq_handler() - Handle legacy interrupt * @desc: Pointer to irq descriptor * * Traverse through pending legacy interrupts and invoke handler for each. Also * takes care of interrupt controller level mask/ack operation. 
*/ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; struct irq_chip *chip = irq_desc_get_chip(desc); dev_dbg(dev, ": Handling legacy irq %d\n", irq); /* * The chained irq handler installation would have replaced normal * interrupt driver handler so we need to take care of mask/unmask and * ack operation. */ chained_irq_enter(chip, desc); ks_pcie_handle_legacy_irq(ks_pcie, irq_offset); chained_irq_exit(chip, desc); } static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie) { struct device *dev = ks_pcie->pci->dev; struct device_node *np = ks_pcie->np; struct device_node *intc_np; struct irq_data *irq_data; int irq_count, irq, ret, i; if (!IS_ENABLED(CONFIG_PCI_MSI)) return 0; intc_np = of_get_child_by_name(np, "msi-interrupt-controller"); if (!intc_np) { if (ks_pcie->is_am6) return 0; dev_warn(dev, "msi-interrupt-controller node is absent\n"); return -EINVAL; } irq_count = of_irq_count(intc_np); if (!irq_count) { dev_err(dev, "No IRQ entries in msi-interrupt-controller\n"); ret = -EINVAL; goto err; } for (i = 0; i < irq_count; i++) { irq = irq_of_parse_and_map(intc_np, i); if (!irq) { ret = -EINVAL; goto err; } if (!ks_pcie->msi_host_irq) { irq_data = irq_get_irq_data(irq); if (!irq_data) { ret = -EINVAL; goto err; } ks_pcie->msi_host_irq = irq_data->hwirq; } irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler, ks_pcie); } of_node_put(intc_np); return 0; err: of_node_put(intc_np); return ret; } static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) { struct device *dev = ks_pcie->pci->dev; struct irq_domain *legacy_irq_domain; struct device_node *np = ks_pcie->np; struct device_node *intc_np; int irq_count, irq, ret = 0, i; intc_np = of_get_child_by_name(np, "legacy-interrupt-controller"); if (!intc_np) { /* * Since legacy interrupts are modeled as edge-interrupts in * AM6, keep it disabled for now. */ if (ks_pcie->is_am6) return 0; dev_warn(dev, "legacy-interrupt-controller node is absent\n"); return -EINVAL; } irq_count = of_irq_count(intc_np); if (!irq_count) { dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n"); ret = -EINVAL; goto err; } for (i = 0; i < irq_count; i++) { irq = irq_of_parse_and_map(intc_np, i); if (!irq) { ret = -EINVAL; goto err; } ks_pcie->legacy_host_irqs[i] = irq; irq_set_chained_handler_and_data(irq, ks_pcie_legacy_irq_handler, ks_pcie); } legacy_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX, &ks_pcie_legacy_irq_domain_ops, NULL); if (!legacy_irq_domain) { dev_err(dev, "Failed to add irq domain for legacy irqs\n"); ret = -EINVAL; goto err; } ks_pcie->legacy_irq_domain = legacy_irq_domain; for (i = 0; i < PCI_NUM_INTX; i++) ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN); err: of_node_put(intc_np); return ret; } #ifdef CONFIG_ARM /* * When a PCI device does not exist during config cycles, keystone host * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE). * This handler always returns 0 for this kind of fault. 
*/ static int ks_pcie_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { unsigned long instr = *(unsigned long *) instruction_pointer(regs); if ((instr & 0x0e100090) == 0x00100090) { int reg = (instr >> 12) & 15; regs->uregs[reg] = -1; regs->ARM_pc += 4; } return 0; } #endif static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) { int ret; unsigned int id; struct regmap *devctrl_regs; struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; struct device_node *np = dev->of_node; struct of_phandle_args args; unsigned int offset = 0; devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id"); if (IS_ERR(devctrl_regs)) return PTR_ERR(devctrl_regs); /* Do not error out to maintain old DT compatibility */ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args); if (!ret) offset = args.args[0]; ret = regmap_read(devctrl_regs, offset, &id); if (ret) return ret; dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); int ret; pp->bridge->ops = &ks_pcie_ops; if (!ks_pcie->is_am6) pp->bridge->child_ops = &ks_child_pcie_ops; ret = ks_pcie_config_legacy_irq(ks_pcie); if (ret) return ret; ret = ks_pcie_config_msi_irq(ks_pcie); if (ret) return ret; ks_pcie_stop_link(pci); ks_pcie_setup_rc_app_regs(ks_pcie); writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), pci->dbi_base + PCI_IO_BASE); ret = ks_pcie_init_id(ks_pcie); if (ret < 0) return ret; #ifdef CONFIG_ARM /* * PCIe access errors that result into OCP errors are caught by ARM as * "External aborts" */ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); #endif return 0; } static const struct dw_pcie_host_ops ks_pcie_host_ops = { .host_init = ks_pcie_host_init, .msi_host_init = ks_pcie_msi_host_init, }; static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { .host_init = ks_pcie_host_init, }; static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) { struct keystone_pcie *ks_pcie = priv; return ks_pcie_handle_error_irq(ks_pcie); } static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val) { struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); ks_pcie_set_dbi_mode(ks_pcie); dw_pcie_write(base + reg, size, val); ks_pcie_clear_dbi_mode(ks_pcie); } static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { .start_link = ks_pcie_start_link, .stop_link = ks_pcie_stop_link, .link_up = ks_pcie_link_up, .write_dbi2 = ks_pcie_am654_write_dbi2, }; static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); int flags; ep->page_size = AM654_WIN_SIZE; flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags); } static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) { struct dw_pcie *pci = ks_pcie->pci; u8 int_pin; int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN); if (int_pin == 0 || int_pin > 4) return; ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin), INT_ENABLE); ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE); mdelay(1); ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE); 
ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin), INT_ENABLE); } static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); switch (type) { case PCI_EPC_IRQ_LEGACY: ks_pcie_am654_raise_legacy_irq(ks_pcie); break; case PCI_EPC_IRQ_MSI: dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); break; case PCI_EPC_IRQ_MSIX: dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); break; default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); return -EINVAL; } return 0; } static const struct pci_epc_features ks_pcie_am654_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, .reserved_bar = 1 << BAR_0 | 1 << BAR_1, .bar_fixed_64bit = 1 << BAR_0, .bar_fixed_size[2] = SZ_1M, .bar_fixed_size[3] = SZ_64K, .bar_fixed_size[4] = 256, .bar_fixed_size[5] = SZ_1M, .align = SZ_1M, }; static const struct pci_epc_features* ks_pcie_am654_get_features(struct dw_pcie_ep *ep) { return &ks_pcie_am654_epc_features; } static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = { .ep_init = ks_pcie_am654_ep_init, .raise_irq = ks_pcie_am654_raise_irq, .get_features = &ks_pcie_am654_get_features, }; static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) { int num_lanes = ks_pcie->num_lanes; while (num_lanes--) { phy_power_off(ks_pcie->phy[num_lanes]); phy_exit(ks_pcie->phy[num_lanes]); } } static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie) { int i; int ret; int num_lanes = ks_pcie->num_lanes; for (i = 0; i < num_lanes; i++) { ret = phy_reset(ks_pcie->phy[i]); if (ret < 0) goto err_phy; ret = phy_init(ks_pcie->phy[i]); if (ret < 0) goto err_phy; ret = phy_power_on(ks_pcie->phy[i]); if (ret < 0) { phy_exit(ks_pcie->phy[i]); goto err_phy; } } return 0; err_phy: while (--i >= 0) { phy_power_off(ks_pcie->phy[i]); phy_exit(ks_pcie->phy[i]); } return ret; } static int ks_pcie_set_mode(struct device *dev) { struct device_node *np = dev->of_node; struct of_phandle_args args; unsigned int offset = 0; struct regmap *syscon; u32 val; u32 mask; int ret = 0; syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); if (IS_ERR(syscon)) return 0; /* Do not error out to maintain old DT compatibility */ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args); if (!ret) offset = args.args[0]; mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN; val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN; ret = regmap_update_bits(syscon, offset, mask, val); if (ret) { dev_err(dev, "failed to set pcie mode\n"); return ret; } return 0; } static int ks_pcie_am654_set_mode(struct device *dev, enum dw_pcie_device_mode mode) { struct device_node *np = dev->of_node; struct of_phandle_args args; unsigned int offset = 0; struct regmap *syscon; u32 val; u32 mask; int ret = 0; syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); if (IS_ERR(syscon)) return 0; /* Do not error out to maintain old DT compatibility */ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args); if (!ret) offset = args.args[0]; mask = AM654_PCIE_DEV_TYPE_MASK; switch (mode) { case DW_PCIE_RC_TYPE: val = RC; break; case DW_PCIE_EP_TYPE: val = EP; break; default: dev_err(dev, "INVALID device type %d\n", mode); return -EINVAL; } ret = regmap_update_bits(syscon, offset, mask, val); if (ret) { dev_err(dev, "failed to set pcie mode\n"); return ret; } return 0; } static const struct ks_pcie_of_data 
ks_pcie_rc_of_data = { .host_ops = &ks_pcie_host_ops, .version = DW_PCIE_VER_365A, }; static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = { .host_ops = &ks_pcie_am654_host_ops, .mode = DW_PCIE_RC_TYPE, .version = DW_PCIE_VER_490A, }; static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = { .ep_ops = &ks_pcie_am654_ep_ops, .mode = DW_PCIE_EP_TYPE, .version = DW_PCIE_VER_490A, }; static const struct of_device_id ks_pcie_of_match[] = { { .type = "pci", .data = &ks_pcie_rc_of_data, .compatible = "ti,keystone-pcie", }, { .data = &ks_pcie_am654_rc_of_data, .compatible = "ti,am654-pcie-rc", }, { .data = &ks_pcie_am654_ep_of_data, .compatible = "ti,am654-pcie-ep", }, { }, }; static int __init ks_pcie_probe(struct platform_device *pdev) { const struct dw_pcie_host_ops *host_ops; const struct dw_pcie_ep_ops *ep_ops; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; const struct ks_pcie_of_data *data; enum dw_pcie_device_mode mode; struct dw_pcie *pci; struct keystone_pcie *ks_pcie; struct device_link **link; struct gpio_desc *gpiod; struct resource *res; void __iomem *base; u32 num_viewport; struct phy **phy; u32 num_lanes; char name[10]; u32 version; int ret; int irq; int i; data = of_device_get_match_data(dev); if (!data) return -EINVAL; version = data->version; host_ops = data->host_ops; ep_ops = data->ep_ops; mode = data->mode; ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); if (!ks_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); ks_pcie->va_app_base = devm_ioremap_resource(dev, res); if (IS_ERR(ks_pcie->va_app_base)) return PTR_ERR(ks_pcie->va_app_base); ks_pcie->app = *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics"); base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); if (of_device_is_compatible(np, "ti,am654-pcie-rc")) ks_pcie->is_am6 = true; pci->dbi_base = base; pci->dbi_base2 = base; pci->dev = dev; pci->ops = &ks_pcie_dw_pcie_ops; pci->version = version; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, "ks-pcie-error-irq", ks_pcie); if (ret < 0) { dev_err(dev, "failed to request error IRQ %d\n", irq); return ret; } ret = of_property_read_u32(np, "num-lanes", &num_lanes); if (ret) num_lanes = 1; phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL); if (!phy) return -ENOMEM; link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL); if (!link) return -ENOMEM; for (i = 0; i < num_lanes; i++) { snprintf(name, sizeof(name), "pcie-phy%d", i); phy[i] = devm_phy_optional_get(dev, name); if (IS_ERR(phy[i])) { ret = PTR_ERR(phy[i]); goto err_link; } if (!phy[i]) continue; link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); if (!link[i]) { ret = -EINVAL; goto err_link; } } ks_pcie->np = np; ks_pcie->pci = pci; ks_pcie->link = link; ks_pcie->num_lanes = num_lanes; ks_pcie->phy = phy; gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { ret = PTR_ERR(gpiod); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get reset GPIO\n"); goto err_link; } ret = ks_pcie_enable_phy(ks_pcie); if (ret) { dev_err(dev, "failed to enable phy\n"); goto err_link; } platform_set_drvdata(pdev, ks_pcie); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync failed\n"); goto err_get_sync; } if (dw_pcie_ver_is_ge(pci, 480A)) ret = 
ks_pcie_am654_set_mode(dev, mode); else ret = ks_pcie_set_mode(dev); if (ret < 0) goto err_get_sync; switch (mode) { case DW_PCIE_RC_TYPE: if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) { ret = -ENODEV; goto err_get_sync; } ret = of_property_read_u32(np, "num-viewport", &num_viewport); if (ret < 0) { dev_err(dev, "unable to read *num-viewport* property\n"); goto err_get_sync; } /* * "Power Sequencing and Reset Signal Timings" table in * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0 * indicates PERST# should be deasserted after minimum of 100us * once REFCLK is stable. The REFCLK to the connector in RC * mode is selected while enabling the PHY. So deassert PERST# * after 100 us. */ if (gpiod) { usleep_range(100, 200); gpiod_set_value_cansleep(gpiod, 1); } ks_pcie->num_viewport = num_viewport; pci->pp.ops = host_ops; ret = dw_pcie_host_init(&pci->pp); if (ret < 0) goto err_get_sync; break; case DW_PCIE_EP_TYPE: if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) { ret = -ENODEV; goto err_get_sync; } pci->ep.ops = ep_ops; ret = dw_pcie_ep_init(&pci->ep); if (ret < 0) goto err_get_sync; break; default: dev_err(dev, "INVALID device type %d\n", mode); } ks_pcie_enable_error_irq(ks_pcie); return 0; err_get_sync: pm_runtime_put(dev); pm_runtime_disable(dev); ks_pcie_disable_phy(ks_pcie); err_link: while (--i >= 0 && link[i]) device_link_del(link[i]); return ret; } static int __exit ks_pcie_remove(struct platform_device *pdev) { struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); struct device_link **link = ks_pcie->link; int num_lanes = ks_pcie->num_lanes; struct device *dev = &pdev->dev; pm_runtime_put(dev); pm_runtime_disable(dev); ks_pcie_disable_phy(ks_pcie); while (num_lanes--) device_link_del(link[num_lanes]); return 0; } static struct platform_driver ks_pcie_driver __refdata = { .probe = ks_pcie_probe, .remove = __exit_p(ks_pcie_remove), .driver = { .name = "keystone-pcie", .of_match_table = ks_pcie_of_match, }, }; builtin_platform_driver(ks_pcie_driver);
linux-master
drivers/pci/controller/dwc/pci-keystone.c
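The Keystone MSI code above relies on a fixed register layout: for a given hwirq, hwirq % 8 selects the MSI_IRQ_STATUS(n) register and hwirq >> 3 selects the bit within it, and the chained handler inverts the mapping as vector = offset + (pos << 3). A small standalone sketch (plain C, no kernel headers) that checks this mapping round-trips; it is illustrative only and does not touch any hardware registers.

/* Standalone sketch of the Keystone MSI bit layout used by
 * ks_pcie_msi_irq_ack() and ks_pcie_msi_irq_handler(). Each of the eight
 * MSI_IRQ status registers carries four vectors strided by 8: MSI0 holds
 * vectors 0, 8, 16, 24; MSI1 holds 1, 9, 17, 25; and so on. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned int hwirq = 0; hwirq < 32; hwirq++) {
		unsigned int reg_offset = hwirq % 8; /* which MSI_IRQ_STATUS(n) */
		unsigned int bit_pos = hwirq >> 3;   /* which bit in that register */

		/* The chained handler recovers the vector from (register, bit): */
		unsigned int vector = reg_offset + (bit_pos << 3);

		assert(vector == hwirq); /* the mapping round-trips */
		printf("hwirq %2u -> MSI_IRQ_STATUS(%u) bit %u\n",
		       hwirq, reg_offset, bit_pos);
	}
	return 0;
}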
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Amlogic MESON SoCs * * Copyright (c) 2018 Amlogic, inc. * Author: Yue Wang <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/of_gpio.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/resource.h> #include <linux/types.h> #include <linux/phy/phy.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include "pcie-designware.h" #define to_meson_pcie(x) dev_get_drvdata((x)->dev) #define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5) #define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12) /* PCIe specific config registers */ #define PCIE_CFG0 0x0 #define APP_LTSSM_ENABLE BIT(7) #define PCIE_CFG_STATUS12 0x30 #define IS_SMLH_LINK_UP(x) ((x) & (1 << 6)) #define IS_RDLH_LINK_UP(x) ((x) & (1 << 16)) #define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11) #define PCIE_CFG_STATUS17 0x44 #define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1) #define WAIT_LINKUP_TIMEOUT 4000 #define PORT_CLK_RATE 100000000UL #define MAX_PAYLOAD_SIZE 256 #define MAX_READ_REQ_SIZE 256 #define PCIE_RESET_DELAY 500 #define PCIE_SHARED_RESET 1 #define PCIE_NORMAL_RESET 0 enum pcie_data_rate { PCIE_GEN1, PCIE_GEN2, PCIE_GEN3, PCIE_GEN4 }; struct meson_pcie_clk_res { struct clk *clk; struct clk *port_clk; struct clk *general_clk; }; struct meson_pcie_rc_reset { struct reset_control *port; struct reset_control *apb; }; struct meson_pcie { struct dw_pcie pci; void __iomem *cfg_base; struct meson_pcie_clk_res clk_res; struct meson_pcie_rc_reset mrst; struct gpio_desc *reset_gpio; struct phy *phy; }; static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp, const char *id, u32 reset_type) { struct device *dev = mp->pci.dev; struct reset_control *reset; if (reset_type == PCIE_SHARED_RESET) reset = devm_reset_control_get_shared(dev, id); else reset = devm_reset_control_get(dev, id); return reset; } static int meson_pcie_get_resets(struct meson_pcie *mp) { struct meson_pcie_rc_reset *mrst = &mp->mrst; mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET); if (IS_ERR(mrst->port)) return PTR_ERR(mrst->port); reset_control_deassert(mrst->port); mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET); if (IS_ERR(mrst->apb)) return PTR_ERR(mrst->apb); reset_control_deassert(mrst->apb); return 0; } static int meson_pcie_get_mems(struct platform_device *pdev, struct meson_pcie *mp) { struct dw_pcie *pci = &mp->pci; pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg"); if (IS_ERR(mp->cfg_base)) return PTR_ERR(mp->cfg_base); return 0; } static int meson_pcie_power_on(struct meson_pcie *mp) { int ret = 0; ret = phy_init(mp->phy); if (ret) return ret; ret = phy_power_on(mp->phy); if (ret) { phy_exit(mp->phy); return ret; } return 0; } static void meson_pcie_power_off(struct meson_pcie *mp) { phy_power_off(mp->phy); phy_exit(mp->phy); } static int meson_pcie_reset(struct meson_pcie *mp) { struct meson_pcie_rc_reset *mrst = &mp->mrst; int ret = 0; ret = phy_reset(mp->phy); if (ret) return ret; reset_control_assert(mrst->port); reset_control_assert(mrst->apb); udelay(PCIE_RESET_DELAY); reset_control_deassert(mrst->port); reset_control_deassert(mrst->apb); udelay(PCIE_RESET_DELAY); return 0; } static inline void meson_pcie_disable_clock(void *data) { struct clk *clk = data; 
clk_disable_unprepare(clk); } static inline struct clk *meson_pcie_probe_clock(struct device *dev, const char *id, u64 rate) { struct clk *clk; int ret; clk = devm_clk_get(dev, id); if (IS_ERR(clk)) return clk; if (rate) { ret = clk_set_rate(clk, rate); if (ret) { dev_err(dev, "set clk rate failed, ret = %d\n", ret); return ERR_PTR(ret); } } ret = clk_prepare_enable(clk); if (ret) { dev_err(dev, "couldn't enable clk\n"); return ERR_PTR(ret); } devm_add_action_or_reset(dev, meson_pcie_disable_clock, clk); return clk; } static int meson_pcie_probe_clocks(struct meson_pcie *mp) { struct device *dev = mp->pci.dev; struct meson_pcie_clk_res *res = &mp->clk_res; res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE); if (IS_ERR(res->port_clk)) return PTR_ERR(res->port_clk); res->general_clk = meson_pcie_probe_clock(dev, "general", 0); if (IS_ERR(res->general_clk)) return PTR_ERR(res->general_clk); res->clk = meson_pcie_probe_clock(dev, "pclk", 0); if (IS_ERR(res->clk)) return PTR_ERR(res->clk); return 0; } static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg) { return readl(mp->cfg_base + reg); } static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg) { writel(val, mp->cfg_base + reg); } static void meson_pcie_assert_reset(struct meson_pcie *mp) { gpiod_set_value_cansleep(mp->reset_gpio, 1); udelay(500); gpiod_set_value_cansleep(mp->reset_gpio, 0); } static void meson_pcie_ltssm_enable(struct meson_pcie *mp) { u32 val; val = meson_cfg_readl(mp, PCIE_CFG0); val |= APP_LTSSM_ENABLE; meson_cfg_writel(mp, val, PCIE_CFG0); } static int meson_size_to_payload(struct meson_pcie *mp, int size) { struct device *dev = mp->pci.dev; /* * dwc supports 2^(val+7) payload size, which val is 0~5 default to 1. * So if input size is not 2^order alignment or less than 2^7 or bigger * than 2^12, just set to default size 2^(1+7). 
*/ if (!is_power_of_2(size) || size < 128 || size > 4096) { dev_warn(dev, "payload size %d, set to default 256\n", size); return 1; } return fls(size) - 8; } static void meson_set_max_payload(struct meson_pcie *mp, int size) { struct dw_pcie *pci = &mp->pci; u32 val; u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); int max_payload_size = meson_size_to_payload(mp, size); val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL); val &= ~PCI_EXP_DEVCTL_PAYLOAD; dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val); val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL); val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size); dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val); } static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size) { struct dw_pcie *pci = &mp->pci; u32 val; u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); int max_rd_req_size = meson_size_to_payload(mp, size); val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL); val &= ~PCI_EXP_DEVCTL_READRQ; dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val); val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL); val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size); dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val); } static int meson_pcie_start_link(struct dw_pcie *pci) { struct meson_pcie *mp = to_meson_pcie(pci); meson_pcie_ltssm_enable(mp); meson_pcie_assert_reset(mp); return 0; } static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { int ret; ret = pci_generic_config_read(bus, devfn, where, size, val); if (ret != PCIBIOS_SUCCESSFUL) return ret; /* * There is a bug in the MESON AXG PCIe controller whereby software * cannot program the PCI_CLASS_DEVICE register, so we must fabricate * the return value in the config accessors. 
*/ if ((where & ~3) == PCI_CLASS_REVISION) { if (size <= 2) *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3)); *val &= ~0xffffff00; *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; if (size <= 2) *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); } return PCIBIOS_SUCCESSFUL; } static struct pci_ops meson_pci_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = meson_pcie_rd_own_conf, .write = pci_generic_config_write, }; static int meson_pcie_link_up(struct dw_pcie *pci) { struct meson_pcie *mp = to_meson_pcie(pci); struct device *dev = pci->dev; u32 speed_okay = 0; u32 cnt = 0; u32 state12, state17, smlh_up, ltssm_up, rdlh_up; do { state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12); state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17); smlh_up = IS_SMLH_LINK_UP(state12); rdlh_up = IS_RDLH_LINK_UP(state12); ltssm_up = IS_LTSSM_UP(state12); if (PM_CURRENT_STATE(state17) < PCIE_GEN3) speed_okay = 1; if (smlh_up) dev_dbg(dev, "smlh_link_up is on\n"); if (rdlh_up) dev_dbg(dev, "rdlh_link_up is on\n"); if (ltssm_up) dev_dbg(dev, "ltssm_up is on\n"); if (speed_okay) dev_dbg(dev, "speed_okay\n"); if (smlh_up && rdlh_up && ltssm_up && speed_okay) return 1; cnt++; udelay(10); } while (cnt < WAIT_LINKUP_TIMEOUT); dev_err(dev, "error: wait linkup timeout\n"); return 0; } static int meson_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct meson_pcie *mp = to_meson_pcie(pci); pp->bridge->ops = &meson_pci_ops; meson_set_max_payload(mp, MAX_PAYLOAD_SIZE); meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE); return 0; } static const struct dw_pcie_host_ops meson_pcie_host_ops = { .host_init = meson_pcie_host_init, }; static const struct dw_pcie_ops dw_pcie_ops = { .link_up = meson_pcie_link_up, .start_link = meson_pcie_start_link, }; static int meson_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; struct meson_pcie *mp; int ret; mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL); if (!mp) return -ENOMEM; pci = &mp->pci; pci->dev = dev; pci->ops = &dw_pcie_ops; pci->pp.ops = &meson_pcie_host_ops; pci->num_lanes = 1; mp->phy = devm_phy_get(dev, "pcie"); if (IS_ERR(mp->phy)) { dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy)); return PTR_ERR(mp->phy); } mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(mp->reset_gpio)) { dev_err(dev, "get reset gpio failed\n"); return PTR_ERR(mp->reset_gpio); } ret = meson_pcie_get_resets(mp); if (ret) { dev_err(dev, "get reset resource failed, %d\n", ret); return ret; } ret = meson_pcie_get_mems(pdev, mp); if (ret) { dev_err(dev, "get memory resource failed, %d\n", ret); return ret; } ret = meson_pcie_power_on(mp); if (ret) { dev_err(dev, "phy power on failed, %d\n", ret); return ret; } ret = meson_pcie_reset(mp); if (ret) { dev_err(dev, "reset failed, %d\n", ret); goto err_phy; } ret = meson_pcie_probe_clocks(mp); if (ret) { dev_err(dev, "init clock resources failed, %d\n", ret); goto err_phy; } platform_set_drvdata(pdev, mp); ret = dw_pcie_host_init(&pci->pp); if (ret < 0) { dev_err(dev, "Add PCIe port failed, %d\n", ret); goto err_phy; } return 0; err_phy: meson_pcie_power_off(mp); return ret; } static const struct of_device_id meson_pcie_of_match[] = { { .compatible = "amlogic,axg-pcie", }, { .compatible = "amlogic,g12a-pcie", }, {}, }; MODULE_DEVICE_TABLE(of, meson_pcie_of_match); static struct platform_driver meson_pcie_driver = { .probe = meson_pcie_probe, .driver = { .name = "meson-pcie", .of_match_table = meson_pcie_of_match, }, }; 
module_platform_driver(meson_pcie_driver); MODULE_AUTHOR("Yue Wang <[email protected]>"); MODULE_DESCRIPTION("Amlogic PCIe Controller driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/dwc/pci-meson.c
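meson_size_to_payload() above maps a byte count onto the DWC encoding 2^(val + 7), falling back to 256 bytes for anything that is not a power of two between 128 and 4096. The standalone sketch below reproduces just that arithmetic; the local fls() is a plain-C stand-in for the kernel helper of the same name, and none of this is the driver itself.

/* Standalone sketch of the payload-size encoding: 128 -> 0, 256 -> 1, ...,
 * 4096 -> 5; out-of-range or non-power-of-two sizes default to 1 (256 B). */
#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

/* find last (most significant) set bit, 1-based, like the kernel's fls() */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int size_to_payload(unsigned int size)
{
	if (!is_power_of_2(size) || size < 128 || size > 4096)
		return 1;          /* default to 256 bytes */
	return fls(size) - 8;      /* 2^(val + 7) bytes */
}

int main(void)
{
	unsigned int sizes[] = { 100, 128, 256, 512, 4096, 8192 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4u -> encoding %d\n",
		       sizes[i], size_to_payload(sizes[i]));
	return 0;
}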
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Kirin Phone SoCs * * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. * https://www.huawei.com * * Author: Xiaowei Song <[email protected]> */ #include <linux/clk.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_pci.h> #include <linux/phy/phy.h> #include <linux/pci.h> #include <linux/pci_regs.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/resource.h> #include <linux/types.h> #include "pcie-designware.h" #define to_kirin_pcie(x) dev_get_drvdata((x)->dev) /* PCIe ELBI registers */ #define SOC_PCIECTRL_CTRL0_ADDR 0x000 #define SOC_PCIECTRL_CTRL1_ADDR 0x004 #define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) /* info located in APB */ #define PCIE_APP_LTSSM_ENABLE 0x01c #define PCIE_APB_PHY_STATUS0 0x400 #define PCIE_LINKUP_ENABLE (0x8020) #define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) /* info located in sysctrl */ #define SCTRL_PCIE_CMOS_OFFSET 0x60 #define SCTRL_PCIE_CMOS_BIT 0x10 #define SCTRL_PCIE_ISO_OFFSET 0x44 #define SCTRL_PCIE_ISO_BIT 0x30 #define SCTRL_PCIE_HPCLK_OFFSET 0x190 #define SCTRL_PCIE_HPCLK_BIT 0x184000 #define SCTRL_PCIE_OE_OFFSET 0x14a #define PCIE_DEBOUNCE_PARAM 0xF0F400 #define PCIE_OE_BYPASS (0x3 << 28) /* * Max number of connected PCI slots at an external PCI bridge * * This is used on HiKey 970, which has a PEX 8606 bridge with 4 connected * lanes (lane 0 upstream, and the other three lanes, one connected to an * in-board Ethernet adapter and the other two connected to M.2 and mini * PCI slots. * * Each slot has a different clock source and uses a separate PERST# pin. */ #define MAX_PCI_SLOTS 3 enum pcie_kirin_phy_type { PCIE_KIRIN_INTERNAL_PHY, PCIE_KIRIN_EXTERNAL_PHY }; struct kirin_pcie { enum pcie_kirin_phy_type type; struct dw_pcie *pci; struct regmap *apb; struct phy *phy; void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */ /* DWC PERST# */ int gpio_id_dwc_perst; /* Per-slot PERST# */ int num_slots; int gpio_id_reset[MAX_PCI_SLOTS]; const char *reset_names[MAX_PCI_SLOTS]; /* Per-slot clkreq */ int n_gpio_clkreq; int gpio_id_clkreq[MAX_PCI_SLOTS]; const char *clkreq_names[MAX_PCI_SLOTS]; }; /* * Kirin 960 PHY. Can't be split into a PHY driver without changing the * DT schema. 
*/ #define REF_CLK_FREQ 100000000 /* PHY info located in APB */ #define PCIE_APB_PHY_CTRL0 0x0 #define PCIE_APB_PHY_CTRL1 0x4 #define PCIE_APB_PHY_STATUS0 0x400 #define PIPE_CLK_STABLE BIT(19) #define PHY_REF_PAD_BIT BIT(8) #define PHY_PWR_DOWN_BIT BIT(22) #define PHY_RST_ACK_BIT BIT(16) /* peri_crg ctrl */ #define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 #define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 /* Time for delay */ #define REF_2_PERST_MIN 21000 #define REF_2_PERST_MAX 25000 #define PERST_2_ACCESS_MIN 10000 #define PERST_2_ACCESS_MAX 12000 #define PIPE_CLK_WAIT_MIN 550 #define PIPE_CLK_WAIT_MAX 600 #define TIME_CMOS_MIN 100 #define TIME_CMOS_MAX 105 #define TIME_PHY_PD_MIN 10 #define TIME_PHY_PD_MAX 11 struct hi3660_pcie_phy { struct device *dev; void __iomem *base; struct regmap *crgctrl; struct regmap *sysctrl; struct clk *apb_sys_clk; struct clk *apb_phy_clk; struct clk *phy_ref_clk; struct clk *aclk; struct clk *aux_clk; }; /* Registers in PCIePHY */ static inline void kirin_apb_phy_writel(struct hi3660_pcie_phy *hi3660_pcie_phy, u32 val, u32 reg) { writel(val, hi3660_pcie_phy->base + reg); } static inline u32 kirin_apb_phy_readl(struct hi3660_pcie_phy *hi3660_pcie_phy, u32 reg) { return readl(hi3660_pcie_phy->base + reg); } static int hi3660_pcie_phy_get_clk(struct hi3660_pcie_phy *phy) { struct device *dev = phy->dev; phy->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); if (IS_ERR(phy->phy_ref_clk)) return PTR_ERR(phy->phy_ref_clk); phy->aux_clk = devm_clk_get(dev, "pcie_aux"); if (IS_ERR(phy->aux_clk)) return PTR_ERR(phy->aux_clk); phy->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); if (IS_ERR(phy->apb_phy_clk)) return PTR_ERR(phy->apb_phy_clk); phy->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); if (IS_ERR(phy->apb_sys_clk)) return PTR_ERR(phy->apb_sys_clk); phy->aclk = devm_clk_get(dev, "pcie_aclk"); if (IS_ERR(phy->aclk)) return PTR_ERR(phy->aclk); return 0; } static int hi3660_pcie_phy_get_resource(struct hi3660_pcie_phy *phy) { struct device *dev = phy->dev; struct platform_device *pdev; /* registers */ pdev = container_of(dev, struct platform_device, dev); phy->base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(phy->base)) return PTR_ERR(phy->base); phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); if (IS_ERR(phy->crgctrl)) return PTR_ERR(phy->crgctrl); phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); if (IS_ERR(phy->sysctrl)) return PTR_ERR(phy->sysctrl); return 0; } static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy) { struct device *dev = phy->dev; u32 reg_val; reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1); reg_val &= ~PHY_REF_PAD_BIT; kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1); reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL0); reg_val &= ~PHY_PWR_DOWN_BIT; kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL0); usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1); reg_val &= ~PHY_RST_ACK_BIT; kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1); usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0); if (reg_val & PIPE_CLK_STABLE) { dev_err(dev, "PIPE clk is not stable\n"); return -EINVAL; } return 0; } static void hi3660_pcie_phy_oe_enable(struct hi3660_pcie_phy *phy) { u32 val; regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); val |= PCIE_DEBOUNCE_PARAM; val &= ~PCIE_OE_BYPASS; regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val); } static int 
hi3660_pcie_phy_clk_ctrl(struct hi3660_pcie_phy *phy, bool enable) { int ret = 0; if (!enable) goto close_clk; ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ); if (ret) return ret; ret = clk_prepare_enable(phy->phy_ref_clk); if (ret) return ret; ret = clk_prepare_enable(phy->apb_sys_clk); if (ret) goto apb_sys_fail; ret = clk_prepare_enable(phy->apb_phy_clk); if (ret) goto apb_phy_fail; ret = clk_prepare_enable(phy->aclk); if (ret) goto aclk_fail; ret = clk_prepare_enable(phy->aux_clk); if (ret) goto aux_clk_fail; return 0; close_clk: clk_disable_unprepare(phy->aux_clk); aux_clk_fail: clk_disable_unprepare(phy->aclk); aclk_fail: clk_disable_unprepare(phy->apb_phy_clk); apb_phy_fail: clk_disable_unprepare(phy->apb_sys_clk); apb_sys_fail: clk_disable_unprepare(phy->phy_ref_clk); return ret; } static int hi3660_pcie_phy_power_on(struct kirin_pcie *pcie) { struct hi3660_pcie_phy *phy = pcie->phy_priv; int ret; /* Power supply for Host */ regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); hi3660_pcie_phy_oe_enable(phy); ret = hi3660_pcie_phy_clk_ctrl(phy, true); if (ret) return ret; /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ regmap_write(phy->sysctrl, SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); regmap_write(phy->crgctrl, CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); regmap_write(phy->sysctrl, SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); ret = hi3660_pcie_phy_start(phy); if (ret) goto disable_clks; return 0; disable_clks: hi3660_pcie_phy_clk_ctrl(phy, false); return ret; } static int hi3660_pcie_phy_init(struct platform_device *pdev, struct kirin_pcie *pcie) { struct device *dev = &pdev->dev; struct hi3660_pcie_phy *phy; int ret; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; pcie->phy_priv = phy; phy->dev = dev; ret = hi3660_pcie_phy_get_clk(phy); if (ret) return ret; return hi3660_pcie_phy_get_resource(phy); } static int hi3660_pcie_phy_power_off(struct kirin_pcie *pcie) { struct hi3660_pcie_phy *phy = pcie->phy_priv; /* Drop power supply for Host */ regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0x00); hi3660_pcie_phy_clk_ctrl(phy, false); return 0; } /* * The non-PHY part starts here */ static const struct regmap_config pcie_kirin_regmap_conf = { .name = "kirin_pcie_apb", .reg_bits = 32, .val_bits = 32, .reg_stride = 4, }; static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, struct platform_device *pdev) { struct device *dev = &pdev->dev; char name[32]; int ret, i; /* This is an optional property */ ret = gpiod_count(dev, "hisilicon,clken"); if (ret < 0) return 0; if (ret > MAX_PCI_SLOTS) { dev_err(dev, "Too many GPIO clock requests!\n"); return -EINVAL; } pcie->n_gpio_clkreq = ret; for (i = 0; i < pcie->n_gpio_clkreq; i++) { pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node, "hisilicon,clken-gpios", i); if (pcie->gpio_id_clkreq[i] < 0) return pcie->gpio_id_clkreq[i]; sprintf(name, "pcie_clkreq_%d", i); pcie->clkreq_names[i] = devm_kstrdup_const(dev, name, GFP_KERNEL); if (!pcie->clkreq_names[i]) return -ENOMEM; } return 0; } static int kirin_pcie_parse_port(struct kirin_pcie *pcie, struct platform_device *pdev, struct device_node *node) { struct device *dev = &pdev->dev; struct device_node *parent, *child; int ret, slot, i; char name[32]; for_each_available_child_of_node(node, parent) { for_each_available_child_of_node(parent, child) { i = pcie->num_slots; pcie->gpio_id_reset[i] = of_get_named_gpio(child, "reset-gpios", 0); if 
(pcie->gpio_id_reset[i] < 0) continue; pcie->num_slots++; if (pcie->num_slots > MAX_PCI_SLOTS) { dev_err(dev, "Too many PCI slots!\n"); ret = -EINVAL; goto put_node; } ret = of_pci_get_devfn(child); if (ret < 0) { dev_err(dev, "failed to parse devfn: %d\n", ret); goto put_node; } slot = PCI_SLOT(ret); sprintf(name, "pcie_perst_%d", slot); pcie->reset_names[i] = devm_kstrdup_const(dev, name, GFP_KERNEL); if (!pcie->reset_names[i]) { ret = -ENOMEM; goto put_node; } } } return 0; put_node: of_node_put(child); of_node_put(parent); return ret; } static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *child, *node = dev->of_node; void __iomem *apb_base; int ret; apb_base = devm_platform_ioremap_resource_byname(pdev, "apb"); if (IS_ERR(apb_base)) return PTR_ERR(apb_base); kirin_pcie->apb = devm_regmap_init_mmio(dev, apb_base, &pcie_kirin_regmap_conf); if (IS_ERR(kirin_pcie->apb)) return PTR_ERR(kirin_pcie->apb); /* pcie internal PERST# gpio */ kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node, "reset-gpios", 0); if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) { return -EPROBE_DEFER; } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) { dev_err(dev, "unable to get a valid gpio pin\n"); return -ENODEV; } ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev); if (ret) return ret; /* Parse OF children */ for_each_available_child_of_node(node, child) { ret = kirin_pcie_parse_port(kirin_pcie, pdev, child); if (ret) goto put_node; } return 0; put_node: of_node_put(child); return ret; } static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, bool on) { u32 val; regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, &val); if (on) val = val | PCIE_ELBI_SLV_DBI_ENABLE; else val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, val); } static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, bool on) { u32 val; regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, &val); if (on) val = val | PCIE_ELBI_SLV_DBI_ENABLE; else val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, val); } static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; *val = dw_pcie_read_dbi(pci, where, size); return PCIBIOS_SUCCESSFUL; } static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; dw_pcie_write_dbi(pci, where, size, val); return PCIBIOS_SUCCESSFUL; } static int kirin_pcie_add_bus(struct pci_bus *bus) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); int i, ret; if (!kirin_pcie->num_slots) return 0; /* Send PERST# to each slot */ for (i = 0; i < kirin_pcie->num_slots; i++) { ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1); if (ret) { dev_err(pci->dev, "PERST# %s error: %d\n", kirin_pcie->reset_names[i], ret); } } usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); return 0; } static struct pci_ops kirin_pci_ops = { .read = kirin_pcie_rd_own_conf, .write = kirin_pcie_wr_own_conf, .add_bus = kirin_pcie_add_bus, }; static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size) { 
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); u32 ret; kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); dw_pcie_read(base + reg, size, &ret); kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); return ret; } static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val) { struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); dw_pcie_write(base + reg, size, val); kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); } static int kirin_pcie_link_up(struct dw_pcie *pci) { struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); u32 val; regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val); if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) return 1; return 0; } static int kirin_pcie_start_link(struct dw_pcie *pci) { struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); /* assert LTSSM enable */ regmap_write(kirin_pcie->apb, PCIE_APP_LTSSM_ENABLE, PCIE_LTSSM_ENABLE_BIT); return 0; } static int kirin_pcie_host_init(struct dw_pcie_rp *pp) { pp->bridge->ops = &kirin_pci_ops; return 0; } static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie, struct device *dev) { int ret, i; for (i = 0; i < kirin_pcie->num_slots; i++) { if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) { dev_err(dev, "unable to get a valid %s gpio\n", kirin_pcie->reset_names[i]); return -ENODEV; } ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i], kirin_pcie->reset_names[i]); if (ret) return ret; } for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) { if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) { dev_err(dev, "unable to get a valid %s gpio\n", kirin_pcie->clkreq_names[i]); return -ENODEV; } ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i], kirin_pcie->clkreq_names[i]); if (ret) return ret; ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0); if (ret) return ret; } return 0; } static const struct dw_pcie_ops kirin_dw_pcie_ops = { .read_dbi = kirin_pcie_read_dbi, .write_dbi = kirin_pcie_write_dbi, .link_up = kirin_pcie_link_up, .start_link = kirin_pcie_start_link, }; static const struct dw_pcie_host_ops kirin_pcie_host_ops = { .host_init = kirin_pcie_host_init, }; static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie) { int i; if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) return hi3660_pcie_phy_power_off(kirin_pcie); for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1); phy_power_off(kirin_pcie->phy); phy_exit(kirin_pcie->phy); return 0; } static int kirin_pcie_power_on(struct platform_device *pdev, struct kirin_pcie *kirin_pcie) { struct device *dev = &pdev->dev; int ret; if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) { ret = hi3660_pcie_phy_init(pdev, kirin_pcie); if (ret) return ret; ret = hi3660_pcie_phy_power_on(kirin_pcie); if (ret) return ret; } else { kirin_pcie->phy = devm_of_phy_get(dev, dev->of_node, NULL); if (IS_ERR(kirin_pcie->phy)) return PTR_ERR(kirin_pcie->phy); ret = kirin_pcie_gpio_request(kirin_pcie, dev); if (ret) return ret; ret = phy_init(kirin_pcie->phy); if (ret) goto err; ret = phy_power_on(kirin_pcie->phy); if (ret) goto err; } /* perst assert Endpoint */ usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) { ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1); if (ret) goto err; } usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); return 0; err: kirin_pcie_power_off(kirin_pcie); return ret; } static int __exit 
kirin_pcie_remove(struct platform_device *pdev) { struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev); dw_pcie_host_deinit(&kirin_pcie->pci->pp); kirin_pcie_power_off(kirin_pcie); return 0; } struct kirin_pcie_data { enum pcie_kirin_phy_type phy_type; }; static const struct kirin_pcie_data kirin_960_data = { .phy_type = PCIE_KIRIN_INTERNAL_PHY, }; static const struct kirin_pcie_data kirin_970_data = { .phy_type = PCIE_KIRIN_EXTERNAL_PHY, }; static const struct of_device_id kirin_pcie_match[] = { { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data }, { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data }, {}, }; static int kirin_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct kirin_pcie_data *data; struct kirin_pcie *kirin_pcie; struct dw_pcie *pci; int ret; if (!dev->of_node) { dev_err(dev, "NULL node\n"); return -EINVAL; } data = of_device_get_match_data(dev); if (!data) { dev_err(dev, "OF data missing\n"); return -EINVAL; } kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); if (!kirin_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->ops = &kirin_dw_pcie_ops; pci->pp.ops = &kirin_pcie_host_ops; kirin_pcie->pci = pci; kirin_pcie->type = data->phy_type; ret = kirin_pcie_get_resource(kirin_pcie, pdev); if (ret) return ret; platform_set_drvdata(pdev, kirin_pcie); ret = kirin_pcie_power_on(pdev, kirin_pcie); if (ret) return ret; return dw_pcie_host_init(&pci->pp); } static struct platform_driver kirin_pcie_driver = { .probe = kirin_pcie_probe, .remove = __exit_p(kirin_pcie_remove), .driver = { .name = "kirin-pcie", .of_match_table = kirin_pcie_match, .suppress_bind_attrs = true, }, }; module_platform_driver(kirin_pcie_driver); MODULE_DEVICE_TABLE(of, kirin_pcie_match); MODULE_DESCRIPTION("PCIe host controller driver for Kirin Phone SoCs"); MODULE_AUTHOR("Xiaowei Song <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/dwc/pcie-kirin.c
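The Kirin driver above never touches the DBI space directly: kirin_pcie_read_dbi()/kirin_pcie_write_dbi() first set the PCIE_ELBI_SLV_DBI_ENABLE bit in an APB control register, perform the access, then clear the bit again. The stand-alone C sketch below models only that bracketing pattern; the array-backed "regmap", the fake DBI contents and the sample values are hypothetical and are not part of the driver.

/* Minimal user-space model of the Kirin sideband DBI access pattern.
 * The APB "regmap" here is just an array; only the enable/access/disable
 * sequence mirrors kirin_pcie_sideband_dbi_r_mode()/_w_mode().
 */
#include <stdint.h>
#include <stdio.h>

#define SOC_PCIECTRL_CTRL0_ADDR   0x000u  /* gates DBI writes in the driver */
#define SOC_PCIECTRL_CTRL1_ADDR   0x004u  /* gates DBI reads in the driver  */
#define PCIE_ELBI_SLV_DBI_ENABLE  (0x1u << 21)

static uint32_t apb[2];                   /* fake APB regmap: CTRL0, CTRL1 */

static void apb_update(uint32_t reg, int on)
{
	uint32_t val = apb[reg / 4];

	if (on)
		val |= PCIE_ELBI_SLV_DBI_ENABLE;
	else
		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
	apb[reg / 4] = val;
}

/* A DBI read is bracketed by enabling and disabling the read gate. */
static uint32_t dbi_read(const uint32_t *dbi, uint32_t reg)
{
	uint32_t v;

	apb_update(SOC_PCIECTRL_CTRL1_ADDR, 1);
	v = dbi[reg / 4];
	apb_update(SOC_PCIECTRL_CTRL1_ADDR, 0);
	return v;
}

int main(void)
{
	uint32_t dbi[4] = { 0x19e516c3, 0, 0, 0 };  /* made-up vendor/device ID */

	printf("DBI[0] = 0x%08x\n", dbi_read(dbi, 0));
	printf("CTRL1 after access = 0x%08x\n", apb[1]);
	return 0;
}

Built with any C99 compiler, the sketch prints the fake ID and shows the gate bit left clear once the access completes, which is the invariant the real sideband helpers maintain around dw_pcie_read()/dw_pcie_write().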
// SPDX-License-Identifier: GPL-2.0 /* * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * https://www.samsung.com * * Author: Jingoo Han <[email protected]> */ #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci_regs.h> #include <linux/platform_device.h> #include "../../pci.h" #include "pcie-designware.h" static struct pci_ops dw_pcie_ops; static struct pci_ops dw_child_pcie_ops; static void dw_msi_ack_irq(struct irq_data *d) { irq_chip_ack_parent(d); } static void dw_msi_mask_irq(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void dw_msi_unmask_irq(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip dw_pcie_msi_irq_chip = { .name = "PCI-MSI", .irq_ack = dw_msi_ack_irq, .irq_mask = dw_msi_mask_irq, .irq_unmask = dw_msi_unmask_irq, }; static struct msi_domain_info dw_pcie_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), .chip = &dw_pcie_msi_irq_chip, }; /* MSI int handler */ irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) { int i, pos; unsigned long val; u32 status, num_ctrls; irqreturn_t ret = IRQ_NONE; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; for (i = 0; i < num_ctrls; i++) { status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + (i * MSI_REG_CTRL_BLOCK_SIZE)); if (!status) continue; ret = IRQ_HANDLED; val = status; pos = 0; while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos)) != MAX_MSI_IRQS_PER_CTRL) { generic_handle_domain_irq(pp->irq_domain, (i * MAX_MSI_IRQS_PER_CTRL) + pos); pos++; } } return ret; } /* Chained MSI interrupt service routine */ static void dw_chained_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct dw_pcie_rp *pp; chained_irq_enter(chip, desc); pp = irq_desc_get_handler_data(desc); dw_handle_msi_irq(pp); chained_irq_exit(chip, desc); } static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target; msi_target = (u64)pp->msi_data; msg->address_lo = lower_32_bits(msi_target); msg->address_hi = upper_32_bits(msi_target); msg->data = d->hwirq; dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)d->hwirq, msg->address_hi, msg->address_lo); } static int dw_pci_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { return -EINVAL; } static void dw_pci_bottom_mask(struct irq_data *d) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned int res, bit, ctrl; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_mask[ctrl] |= BIT(bit); dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]); raw_spin_unlock_irqrestore(&pp->lock, flags); } static void dw_pci_bottom_unmask(struct irq_data *d) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned int res, bit, ctrl; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; res = ctrl * 
MSI_REG_CTRL_BLOCK_SIZE; bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_mask[ctrl] &= ~BIT(bit); dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]); raw_spin_unlock_irqrestore(&pp->lock, flags); } static void dw_pci_bottom_ack(struct irq_data *d) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned int res, bit, ctrl; ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit)); } static struct irq_chip dw_pci_msi_bottom_irq_chip = { .name = "DWPCI-MSI", .irq_ack = dw_pci_bottom_ack, .irq_compose_msi_msg = dw_pci_setup_msi_msg, .irq_set_affinity = dw_pci_msi_set_affinity, .irq_mask = dw_pci_bottom_mask, .irq_unmask = dw_pci_bottom_unmask, }; static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct dw_pcie_rp *pp = domain->host_data; unsigned long flags; u32 i; int bit; raw_spin_lock_irqsave(&pp->lock, flags); bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, order_base_2(nr_irqs)); raw_spin_unlock_irqrestore(&pp->lock, flags); if (bit < 0) return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, bit + i, pp->msi_irq_chip, pp, handle_edge_irq, NULL, NULL); return 0; } static void dw_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct dw_pcie_rp *pp = domain->host_data; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); bitmap_release_region(pp->msi_irq_in_use, d->hwirq, order_base_2(nr_irqs)); raw_spin_unlock_irqrestore(&pp->lock, flags); } static const struct irq_domain_ops dw_pcie_msi_domain_ops = { .alloc = dw_pcie_irq_domain_alloc, .free = dw_pcie_irq_domain_free, }; int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, &dw_pcie_msi_domain_ops, pp); if (!pp->irq_domain) { dev_err(pci->dev, "Failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); pp->msi_domain = pci_msi_create_irq_domain(fwnode, &dw_pcie_msi_domain_info, pp->irq_domain); if (!pp->msi_domain) { dev_err(pci->dev, "Failed to create MSI domain\n"); irq_domain_remove(pp->irq_domain); return -ENOMEM; } return 0; } static void dw_pcie_free_msi(struct dw_pcie_rp *pp) { u32 ctrl; for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) { if (pp->msi_irq[ctrl] > 0) irq_set_chained_handler_and_data(pp->msi_irq[ctrl], NULL, NULL); } irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); } static void dw_pcie_msi_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target = (u64)pp->msi_data; if (!pci_msi_enabled() || !pp->has_msi_ctrl) return; /* Program the msi_data */ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target)); dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target)); } static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); u32 ctrl, max_vectors; int irq; /* Parse any "msiX" IRQs described in the devicetree */ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) { char 
msi_name[] = "msiX"; msi_name[3] = '0' + ctrl; irq = platform_get_irq_byname_optional(pdev, msi_name); if (irq == -ENXIO) break; if (irq < 0) return dev_err_probe(dev, irq, "Failed to parse MSI IRQ '%s'\n", msi_name); pp->msi_irq[ctrl] = irq; } /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */ if (ctrl == 0) return -ENXIO; max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL; if (pp->num_vectors > max_vectors) { dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n", max_vectors); pp->num_vectors = max_vectors; } if (!pp->num_vectors) pp->num_vectors = max_vectors; return 0; } static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); u64 *msi_vaddr; int ret; u32 ctrl, num_ctrls; for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) pp->irq_mask[ctrl] = ~0; if (!pp->msi_irq[0]) { ret = dw_pcie_parse_split_msi_irq(pp); if (ret < 0 && ret != -ENXIO) return ret; } if (!pp->num_vectors) pp->num_vectors = MSI_DEF_NUM_VECTORS; num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; if (!pp->msi_irq[0]) { pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi"); if (pp->msi_irq[0] < 0) { pp->msi_irq[0] = platform_get_irq(pdev, 0); if (pp->msi_irq[0] < 0) return pp->msi_irq[0]; } } dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors); pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; ret = dw_pcie_allocate_domains(pp); if (ret) return ret; for (ctrl = 0; ctrl < num_ctrls; ctrl++) { if (pp->msi_irq[ctrl] > 0) irq_set_chained_handler_and_data(pp->msi_irq[ctrl], dw_chained_msi_isr, pp); } /* * Even though the iMSI-RX Module supports 64-bit addresses some * peripheral PCIe devices may lack 64-bit message support. In * order not to miss MSI TLPs from those devices the MSI target * address has to be within the lowest 4GB. * * Note until there is a better alternative found the reservation is * done by allocating from the artificially limited DMA-coherent * memory. */ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); if (ret) dev_warn(dev, "Failed to set DMA mask to 32-bit. 
Devices with only 32-bit MSI support may not work properly\n"); msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data, GFP_KERNEL); if (!msi_vaddr) { dev_err(dev, "Failed to alloc and map MSI data\n"); dw_pcie_free_msi(pp); return -ENOMEM; } return 0; } int dw_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); struct resource_entry *win; struct pci_host_bridge *bridge; struct resource *res; int ret; raw_spin_lock_init(&pp->lock); ret = dw_pcie_get_resources(pci); if (ret) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (res) { pp->cfg0_size = resource_size(res); pp->cfg0_base = res->start; pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pp->va_cfg0_base)) return PTR_ERR(pp->va_cfg0_base); } else { dev_err(dev, "Missing *config* reg space\n"); return -ENODEV; } bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; pp->bridge = bridge; /* Get the I/O range from DT */ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO); if (win) { pp->io_size = resource_size(win->res); pp->io_bus_addr = win->res->start - win->offset; pp->io_base = pci_pio_to_address(win->res->start); } /* Set default bus ops */ bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; if (pp->ops->host_init) { ret = pp->ops->host_init(pp); if (ret) return ret; } if (pci_msi_enabled()) { pp->has_msi_ctrl = !(pp->ops->msi_host_init || of_property_read_bool(np, "msi-parent") || of_property_read_bool(np, "msi-map")); /* * For the has_msi_ctrl case the default assignment is handled * in the dw_pcie_msi_host_init(). */ if (!pp->has_msi_ctrl && !pp->num_vectors) { pp->num_vectors = MSI_DEF_NUM_VECTORS; } else if (pp->num_vectors > MAX_MSI_IRQS) { dev_err(dev, "Invalid number of vectors\n"); ret = -EINVAL; goto err_deinit_host; } if (pp->ops->msi_host_init) { ret = pp->ops->msi_host_init(pp); if (ret < 0) goto err_deinit_host; } else if (pp->has_msi_ctrl) { ret = dw_pcie_msi_host_init(pp); if (ret < 0) goto err_deinit_host; } } dw_pcie_version_detect(pci); dw_pcie_iatu_detect(pci); ret = dw_pcie_edma_detect(pci); if (ret) goto err_free_msi; ret = dw_pcie_setup_rc(pp); if (ret) goto err_remove_edma; if (!dw_pcie_link_up(pci)) { ret = dw_pcie_start_link(pci); if (ret) goto err_remove_edma; } /* Ignore errors, the link may come up later */ dw_pcie_wait_for_link(pci); bridge->sysdata = pp; ret = pci_host_probe(bridge); if (ret) goto err_stop_link; return 0; err_stop_link: dw_pcie_stop_link(pci); err_remove_edma: dw_pcie_edma_remove(pci); err_free_msi: if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); err_deinit_host: if (pp->ops->host_deinit) pp->ops->host_deinit(pp); return ret; } EXPORT_SYMBOL_GPL(dw_pcie_host_init); void dw_pcie_host_deinit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); pci_stop_root_bus(pp->bridge->bus); pci_remove_root_bus(pp->bridge->bus); dw_pcie_stop_link(pci); dw_pcie_edma_remove(pci); if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); if (pp->ops->host_deinit) pp->ops->host_deinit(pp); } EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); int type, ret; u32 busdev; /* * Checking whether the link is up here is a last line of defense * against platforms that forward 
errors on the system bus as * SError upon PCI configuration transactions issued when the link * is down. This check is racy by definition and does not stop * the system from triggering an SError if the link goes down * after this check is performed. */ if (!dw_pcie_link_up(pci)) return NULL; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); if (pci_is_root_bus(bus->parent)) type = PCIE_ATU_TYPE_CFG0; else type = PCIE_ATU_TYPE_CFG1; ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size); if (ret) return NULL; return pp->va_cfg0_base + where; } static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); int ret; ret = pci_generic_config_read(bus, devfn, where, size, val); if (ret != PCIBIOS_SUCCESSFUL) return ret; if (pp->cfg0_io_shared) { ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); if (ret) return PCIBIOS_SET_FAILED; } return PCIBIOS_SUCCESSFUL; } static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); int ret; ret = pci_generic_config_write(bus, devfn, where, size, val); if (ret != PCIBIOS_SUCCESSFUL) return ret; if (pp->cfg0_io_shared) { ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); if (ret) return PCIBIOS_SET_FAILED; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops dw_child_pcie_ops = { .map_bus = dw_pcie_other_conf_map_bus, .read = dw_pcie_rd_other_conf, .write = dw_pcie_wr_other_conf, }; void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); if (PCI_SLOT(devfn) > 0) return NULL; return pci->dbi_base + where; } EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus); static struct pci_ops dw_pcie_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct resource_entry *entry; int i, ret; /* Note the very first outbound ATU is used for CFG IOs */ if (!pci->num_ob_windows) { dev_err(pci->dev, "No outbound iATU found\n"); return -EINVAL; } /* * Ensure all out/inbound windows are disabled before proceeding with * the MEM/IO (dma-)ranges setups. 
*/ for (i = 0; i < pci->num_ob_windows; i++) dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i); for (i = 0; i < pci->num_ib_windows; i++) dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i); i = 0; resource_list_for_each_entry(entry, &pp->bridge->windows) { if (resource_type(entry->res) != IORESOURCE_MEM) continue; if (pci->num_ob_windows <= ++i) break; ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM, entry->res->start, entry->res->start - entry->offset, resource_size(entry->res)); if (ret) { dev_err(pci->dev, "Failed to set MEM range %pr\n", entry->res); return ret; } } if (pp->io_size) { if (pci->num_ob_windows > ++i) { ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); if (ret) { dev_err(pci->dev, "Failed to set IO range %pr\n", entry->res); return ret; } } else { pp->cfg0_io_shared = true; } } if (pci->num_ob_windows <= i) dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n", pci->num_ob_windows); i = 0; resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) { if (resource_type(entry->res) != IORESOURCE_MEM) continue; if (pci->num_ib_windows <= i) break; ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM, entry->res->start, entry->res->start - entry->offset, resource_size(entry->res)); if (ret) { dev_err(pci->dev, "Failed to set DMA range %pr\n", entry->res); return ret; } } if (pci->num_ib_windows <= i) dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n", pci->num_ib_windows); return 0; } int dw_pcie_setup_rc(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u32 val, ctrl, num_ctrls; int ret; /* * Enable DBI read-only registers for writing/updating configuration. * Write permission gets disabled towards the end of this function. */ dw_pcie_dbi_ro_wr_en(pci); dw_pcie_setup(pci); if (pp->has_msi_ctrl) { num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; /* Initialize IRQ Status array */ for (ctrl = 0; ctrl < num_ctrls; ctrl++) { dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), pp->irq_mask[ctrl]); dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ~0); } } dw_pcie_msi_init(pp); /* Setup RC BARs */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); /* Setup interrupt pins */ val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); val &= 0xffff00ff; val |= 0x00000100; dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); /* Setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); val &= 0xff000000; val |= 0x00ff0100; dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); /* Setup command register */ val = dw_pcie_readl_dbi(pci, PCI_COMMAND); val &= 0xffff0000; val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; dw_pcie_writel_dbi(pci, PCI_COMMAND, val); /* * If the platform provides its own child bus config accesses, it means * the platform uses its own address translation component rather than * ATU, so we should not program the ATU here. 
*/ if (pp->bridge->child_ops == &dw_child_pcie_ops) { ret = dw_pcie_iatu_setup(pp); if (ret) return ret; } dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); /* Program correct class for RC */ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); dw_pcie_dbi_ro_wr_dis(pci); return 0; } EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); int dw_pcie_suspend_noirq(struct dw_pcie *pci) { u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; int ret; /* * If L1SS is supported, then do not put the link into L2 as some * devices such as NVMe expect low resume latency. */ if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1) return 0; if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT) return 0; if (!pci->pp.ops->pme_turn_off) return 0; pci->pp.ops->pme_turn_off(&pci->pp); ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE, PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US, false, pci); if (ret) { dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val); return ret; } if (pci->pp.ops->host_deinit) pci->pp.ops->host_deinit(&pci->pp); pci->suspended = true; return ret; } EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq); int dw_pcie_resume_noirq(struct dw_pcie *pci) { int ret; if (!pci->suspended) return 0; pci->suspended = false; if (pci->pp.ops->host_init) { ret = pci->pp.ops->host_init(&pci->pp); if (ret) { dev_err(pci->dev, "Host init failed: %d\n", ret); return ret; } } dw_pcie_setup_rc(&pci->pp); ret = dw_pcie_start_link(pci); if (ret) return ret; ret = dw_pcie_wait_for_link(pci); if (ret) return ret; return ret; } EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
linux-master
drivers/pci/controller/dwc/pcie-designware-host.c
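dw_handle_msi_irq() above walks one 32-vector status register per MSI controller block and translates each set bit into hwirq = ctrl * MAX_MSI_IRQS_PER_CTRL + bit before handing it to the IRQ domain. The sketch below reproduces just that demultiplexing arithmetic in user space; the status[] array stands in for the PCIE_MSI_INTR0_STATUS reads and its contents are invented for illustration.

/* Stand-alone sketch of the MSI demultiplexing used by dw_handle_msi_irq():
 * each controller block covers 32 vectors, so a set bit 'pos' in block 'i'
 * maps to hwirq i * 32 + pos.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_MSI_IRQS_PER_CTRL 32

static void handle_msi_status(const uint32_t *status, unsigned int num_ctrls)
{
	for (unsigned int i = 0; i < num_ctrls; i++) {
		uint32_t pending = status[i];

		while (pending) {
			unsigned int pos = (unsigned int)__builtin_ctz(pending);

			printf("dispatch hwirq %u\n",
			       i * MAX_MSI_IRQS_PER_CTRL + pos);
			pending &= pending - 1;	/* clear lowest set bit */
		}
	}
}

int main(void)
{
	/* Pretend controllers 0 and 1 latched vectors 3, 5 and 40. */
	uint32_t status[2] = { (1u << 3) | (1u << 5), 1u << (40 - 32) };

	handle_msi_status(status, 2);
	return 0;
}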
// SPDX-License-Identifier: GPL-2.0+
/*
 * ACPI quirks for Tegra194 PCIe host controller
 *
 * Copyright (C) 2021 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <[email protected]>
 */

#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>

#include "pcie-designware.h"

struct tegra194_pcie_ecam {
	void __iomem *config_base;
	void __iomem *iatu_base;
	void __iomem *dbi_base;
};

static int tegra194_acpi_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct tegra194_pcie_ecam *pcie_ecam;

	pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
	if (!pcie_ecam)
		return -ENOMEM;

	pcie_ecam->config_base = cfg->win;
	pcie_ecam->iatu_base = cfg->win + SZ_256K;
	pcie_ecam->dbi_base = cfg->win + SZ_512K;
	cfg->priv = pcie_ecam;

	return 0;
}

static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
			  u32 val, u32 reg)
{
	u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
		     PCIE_ATU_VIEWPORT_BASE;

	writel(val, pcie_ecam->iatu_base + offset + reg);
}

static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
				 int index, int type, u64 cpu_addr,
				 u64 pci_addr, u64 size)
{
	atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
		      PCIE_ATU_LOWER_BASE);
	atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
		      PCIE_ATU_UPPER_BASE);
	atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
		      PCIE_ATU_LOWER_TARGET);
	atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
		      PCIE_ATU_LIMIT);
	atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
		      PCIE_ATU_UPPER_TARGET);
	atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
	atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
}

static void __iomem *tegra194_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
	u32 busdev;
	int type;

	if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
		return NULL;

	if (bus->number == cfg->busr.start) {
		if (PCI_SLOT(devfn) == 0)
			return pcie_ecam->dbi_base + where;
		else
			return NULL;
	}

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == cfg->busr.start) {
		if (PCI_SLOT(devfn) == 0)
			type = PCIE_ATU_TYPE_CFG0;
		else
			return NULL;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
	}

	program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
			     SZ_256K);

	return pcie_ecam->config_base + where;
}

const struct pci_ecam_ops tegra194_pcie_ops = {
	.init = tegra194_acpi_init,
	.pci_ops = {
		.map_bus = tegra194_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};
linux-master
drivers/pci/controller/dwc/pcie-tegra194-acpi.c
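tegra194_map_bus() above steers each configuration access by packing bus, device and function into the PCI address programmed into outbound ATU region 0, choosing CFG0 when the target bus hangs directly off the root bus and CFG1 otherwise. The sketch below models only that selection; the shift positions follow the usual DWC PCIE_ATU_BUS/DEV/FUNC layout (bus 31:24, device 23:19, function 18:16) and the CFG0/CFG1 type codes (0x4/0x5) are assumed values for illustration, not taken from this file.

/* User-space model of the ATU target encoding and CFG0/CFG1 selection. */
#include <stdint.h>
#include <stdio.h>

#define ATU_TYPE_CFG0 0x4u	/* assumed DWC type code for Type 0 config */
#define ATU_TYPE_CFG1 0x5u	/* assumed DWC type code for Type 1 config */

static uint32_t atu_busdev(uint8_t bus, uint8_t dev, uint8_t fn)
{
	return ((uint32_t)bus << 24) |		/* mirrors PCIE_ATU_BUS()  */
	       ((uint32_t)(dev & 0x1f) << 19) |	/* mirrors PCIE_ATU_DEV()  */
	       ((uint32_t)(fn & 0x7) << 16);	/* mirrors PCIE_ATU_FUNC() */
}

int main(void)
{
	uint8_t root_bus = 0, parent_bus = 0, bus = 1, dev = 0, fn = 0;
	uint32_t target = atu_busdev(bus, dev, fn);
	uint32_t type = (parent_bus == root_bus) ? ATU_TYPE_CFG0 : ATU_TYPE_CFG1;

	printf("ATU target 0x%08x, type CFG%u\n",
	       target, type == ATU_TYPE_CFG0 ? 0 : 1);
	return 0;
}

The same packed word is what the driver writes to PCIE_ATU_LOWER_TARGET, so a device 1:00.0 access lands at 0x01000000 with a Type 0 region in this model.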
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Freescale i.MX6 SoCs * * Copyright (C) 2013 Kosagi * https://www.kosagi.com * * Author: Sean Cross <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/resource.h> #include <linux/signal.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/reset.h> #include <linux/phy/phy.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include "pcie-designware.h" #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 #define to_imx6_pcie(x) dev_get_drvdata((x)->dev) enum imx6_pcie_variants { IMX6Q, IMX6SX, IMX6QP, IMX7D, IMX8MQ, IMX8MM, IMX8MP, IMX8MQ_EP, IMX8MM_EP, IMX8MP_EP, }; #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) #define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) struct imx6_pcie_drvdata { enum imx6_pcie_variants variant; enum dw_pcie_device_mode mode; u32 flags; int dbi_length; const char *gpr; }; struct imx6_pcie { struct dw_pcie *pci; int reset_gpio; bool gpio_active_high; bool link_is_up; struct clk *pcie_bus; struct clk *pcie_phy; struct clk *pcie_inbound_axi; struct clk *pcie; struct clk *pcie_aux; struct regmap *iomuxc_gpr; u16 msi_ctrl; u32 controller_id; struct reset_control *pciephy_reset; struct reset_control *apps_reset; struct reset_control *turnoff_reset; u32 tx_deemph_gen1; u32 tx_deemph_gen2_3p5db; u32 tx_deemph_gen2_6db; u32 tx_swing_full; u32 tx_swing_low; struct regulator *vpcie; struct regulator *vph; void __iomem *phy_base; /* power domain for pcie */ struct device *pd_pcie; /* power domain for pcie phy */ struct device *pd_pcie_phy; struct phy *phy; const struct imx6_pcie_drvdata *drvdata; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 #define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) /* PCIe Port Logic registers (memory-mapped) */ #define PL_OFFSET 0x700 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x)) #define PCIE_PHY_CTRL_CAP_ADR BIT(16) #define PCIE_PHY_CTRL_CAP_DAT BIT(17) #define PCIE_PHY_CTRL_WR BIT(18) #define PCIE_PHY_CTRL_RD BIT(19) #define PCIE_PHY_STAT (PL_OFFSET + 0x110) #define PCIE_PHY_STAT_ACK BIT(16) /* PHY registers (not memory-mapped) */ #define PCIE_PHY_ATEOVRD 0x10 #define PCIE_PHY_ATEOVRD_EN BIT(2) #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9) #define PCIE_PHY_RX_ASIC_OUT 0x100D #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) /* iMX7 PCIe PHY registers */ #define PCIE_PHY_CMN_REG4 0x14 /* These are probably the bits that *aren't* DCC_FB_EN */ #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29 
#define PCIE_PHY_CMN_REG15 0x54 #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2) #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5) #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7) #define PCIE_PHY_CMN_REG24 0x90 #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6) #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3) #define PCIE_PHY_CMN_REG26 0x98 #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC #define PHY_RX_OVRD_IN_LO 0x1005 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) { WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ && imx6_pcie->drvdata->variant != IMX8MQ_EP && imx6_pcie->drvdata->variant != IMX8MM && imx6_pcie->drvdata->variant != IMX8MM_EP && imx6_pcie->drvdata->variant != IMX8MP && imx6_pcie->drvdata->variant != IMX8MP_EP); return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; } static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) { unsigned int mask, val, mode; if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) mode = PCI_EXP_TYPE_ENDPOINT; else mode = PCI_EXP_TYPE_ROOT_PORT; switch (imx6_pcie->drvdata->variant) { case IMX8MQ: case IMX8MQ_EP: if (imx6_pcie->controller_id == 1) { mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE; val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, mode); } else { mask = IMX6Q_GPR12_DEVICE_TYPE; val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode); } break; default: mask = IMX6Q_GPR12_DEVICE_TYPE; val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode); break; } regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val); } static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val) { struct dw_pcie *pci = imx6_pcie->pci; bool val; u32 max_iterations = 10; u32 wait_counter = 0; do { val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) & PCIE_PHY_STAT_ACK; wait_counter++; if (val == exp_val) return 0; udelay(1); } while (wait_counter < max_iterations); return -ETIMEDOUT; } static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) { struct dw_pcie *pci = imx6_pcie->pci; u32 val; int ret; val = PCIE_PHY_CTRL_DATA(addr); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); val |= PCIE_PHY_CTRL_CAP_ADR; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; val = PCIE_PHY_CTRL_DATA(addr); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); return pcie_phy_poll_ack(imx6_pcie, false); } /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data) { struct dw_pcie *pci = imx6_pcie->pci; u32 phy_ctl; int ret; ret = pcie_phy_wait_ack(imx6_pcie, addr); if (ret) return ret; /* assert Read signal */ phy_ctl = PCIE_PHY_CTRL_RD; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); /* deassert Read signal */ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); return pcie_phy_poll_ack(imx6_pcie, false); } static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) { struct dw_pcie *pci = imx6_pcie->pci; u32 var; int ret; /* write addr */ /* cap addr */ ret = pcie_phy_wait_ack(imx6_pcie, addr); if (ret) return ret; var = PCIE_PHY_CTRL_DATA(data); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* capture data */ var |= PCIE_PHY_CTRL_CAP_DAT; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; /* deassert cap data */ var = PCIE_PHY_CTRL_DATA(data); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 
var); /* wait for ack de-assertion */ ret = pcie_phy_poll_ack(imx6_pcie, false); if (ret) return ret; /* assert wr signal */ var = PCIE_PHY_CTRL_WR; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack */ ret = pcie_phy_poll_ack(imx6_pcie, true); if (ret) return ret; /* deassert wr signal */ var = PCIE_PHY_CTRL_DATA(data); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ ret = pcie_phy_poll_ack(imx6_pcie, false); if (ret) return ret; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); return 0; } static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) { switch (imx6_pcie->drvdata->variant) { case IMX8MM: case IMX8MM_EP: case IMX8MP: case IMX8MP_EP: /* * The PHY initialization had been done in the PHY * driver, break here directly. */ break; case IMX8MQ: case IMX8MQ_EP: /* * TODO: Currently this code assumes external * oscillator is being used */ regmap_update_bits(imx6_pcie->iomuxc_gpr, imx6_pcie_grp_offset(imx6_pcie), IMX8MQ_GPR_PCIE_REF_USE_PAD, IMX8MQ_GPR_PCIE_REF_USE_PAD); /* * Regarding the datasheet, the PCIE_VPH is suggested * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the * VREG_BYPASS should be cleared to zero. */ if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000) regmap_update_bits(imx6_pcie->iomuxc_gpr, imx6_pcie_grp_offset(imx6_pcie), IMX8MQ_GPR_PCIE_VREG_BYPASS, 0); break; case IMX7D: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); break; case IMX6SX: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); fallthrough; default: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); /* configure constant input signal to the pcie ctrl and phy */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_LOS_LEVEL, 9 << 4); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_DEEMPH_GEN1, imx6_pcie->tx_deemph_gen1 << 0); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, imx6_pcie->tx_deemph_gen2_3p5db << 6); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, imx6_pcie->tx_deemph_gen2_6db << 12); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_SWING_FULL, imx6_pcie->tx_swing_full << 18); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_SWING_LOW, imx6_pcie->tx_swing_low << 25); break; } imx6_pcie_configure_type(imx6_pcie); } static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) { u32 val; struct device *dev = imx6_pcie->pci->dev; if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, val, val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, PHY_PLL_LOCK_WAIT_USLEEP_MAX, PHY_PLL_LOCK_WAIT_TIMEOUT)) dev_err(dev, "PCIe PLL lock timeout\n"); } static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) { unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); int mult, div; u16 val; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) return 0; switch (phy_rate) { case 125000000: /* * The default settings of the MPLL are for a 125MHz input * clock, so no need to reconfigure anything in that case. 
*/ return 0; case 100000000: mult = 25; div = 0; break; case 200000000: mult = 25; div = 1; break; default: dev_err(imx6_pcie->pci->dev, "Unsupported PHY reference clock rate %lu\n", phy_rate); return -EINVAL; } pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << PCIE_PHY_MPLL_MULTIPLIER_SHIFT); val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val); val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; val |= PCIE_PHY_ATEOVRD_EN; pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val); return 0; } static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) { u16 tmp; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) return; pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); usleep_range(2000, 3000); pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); } #ifdef CONFIG_ARM /* Added for PCI abort handling */ static int imx6q_pcie_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); unsigned long instr = *(unsigned long *)pc; int reg = (instr >> 12) & 15; /* * If the instruction being executed was a read, * make it look like it read all-ones. */ if ((instr & 0x0c100000) == 0x04100000) { unsigned long val; if (instr & 0x00400000) val = 255; else val = -1; regs->uregs[reg] = val; regs->ARM_pc += 4; return 0; } if ((instr & 0x0e100090) == 0x00100090) { regs->uregs[reg] = -1; regs->ARM_pc += 4; return 0; } return 1; } #endif static int imx6_pcie_attach_pd(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); struct device_link *link; /* Do nothing when in a single power domain */ if (dev->pm_domain) return 0; imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); if (IS_ERR(imx6_pcie->pd_pcie)) return PTR_ERR(imx6_pcie->pd_pcie); /* Do nothing when power domain missing */ if (!imx6_pcie->pd_pcie) return 0; link = device_link_add(dev, imx6_pcie->pd_pcie, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!link) { dev_err(dev, "Failed to add device_link to pcie pd.\n"); return -EINVAL; } imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); if (IS_ERR(imx6_pcie->pd_pcie_phy)) return PTR_ERR(imx6_pcie->pd_pcie_phy); link = device_link_add(dev, imx6_pcie->pd_pcie_phy, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!link) { dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); return -EINVAL; } return 0; } static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; struct device *dev = pci->dev; unsigned int offset; int ret = 0; switch (imx6_pcie->drvdata->variant) { case IMX6SX: ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); if (ret) { dev_err(dev, "unable to enable pcie_axi clock\n"); break; } regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); break; case IMX6QP: case IMX6Q: /* power up core phy and enable ref clock */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); /* * the async reset input need ref clock to sync 
internally, * when the ref clock comes after reset, internal synced * reset time is too short, cannot meet the requirement. * add one ~10us delay here. */ usleep_range(10, 100); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); break; case IMX7D: break; case IMX8MM: case IMX8MM_EP: case IMX8MQ: case IMX8MQ_EP: case IMX8MP: case IMX8MP_EP: ret = clk_prepare_enable(imx6_pcie->pcie_aux); if (ret) { dev_err(dev, "unable to enable pcie_aux clock\n"); break; } offset = imx6_pcie_grp_offset(imx6_pcie); /* * Set the over ride low and enabled * make sure that REF_CLK is turned on. */ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, 0); regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); break; } return ret; } static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie) { switch (imx6_pcie->drvdata->variant) { case IMX6SX: clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); break; case IMX6QP: case IMX6Q: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 0); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, IMX6Q_GPR1_PCIE_TEST_PD); break; case IMX7D: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); break; case IMX8MM: case IMX8MM_EP: case IMX8MQ: case IMX8MQ_EP: case IMX8MP: case IMX8MP_EP: clk_disable_unprepare(imx6_pcie->pcie_aux); break; default: break; } } static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; struct device *dev = pci->dev; int ret; ret = clk_prepare_enable(imx6_pcie->pcie_phy); if (ret) { dev_err(dev, "unable to enable pcie_phy clock\n"); return ret; } ret = clk_prepare_enable(imx6_pcie->pcie_bus); if (ret) { dev_err(dev, "unable to enable pcie_bus clock\n"); goto err_pcie_bus; } ret = clk_prepare_enable(imx6_pcie->pcie); if (ret) { dev_err(dev, "unable to enable pcie clock\n"); goto err_pcie; } ret = imx6_pcie_enable_ref_clk(imx6_pcie); if (ret) { dev_err(dev, "unable to enable pcie ref clock\n"); goto err_ref_clk; } /* allow the clocks to stabilize */ usleep_range(200, 500); return 0; err_ref_clk: clk_disable_unprepare(imx6_pcie->pcie); err_pcie: clk_disable_unprepare(imx6_pcie->pcie_bus); err_pcie_bus: clk_disable_unprepare(imx6_pcie->pcie_phy); return ret; } static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) { imx6_pcie_disable_ref_clk(imx6_pcie); clk_disable_unprepare(imx6_pcie->pcie); clk_disable_unprepare(imx6_pcie->pcie_bus); clk_disable_unprepare(imx6_pcie->pcie_phy); } static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { switch (imx6_pcie->drvdata->variant) { case IMX7D: case IMX8MQ: case IMX8MQ_EP: reset_control_assert(imx6_pcie->pciephy_reset); fallthrough; case IMX8MM: case IMX8MM_EP: case IMX8MP: case IMX8MP_EP: reset_control_assert(imx6_pcie->apps_reset); break; case IMX6SX: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_TEST_POWERDOWN, IMX6SX_GPR12_PCIE_TEST_POWERDOWN); /* Force PCIe PHY reset */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET, IMX6SX_GPR5_PCIE_BTNRST_RESET); break; case IMX6QP: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST, IMX6Q_GPR1_PCIE_SW_RST); break; case IMX6Q: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); 
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); break; } /* Some boards don't have PCIe reset GPIO. */ if (gpio_is_valid(imx6_pcie->reset_gpio)) gpio_set_value_cansleep(imx6_pcie->reset_gpio, imx6_pcie->gpio_active_high); } static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; struct device *dev = pci->dev; switch (imx6_pcie->drvdata->variant) { case IMX8MQ: case IMX8MQ_EP: reset_control_deassert(imx6_pcie->pciephy_reset); break; case IMX7D: reset_control_deassert(imx6_pcie->pciephy_reset); /* Workaround for ERR010728, failure of PCI-e PLL VCO to * oscillate, especially when cold. This turns off "Duty-cycle * Corrector" and other mysterious undocumented things. */ if (likely(imx6_pcie->phy_base)) { /* De-assert DCC_FB_EN */ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx6_pcie->phy_base + PCIE_PHY_CMN_REG4); /* Assert RX_EQS and RX_EQS_SEL */ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ, imx6_pcie->phy_base + PCIE_PHY_CMN_REG24); /* Assert ATT_MODE */ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx6_pcie->phy_base + PCIE_PHY_CMN_REG26); } else { dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); } imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); break; case IMX6SX: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); break; case IMX6QP: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST, 0); usleep_range(200, 500); break; case IMX6Q: /* Nothing to do */ case IMX8MM: case IMX8MM_EP: case IMX8MP: case IMX8MP_EP: break; } /* Some boards don't have PCIe reset GPIO. */ if (gpio_is_valid(imx6_pcie->reset_gpio)) { msleep(100); gpio_set_value_cansleep(imx6_pcie->reset_gpio, !imx6_pcie->gpio_active_high); /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ msleep(100); } return 0; } static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; struct device *dev = pci->dev; u32 tmp; unsigned int retries; for (retries = 0; retries < 200; retries++) { tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); /* Test if the speed change finished. */ if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) return 0; usleep_range(100, 1000); } dev_err(dev, "Speed change timeout\n"); return -ETIMEDOUT; } static void imx6_pcie_ltssm_enable(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); switch (imx6_pcie->drvdata->variant) { case IMX6Q: case IMX6SX: case IMX6QP: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, IMX6Q_GPR12_PCIE_CTL_2); break; case IMX7D: case IMX8MQ: case IMX8MQ_EP: case IMX8MM: case IMX8MM_EP: case IMX8MP: case IMX8MP_EP: reset_control_deassert(imx6_pcie->apps_reset); break; } } static void imx6_pcie_ltssm_disable(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); switch (imx6_pcie->drvdata->variant) { case IMX6Q: case IMX6SX: case IMX6QP: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0); break; case IMX7D: case IMX8MQ: case IMX8MQ_EP: case IMX8MM: case IMX8MM_EP: case IMX8MP: case IMX8MP_EP: reset_control_assert(imx6_pcie->apps_reset); break; } } static int imx6_pcie_start_link(struct dw_pcie *pci) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); struct device *dev = pci->dev; u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 tmp; int ret; /* * Force Gen1 operation when starting the link. 
In case the link is * started in Gen2 mode, there is a possibility the devices on the * bus will not be detected at all. This happens with PCIe switches. */ dw_pcie_dbi_ro_wr_en(pci); tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); tmp &= ~PCI_EXP_LNKCAP_SLS; tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); dw_pcie_dbi_ro_wr_dis(pci); /* Start LTSSM. */ imx6_pcie_ltssm_enable(dev); ret = dw_pcie_wait_for_link(pci); if (ret) goto err_reset_phy; if (pci->link_gen > 1) { /* Allow faster modes after the link is up */ dw_pcie_dbi_ro_wr_en(pci); tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); tmp &= ~PCI_EXP_LNKCAP_SLS; tmp |= pci->link_gen; dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); /* * Start Directed Speed Change so the best possible * speed both link partners support can be negotiated. */ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); tmp |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); dw_pcie_dbi_ro_wr_dis(pci); if (imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) { /* * On i.MX7, DIRECT_SPEED_CHANGE behaves differently * from i.MX6 family when no link speed transition * occurs and we go Gen1 -> yep, Gen1. The difference * is that, in such case, it will not be cleared by HW * which will cause the following code to report false * failure. */ ret = imx6_pcie_wait_for_speed_change(imx6_pcie); if (ret) { dev_err(dev, "Failed to bring link up!\n"); goto err_reset_phy; } } /* Make sure link training is finished as well! */ ret = dw_pcie_wait_for_link(pci); if (ret) goto err_reset_phy; } else { dev_info(dev, "Link: Only Gen1 is enabled\n"); } imx6_pcie->link_is_up = true; tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); return 0; err_reset_phy: imx6_pcie->link_is_up = false; dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); imx6_pcie_reset_phy(imx6_pcie); return 0; } static void imx6_pcie_stop_link(struct dw_pcie *pci) { struct device *dev = pci->dev; /* Turn off PCIe LTSSM */ imx6_pcie_ltssm_disable(dev); } static int imx6_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); int ret; if (imx6_pcie->vpcie) { ret = regulator_enable(imx6_pcie->vpcie); if (ret) { dev_err(dev, "failed to enable vpcie regulator: %d\n", ret); return ret; } } imx6_pcie_assert_core_reset(imx6_pcie); imx6_pcie_init_phy(imx6_pcie); ret = imx6_pcie_clk_enable(imx6_pcie); if (ret) { dev_err(dev, "unable to enable pcie clocks: %d\n", ret); goto err_reg_disable; } if (imx6_pcie->phy) { ret = phy_init(imx6_pcie->phy); if (ret) { dev_err(dev, "pcie PHY power up failed\n"); goto err_clk_disable; } } if (imx6_pcie->phy) { ret = phy_power_on(imx6_pcie->phy); if (ret) { dev_err(dev, "waiting for PHY ready timeout!\n"); goto err_phy_off; } } ret = imx6_pcie_deassert_core_reset(imx6_pcie); if (ret < 0) { dev_err(dev, "pcie deassert core reset failed: %d\n", ret); goto err_phy_off; } imx6_setup_phy_mpll(imx6_pcie); return 0; err_phy_off: if (imx6_pcie->phy) phy_exit(imx6_pcie->phy); err_clk_disable: imx6_pcie_clk_disable(imx6_pcie); err_reg_disable: if (imx6_pcie->vpcie) regulator_disable(imx6_pcie->vpcie); return ret; } static void imx6_pcie_host_exit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct imx6_pcie 
*imx6_pcie = to_imx6_pcie(pci); if (imx6_pcie->phy) { if (phy_power_off(imx6_pcie->phy)) dev_err(pci->dev, "unable to power off PHY\n"); phy_exit(imx6_pcie->phy); } imx6_pcie_clk_disable(imx6_pcie); if (imx6_pcie->vpcie) regulator_disable(imx6_pcie->vpcie); } static const struct dw_pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, .host_deinit = imx6_pcie_host_exit, }; static const struct dw_pcie_ops dw_pcie_ops = { .start_link = imx6_pcie_start_link, .stop_link = imx6_pcie_stop_link, }; static void imx6_pcie_ep_init(struct dw_pcie_ep *ep) { enum pci_barno bar; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); for (bar = BAR_0; bar <= BAR_5; bar++) dw_pcie_ep_reset_bar(pci, bar); } static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: return dw_pcie_ep_raise_legacy_irq(ep, func_no); case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); case PCI_EPC_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); return -EINVAL; } return 0; } static const struct pci_epc_features imx8m_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = false, .reserved_bar = 1 << BAR_1 | 1 << BAR_3, .align = SZ_64K, }; static const struct pci_epc_features* imx6_pcie_ep_get_features(struct dw_pcie_ep *ep) { return &imx8m_pcie_epc_features; } static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = imx6_pcie_ep_init, .raise_irq = imx6_pcie_ep_raise_irq, .get_features = imx6_pcie_ep_get_features, }; static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, struct platform_device *pdev) { int ret; unsigned int pcie_dbi2_offset; struct dw_pcie_ep *ep; struct resource *res; struct dw_pcie *pci = imx6_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = pci->dev; imx6_pcie_host_init(pp); ep = &pci->ep; ep->ops = &pcie_ep_ops; switch (imx6_pcie->drvdata->variant) { case IMX8MQ_EP: case IMX8MM_EP: case IMX8MP_EP: pcie_dbi2_offset = SZ_1M; break; default: pcie_dbi2_offset = SZ_4K; break; } pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) return -EINVAL; ep->phys_base = res->start; ep->addr_size = resource_size(res); ep->page_size = SZ_64K; ret = dw_pcie_ep_init(ep); if (ret) { dev_err(dev, "failed to initialize endpoint\n"); return ret; } /* Start LTSSM. */ imx6_pcie_ltssm_enable(dev); return 0; } static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) { struct device *dev = imx6_pcie->pci->dev; /* Some variants have a turnoff reset in DT */ if (imx6_pcie->turnoff_reset) { reset_control_assert(imx6_pcie->turnoff_reset); reset_control_deassert(imx6_pcie->turnoff_reset); goto pm_turnoff_sleep; } /* Others poke directly at IOMUXC registers */ switch (imx6_pcie->drvdata->variant) { case IMX6SX: case IMX6QP: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF, IMX6SX_GPR12_PCIE_PM_TURN_OFF); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0); break; default: dev_err(dev, "PME_Turn_Off not implemented\n"); return; } /* * Components with an upstream port must respond to * PME_Turn_Off with PME_TO_Ack but we can't check. * * The standard recommends a 1-10ms timeout after which to * proceed anyway as if acks were received. 
*/ pm_turnoff_sleep: usleep_range(1000, 10000); } static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save) { u8 offset; u16 val; struct dw_pcie *pci = imx6_pcie->pci; if (pci_msi_enabled()) { offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); if (save) { val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); imx6_pcie->msi_ctrl = val; } else { dw_pcie_dbi_ro_wr_en(pci); val = imx6_pcie->msi_ctrl; dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); dw_pcie_dbi_ro_wr_dis(pci); } } } static int imx6_pcie_suspend_noirq(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); struct dw_pcie_rp *pp = &imx6_pcie->pci->pp; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; imx6_pcie_msi_save_restore(imx6_pcie, true); imx6_pcie_pm_turnoff(imx6_pcie); imx6_pcie_stop_link(imx6_pcie->pci); imx6_pcie_host_exit(pp); return 0; } static int imx6_pcie_resume_noirq(struct device *dev) { int ret; struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); struct dw_pcie_rp *pp = &imx6_pcie->pci->pp; if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; ret = imx6_pcie_host_init(pp); if (ret) return ret; imx6_pcie_msi_save_restore(imx6_pcie, false); dw_pcie_setup_rc(pp); if (imx6_pcie->link_is_up) imx6_pcie_start_link(imx6_pcie->pci); return 0; } static const struct dev_pm_ops imx6_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq, imx6_pcie_resume_noirq) }; static int imx6_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; struct imx6_pcie *imx6_pcie; struct device_node *np; struct resource *dbi_base; struct device_node *node = dev->of_node; int ret; u16 val; imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); if (!imx6_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->ops = &dw_pcie_ops; pci->pp.ops = &imx6_pcie_host_ops; imx6_pcie->pci = pci; imx6_pcie->drvdata = of_device_get_match_data(dev); /* Find the PHY if one is defined, only imx7d uses it */ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); if (np) { struct resource res; ret = of_address_to_resource(np, 0, &res); if (ret) { dev_err(dev, "Unable to map PCIe PHY\n"); return ret; } imx6_pcie->phy_base = devm_ioremap_resource(dev, &res); if (IS_ERR(imx6_pcie->phy_base)) return PTR_ERR(imx6_pcie->phy_base); } pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); /* Fetch GPIOs */ imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); imx6_pcie->gpio_active_high = of_property_read_bool(node, "reset-gpio-active-high"); if (gpio_is_valid(imx6_pcie->reset_gpio)) { ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, imx6_pcie->gpio_active_high ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, "PCIe reset"); if (ret) { dev_err(dev, "unable to get reset gpio\n"); return ret; } } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { return imx6_pcie->reset_gpio; } /* Fetch clocks */ imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); if (IS_ERR(imx6_pcie->pcie_bus)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus), "pcie_bus clock source missing or invalid\n"); imx6_pcie->pcie = devm_clk_get(dev, "pcie"); if (IS_ERR(imx6_pcie->pcie)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie), "pcie clock source missing or invalid\n"); switch (imx6_pcie->drvdata->variant) { case IMX6SX: imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, "pcie_inbound_axi"); if (IS_ERR(imx6_pcie->pcie_inbound_axi)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi), "pcie_inbound_axi clock missing or invalid\n"); break; case IMX8MQ: case IMX8MQ_EP: imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); if (IS_ERR(imx6_pcie->pcie_aux)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), "pcie_aux clock source missing or invalid\n"); fallthrough; case IMX7D: if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) imx6_pcie->controller_id = 1; imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); if (IS_ERR(imx6_pcie->pciephy_reset)) { dev_err(dev, "Failed to get PCIEPHY reset control\n"); return PTR_ERR(imx6_pcie->pciephy_reset); } imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); if (IS_ERR(imx6_pcie->apps_reset)) { dev_err(dev, "Failed to get PCIE APPS reset control\n"); return PTR_ERR(imx6_pcie->apps_reset); } break; case IMX8MM: case IMX8MM_EP: case IMX8MP: case IMX8MP_EP: imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); if (IS_ERR(imx6_pcie->pcie_aux)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), "pcie_aux clock source missing or invalid\n"); imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); if (IS_ERR(imx6_pcie->apps_reset)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset), "failed to get pcie apps reset control\n"); imx6_pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(imx6_pcie->phy)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy), "failed to get pcie phy\n"); break; default: break; } /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */ if (imx6_pcie->phy == NULL) { imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); if (IS_ERR(imx6_pcie->pcie_phy)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy), "pcie_phy clock source missing or invalid\n"); } /* Grab turnoff reset */ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); if (IS_ERR(imx6_pcie->turnoff_reset)) { dev_err(dev, "Failed to get TURNOFF reset control\n"); return PTR_ERR(imx6_pcie->turnoff_reset); } /* Grab GPR config register range */ imx6_pcie->iomuxc_gpr = syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr); if (IS_ERR(imx6_pcie->iomuxc_gpr)) { dev_err(dev, "unable to find iomuxc registers\n"); return PTR_ERR(imx6_pcie->iomuxc_gpr); } /* Grab PCIe PHY Tx Settings */ if (of_property_read_u32(node, "fsl,tx-deemph-gen1", &imx6_pcie->tx_deemph_gen1)) imx6_pcie->tx_deemph_gen1 = 0; if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", &imx6_pcie->tx_deemph_gen2_3p5db)) imx6_pcie->tx_deemph_gen2_3p5db = 0; if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", &imx6_pcie->tx_deemph_gen2_6db)) imx6_pcie->tx_deemph_gen2_6db = 20; if (of_property_read_u32(node, "fsl,tx-swing-full", &imx6_pcie->tx_swing_full)) imx6_pcie->tx_swing_full = 
127; if (of_property_read_u32(node, "fsl,tx-swing-low", &imx6_pcie->tx_swing_low)) imx6_pcie->tx_swing_low = 127; /* Limit link speed */ pci->link_gen = 1; of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen); imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); if (IS_ERR(imx6_pcie->vpcie)) { if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) return PTR_ERR(imx6_pcie->vpcie); imx6_pcie->vpcie = NULL; } imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); if (IS_ERR(imx6_pcie->vph)) { if (PTR_ERR(imx6_pcie->vph) != -ENODEV) return PTR_ERR(imx6_pcie->vph); imx6_pcie->vph = NULL; } platform_set_drvdata(pdev, imx6_pcie); ret = imx6_pcie_attach_pd(dev); if (ret) return ret; if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { ret = imx6_add_pcie_ep(imx6_pcie, pdev); if (ret < 0) return ret; } else { ret = dw_pcie_host_init(&pci->pp); if (ret < 0) return ret; if (pci_msi_enabled()) { u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); val |= PCI_MSI_FLAGS_ENABLE; dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); } } return 0; } static void imx6_pcie_shutdown(struct platform_device *pdev) { struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); /* bring down link, so bootloader gets clean state in case of reboot */ imx6_pcie_assert_core_reset(imx6_pcie); } static const struct imx6_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, .flags = IMX6_PCIE_FLAG_IMX6_PHY | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", }, [IMX6SX] = { .variant = IMX6SX, .flags = IMX6_PCIE_FLAG_IMX6_PHY | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx6q-iomuxc-gpr", }, [IMX6QP] = { .variant = IMX6QP, .flags = IMX6_PCIE_FLAG_IMX6_PHY | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", }, [IMX7D] = { .variant = IMX7D, .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx7d-iomuxc-gpr", }, [IMX8MQ] = { .variant = IMX8MQ, .gpr = "fsl,imx8mq-iomuxc-gpr", }, [IMX8MM] = { .variant = IMX8MM, .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx8mm-iomuxc-gpr", }, [IMX8MP] = { .variant = IMX8MP, .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx8mp-iomuxc-gpr", }, [IMX8MQ_EP] = { .variant = IMX8MQ_EP, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mq-iomuxc-gpr", }, [IMX8MM_EP] = { .variant = IMX8MM_EP, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mm-iomuxc-gpr", }, [IMX8MP_EP] = { .variant = IMX8MP_EP, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mp-iomuxc-gpr", }, }; static const struct of_device_id imx6_pcie_of_match[] = { { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], }, { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], }, { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], }, { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], }, { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], }, { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], }, {}, }; static struct platform_driver imx6_pcie_driver = { .driver = { .name = "imx6q-pcie", .of_match_table = imx6_pcie_of_match, .suppress_bind_attrs = true, .pm = &imx6_pcie_pm_ops, .probe_type = 
PROBE_PREFER_ASYNCHRONOUS, }, .probe = imx6_pcie_probe, .shutdown = imx6_pcie_shutdown, }; static void imx6_pcie_quirk(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; struct dw_pcie_rp *pp = bus->sysdata; /* Bus parent is the PCI bridge, its parent is this platform driver */ if (!bus->dev.parent || !bus->dev.parent->parent) return; /* Make sure we only quirk devices associated with this driver */ if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) return; if (pci_is_root_bus(bus)) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); /* * Limit config length to avoid the kernel reading beyond * the register set and causing an abort on i.MX 6Quad */ if (imx6_pcie->drvdata->dbi_length) { dev->cfg_size = imx6_pcie->drvdata->dbi_length; dev_info(&dev->dev, "Limiting cfg_size to %d\n", dev->cfg_size); } } } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk); static int __init imx6_pcie_init(void) { #ifdef CONFIG_ARM struct device_node *np; np = of_find_matching_node(NULL, imx6_pcie_of_match); if (!np) return -ENODEV; of_node_put(np); /* * Since probe() can be deferred we need to make sure that * hook_fault_code is not called after __init memory is freed * by kernel and since imx6q_pcie_abort_handler() is a no-op, * we can install the handler here without risking it * accessing some uninitialized driver state. */ hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, "external abort on non-linefetch"); #endif return platform_driver_register(&imx6_pcie_driver); } device_initcall(imx6_pcie_init);
linux-master
drivers/pci/controller/dwc/pci-imx6.c
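A point worth pulling out of the i.MX6 record above: before enabling the LTSSM the driver masks the Link Capabilities register down to Gen1 (some switches are not detected if training starts at Gen2), and only after the link is up does it restore the target speed and kick a directed speed change. The following is a minimal, hypothetical userspace sketch of just that field arithmetic, not driver code; the constants are the standard `PCI_EXP_LNKCAP_SLS` encoding from pci_regs.h, and the helper and example value are made up for illustration.

```c
/*
 * Hypothetical sketch of the LNKCAP speed masking done around link-up:
 * clear the Supported Link Speeds field (bits 3:0), then advertise only
 * the target generation.  Helper name and example value are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_SLS        0x0000000f  /* Supported Link Speeds field */
#define PCI_EXP_LNKCAP_SLS_2_5GB  0x00000001  /* Gen1 encoding */

static uint32_t lnkcap_limit_speed(uint32_t lnkcap, uint32_t gen)
{
	lnkcap &= ~PCI_EXP_LNKCAP_SLS;        /* drop the current speed field */
	lnkcap |= gen & PCI_EXP_LNKCAP_SLS;   /* advertise only the target gen */
	return lnkcap;
}

int main(void)
{
	uint32_t lnkcap = 0x00000042;         /* example: x4 width, Gen2 capable */

	/* Force Gen1 for initial training, then re-allow Gen2 afterwards. */
	printf("gen1: 0x%08x\n", lnkcap_limit_speed(lnkcap, PCI_EXP_LNKCAP_SLS_2_5GB));
	printf("gen2: 0x%08x\n", lnkcap_limit_speed(lnkcap, 2));
	return 0;
}
```

In the driver itself the same masking is done through `dw_pcie_readl_dbi()`/`dw_pcie_writel_dbi()` with `dbi_ro_wr` enabled, followed by setting `PORT_LOGIC_SPEED_CHANGE` to start the directed speed change.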
// SPDX-License-Identifier: GPL-2.0 /* * Qualcomm PCIe root complex driver * * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. * Copyright 2015 Linaro Limited. * * Author: Stanimir Varbanov <[email protected]> */ #include <linux/clk.h> #include <linux/crc8.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interconnect.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/phy/pcie.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/slab.h> #include <linux/types.h> #include "../../pci.h" #include "pcie-designware.h" /* PARF registers */ #define PARF_SYS_CTRL 0x00 #define PARF_PM_CTRL 0x20 #define PARF_PCS_DEEMPH 0x34 #define PARF_PCS_SWING 0x38 #define PARF_PHY_CTRL 0x40 #define PARF_PHY_REFCLK 0x4c #define PARF_CONFIG_BITS 0x50 #define PARF_DBI_BASE_ADDR 0x168 #define PARF_SLV_ADDR_SPACE_SIZE_2_3_3 0x16c /* Register offset specific to IP ver 2.3.3 */ #define PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 #define PARF_Q2A_FLUSH 0x1ac #define PARF_LTSSM 0x1b0 #define PARF_SID_OFFSET 0x234 #define PARF_BDF_TRANSLATE_CFG 0x24c #define PARF_SLV_ADDR_SPACE_SIZE 0x358 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_TABLE_N 0x2000 /* ELBI registers */ #define ELBI_SYS_CTRL 0x04 /* DBI registers */ #define AXI_MSTR_RESP_COMP_CTRL0 0x818 #define AXI_MSTR_RESP_COMP_CTRL1 0x81c /* MHI registers */ #define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88 /* PARF_SYS_CTRL register fields */ #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29) #define MST_WAKEUP_EN BIT(13) #define SLV_WAKEUP_EN BIT(12) #define MSTR_ACLK_CGC_DIS BIT(10) #define SLV_ACLK_CGC_DIS BIT(9) #define CORE_CLK_CGC_DIS BIT(6) #define AUX_PWR_DET BIT(4) #define L23_CLK_RMV_DIS BIT(2) #define L1_CLK_RMV_DIS BIT(1) /* PARF_PM_CTRL register fields */ #define REQ_NOT_ENTR_L1 BIT(5) /* PARF_PCS_DEEMPH register fields */ #define PCS_DEEMPH_TX_DEEMPH_GEN1(x) FIELD_PREP(GENMASK(21, 16), x) #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) FIELD_PREP(GENMASK(13, 8), x) #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) FIELD_PREP(GENMASK(5, 0), x) /* PARF_PCS_SWING register fields */ #define PCS_SWING_TX_SWING_FULL(x) FIELD_PREP(GENMASK(14, 8), x) #define PCS_SWING_TX_SWING_LOW(x) FIELD_PREP(GENMASK(6, 0), x) /* PARF_PHY_CTRL register fields */ #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x) #define PHY_TEST_PWR_DOWN BIT(0) /* PARF_PHY_REFCLK register fields */ #define PHY_REFCLK_SSP_EN BIT(16) #define PHY_REFCLK_USE_PAD BIT(12) /* PARF_CONFIG_BITS register fields */ #define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x) /* PARF_SLV_ADDR_SPACE_SIZE register value */ #define SLV_ADDR_SPACE_SZ 0x10000000 /* PARF_MHI_CLOCK_RESET_CTRL register fields */ #define AHB_CLK_EN BIT(0) #define MSTR_AXI_CLK_EN BIT(1) #define BYPASS BIT(4) /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */ #define EN BIT(31) /* PARF_LTSSM register fields */ #define LTSSM_EN BIT(8) /* 
PARF_DEVICE_TYPE register fields */ #define DEVICE_TYPE_RC 0x4 /* ELBI_SYS_CTRL register fields */ #define ELBI_SYS_CTRL_LT_ENABLE BIT(0) /* AXI_MSTR_RESP_COMP_CTRL0 register fields */ #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 /* AXI_MSTR_RESP_COMP_CTRL1 register fields */ #define CFG_BRIDGE_SB_INIT BIT(0) /* PCI_EXP_SLTCAP register fields */ #define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250) #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1) #define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \ PCI_EXP_SLTCAP_PCP | \ PCI_EXP_SLTCAP_MRLSP | \ PCI_EXP_SLTCAP_AIP | \ PCI_EXP_SLTCAP_PIP | \ PCI_EXP_SLTCAP_HPS | \ PCI_EXP_SLTCAP_EIP | \ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ PCIE_CAP_SLOT_POWER_LIMIT_SCALE) #define PERST_DELAY_US 1000 #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0)) #define QCOM_PCIE_1_0_0_MAX_CLOCKS 4 struct qcom_pcie_resources_1_0_0 { struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS]; struct reset_control *core; struct regulator *vdda; }; #define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 #define QCOM_PCIE_2_1_0_MAX_RESETS 6 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 struct qcom_pcie_resources_2_1_0 { struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS]; int num_resets; struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; }; #define QCOM_PCIE_2_3_2_MAX_CLOCKS 4 #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 struct qcom_pcie_resources_2_3_2 { struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS]; struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; }; #define QCOM_PCIE_2_3_3_MAX_CLOCKS 5 #define QCOM_PCIE_2_3_3_MAX_RESETS 7 struct qcom_pcie_resources_2_3_3 { struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS]; struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS]; }; #define QCOM_PCIE_2_4_0_MAX_CLOCKS 4 #define QCOM_PCIE_2_4_0_MAX_RESETS 12 struct qcom_pcie_resources_2_4_0 { struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; int num_clks; struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS]; int num_resets; }; #define QCOM_PCIE_2_7_0_MAX_CLOCKS 15 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2 struct qcom_pcie_resources_2_7_0 { struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS]; int num_clks; struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES]; struct reset_control *rst; }; #define QCOM_PCIE_2_9_0_MAX_CLOCKS 5 struct qcom_pcie_resources_2_9_0 { struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS]; struct reset_control *rst; }; union qcom_pcie_resources { struct qcom_pcie_resources_1_0_0 v1_0_0; struct qcom_pcie_resources_2_1_0 v2_1_0; struct qcom_pcie_resources_2_3_2 v2_3_2; struct qcom_pcie_resources_2_3_3 v2_3_3; struct qcom_pcie_resources_2_4_0 v2_4_0; struct qcom_pcie_resources_2_7_0 v2_7_0; struct qcom_pcie_resources_2_9_0 v2_9_0; }; struct qcom_pcie; struct qcom_pcie_ops { int (*get_resources)(struct qcom_pcie *pcie); int (*init)(struct qcom_pcie *pcie); int (*post_init)(struct qcom_pcie *pcie); void (*deinit)(struct qcom_pcie *pcie); void (*ltssm_enable)(struct qcom_pcie *pcie); int (*config_sid)(struct qcom_pcie *pcie); }; struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; }; struct qcom_pcie { struct dw_pcie *pci; void __iomem *parf; /* DT parf */ void __iomem *elbi; /* DT elbi */ void __iomem *mhi; union qcom_pcie_resources res; struct phy *phy; struct gpio_desc *reset; struct icc_path *icc_mem; const struct qcom_pcie_cfg *cfg; struct dentry *debugfs; bool 
suspended; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) static void qcom_ep_reset_assert(struct qcom_pcie *pcie) { gpiod_set_value_cansleep(pcie->reset, 1); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { /* Ensure that PERST has been asserted for at least 100 ms */ msleep(100); gpiod_set_value_cansleep(pcie->reset, 0); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } static int qcom_pcie_start_link(struct dw_pcie *pci) { struct qcom_pcie *pcie = to_qcom_pcie(pci); /* Enable Link Training state machine */ if (pcie->cfg->ops->ltssm_enable) pcie->cfg->ops->ltssm_enable(pcie); return 0; } static void qcom_pcie_clear_hpc(struct dw_pcie *pci) { u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; dw_pcie_dbi_ro_wr_en(pci); val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP); val &= ~PCI_EXP_SLTCAP_HPC; writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP); dw_pcie_dbi_ro_wr_dis(pci); } static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) { u32 val; /* enable link training */ val = readl(pcie->elbi + ELBI_SYS_CTRL); val |= ELBI_SYS_CTRL_LT_ENABLE; writel(val, pcie->elbi + ELBI_SYS_CTRL); } static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064"); int ret; res->supplies[0].supply = "vdda"; res->supplies[1].supply = "vdda_phy"; res->supplies[2].supply = "vdda_refclk"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), res->supplies); if (ret) return ret; res->clks[0].id = "iface"; res->clks[1].id = "core"; res->clks[2].id = "phy"; res->clks[3].id = "aux"; res->clks[4].id = "ref"; /* iface, core, phy are required */ ret = devm_clk_bulk_get(dev, 3, res->clks); if (ret < 0) return ret; /* aux, ref are optional */ ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); if (ret < 0) return ret; res->resets[0].id = "pci"; res->resets[1].id = "axi"; res->resets[2].id = "ahb"; res->resets[3].id = "por"; res->resets[4].id = "phy"; res->resets[5].id = "ext"; /* ext is optional on APQ8016 */ res->num_resets = is_apq ? 
5 : 6; ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); if (ret < 0) return ret; return 0; } static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); reset_control_bulk_assert(res->num_resets, res->resets); writel(1, pcie->parf + PARF_PHY_CTRL); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; /* reset the PCIe interface as uboot can leave it undefined state */ ret = reset_control_bulk_assert(res->num_resets, res->resets); if (ret < 0) { dev_err(dev, "cannot assert resets\n"); return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators\n"); return ret; } ret = reset_control_bulk_deassert(res->num_resets, res->resets); if (ret < 0) { dev_err(dev, "cannot deassert resets\n"); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); return ret; } return 0; } static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; struct device_node *node = dev->of_node; u32 val; int ret; /* enable PCIe clocks and resets */ val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); if (ret) return ret; if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), pcie->parf + PARF_PCS_DEEMPH); writel(PCS_SWING_TX_SWING_FULL(120) | PCS_SWING_TX_SWING_LOW(120), pcie->parf + PARF_PCS_SWING); writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS); } if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { /* set TX termination offset */ val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); writel(val, pcie->parf + PARF_PHY_CTRL); } /* enable external reference clock */ val = readl(pcie->parf + PARF_PHY_REFCLK); /* USE_PAD is required only for ipq806x */ if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) val &= ~PHY_REFCLK_USE_PAD; val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PARF_PHY_REFCLK); /* wait for clock acquisition */ usleep_range(1000, 1500); /* Set the Max TLP size to 2K, instead of using default of 4K */ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0); writel(CFG_BRIDGE_SB_INIT, pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1); qcom_pcie_clear_hpc(pcie->pci); return 0; } static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; res->vdda = devm_regulator_get(dev, "vdda"); if (IS_ERR(res->vdda)) return PTR_ERR(res->vdda); res->clks[0].id = "iface"; res->clks[1].id = "aux"; res->clks[2].id = "master_bus"; res->clks[3].id = "slave_bus"; ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); if (ret < 0) return ret; res->core = devm_reset_control_get_exclusive(dev, "core"); return PTR_ERR_OR_ZERO(res->core); 
} static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; reset_control_assert(res->core); clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); regulator_disable(res->vdda); } static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; ret = reset_control_deassert(res->core); if (ret) { dev_err(dev, "cannot deassert core reset\n"); return ret; } ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); if (ret) { dev_err(dev, "cannot prepare/enable clocks\n"); goto err_assert_reset; } ret = regulator_enable(res->vdda); if (ret) { dev_err(dev, "cannot enable vdda regulator\n"); goto err_disable_clks; } return 0; err_disable_clks: clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); err_assert_reset: reset_control_assert(res->core); return ret; } static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) { /* change DBI base address */ writel(0, pcie->parf + PARF_DBI_BASE_ADDR); if (IS_ENABLED(CONFIG_PCI_MSI)) { u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); val |= EN; writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); } qcom_pcie_clear_hpc(pcie->pci); return 0; } static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) { u32 val; /* enable link training */ val = readl(pcie->parf + PARF_LTSSM); val |= LTSSM_EN; writel(val, pcie->parf + PARF_LTSSM); } static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; res->supplies[0].supply = "vdda"; res->supplies[1].supply = "vddpe-3v3"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), res->supplies); if (ret) return ret; res->clks[0].id = "aux"; res->clks[1].id = "cfg"; res->clks[2].id = "bus_master"; res->clks[3].id = "bus_slave"; ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); if (ret < 0) return ret; return 0; } static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators\n"); return ret; } ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); if (ret) { dev_err(dev, "cannot prepare/enable clocks\n"); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); return ret; } return 0; } static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) { u32 val; /* enable PCIe clocks and resets */ val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); /* change DBI base address */ writel(0, pcie->parf + PARF_DBI_BASE_ADDR); /* MAC PHY_POWERDOWN MUX DISABLE */ val = readl(pcie->parf + PARF_SYS_CTRL); val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; writel(val, pcie->parf + PARF_SYS_CTRL); val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); val |= BYPASS; writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); val |= EN; writel(val, 
pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); qcom_pcie_clear_hpc(pcie->pci); return 0; } static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); int ret; res->clks[0].id = "aux"; res->clks[1].id = "master_bus"; res->clks[2].id = "slave_bus"; res->clks[3].id = "iface"; /* qcom,pcie-ipq4019 is defined without "iface" */ res->num_clks = is_ipq ? 3 : 4; ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); if (ret < 0) return ret; res->resets[0].id = "axi_m"; res->resets[1].id = "axi_s"; res->resets[2].id = "axi_m_sticky"; res->resets[3].id = "pipe_sticky"; res->resets[4].id = "pwr"; res->resets[5].id = "ahb"; res->resets[6].id = "pipe"; res->resets[7].id = "axi_m_vmid"; res->resets[8].id = "axi_s_xpu"; res->resets[9].id = "parf"; res->resets[10].id = "phy"; res->resets[11].id = "phy_ahb"; res->num_resets = is_ipq ? 12 : 6; ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets); if (ret < 0) return ret; return 0; } static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; reset_control_bulk_assert(res->num_resets, res->resets); clk_bulk_disable_unprepare(res->num_clks, res->clks); } static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; ret = reset_control_bulk_assert(res->num_resets, res->resets); if (ret < 0) { dev_err(dev, "cannot assert resets\n"); return ret; } usleep_range(10000, 12000); ret = reset_control_bulk_deassert(res->num_resets, res->resets); if (ret < 0) { dev_err(dev, "cannot deassert resets\n"); return ret; } usleep_range(10000, 12000); ret = clk_bulk_prepare_enable(res->num_clks, res->clks); if (ret) { reset_control_bulk_assert(res->num_resets, res->resets); return ret; } return 0; } static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; res->clks[0].id = "iface"; res->clks[1].id = "axi_m"; res->clks[2].id = "axi_s"; res->clks[3].id = "ahb"; res->clks[4].id = "aux"; ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); if (ret < 0) return ret; res->rst[0].id = "axi_m"; res->rst[1].id = "axi_s"; res->rst[2].id = "pipe"; res->rst[3].id = "axi_m_sticky"; res->rst[4].id = "sticky"; res->rst[5].id = "ahb"; res->rst[6].id = "sleep"; ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst); if (ret < 0) return ret; return 0; } static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); } static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); if (ret < 0) { dev_err(dev, "cannot assert resets\n"); return ret; } usleep_range(2000, 2500); ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst); if (ret < 0) { dev_err(dev, "cannot deassert resets\n"); return ret; } /* * Don't have a way to see if the reset has completed. * Wait for some time. 
*/ usleep_range(2000, 2500); ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); if (ret) { dev_err(dev, "cannot prepare/enable clocks\n"); goto err_assert_resets; } return 0; err_assert_resets: /* * Not checking for failure, will anyway return * the original failure in 'ret'. */ reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst); return ret; } static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3); val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); writel(0, pcie->parf + PARF_DBI_BASE_ADDR); writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, pcie->parf + PARF_SYS_CTRL); writel(0, pcie->parf + PARF_Q2A_FLUSH); writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); dw_pcie_dbi_ro_wr_en(pci); writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); val &= ~PCI_EXP_LNKCAP_ASPMS; writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + PCI_EXP_DEVCTL2); dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; unsigned int num_clks, num_opt_clks; unsigned int idx; int ret; res->rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(res->rst)) return PTR_ERR(res->rst); res->supplies[0].supply = "vdda"; res->supplies[1].supply = "vddpe-3v3"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), res->supplies); if (ret) return ret; idx = 0; res->clks[idx++].id = "aux"; res->clks[idx++].id = "cfg"; res->clks[idx++].id = "bus_master"; res->clks[idx++].id = "bus_slave"; res->clks[idx++].id = "slave_q2a"; num_clks = idx; ret = devm_clk_bulk_get(dev, num_clks, res->clks); if (ret < 0) return ret; res->clks[idx++].id = "tbu"; res->clks[idx++].id = "ddrss_sf_tbu"; res->clks[idx++].id = "aggre0"; res->clks[idx++].id = "aggre1"; res->clks[idx++].id = "noc_aggr"; res->clks[idx++].id = "noc_aggr_4"; res->clks[idx++].id = "noc_aggr_south_sf"; res->clks[idx++].id = "cnoc_qx"; res->clks[idx++].id = "sleep"; res->clks[idx++].id = "cnoc_sf_axi"; num_opt_clks = idx - num_clks; res->num_clks = idx; ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks); if (ret < 0) return ret; return 0; } static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; u32 val; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators\n"); return ret; } ret = clk_bulk_prepare_enable(res->num_clks, res->clks); if (ret < 0) goto err_disable_regulators; ret = reset_control_assert(res->rst); if (ret) { dev_err(dev, "reset assert failed (%d)\n", ret); goto err_disable_clocks; } usleep_range(1000, 1500); ret = reset_control_deassert(res->rst); if (ret) { dev_err(dev, "reset deassert failed (%d)\n", ret); goto err_disable_clocks; } /* Wait for reset to complete, required on SM8450 */ usleep_range(1000, 1500); /* configure PCIe to RC mode */ writel(DEVICE_TYPE_RC, pcie->parf + 
PARF_DEVICE_TYPE); /* enable PCIe clocks and resets */ val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); /* change DBI base address */ writel(0, pcie->parf + PARF_DBI_BASE_ADDR); /* MAC PHY_POWERDOWN MUX DISABLE */ val = readl(pcie->parf + PARF_SYS_CTRL); val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; writel(val, pcie->parf + PARF_SYS_CTRL); val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); val |= BYPASS; writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); /* Enable L1 and L1SS */ val = readl(pcie->parf + PARF_PM_CTRL); val &= ~REQ_NOT_ENTR_L1; writel(val, pcie->parf + PARF_PM_CTRL); val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); val |= EN; writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); return 0; err_disable_clocks: clk_bulk_disable_unprepare(res->num_clks, res->clks); err_disable_regulators: regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); return ret; } static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) { qcom_pcie_clear_hpc(pcie->pci); return 0; } static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; clk_bulk_disable_unprepare(res->num_clks, res->clks); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) { /* iommu map structure */ struct { u32 bdf; u32 phandle; u32 smmu_sid; u32 smmu_sid_len; } *map; void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; struct device *dev = pcie->pci->dev; u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; int i, nr_map, size = 0; u32 smmu_sid_base; of_get_property(dev->of_node, "iommu-map", &size); if (!size) return 0; map = kzalloc(size, GFP_KERNEL); if (!map) return -ENOMEM; of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, size / sizeof(u32)); nr_map = size / (sizeof(*map)); crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); /* Registers need to be zero out first */ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); /* Extract the SMMU SID base from the first entry of iommu-map */ smmu_sid_base = map[0].smmu_sid; /* Look for an available entry to hold the mapping */ for (i = 0; i < nr_map; i++) { __be16 bdf_be = cpu_to_be16(map[i].bdf); u32 val; u8 hash; hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0); val = readl(bdf_to_sid_base + hash * sizeof(u32)); /* If the register is already populated, look for next available entry */ while (val) { u8 current_hash = hash++; u8 next_mask = 0xff; /* If NEXT field is NULL then update it with next hash */ if (!(val & next_mask)) { val |= (u32)hash; writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); } val = readl(bdf_to_sid_base + hash * sizeof(u32)); } /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; writel(val, bdf_to_sid_base + hash * sizeof(u32)); } kfree(map); return 0; } static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; int ret; res->clks[0].id = "iface"; res->clks[1].id = "axi_m"; res->clks[2].id = "axi_s"; res->clks[3].id = "axi_bridge"; res->clks[4].id = "rchng"; ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); if (ret < 0) return ret; res->rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(res->rst)) return PTR_ERR(res->rst); return 0; } static void 
qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); } static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; struct device *dev = pcie->pci->dev; int ret; ret = reset_control_assert(res->rst); if (ret) { dev_err(dev, "reset assert failed (%d)\n", ret); return ret; } /* * Delay periods before and after reset deassert are working values * from downstream Codeaurora kernel */ usleep_range(2000, 2500); ret = reset_control_deassert(res->rst); if (ret) { dev_err(dev, "reset deassert failed (%d)\n", ret); return ret; } usleep_range(2000, 2500); return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); } static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; int i; writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); writel(0, pcie->parf + PARF_DBI_BASE_ADDR); writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, pci->dbi_base + GEN3_RELATED_OFF); writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, pcie->parf + PARF_SYS_CTRL); writel(0, pcie->parf + PARF_Q2A_FLUSH); dw_pcie_dbi_ro_wr_en(pci); writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); val &= ~PCI_EXP_LNKCAP_ASPMS; writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + PCI_EXP_DEVCTL2); dw_pcie_dbi_ro_wr_dis(pci); for (i = 0; i < 256; i++) writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i)); return 0; } static int qcom_pcie_link_up(struct dw_pcie *pci) { u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); return !!(val & PCI_EXP_LNKSTA_DLLLA); } static int qcom_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct qcom_pcie *pcie = to_qcom_pcie(pci); int ret; qcom_ep_reset_assert(pcie); ret = pcie->cfg->ops->init(pcie); if (ret) return ret; ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); if (ret) goto err_deinit; ret = phy_power_on(pcie->phy); if (ret) goto err_deinit; if (pcie->cfg->ops->post_init) { ret = pcie->cfg->ops->post_init(pcie); if (ret) goto err_disable_phy; } qcom_ep_reset_deassert(pcie); if (pcie->cfg->ops->config_sid) { ret = pcie->cfg->ops->config_sid(pcie); if (ret) goto err_assert_reset; } return 0; err_assert_reset: qcom_ep_reset_assert(pcie); err_disable_phy: phy_power_off(pcie->phy); err_deinit: pcie->cfg->ops->deinit(pcie); return ret; } static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct qcom_pcie *pcie = to_qcom_pcie(pci); qcom_ep_reset_assert(pcie); phy_power_off(pcie->phy); pcie->cfg->ops->deinit(pcie); } static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .host_init = qcom_pcie_host_init, .host_deinit = qcom_pcie_host_deinit, }; /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ static const struct qcom_pcie_ops ops_2_1_0 
= { .get_resources = qcom_pcie_get_resources_2_1_0, .init = qcom_pcie_init_2_1_0, .post_init = qcom_pcie_post_init_2_1_0, .deinit = qcom_pcie_deinit_2_1_0, .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, }; /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ static const struct qcom_pcie_ops ops_1_0_0 = { .get_resources = qcom_pcie_get_resources_1_0_0, .init = qcom_pcie_init_1_0_0, .post_init = qcom_pcie_post_init_1_0_0, .deinit = qcom_pcie_deinit_1_0_0, .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, }; /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ static const struct qcom_pcie_ops ops_2_3_2 = { .get_resources = qcom_pcie_get_resources_2_3_2, .init = qcom_pcie_init_2_3_2, .post_init = qcom_pcie_post_init_2_3_2, .deinit = qcom_pcie_deinit_2_3_2, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ static const struct qcom_pcie_ops ops_2_4_0 = { .get_resources = qcom_pcie_get_resources_2_4_0, .init = qcom_pcie_init_2_4_0, .post_init = qcom_pcie_post_init_2_3_2, .deinit = qcom_pcie_deinit_2_4_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ static const struct qcom_pcie_ops ops_2_3_3 = { .get_resources = qcom_pcie_get_resources_2_3_3, .init = qcom_pcie_init_2_3_3, .post_init = qcom_pcie_post_init_2_3_3, .deinit = qcom_pcie_deinit_2_3_3, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */ static const struct qcom_pcie_ops ops_2_7_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, .post_init = qcom_pcie_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; /* Qcom IP rev.: 1.9.0 */ static const struct qcom_pcie_ops ops_1_9_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, .post_init = qcom_pcie_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, .config_sid = qcom_pcie_config_sid_1_9_0, }; /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ static const struct qcom_pcie_ops ops_2_9_0 = { .get_resources = qcom_pcie_get_resources_2_9_0, .init = qcom_pcie_init_2_9_0, .post_init = qcom_pcie_post_init_2_9_0, .deinit = qcom_pcie_deinit_2_9_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; static const struct qcom_pcie_cfg cfg_1_0_0 = { .ops = &ops_1_0_0, }; static const struct qcom_pcie_cfg cfg_1_9_0 = { .ops = &ops_1_9_0, }; static const struct qcom_pcie_cfg cfg_2_1_0 = { .ops = &ops_2_1_0, }; static const struct qcom_pcie_cfg cfg_2_3_2 = { .ops = &ops_2_3_2, }; static const struct qcom_pcie_cfg cfg_2_3_3 = { .ops = &ops_2_3_3, }; static const struct qcom_pcie_cfg cfg_2_4_0 = { .ops = &ops_2_4_0, }; static const struct qcom_pcie_cfg cfg_2_7_0 = { .ops = &ops_2_7_0, }; static const struct qcom_pcie_cfg cfg_2_9_0 = { .ops = &ops_2_9_0, }; static const struct dw_pcie_ops dw_pcie_ops = { .link_up = qcom_pcie_link_up, .start_link = qcom_pcie_start_link, }; static int qcom_pcie_icc_init(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; int ret; pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); if (IS_ERR(pcie->icc_mem)) return PTR_ERR(pcie->icc_mem); /* * Some Qualcomm platforms require interconnect bandwidth constraints * to be set before enabling interconnect clocks. * * Set an initial peak bandwidth corresponding to single-lane Gen 1 * for the pcie-mem path. 
*/ ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250)); if (ret) { dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", ret); return ret; } return 0; } static void qcom_pcie_icc_update(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; u32 offset, status, bw; int speed, width; int ret; if (!pcie->icc_mem) return; offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); /* Only update constraints if link is up. */ if (!(status & PCI_EXP_LNKSTA_DLLLA)) return; speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status); width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); switch (speed) { case 1: bw = MBps_to_icc(250); break; case 2: bw = MBps_to_icc(500); break; default: WARN_ON_ONCE(1); fallthrough; case 3: bw = MBps_to_icc(985); break; } ret = icc_set_bw(pcie->icc_mem, 0, width * bw); if (ret) { dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", ret); } } static int qcom_pcie_link_transition_count(struct seq_file *s, void *data) { struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private); seq_printf(s, "L0s transition count: %u\n", readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); seq_printf(s, "L1 transition count: %u\n", readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); seq_printf(s, "L1.1 transition count: %u\n", readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); seq_printf(s, "L1.2 transition count: %u\n", readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); seq_printf(s, "L2 transition count: %u\n", readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); return 0; } static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; char *name; name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); if (!name) return; pcie->debugfs = debugfs_create_dir(name, NULL); debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs, qcom_pcie_link_transition_count); } static int qcom_pcie_probe(struct platform_device *pdev) { const struct qcom_pcie_cfg *pcie_cfg; struct device *dev = &pdev->dev; struct qcom_pcie *pcie; struct dw_pcie_rp *pp; struct resource *res; struct dw_pcie *pci; int ret; pcie_cfg = of_device_get_match_data(dev); if (!pcie_cfg || !pcie_cfg->ops) { dev_err(dev, "Invalid platform data\n"); return -EINVAL; } pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_pm_runtime_put; pci->dev = dev; pci->ops = &dw_pcie_ops; pp = &pci->pp; pcie->pci = pci; pcie->cfg = pcie_cfg; pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); if (IS_ERR(pcie->reset)) { ret = PTR_ERR(pcie->reset); goto err_pm_runtime_put; } pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie->parf)) { ret = PTR_ERR(pcie->parf); goto err_pm_runtime_put; } pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); if (IS_ERR(pcie->elbi)) { ret = PTR_ERR(pcie->elbi); goto err_pm_runtime_put; } /* MHI region is optional */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi"); if (res) { pcie->mhi = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->mhi)) { ret = PTR_ERR(pcie->mhi); goto err_pm_runtime_put; } } pcie->phy = devm_phy_optional_get(dev, "pciephy"); if (IS_ERR(pcie->phy)) { ret = PTR_ERR(pcie->phy); goto err_pm_runtime_put; } ret = qcom_pcie_icc_init(pcie); if 
(ret) goto err_pm_runtime_put; ret = pcie->cfg->ops->get_resources(pcie); if (ret) goto err_pm_runtime_put; pp->ops = &qcom_pcie_dw_ops; ret = phy_init(pcie->phy); if (ret) goto err_pm_runtime_put; platform_set_drvdata(pdev, pcie); ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "cannot initialize host\n"); goto err_phy_exit; } qcom_pcie_icc_update(pcie); if (pcie->mhi) qcom_pcie_init_debugfs(pcie); return 0; err_phy_exit: phy_exit(pcie->phy); err_pm_runtime_put: pm_runtime_put(dev); pm_runtime_disable(dev); return ret; } static int qcom_pcie_suspend_noirq(struct device *dev) { struct qcom_pcie *pcie = dev_get_drvdata(dev); int ret; /* * Set minimum bandwidth required to keep data path functional during * suspend. */ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1)); if (ret) { dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret); return ret; } /* * Turn OFF the resources only for controllers without active PCIe * devices. For controllers with active devices, the resources are kept * ON and the link is expected to be in L0/L1 (sub)states. * * Turning OFF the resources for controllers with active PCIe devices * will trigger access violation during the end of the suspend cycle, * as kernel tries to access the PCIe devices config space for masking * MSIs. * * Also, it is not desirable to put the link into L2/L3 state as that * implies VDD supply will be removed and the devices may go into * powerdown state. This will affect the lifetime of the storage devices * like NVMe. */ if (!dw_pcie_link_up(pcie->pci)) { qcom_pcie_host_deinit(&pcie->pci->pp); pcie->suspended = true; } return 0; } static int qcom_pcie_resume_noirq(struct device *dev) { struct qcom_pcie *pcie = dev_get_drvdata(dev); int ret; if (pcie->suspended) { ret = qcom_pcie_host_init(&pcie->pci->pp); if (ret) return ret; pcie->suspended = false; } qcom_pcie_icc_update(pcie); return 0; } static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0}, { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 }, { } }; static void qcom_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, 
qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); static const struct dev_pm_ops qcom_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq) }; static struct platform_driver qcom_pcie_driver = { .probe = qcom_pcie_probe, .driver = { .name = "qcom-pcie", .suppress_bind_attrs = true, .of_match_table = qcom_pcie_match, .pm = &qcom_pcie_pm_ops, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; builtin_platform_driver(qcom_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-qcom.c
// SPDX-License-Identifier: GPL-2.0 /* * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs * * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com * * Authors: Kishon Vijay Abraham I <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/resource.h> #include <linux/types.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/gpio/consumer.h> #include "../../pci.h" #include "pcie-designware.h" /* PCIe controller wrapper DRA7XX configuration registers */ #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 #define ERR_SYS BIT(0) #define ERR_FATAL BIT(1) #define ERR_NONFATAL BIT(2) #define ERR_COR BIT(3) #define ERR_AXI BIT(4) #define ERR_ECRC BIT(5) #define PME_TURN_OFF BIT(8) #define PME_TO_ACK BIT(9) #define PM_PME BIT(10) #define LINK_REQ_RST BIT(11) #define LINK_UP_EVT BIT(12) #define CFG_BME_EVT BIT(13) #define CFG_MSE_EVT BIT(14) #define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 #define INTA BIT(0) #define INTB BIT(1) #define INTC BIT(2) #define INTD BIT(3) #define MSI BIT(4) #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 #define DEVICE_TYPE_EP 0x0 #define DEVICE_TYPE_LEG_EP 0x1 #define DEVICE_TYPE_RC 0x4 #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 #define LTSSM_EN 0x1 #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C #define LINK_UP BIT(16) #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 #define PCIECTRL_TI_CONF_MSI_XMT 0x012c #define MSI_REQ_GRANT BIT(0) #define MSI_VECTOR_SHIFT 7 #define PCIE_1LANE_2LANE_SELECTION BIT(13) #define PCIE_B1C0_MODE_SEL BIT(2) #define PCIE_B0_B1_TSYNCEN BIT(0) struct dra7xx_pcie { struct dw_pcie *pci; void __iomem *base; /* DT ti_conf */ int phy_count; /* DT phy-names count */ struct phy **phy; struct irq_domain *irq_domain; struct clk *clk; enum dw_pcie_device_mode mode; }; struct dra7xx_pcie_of_data { enum dw_pcie_device_mode mode; u32 b1co_mode_sel_mask; }; #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) { return readl(pcie->base + offset); } static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, u32 value) { writel(value, pcie->base + offset); } static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) { return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; } static int dra7xx_pcie_link_up(struct dw_pcie *pci) { struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); return !!(reg & LINK_UP); } static void dra7xx_pcie_stop_link(struct dw_pcie *pci) { struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); u32 reg; reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); reg &= ~LTSSM_EN; dra7xx_pcie_writel(dra7xx, 
PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); } static int dra7xx_pcie_establish_link(struct dw_pcie *pci) { struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); struct device *dev = pci->dev; u32 reg; if (dw_pcie_link_up(pci)) { dev_err(dev, "link is already up\n"); return 0; } reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); reg |= LTSSM_EN; dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); return 0; } static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, LEG_EP_INTERRUPTS | MSI); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI | LEG_EP_INTERRUPTS); } static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, INTERRUPTS); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS); } static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_enable_wrapper_interrupts(dra7xx); dra7xx_pcie_enable_msi_interrupts(dra7xx); } static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); dra7xx_pcie_enable_interrupts(dra7xx); return 0; } static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops intx_domain_ops = { .map = dra7xx_pcie_intx_map, .xlate = pci_irqd_intx_xlate, }; static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned long val; int pos; val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + (index * MSI_REG_CTRL_BLOCK_SIZE)); if (!val) return 0; pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL); while (pos != MAX_MSI_IRQS_PER_CTRL) { generic_handle_domain_irq(pp->irq_domain, (index * MAX_MSI_IRQS_PER_CTRL) + pos); pos++; pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos); } return 1; } static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); int ret, i, count, num_ctrls; num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; /** * Need to make sure all MSI status bits read 0 before exiting. * Else, new MSI IRQs are not registered by the wrapper. Have an * upperbound for the loop and exit the IRQ in case of IRQ flood * to avoid locking up system in interrupt context. 
*/ count = 0; do { ret = 0; for (i = 0; i < num_ctrls; i++) ret |= dra7xx_pcie_handle_msi(pp, i); count++; } while (ret && count <= 1000); if (count > 1000) dev_warn_ratelimited(pci->dev, "Too many MSI IRQs to handle\n"); } static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct dra7xx_pcie *dra7xx; struct dw_pcie_rp *pp; struct dw_pcie *pci; unsigned long reg; u32 bit; chained_irq_enter(chip, desc); pp = irq_desc_get_handler_data(desc); pci = to_dw_pcie_from_pp(pp); dra7xx = to_dra7xx_pcie(pci); reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); switch (reg) { case MSI: dra7xx_pcie_handle_msi_irq(pp); break; case INTA: case INTB: case INTC: case INTD: for_each_set_bit(bit, &reg, PCI_NUM_INTX) generic_handle_domain_irq(dra7xx->irq_domain, bit); break; } chained_irq_exit(chip, desc); } static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) { struct dra7xx_pcie *dra7xx = arg; struct dw_pcie *pci = dra7xx->pci; struct device *dev = pci->dev; struct dw_pcie_ep *ep = &pci->ep; u32 reg; reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); if (reg & ERR_SYS) dev_dbg(dev, "System Error\n"); if (reg & ERR_FATAL) dev_dbg(dev, "Fatal Error\n"); if (reg & ERR_NONFATAL) dev_dbg(dev, "Non Fatal Error\n"); if (reg & ERR_COR) dev_dbg(dev, "Correctable Error\n"); if (reg & ERR_AXI) dev_dbg(dev, "AXI tag lookup fatal Error\n"); if (reg & ERR_ECRC) dev_dbg(dev, "ECRC Error\n"); if (reg & PME_TURN_OFF) dev_dbg(dev, "Power Management Event Turn-Off message received\n"); if (reg & PME_TO_ACK) dev_dbg(dev, "Power Management Turn-Off Ack message received\n"); if (reg & PM_PME) dev_dbg(dev, "PM Power Management Event message received\n"); if (reg & LINK_REQ_RST) dev_dbg(dev, "Link Request Reset\n"); if (reg & LINK_UP_EVT) { if (dra7xx->mode == DW_PCIE_EP_TYPE) dw_pcie_ep_linkup(ep); dev_dbg(dev, "Link-up state change\n"); } if (reg & CFG_BME_EVT) dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); if (reg & CFG_MSE_EVT) dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); return IRQ_HANDLED; } static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); struct device_node *node = dev->of_node; struct device_node *pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); return -ENODEV; } irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, pp); dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, pp); of_node_put(pcie_intc_node); if (!dra7xx->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; } return 0; } static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { .host_init = dra7xx_pcie_host_init, }; static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); enum pci_barno bar; for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) dw_pcie_ep_reset_bar(pci, bar); dra7xx_pcie_enable_wrapper_interrupts(dra7xx); } static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); mdelay(1); dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); } 
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, u8 interrupt_num) { u32 reg; reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; reg |= MSI_REQ_GRANT; dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); } static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); switch (type) { case PCI_EPC_IRQ_LEGACY: dra7xx_pcie_raise_legacy_irq(dra7xx); break; case PCI_EPC_IRQ_MSI: dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); break; default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); } return 0; } static const struct pci_epc_features dra7xx_pcie_epc_features = { .linkup_notifier = true, .msi_capable = true, .msix_capable = false, }; static const struct pci_epc_features* dra7xx_pcie_get_features(struct dw_pcie_ep *ep) { return &dra7xx_pcie_epc_features; } static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = dra7xx_pcie_ep_init, .raise_irq = dra7xx_pcie_raise_irq, .get_features = dra7xx_pcie_get_features, }; static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, struct platform_device *pdev) { int ret; struct dw_pcie_ep *ep; struct device *dev = &pdev->dev; struct dw_pcie *pci = dra7xx->pci; ep = &pci->ep; ep->ops = &pcie_ep_ops; pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "ep_dbics2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); ret = dw_pcie_ep_init(ep); if (ret) { dev_err(dev, "failed to initialize endpoint\n"); return ret; } return 0; } static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, struct platform_device *pdev) { int ret; struct dw_pcie *pci = dra7xx->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = pci->dev; pp->irq = platform_get_irq(pdev, 1); if (pp->irq < 0) return pp->irq; /* MSI IRQ is muxed */ pp->msi_irq[0] = -ENODEV; ret = dra7xx_pcie_init_irq_domain(pp); if (ret < 0) return ret; pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); pp->ops = &dra7xx_pcie_host_ops; ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); return ret; } return 0; } static const struct dw_pcie_ops dw_pcie_ops = { .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, .start_link = dra7xx_pcie_establish_link, .stop_link = dra7xx_pcie_stop_link, .link_up = dra7xx_pcie_link_up, }; static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) { int phy_count = dra7xx->phy_count; while (phy_count--) { phy_power_off(dra7xx->phy[phy_count]); phy_exit(dra7xx->phy[phy_count]); } } static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) { int phy_count = dra7xx->phy_count; int ret; int i; for (i = 0; i < phy_count; i++) { ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE); if (ret < 0) goto err_phy; ret = phy_init(dra7xx->phy[i]); if (ret < 0) goto err_phy; ret = phy_power_on(dra7xx->phy[i]); if (ret < 0) { phy_exit(dra7xx->phy[i]); goto err_phy; } } return 0; err_phy: while (--i >= 0) { phy_power_off(dra7xx->phy[i]); phy_exit(dra7xx->phy[i]); } return ret; } static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { .mode = DW_PCIE_RC_TYPE, }; static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { .mode = DW_PCIE_EP_TYPE, }; static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = { .b1co_mode_sel_mask = BIT(2), .mode = 
DW_PCIE_RC_TYPE, }; static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = { .b1co_mode_sel_mask = GENMASK(3, 2), .mode = DW_PCIE_RC_TYPE, }; static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = { .b1co_mode_sel_mask = BIT(2), .mode = DW_PCIE_EP_TYPE, }; static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = { .b1co_mode_sel_mask = GENMASK(3, 2), .mode = DW_PCIE_EP_TYPE, }; static const struct of_device_id of_dra7xx_pcie_match[] = { { .compatible = "ti,dra7-pcie", .data = &dra7xx_pcie_rc_of_data, }, { .compatible = "ti,dra7-pcie-ep", .data = &dra7xx_pcie_ep_of_data, }, { .compatible = "ti,dra746-pcie-rc", .data = &dra746_pcie_rc_of_data, }, { .compatible = "ti,dra726-pcie-rc", .data = &dra726_pcie_rc_of_data, }, { .compatible = "ti,dra746-pcie-ep", .data = &dra746_pcie_ep_of_data, }, { .compatible = "ti,dra726-pcie-ep", .data = &dra726_pcie_ep_of_data, }, {}, }; MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match); /* * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 * @dra7xx: the dra7xx device where the workaround should be applied * * Access to the PCIe slave port that are not 32-bit aligned will result * in incorrect mapping to TLP Address and Byte enable fields. Therefore, * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or * 0x3. * * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. */ static int dra7xx_pcie_unaligned_memaccess(struct device *dev) { int ret; struct device_node *np = dev->of_node; struct of_phandle_args args; struct regmap *regmap; regmap = syscon_regmap_lookup_by_phandle(np, "ti,syscon-unaligned-access"); if (IS_ERR(regmap)) { dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); return -EINVAL; } ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", 2, 0, &args); if (ret) { dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); return ret; } ret = regmap_update_bits(regmap, args.args[0], args.args[1], args.args[1]); if (ret) dev_err(dev, "failed to enable unaligned access\n"); of_node_put(args.np); return ret; } static int dra7xx_pcie_configure_two_lane(struct device *dev, u32 b1co_mode_sel_mask) { struct device_node *np = dev->of_node; struct regmap *pcie_syscon; unsigned int pcie_reg; u32 mask; u32 val; pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); if (IS_ERR(pcie_syscon)) { dev_err(dev, "unable to get ti,syscon-lane-sel\n"); return -EINVAL; } if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, &pcie_reg)) { dev_err(dev, "couldn't get lane selection reg offset\n"); return -EINVAL; } mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; regmap_update_bits(pcie_syscon, pcie_reg, mask, val); return 0; } static int dra7xx_pcie_probe(struct platform_device *pdev) { u32 reg; int ret; int irq; int i; int phy_count; struct phy **phy; struct device_link **link; void __iomem *base; struct dw_pcie *pci; struct dra7xx_pcie *dra7xx; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; char name[10]; struct gpio_desc *reset; const struct dra7xx_pcie_of_data *data; enum dw_pcie_device_mode mode; u32 b1co_mode_sel_mask; data = of_device_get_match_data(dev); if (!data) return -EINVAL; mode = (enum dw_pcie_device_mode)data->mode; b1co_mode_sel_mask = data->b1co_mode_sel_mask; dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); if (!dra7xx) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->ops = 
&dw_pcie_ops; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; base = devm_platform_ioremap_resource_byname(pdev, "ti_conf"); if (IS_ERR(base)) return PTR_ERR(base); phy_count = of_property_count_strings(np, "phy-names"); if (phy_count < 0) { dev_err(dev, "unable to find the strings\n"); return phy_count; } phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; dra7xx->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(dra7xx->clk)) return dev_err_probe(dev, PTR_ERR(dra7xx->clk), "clock request failed"); ret = clk_prepare_enable(dra7xx->clk); if (ret) return ret; for (i = 0; i < phy_count; i++) { snprintf(name, sizeof(name), "pcie-phy%d", i); phy[i] = devm_phy_get(dev, name); if (IS_ERR(phy[i])) return PTR_ERR(phy[i]); link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); if (!link[i]) { ret = -EINVAL; goto err_link; } } dra7xx->base = base; dra7xx->phy = phy; dra7xx->pci = pci; dra7xx->phy_count = phy_count; if (phy_count == 2) { ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); if (ret < 0) dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ } ret = dra7xx_pcie_enable_phy(dra7xx); if (ret) { dev_err(dev, "failed to enable phy\n"); return ret; } platform_set_drvdata(pdev, dra7xx); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync failed\n"); goto err_get_sync; } reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); if (IS_ERR(reset)) { ret = PTR_ERR(reset); dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); goto err_gpio; } reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); reg &= ~LTSSM_EN; dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); switch (mode) { case DW_PCIE_RC_TYPE: if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { ret = -ENODEV; goto err_gpio; } dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_RC); ret = dra7xx_pcie_unaligned_memaccess(dev); if (ret) dev_err(dev, "WA for Errata i870 not applied\n"); ret = dra7xx_add_pcie_port(dra7xx, pdev); if (ret < 0) goto err_gpio; break; case DW_PCIE_EP_TYPE: if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { ret = -ENODEV; goto err_gpio; } dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_EP); ret = dra7xx_pcie_unaligned_memaccess(dev); if (ret) goto err_gpio; ret = dra7xx_add_pcie_ep(dra7xx, pdev); if (ret < 0) goto err_gpio; break; default: dev_err(dev, "INVALID device type %d\n", mode); } dra7xx->mode = mode; ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler, IRQF_SHARED, "dra7xx-pcie-main", dra7xx); if (ret) { dev_err(dev, "failed to request irq\n"); goto err_gpio; } return 0; err_gpio: err_get_sync: pm_runtime_put(dev); pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); err_link: while (--i >= 0) device_link_del(link[i]); return ret; } static int dra7xx_pcie_suspend(struct device *dev) { struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); struct dw_pcie *pci = dra7xx->pci; u32 val; if (dra7xx->mode != DW_PCIE_RC_TYPE) return 0; /* clear MSE */ val = dw_pcie_readl_dbi(pci, PCI_COMMAND); val &= ~PCI_COMMAND_MEMORY; dw_pcie_writel_dbi(pci, PCI_COMMAND, val); return 0; } static int dra7xx_pcie_resume(struct device *dev) { struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); struct dw_pcie *pci = dra7xx->pci; u32 val; if (dra7xx->mode != DW_PCIE_RC_TYPE) return 0; /* set MSE */ val = dw_pcie_readl_dbi(pci, PCI_COMMAND); val |= 
PCI_COMMAND_MEMORY; dw_pcie_writel_dbi(pci, PCI_COMMAND, val); return 0; } static int dra7xx_pcie_suspend_noirq(struct device *dev) { struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); dra7xx_pcie_disable_phy(dra7xx); return 0; } static int dra7xx_pcie_resume_noirq(struct device *dev) { struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); int ret; ret = dra7xx_pcie_enable_phy(dra7xx); if (ret) { dev_err(dev, "failed to enable phy\n"); return ret; } return 0; } static void dra7xx_pcie_shutdown(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); int ret; dra7xx_pcie_stop_link(dra7xx->pci); ret = pm_runtime_put_sync(dev); if (ret < 0) dev_dbg(dev, "pm_runtime_put_sync failed\n"); pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); clk_disable_unprepare(dra7xx->clk); } static const struct dev_pm_ops dra7xx_pcie_pm_ops = { SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, dra7xx_pcie_resume_noirq) }; static struct platform_driver dra7xx_pcie_driver = { .probe = dra7xx_pcie_probe, .driver = { .name = "dra7-pcie", .of_match_table = of_dra7xx_pcie_match, .suppress_bind_attrs = true, .pm = &dra7xx_pcie_pm_ops, }, .shutdown = dra7xx_pcie_shutdown, }; module_platform_driver(dra7xx_pcie_driver); MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>"); MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs"); MODULE_LICENSE("GPL v2");
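/*
 * Clarifying note, added for this listing: the "ti,syscon-unaligned-access"
 * property parsed by dra7xx_pcie_unaligned_memaccess() above is a phandle
 * plus two fixed arguments, interpreted as <&syscon reg_offset bit_mask>.
 * A device tree entry of the (assumed, illustrative) form
 *
 *	ti,syscon-unaligned-access = <&scm_conf1 0x14 2>;
 *
 * therefore results in regmap_update_bits(regmap, 0x14, 0x2, 0x2), setting
 * the legacy-mode enable bit described in the Errata i870 comment. The node
 * label and values here are examples only, not taken from a specific board.
 */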
linux-master
drivers/pci/controller/dwc/pci-dra7xx.c
// SPDX-License-Identifier: GPL-2.0 /* * FU740 DesignWare PCIe Controller integration * Copyright (C) 2019-2021 SiFive, Inc. * Paul Walmsley * Greentime Hu * * Based in part on the i.MX6 PCIe host controller shim which is: * * Copyright (C) 2013 Kosagi * https://www.kosagi.com */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/reset.h> #include "pcie-designware.h" #define to_fu740_pcie(x) dev_get_drvdata((x)->dev) struct fu740_pcie { struct dw_pcie pci; void __iomem *mgmt_base; struct gpio_desc *reset; struct gpio_desc *pwren; struct clk *pcie_aux; struct reset_control *rst; }; #define SIFIVE_DEVICESRESETREG 0x28 #define PCIEX8MGMT_PERST_N 0x0 #define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10 #define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18 #define PCIEX8MGMT_DEVICE_TYPE 0x708 #define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860 #define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870 #define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878 #define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880 #define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888 #define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890 #define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898 #define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0 #define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0 #define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8 #define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0 #define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8 #define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0 #define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8 #define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0) #define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5) #define PCIEX8MGMT_PHY_TERM_EN BIT(9) #define PCIEX8MGMT_PHY_TERM_ACDC BIT(10) #define PCIEX8MGMT_PHY_EN BIT(11) #define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\ PCIEX8MGMT_PHY_LOS_THRSHLD|\ PCIEX8MGMT_PHY_TERM_EN|\ PCIEX8MGMT_PHY_TERM_ACDC|\ PCIEX8MGMT_PHY_EN) #define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008 #define PCIEX8MGMT_PHY_LANE_OFF 0x100 #define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0) #define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1) #define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2) #define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3) static void fu740_pcie_assert_reset(struct fu740_pcie *afp) { /* Assert PERST_N GPIO */ gpiod_set_value_cansleep(afp->reset, 0); /* Assert controller PERST_N */ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N); } static void fu740_pcie_deassert_reset(struct fu740_pcie *afp) { /* Deassert controller PERST_N */ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N); /* Deassert PERST_N GPIO */ gpiod_set_value_cansleep(afp->reset, 1); } static void fu740_pcie_power_on(struct fu740_pcie *afp) { gpiod_set_value_cansleep(afp->pwren, 1); /* * Ensure that PERST has been asserted for at least 100 ms. 
* Section 2.2 of PCI Express Card Electromechanical Specification * Revision 3.0 */ msleep(100); } static void fu740_pcie_drive_reset(struct fu740_pcie *afp) { fu740_pcie_assert_reset(afp); fu740_pcie_power_on(afp); fu740_pcie_deassert_reset(afp); } static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr, const uint16_t wrdata, struct fu740_pcie *afp) { struct device *dev = afp->pci.dev; void __iomem *phy_cr_para_addr; void __iomem *phy_cr_para_wr_data; void __iomem *phy_cr_para_wr_en; void __iomem *phy_cr_para_ack; int ret, val; /* Setup */ if (phy) { phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR; phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA; phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN; phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK; } else { phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR; phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA; phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN; phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK; } writel_relaxed(addr, phy_cr_para_addr); writel_relaxed(wrdata, phy_cr_para_wr_data); writel_relaxed(1, phy_cr_para_wr_en); /* Wait for wait_idle */ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000); if (ret) dev_warn(dev, "Wait for wait_idle state failed!\n"); /* Clear */ writel_relaxed(0, phy_cr_para_wr_en); /* Wait for ~wait_idle */ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000); if (ret) dev_warn(dev, "Wait for !wait_idle state failed!\n"); } static void fu740_pcie_init_phy(struct fu740_pcie *afp) { /* Enable phy cr_para_sel interfaces */ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL); writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL); /* * Wait 10 cr_para cycles to guarantee that the registers are ready * to be edited. */ ndelay(10); /* Set PHY AC termination mode */ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); } static int fu740_pcie_start_link(struct dw_pcie *pci) { struct device *dev = pci->dev; struct fu740_pcie *afp = dev_get_drvdata(dev); u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); int ret; u32 orig, tmp; /* * Force 2.5GT/s when starting the link, due to some devices not * probing at higher speeds. This happens with the PCIe switch * on the Unmatched board when U-Boot has not initialised the PCIe. * The fix in U-Boot is to force 2.5GT/s, which then gets cleared * by the soft reset done by this driver. 
*/ dev_dbg(dev, "cap_exp at %x\n", cap_exp); dw_pcie_dbi_ro_wr_en(pci); tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP); orig = tmp & PCI_EXP_LNKCAP_SLS; tmp &= ~PCI_EXP_LNKCAP_SLS; tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp); /* Enable LTSSM */ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE); ret = dw_pcie_wait_for_link(pci); if (ret) { dev_err(dev, "error: link did not start\n"); goto err; } tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP); if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) { dev_dbg(dev, "changing speed back to original\n"); tmp &= ~PCI_EXP_LNKCAP_SLS; tmp |= orig; dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp); tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); tmp |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); ret = dw_pcie_wait_for_link(pci); if (ret) { dev_err(dev, "error: link did not start at new speed\n"); goto err; } } ret = 0; err: WARN_ON(ret); /* we assume that errors will be very rare */ dw_pcie_dbi_ro_wr_dis(pci); return ret; } static int fu740_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct fu740_pcie *afp = to_fu740_pcie(pci); struct device *dev = pci->dev; int ret; /* Power on reset */ fu740_pcie_drive_reset(afp); /* Enable pcieauxclk */ ret = clk_prepare_enable(afp->pcie_aux); if (ret) { dev_err(dev, "unable to enable pcie_aux clock\n"); return ret; } /* * Assert hold_phy_rst (hold the controller LTSSM in reset after * power_up_rst_n for register programming with cr_para) */ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST); /* Deassert power_up_rst_n */ ret = reset_control_deassert(afp->rst); if (ret) { dev_err(dev, "unable to deassert pcie_power_up_rst_n\n"); return ret; } fu740_pcie_init_phy(afp); /* Disable pcieauxclk */ clk_disable_unprepare(afp->pcie_aux); /* Clear hold_phy_rst */ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST); /* Enable pcieauxclk */ clk_prepare_enable(afp->pcie_aux); /* Set RC mode */ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE); return 0; } static const struct dw_pcie_host_ops fu740_pcie_host_ops = { .host_init = fu740_pcie_host_init, }; static const struct dw_pcie_ops dw_pcie_ops = { .start_link = fu740_pcie_start_link, }; static int fu740_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; struct fu740_pcie *afp; afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL); if (!afp) return -ENOMEM; pci = &afp->pci; pci->dev = dev; pci->ops = &dw_pcie_ops; pci->pp.ops = &fu740_pcie_host_ops; pci->pp.num_vectors = MAX_MSI_IRQS; /* SiFive specific region: mgmt */ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt"); if (IS_ERR(afp->mgmt_base)) return PTR_ERR(afp->mgmt_base); /* Fetch GPIOs */ afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(afp->reset)) return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n"); afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW); if (IS_ERR(afp->pwren)) return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n"); /* Fetch clocks */ afp->pcie_aux = devm_clk_get(dev, "pcie_aux"); if (IS_ERR(afp->pcie_aux)) return dev_err_probe(dev, PTR_ERR(afp->pcie_aux), "pcie_aux clock source missing or invalid\n"); /* Fetch reset */ afp->rst = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(afp->rst)) return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to 
get reset\n"); platform_set_drvdata(pdev, afp); return dw_pcie_host_init(&pci->pp); } static void fu740_pcie_shutdown(struct platform_device *pdev) { struct fu740_pcie *afp = platform_get_drvdata(pdev); /* Bring down link, so bootloader gets clean state in case of reboot */ fu740_pcie_assert_reset(afp); } static const struct of_device_id fu740_pcie_of_match[] = { { .compatible = "sifive,fu740-pcie", }, {}, }; static struct platform_driver fu740_pcie_driver = { .driver = { .name = "fu740-pcie", .of_match_table = fu740_pcie_of_match, .suppress_bind_attrs = true, }, .probe = fu740_pcie_probe, .shutdown = fu740_pcie_shutdown, }; builtin_platform_driver(fu740_pcie_driver);
linux-master
drivers/pci/controller/dwc/pcie-fu740.c
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Samsung Exynos SoCs * * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd. * https://www.samsung.com * * Author: Jingoo Han <[email protected]> * Jaehoon Chung <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include "pcie-designware.h" #define to_exynos_pcie(x) dev_get_drvdata((x)->dev) /* PCIe ELBI registers */ #define PCIE_IRQ_PULSE 0x000 #define IRQ_INTA_ASSERT BIT(0) #define IRQ_INTB_ASSERT BIT(2) #define IRQ_INTC_ASSERT BIT(4) #define IRQ_INTD_ASSERT BIT(6) #define PCIE_IRQ_LEVEL 0x004 #define PCIE_IRQ_SPECIAL 0x008 #define PCIE_IRQ_EN_PULSE 0x00c #define PCIE_IRQ_EN_LEVEL 0x010 #define PCIE_IRQ_EN_SPECIAL 0x014 #define PCIE_SW_WAKE 0x018 #define PCIE_BUS_EN BIT(1) #define PCIE_CORE_RESET 0x01c #define PCIE_CORE_RESET_ENABLE BIT(0) #define PCIE_STICKY_RESET 0x020 #define PCIE_NONSTICKY_RESET 0x024 #define PCIE_APP_INIT_RESET 0x028 #define PCIE_APP_LTSSM_ENABLE 0x02c #define PCIE_ELBI_RDLH_LINKUP 0x074 #define PCIE_ELBI_XMLH_LINKUP BIT(4) #define PCIE_ELBI_LTSSM_ENABLE 0x1 #define PCIE_ELBI_SLV_AWMISC 0x11c #define PCIE_ELBI_SLV_ARMISC 0x120 #define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) struct exynos_pcie { struct dw_pcie pci; void __iomem *elbi_base; struct clk *clk; struct clk *bus_clk; struct phy *phy; struct regulator_bulk_data supplies[2]; }; static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep) { struct device *dev = ep->pci.dev; int ret; ret = clk_prepare_enable(ep->clk); if (ret) { dev_err(dev, "cannot enable pcie rc clock"); return ret; } ret = clk_prepare_enable(ep->bus_clk); if (ret) { dev_err(dev, "cannot enable pcie bus clock"); goto err_bus_clk; } return 0; err_bus_clk: clk_disable_unprepare(ep->clk); return ret; } static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep) { clk_disable_unprepare(ep->bus_clk); clk_disable_unprepare(ep->clk); } static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg) { writel(val, base + reg); } static u32 exynos_pcie_readl(void __iomem *base, u32 reg) { return readl(base + reg); } static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on) { u32 val; val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC); if (on) val |= PCIE_ELBI_SLV_DBI_ENABLE; else val &= ~PCIE_ELBI_SLV_DBI_ENABLE; exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC); } static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on) { u32 val; val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC); if (on) val |= PCIE_ELBI_SLV_DBI_ENABLE; else val &= ~PCIE_ELBI_SLV_DBI_ENABLE; exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC); } static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep) { u32 val; val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET); val &= ~PCIE_CORE_RESET_ENABLE; exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET); exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET); exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET); } static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep) { u32 val; val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET); val |= PCIE_CORE_RESET_ENABLE; exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET); exynos_pcie_writel(ep->elbi_base, 1, 
PCIE_STICKY_RESET); exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET); exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET); exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET); } static int exynos_pcie_start_link(struct dw_pcie *pci) { struct exynos_pcie *ep = to_exynos_pcie(pci); u32 val; val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE); val &= ~PCIE_BUS_EN; exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE); /* assert LTSSM enable */ exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE, PCIE_APP_LTSSM_ENABLE); return 0; } static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep) { u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE); exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE); } static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) { struct exynos_pcie *ep = arg; exynos_pcie_clear_irq_pulse(ep); return IRQ_HANDLED; } static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep) { u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | IRQ_INTC_ASSERT | IRQ_INTD_ASSERT; exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE); exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL); exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL); } static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size) { struct exynos_pcie *ep = to_exynos_pcie(pci); u32 val; exynos_pcie_sideband_dbi_r_mode(ep, true); dw_pcie_read(base + reg, size, &val); exynos_pcie_sideband_dbi_r_mode(ep, false); return val; } static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, size_t size, u32 val) { struct exynos_pcie *ep = to_exynos_pcie(pci); exynos_pcie_sideband_dbi_w_mode(ep, true); dw_pcie_write(base + reg, size, val); exynos_pcie_sideband_dbi_w_mode(ep, false); } static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; *val = dw_pcie_read_dbi(pci, where, size); return PCIBIOS_SUCCESSFUL; } static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); if (PCI_SLOT(devfn)) return PCIBIOS_DEVICE_NOT_FOUND; dw_pcie_write_dbi(pci, where, size, val); return PCIBIOS_SUCCESSFUL; } static struct pci_ops exynos_pci_ops = { .read = exynos_pcie_rd_own_conf, .write = exynos_pcie_wr_own_conf, }; static int exynos_pcie_link_up(struct dw_pcie *pci) { struct exynos_pcie *ep = to_exynos_pcie(pci); u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP); return (val & PCIE_ELBI_XMLH_LINKUP); } static int exynos_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct exynos_pcie *ep = to_exynos_pcie(pci); pp->bridge->ops = &exynos_pci_ops; exynos_pcie_assert_core_reset(ep); phy_init(ep->phy); phy_power_on(ep->phy); exynos_pcie_deassert_core_reset(ep); exynos_pcie_enable_irq_pulse(ep); return 0; } static const struct dw_pcie_host_ops exynos_pcie_host_ops = { .host_init = exynos_pcie_host_init, }; static int exynos_add_pcie_port(struct exynos_pcie *ep, struct platform_device *pdev) { struct dw_pcie *pci = &ep->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; int ret; pp->irq = platform_get_irq(pdev, 0); if (pp->irq < 0) return pp->irq; ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, IRQF_SHARED, "exynos-pcie", ep); if (ret) { dev_err(dev, "failed to request irq\n"); 
return ret; } pp->ops = &exynos_pcie_host_ops; pp->msi_irq[0] = -ENODEV; ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); return ret; } return 0; } static const struct dw_pcie_ops dw_pcie_ops = { .read_dbi = exynos_pcie_read_dbi, .write_dbi = exynos_pcie_write_dbi, .link_up = exynos_pcie_link_up, .start_link = exynos_pcie_start_link, }; static int exynos_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_pcie *ep; struct device_node *np = dev->of_node; int ret; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; ep->pci.dev = dev; ep->pci.ops = &dw_pcie_ops; ep->phy = devm_of_phy_get(dev, np, NULL); if (IS_ERR(ep->phy)) return PTR_ERR(ep->phy); /* External Local Bus interface (ELBI) registers */ ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi"); if (IS_ERR(ep->elbi_base)) return PTR_ERR(ep->elbi_base); ep->clk = devm_clk_get(dev, "pcie"); if (IS_ERR(ep->clk)) { dev_err(dev, "Failed to get pcie rc clock\n"); return PTR_ERR(ep->clk); } ep->bus_clk = devm_clk_get(dev, "pcie_bus"); if (IS_ERR(ep->bus_clk)) { dev_err(dev, "Failed to get pcie bus clock\n"); return PTR_ERR(ep->bus_clk); } ep->supplies[0].supply = "vdd18"; ep->supplies[1].supply = "vdd10"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies), ep->supplies); if (ret) return ret; ret = exynos_pcie_init_clk_resources(ep); if (ret) return ret; ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies); if (ret) return ret; platform_set_drvdata(pdev, ep); ret = exynos_add_pcie_port(ep, pdev); if (ret < 0) goto fail_probe; return 0; fail_probe: phy_exit(ep->phy); exynos_pcie_deinit_clk_resources(ep); regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies); return ret; } static int __exit exynos_pcie_remove(struct platform_device *pdev) { struct exynos_pcie *ep = platform_get_drvdata(pdev); dw_pcie_host_deinit(&ep->pci.pp); exynos_pcie_assert_core_reset(ep); phy_power_off(ep->phy); phy_exit(ep->phy); exynos_pcie_deinit_clk_resources(ep); regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies); return 0; } static int exynos_pcie_suspend_noirq(struct device *dev) { struct exynos_pcie *ep = dev_get_drvdata(dev); exynos_pcie_assert_core_reset(ep); phy_power_off(ep->phy); phy_exit(ep->phy); regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies); return 0; } static int exynos_pcie_resume_noirq(struct device *dev) { struct exynos_pcie *ep = dev_get_drvdata(dev); struct dw_pcie *pci = &ep->pci; struct dw_pcie_rp *pp = &pci->pp; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies); if (ret) return ret; /* exynos_pcie_host_init controls ep->phy */ exynos_pcie_host_init(pp); dw_pcie_setup_rc(pp); exynos_pcie_start_link(pci); return dw_pcie_wait_for_link(pci); } static const struct dev_pm_ops exynos_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq, exynos_pcie_resume_noirq) }; static const struct of_device_id exynos_pcie_of_match[] = { { .compatible = "samsung,exynos5433-pcie", }, { }, }; static struct platform_driver exynos_pcie_driver = { .probe = exynos_pcie_probe, .remove = __exit_p(exynos_pcie_remove), .driver = { .name = "exynos-pcie", .of_match_table = exynos_pcie_of_match, .pm = &exynos_pcie_pm_ops, }, }; module_platform_driver(exynos_pcie_driver); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
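/*
 * Note added for clarity, not from the original source: the
 * exynos_pcie_{rd,wr}_own_conf() callbacks above only serve the root bus,
 * where the single DesignWare root port appears as device 0; any other
 * devfn is reported as PCIBIOS_DEVICE_NOT_FOUND so the PCI core does not
 * mistake mirrored DBI reads for additional devices. The DBI access itself
 * is routed through the ELBI AWMISC/ARMISC sideband toggles implemented by
 * exynos_pcie_{read,write}_dbi().
 */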
linux-master
drivers/pci/controller/dwc/pci-exynos.c
// SPDX-License-Identifier: GPL-2.0 /* * PCIe host controller driver for Marvell Armada-8K SoCs * * Armada-8K PCIe Glue Layer Source Code * * Copyright (C) 2016 Marvell Technology Group Ltd. * * Author: Yehuda Yitshak <[email protected]> * Author: Shadi Ammouri <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/of_pci.h> #include "pcie-designware.h" #define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4 struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; struct phy *phy[ARMADA8K_PCIE_MAX_LANES]; unsigned int phy_count; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 #define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0) #define PCIE_APP_LTSSM_EN BIT(2) #define PCIE_DEVICE_TYPE_SHIFT 4 #define PCIE_DEVICE_TYPE_MASK 0xF #define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ #define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8) #define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) #define PCIE_GLB_STS_PHY_LINK_UP BIT(9) #define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C) #define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20) #define PCIE_INT_A_ASSERT_MASK BIT(9) #define PCIE_INT_B_ASSERT_MASK BIT(10) #define PCIE_INT_C_ASSERT_MASK BIT(11) #define PCIE_INT_D_ASSERT_MASK BIT(12) #define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50) #define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54) #define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) #define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) /* * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write * allocate */ #define ARCACHE_DEFAULT_VALUE 0x3511 #define AWCACHE_DEFAULT_VALUE 0x5311 #define DOMAIN_OUTER_SHAREABLE 0x2 #define AX_USER_DOMAIN_MASK 0x3 #define AX_USER_DOMAIN_SHIFT 4 #define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie) { int i; for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { phy_power_off(pcie->phy[i]); phy_exit(pcie->phy[i]); } } static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie) { int ret; int i; for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { ret = phy_init(pcie->phy[i]); if (ret) return ret; ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE, pcie->phy_count); if (ret) { phy_exit(pcie->phy[i]); return ret; } ret = phy_power_on(pcie->phy[i]); if (ret) { phy_exit(pcie->phy[i]); return ret; } } return 0; } static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie) { struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; struct device_node *node = dev->of_node; int ret = 0; int i; for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i); if (IS_ERR(pcie->phy[i])) { if (PTR_ERR(pcie->phy[i]) != -ENODEV) return PTR_ERR(pcie->phy[i]); pcie->phy[i] = NULL; continue; } pcie->phy_count++; } /* Old bindings miss the PHY handle, so just warn if there is no PHY */ if (!pcie->phy_count) dev_warn(dev, "No available PHY\n"); ret = armada8k_pcie_enable_phys(pcie); if (ret) dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret); return ret; } static int armada8k_pcie_link_up(struct dw_pcie *pci) { u32 reg; u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); if ((reg & mask) == mask) return 1; dev_dbg(pci->dev, "No link 
detected (Global-Status: 0x%08x).\n", reg); return 0; } static int armada8k_pcie_start_link(struct dw_pcie *pci) { u32 reg; /* Start LTSSM */ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); reg |= PCIE_APP_LTSSM_EN; dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); return 0; } static int armada8k_pcie_host_init(struct dw_pcie_rp *pp) { u32 reg; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); if (!dw_pcie_link_up(pci)) { /* Disable LTSSM state machine to enable configuration */ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); reg &= ~(PCIE_APP_LTSSM_EN); dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); } /* Set the device to root complex mode */ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); /* Set the PCIe master AxCache attributes */ dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); /* Set the PCIe master AxDomain attributes */ reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG); reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg); reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG); reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg); /* Enable INT A-D interrupts */ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG); reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg); return 0; } static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) { struct armada8k_pcie *pcie = arg; struct dw_pcie *pci = pcie->pci; u32 val; /* * Interrupts are directly handled by the device driver of the * PCI device. However, they are also latched into the PCIe * controller, so we simply discard them. 
*/ val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG); dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val); return IRQ_HANDLED; } static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { .host_init = armada8k_pcie_host_init, }; static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, struct platform_device *pdev) { struct dw_pcie *pci = pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; int ret; pp->ops = &armada8k_pcie_host_ops; pp->irq = platform_get_irq(pdev, 0); if (pp->irq < 0) return pp->irq; ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, IRQF_SHARED, "armada8k-pcie", pcie); if (ret) { dev_err(dev, "failed to request irq %d\n", pp->irq); return ret; } ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host: %d\n", ret); return ret; } return 0; } static const struct dw_pcie_ops dw_pcie_ops = { .link_up = armada8k_pcie_link_up, .start_link = armada8k_pcie_start_link, }; static int armada8k_pcie_probe(struct platform_device *pdev) { struct dw_pcie *pci; struct armada8k_pcie *pcie; struct device *dev = &pdev->dev; struct resource *base; int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; pci->ops = &dw_pcie_ops; pcie->pci = pci; pcie->clk = devm_clk_get(dev, NULL); if (IS_ERR(pcie->clk)) return PTR_ERR(pcie->clk); ret = clk_prepare_enable(pcie->clk); if (ret) return ret; pcie->clk_reg = devm_clk_get(dev, "reg"); if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) { ret = -EPROBE_DEFER; goto fail; } if (!IS_ERR(pcie->clk_reg)) { ret = clk_prepare_enable(pcie->clk_reg); if (ret) goto fail_clkreg; } /* Get the dw-pcie unit configuration/control registers base. */ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); if (IS_ERR(pci->dbi_base)) { ret = PTR_ERR(pci->dbi_base); goto fail_clkreg; } ret = armada8k_pcie_setup_phys(pcie); if (ret) goto fail_clkreg; platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); if (ret) goto disable_phy; return 0; disable_phy: armada8k_pcie_disable_phys(pcie); fail_clkreg: clk_disable_unprepare(pcie->clk_reg); fail: clk_disable_unprepare(pcie->clk); return ret; } static const struct of_device_id armada8k_pcie_of_match[] = { { .compatible = "marvell,armada8k-pcie", }, {}, }; static struct platform_driver armada8k_pcie_driver = { .probe = armada8k_pcie_probe, .driver = { .name = "armada8k-pcie", .of_match_table = armada8k_pcie_of_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(armada8k_pcie_driver);
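/*
 * Clarifying note, not from the original source: armada8k_pcie_link_up()
 * above requires both PCIE_GLB_STS_RDLH_LINK_UP (data link layer up) and
 * PCIE_GLB_STS_PHY_LINK_UP (PHY layer up) to be set before reporting the
 * link as established; either bit alone is treated as "no link". The INTA-D
 * bits unmasked in armada8k_pcie_host_init() are what get latched into the
 * CAUSE1 register that the IRQ handler clears.
 */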
linux-master
drivers/pci/controller/dwc/pcie-armada8k.c
// SPDX-License-Identifier: GPL-2.0 /* * PCIe RC driver for Synopsys DesignWare Core * * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) * * Authors: Joao Pinto <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/types.h> #include "pcie-designware.h" struct dw_plat_pcie { struct dw_pcie *pci; enum dw_pcie_device_mode mode; }; struct dw_plat_pcie_of_data { enum dw_pcie_device_mode mode; }; static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { }; static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar; for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) dw_pcie_ep_reset_bar(pci, bar); } static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: return dw_pcie_ep_raise_legacy_irq(ep, func_no); case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); case PCI_EPC_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); } return 0; } static const struct pci_epc_features dw_plat_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, }; static const struct pci_epc_features* dw_plat_pcie_get_features(struct dw_pcie_ep *ep) { return &dw_plat_pcie_epc_features; } static const struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = dw_plat_pcie_ep_init, .raise_irq = dw_plat_pcie_ep_raise_irq, .get_features = dw_plat_pcie_get_features, }; static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie, struct platform_device *pdev) { struct dw_pcie *pci = dw_plat_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; int ret; pp->irq = platform_get_irq(pdev, 1); if (pp->irq < 0) return pp->irq; pp->num_vectors = MAX_MSI_IRQS; pp->ops = &dw_plat_pcie_host_ops; ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "Failed to initialize host\n"); return ret; } return 0; } static int dw_plat_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_plat_pcie *dw_plat_pcie; struct dw_pcie *pci; int ret; const struct dw_plat_pcie_of_data *data; enum dw_pcie_device_mode mode; data = of_device_get_match_data(dev); if (!data) return -EINVAL; mode = (enum dw_pcie_device_mode)data->mode; dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); if (!dw_plat_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); if (!pci) return -ENOMEM; pci->dev = dev; dw_plat_pcie->pci = pci; dw_plat_pcie->mode = mode; platform_set_drvdata(pdev, dw_plat_pcie); switch (dw_plat_pcie->mode) { case DW_PCIE_RC_TYPE: if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST)) return -ENODEV; ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); break; case DW_PCIE_EP_TYPE: if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP)) return -ENODEV; pci->ep.ops = &pcie_ep_ops; ret = dw_pcie_ep_init(&pci->ep); break; default: dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); ret = -EINVAL; break; } return ret; } static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { .mode = DW_PCIE_RC_TYPE, }; static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = { .mode = DW_PCIE_EP_TYPE, }; 
static const struct of_device_id dw_plat_pcie_of_match[] = { { .compatible = "snps,dw-pcie", .data = &dw_plat_pcie_rc_of_data, }, { .compatible = "snps,dw-pcie-ep", .data = &dw_plat_pcie_ep_of_data, }, {}, }; static struct platform_driver dw_plat_pcie_driver = { .driver = { .name = "dw-pcie", .of_match_table = dw_plat_pcie_of_match, .suppress_bind_attrs = true, }, .probe = dw_plat_pcie_probe, }; builtin_platform_driver(dw_plat_pcie_driver);
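/*
 * Clarifying note for this listing: the generic "snps,dw-pcie" and
 * "snps,dw-pcie-ep" compatibles select host (RC) or endpoint mode through
 * the per-compatible match data consumed in dw_plat_pcie_probe(); the
 * selected mode must also be enabled at build time (CONFIG_PCIE_DW_PLAT_HOST
 * or CONFIG_PCIE_DW_PLAT_EP respectively), otherwise probe returns -ENODEV.
 */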
linux-master
drivers/pci/controller/dwc/pcie-designware-plat.c
// SPDX-License-Identifier: GPL-2.0 /* * Qualcomm PCIe Endpoint controller driver * * Copyright (c) 2020, The Linux Foundation. All rights reserved. * Author: Siddartha Mohanadoss <[email protected] * * Copyright (c) 2021, Linaro Ltd. * Author: Manivannan Sadhasivam <[email protected] */ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interconnect.h> #include <linux/mfd/syscon.h> #include <linux/phy/pcie.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/module.h> #include "pcie-designware.h" /* PARF registers */ #define PARF_SYS_CTRL 0x00 #define PARF_DB_CTRL 0x10 #define PARF_PM_CTRL 0x20 #define PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PARF_MHI_BASE_ADDR_LOWER 0x178 #define PARF_MHI_BASE_ADDR_UPPER 0x17c #define PARF_DEBUG_INT_EN 0x190 #define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4 #define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8 #define PARF_Q2A_FLUSH 0x1ac #define PARF_LTSSM 0x1b0 #define PARF_CFG_BITS 0x210 #define PARF_INT_ALL_STATUS 0x224 #define PARF_INT_ALL_CLEAR 0x228 #define PARF_INT_ALL_MASK 0x22c #define PARF_SLV_ADDR_MSB_CTRL 0x2c0 #define PARF_DBI_BASE_ADDR 0x350 #define PARF_DBI_BASE_ADDR_HI 0x354 #define PARF_SLV_ADDR_SPACE_SIZE 0x358 #define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c #define PARF_ATU_BASE_ADDR 0x634 #define PARF_ATU_BASE_ADDR_HI 0x638 #define PARF_SRIS_MODE 0x644 #define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_CFG 0x2c00 /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ #define PARF_INT_ALL_LINK_DOWN BIT(1) #define PARF_INT_ALL_BME BIT(2) #define PARF_INT_ALL_PM_TURNOFF BIT(3) #define PARF_INT_ALL_DEBUG BIT(4) #define PARF_INT_ALL_LTR BIT(5) #define PARF_INT_ALL_MHI_Q6 BIT(6) #define PARF_INT_ALL_MHI_A7 BIT(7) #define PARF_INT_ALL_DSTATE_CHANGE BIT(8) #define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9) #define PARF_INT_ALL_MMIO_WRITE BIT(10) #define PARF_INT_ALL_CFG_WRITE BIT(11) #define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12) #define PARF_INT_ALL_LINK_UP BIT(13) #define PARF_INT_ALL_AER_LEGACY BIT(14) #define PARF_INT_ALL_PLS_ERR BIT(15) #define PARF_INT_ALL_PME_LEGACY BIT(16) #define PARF_INT_ALL_PLS_PME BIT(17) #define PARF_INT_ALL_EDMA BIT(22) /* PARF_BDF_TO_SID_CFG register fields */ #define PARF_BDF_TO_SID_BYPASS BIT(0) /* PARF_DEBUG_INT_EN register fields */ #define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1) #define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2) #define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3) /* PARF_DEVICE_TYPE register fields */ #define PARF_DEVICE_TYPE_EP 0x0 /* PARF_PM_CTRL register fields */ #define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1) #define PARF_PM_CTRL_READY_ENTR_L23 BIT(2) #define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5) /* PARF_MHI_CLOCK_RESET_CTRL fields */ #define PARF_MSTR_AXI_CLK_EN BIT(1) /* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */ #define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0) /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */ #define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31) /* PARF_Q2A_FLUSH register fields */ #define PARF_Q2A_FLUSH_EN BIT(16) /* PARF_SYS_CTRL register fields */ #define PARF_SYS_CTRL_AUX_PWR_DET BIT(4) #define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6) #define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10) #define 
PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11) /* PARF_DB_CTRL register fields */ #define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0) #define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1) #define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4) #define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5) #define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6) /* PARF_CFG_BITS register fields */ #define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1) /* ELBI registers */ #define ELBI_SYS_STTS 0x08 /* DBI registers */ #define DBI_CON_STATUS 0x44 /* DBI register fields */ #define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0) #define XMLH_LINK_UP 0x400 #define CORE_RESET_TIME_US_MIN 1000 #define CORE_RESET_TIME_US_MAX 1005 #define WAKE_DELAY_US 2000 /* 2 ms */ #define PCIE_GEN1_BW_MBPS 250 #define PCIE_GEN2_BW_MBPS 500 #define PCIE_GEN3_BW_MBPS 985 #define PCIE_GEN4_BW_MBPS 1969 #define to_pcie_ep(x) dev_get_drvdata((x)->dev) enum qcom_pcie_ep_link_status { QCOM_PCIE_EP_LINK_DISABLED, QCOM_PCIE_EP_LINK_ENABLED, QCOM_PCIE_EP_LINK_UP, QCOM_PCIE_EP_LINK_DOWN, }; /** * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller * @pci: Designware PCIe controller struct * @parf: Qualcomm PCIe specific PARF register base * @elbi: Designware PCIe specific ELBI register base * @mmio: MMIO register base * @perst_map: PERST regmap * @mmio_res: MMIO region resource * @core_reset: PCIe Endpoint core reset * @reset: PERST# GPIO * @wake: WAKE# GPIO * @phy: PHY controller block * @debugfs: PCIe Endpoint Debugfs directory * @icc_mem: Handle to an interconnect path between PCIe and MEM * @clks: PCIe clocks * @num_clks: PCIe clocks count * @perst_en: Flag for PERST enable * @perst_sep_en: Flag for PERST separation enable * @link_status: PCIe Link status * @global_irq: Qualcomm PCIe specific Global IRQ * @perst_irq: PERST# IRQ */ struct qcom_pcie_ep { struct dw_pcie pci; void __iomem *parf; void __iomem *elbi; void __iomem *mmio; struct regmap *perst_map; struct resource *mmio_res; struct reset_control *core_reset; struct gpio_desc *reset; struct gpio_desc *wake; struct phy *phy; struct dentry *debugfs; struct icc_path *icc_mem; struct clk_bulk_data *clks; int num_clks; u32 perst_en; u32 perst_sep_en; enum qcom_pcie_ep_link_status link_status; int global_irq; int perst_irq; }; static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep) { struct dw_pcie *pci = &pcie_ep->pci; struct device *dev = pci->dev; int ret; ret = reset_control_assert(pcie_ep->core_reset); if (ret) { dev_err(dev, "Cannot assert core reset\n"); return ret; } usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX); ret = reset_control_deassert(pcie_ep->core_reset); if (ret) { dev_err(dev, "Cannot de-assert core reset\n"); return ret; } usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX); return 0; } /* * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid * device reset during host reboot and hibernation. The driver is * expected to handle this situation. 
*/ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep) { if (pcie_ep->perst_map) { regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0); regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0); } } static int qcom_pcie_dw_link_up(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); u32 reg; reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS); return reg & XMLH_LINK_UP; } static int qcom_pcie_dw_start_link(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); enable_irq(pcie_ep->perst_irq); return 0; } static void qcom_pcie_dw_stop_link(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); disable_irq(pcie_ep->perst_irq); } static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep) { struct dw_pcie *pci = &pcie_ep->pci; u32 offset, status, bw; int speed, width; int ret; if (!pcie_ep->icc_mem) return; offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status); width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); switch (speed) { case 1: bw = MBps_to_icc(PCIE_GEN1_BW_MBPS); break; case 2: bw = MBps_to_icc(PCIE_GEN2_BW_MBPS); break; case 3: bw = MBps_to_icc(PCIE_GEN3_BW_MBPS); break; default: dev_warn(pci->dev, "using default GEN4 bandwidth\n"); fallthrough; case 4: bw = MBps_to_icc(PCIE_GEN4_BW_MBPS); break; } ret = icc_set_bw(pcie_ep->icc_mem, 0, width * bw); if (ret) dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", ret); } static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep) { struct dw_pcie *pci = &pcie_ep->pci; int ret; ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks); if (ret) return ret; ret = qcom_pcie_ep_core_reset(pcie_ep); if (ret) goto err_disable_clk; ret = phy_init(pcie_ep->phy); if (ret) goto err_disable_clk; ret = phy_set_mode_ext(pcie_ep->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_EP); if (ret) goto err_phy_exit; ret = phy_power_on(pcie_ep->phy); if (ret) goto err_phy_exit; /* * Some Qualcomm platforms require interconnect bandwidth constraints * to be set before enabling interconnect clocks. * * Set an initial peak bandwidth corresponding to single-lane Gen 1 * for the pcie-mem path. 
*/ ret = icc_set_bw(pcie_ep->icc_mem, 0, MBps_to_icc(PCIE_GEN1_BW_MBPS)); if (ret) { dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", ret); goto err_phy_off; } return 0; err_phy_off: phy_power_off(pcie_ep->phy); err_phy_exit: phy_exit(pcie_ep->phy); err_disable_clk: clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks); return ret; } static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep) { icc_set_bw(pcie_ep->icc_mem, 0, 0); phy_power_off(pcie_ep->phy); phy_exit(pcie_ep->phy); clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks); } static int qcom_pcie_perst_deassert(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); struct device *dev = pci->dev; u32 val, offset; int ret; ret = qcom_pcie_enable_resources(pcie_ep); if (ret) { dev_err(dev, "Failed to enable resources: %d\n", ret); return ret; } /* Assert WAKE# to RC to indicate device is ready */ gpiod_set_value_cansleep(pcie_ep->wake, 1); usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500); gpiod_set_value_cansleep(pcie_ep->wake, 0); qcom_pcie_ep_configure_tcsr(pcie_ep); /* Disable BDF to SID mapping */ val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG); val |= PARF_BDF_TO_SID_BYPASS; writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG); /* Enable debug IRQ */ val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN); val |= PARF_DEBUG_INT_RADM_PM_TURNOFF | PARF_DEBUG_INT_CFG_BUS_MASTER_EN | PARF_DEBUG_INT_PM_DSTATE_CHANGE; writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN); /* Configure PCIe to endpoint mode */ writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE); /* Allow entering L1 state */ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1; writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL); /* Read halts write */ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES); val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN; writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES); /* Write after write halt */ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT); val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN; writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT); /* Q2A flush disable */ val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH); val &= ~PARF_Q2A_FLUSH_EN; writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH); /* * Disable Master AXI clock during idle. Do not allow DBI access * to take the core out of L1. Disable core clock gating that * gates PIPE clock from propagating to core clock. Report to the * host that Vaux is present. 
*/ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL); val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS; val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE | PARF_SYS_CTRL_CORE_CLK_CGC_DIS | PARF_SYS_CTRL_AUX_PWR_DET; writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL); /* Disable the debouncers */ val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL); val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK | PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK | PARF_DB_CTRL_MST_WKP_BLOCK; writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL); /* Request to exit from L1SS for MSI and LTR MSG */ val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS); val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN; writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS); dw_pcie_dbi_ro_wr_en(pci); /* Set the L0s Exit Latency to 2us-4us = 0x6 */ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); val &= ~PCI_EXP_LNKCAP_L0SEL; val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6); dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val); /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); val &= ~PCI_EXP_LNKCAP_L1EL; val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6); dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val); dw_pcie_dbi_ro_wr_dis(pci); writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK); val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME | PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE | PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA; writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK); ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to complete initialization: %d\n", ret); goto err_disable_resources; } /* * The physical address of the MMIO region which is exposed as the BAR * should be written to MHI BASE registers. 
*/ writel_relaxed(pcie_ep->mmio_res->start, pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER); writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER); /* Gate Master AXI clock to MHI bus during L1SS */ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL); val &= ~PARF_MSTR_AXI_CLK_EN; writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL); dw_pcie_ep_init_notify(&pcie_ep->pci.ep); /* Enable LTSSM */ val = readl_relaxed(pcie_ep->parf + PARF_LTSSM); val |= BIT(8); writel_relaxed(val, pcie_ep->parf + PARF_LTSSM); return 0; err_disable_resources: qcom_pcie_disable_resources(pcie_ep); return ret; } static void qcom_pcie_perst_assert(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); struct device *dev = pci->dev; if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) { dev_dbg(dev, "Link is already disabled\n"); return; } qcom_pcie_disable_resources(pcie_ep); pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED; } /* Common DWC controller ops */ static const struct dw_pcie_ops pci_ops = { .link_up = qcom_pcie_dw_link_up, .start_link = qcom_pcie_dw_start_link, .stop_link = qcom_pcie_dw_stop_link, }; static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev, struct qcom_pcie_ep *pcie_ep) { struct device *dev = &pdev->dev; struct dw_pcie *pci = &pcie_ep->pci; struct device_node *syscon; struct resource *res; int ret; pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie_ep->parf)) return PTR_ERR(pcie_ep->parf); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); pci->dbi_base2 = pci->dbi_base; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pcie_ep->elbi)) return PTR_ERR(pcie_ep->elbi); pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio"); if (!pcie_ep->mmio_res) { dev_err(dev, "Failed to get mmio resource\n"); return -EINVAL; } pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res); if (IS_ERR(pcie_ep->mmio)) return PTR_ERR(pcie_ep->mmio); syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0); if (!syscon) { dev_dbg(dev, "PERST separation not available\n"); return 0; } pcie_ep->perst_map = syscon_node_to_regmap(syscon); of_node_put(syscon); if (IS_ERR(pcie_ep->perst_map)) return PTR_ERR(pcie_ep->perst_map); ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs", 1, &pcie_ep->perst_en); if (ret < 0) { dev_err(dev, "No Perst Enable offset in syscon\n"); return ret; } ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs", 2, &pcie_ep->perst_sep_en); if (ret < 0) { dev_err(dev, "No Perst Separation Enable offset in syscon\n"); return ret; } return 0; } static int qcom_pcie_ep_get_resources(struct platform_device *pdev, struct qcom_pcie_ep *pcie_ep) { struct device *dev = &pdev->dev; int ret; ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep); if (ret) { dev_err(dev, "Failed to get io resources %d\n", ret); return ret; } pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks); if (pcie_ep->num_clks < 0) { dev_err(dev, "Failed to get clocks\n"); return pcie_ep->num_clks; } pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core"); if (IS_ERR(pcie_ep->core_reset)) return PTR_ERR(pcie_ep->core_reset); pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN); if (IS_ERR(pcie_ep->reset)) return PTR_ERR(pcie_ep->reset); pcie_ep->wake = 
devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW); if (IS_ERR(pcie_ep->wake)) return PTR_ERR(pcie_ep->wake); pcie_ep->phy = devm_phy_optional_get(dev, "pciephy"); if (IS_ERR(pcie_ep->phy)) ret = PTR_ERR(pcie_ep->phy); pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem"); if (IS_ERR(pcie_ep->icc_mem)) ret = PTR_ERR(pcie_ep->icc_mem); return ret; } /* TODO: Notify clients about PCIe state change */ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data) { struct qcom_pcie_ep *pcie_ep = data; struct dw_pcie *pci = &pcie_ep->pci; struct device *dev = pci->dev; u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS); u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK); u32 dstate, val; writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR); status &= mask; if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) { dev_dbg(dev, "Received Linkdown event\n"); pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN; pci_epc_linkdown(pci->ep.epc); } else if (FIELD_GET(PARF_INT_ALL_BME, status)) { dev_dbg(dev, "Received BME event. Link is enabled!\n"); pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED; qcom_pcie_ep_icc_update(pcie_ep); pci_epc_bme_notify(pci->ep.epc); } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) { dev_dbg(dev, "Received PM Turn-off event! Entering L23\n"); val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); val |= PARF_PM_CTRL_READY_ENTR_L23; writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL); } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) { dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) & DBI_CON_STATUS_POWER_STATE_MASK; dev_dbg(dev, "Received D%d state event\n", dstate); if (dstate == 3) { val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); val |= PARF_PM_CTRL_REQ_EXIT_L1; writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL); } } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { dev_dbg(dev, "Received Linkup event. Enumeration complete!\n"); dw_pcie_ep_linkup(&pci->ep); pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP; } else { dev_err(dev, "Received unknown event: %d\n", status); } return IRQ_HANDLED; } static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data) { struct qcom_pcie_ep *pcie_ep = data; struct dw_pcie *pci = &pcie_ep->pci; struct device *dev = pci->dev; u32 perst; perst = gpiod_get_value(pcie_ep->reset); if (perst) { dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n"); qcom_pcie_perst_assert(pci); } else { dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n"); qcom_pcie_perst_deassert(pci); } irq_set_irq_type(gpiod_to_irq(pcie_ep->reset), (perst ? 
IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW)); return IRQ_HANDLED; } static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, struct qcom_pcie_ep *pcie_ep) { int ret; pcie_ep->global_irq = platform_get_irq_byname(pdev, "global"); if (pcie_ep->global_irq < 0) return pcie_ep->global_irq; ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL, qcom_pcie_ep_global_irq_thread, IRQF_ONESHOT, "global_irq", pcie_ep); if (ret) { dev_err(&pdev->dev, "Failed to request Global IRQ\n"); return ret; } pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset); irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL, qcom_pcie_ep_perst_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "perst_irq", pcie_ep); if (ret) { dev_err(&pdev->dev, "Failed to request PERST IRQ\n"); disable_irq(pcie_ep->global_irq); return ret; } return 0; } static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: return dw_pcie_ep_raise_legacy_irq(ep, func_no); case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type\n"); return -EINVAL; } } static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data) { struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *) dev_get_drvdata(s->private); seq_printf(s, "L0s transition count: %u\n", readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); seq_printf(s, "L1 transition count: %u\n", readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); seq_printf(s, "L1.1 transition count: %u\n", readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); seq_printf(s, "L1.2 transition count: %u\n", readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); seq_printf(s, "L2 transition count: %u\n", readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); return 0; } static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep) { struct dw_pcie *pci = &pcie_ep->pci; debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs, qcom_pcie_ep_link_transition_count); } static const struct pci_epc_features qcom_pcie_epc_features = { .linkup_notifier = true, .core_init_notifier = true, .msi_capable = true, .msix_capable = false, .align = SZ_4K, }; static const struct pci_epc_features * qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep) { return &qcom_pcie_epc_features; } static void qcom_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar; for (bar = BAR_0; bar <= BAR_5; bar++) dw_pcie_ep_reset_bar(pci, bar); } static const struct dw_pcie_ep_ops pci_ep_ops = { .ep_init = qcom_pcie_ep_init, .raise_irq = qcom_pcie_ep_raise_irq, .get_features = qcom_pcie_epc_get_features, }; static int qcom_pcie_ep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct qcom_pcie_ep *pcie_ep; char *name; int ret; pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL); if (!pcie_ep) return -ENOMEM; pcie_ep->pci.dev = dev; pcie_ep->pci.ops = &pci_ops; pcie_ep->pci.ep.ops = &pci_ep_ops; pcie_ep->pci.edma.nr_irqs = 1; platform_set_drvdata(pdev, pcie_ep); ret = qcom_pcie_ep_get_resources(pdev, pcie_ep); if (ret) return ret; ret = qcom_pcie_enable_resources(pcie_ep); if (ret) { dev_err(dev, "Failed to enable resources: %d\n", ret); return ret; } ret = 
dw_pcie_ep_init(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to initialize endpoint: %d\n", ret); goto err_disable_resources; } ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep); if (ret) goto err_disable_resources; name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); if (!name) { ret = -ENOMEM; goto err_disable_irqs; } pcie_ep->debugfs = debugfs_create_dir(name, NULL); qcom_pcie_ep_init_debugfs(pcie_ep); return 0; err_disable_irqs: disable_irq(pcie_ep->global_irq); disable_irq(pcie_ep->perst_irq); err_disable_resources: qcom_pcie_disable_resources(pcie_ep); return ret; } static void qcom_pcie_ep_remove(struct platform_device *pdev) { struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev); disable_irq(pcie_ep->global_irq); disable_irq(pcie_ep->perst_irq); debugfs_remove_recursive(pcie_ep->debugfs); if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) return; qcom_pcie_disable_resources(pcie_ep); } static const struct of_device_id qcom_pcie_ep_match[] = { { .compatible = "qcom,sdx55-pcie-ep", }, { .compatible = "qcom,sm8450-pcie-ep", }, { } }; MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match); static struct platform_driver qcom_pcie_ep_driver = { .probe = qcom_pcie_ep_probe, .remove_new = qcom_pcie_ep_remove, .driver = { .name = "qcom-pcie-ep", .of_match_table = qcom_pcie_ep_match, }, }; builtin_platform_driver(qcom_pcie_ep_driver); MODULE_AUTHOR("Siddartha Mohanadoss <[email protected]>"); MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>"); MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/pci/controller/dwc/pcie-qcom-ep.c
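For reference, a minimal standalone sketch of the bandwidth calculation performed by qcom_pcie_ep_icc_update() in the driver above: the negotiated link speed and width are taken from the Link Status register and multiplied by a per-lane Gen1..Gen4 figure before being handed to icc_set_bw(). The field masks mirror PCI_EXP_LNKSTA_CLS / PCI_EXP_LNKSTA_NLW and the MB/s constants are the driver's PCIE_GENx_BW_MBPS values; the main() harness and the sample register value are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define LNKSTA_CLS_MASK		0x000f	/* Current Link Speed (PCI_EXP_LNKSTA_CLS) */
#define LNKSTA_NLW_MASK		0x03f0	/* Negotiated Link Width (PCI_EXP_LNKSTA_NLW) */
#define LNKSTA_NLW_SHIFT	4

static unsigned int per_lane_mbps(unsigned int speed)
{
	switch (speed) {
	case 1: return 250;	/* PCIE_GEN1_BW_MBPS */
	case 2: return 500;	/* PCIE_GEN2_BW_MBPS */
	case 3: return 985;	/* PCIE_GEN3_BW_MBPS */
	default:		/* unknown speeds fall back to Gen4, as in the driver */
	case 4: return 1969;	/* PCIE_GEN4_BW_MBPS */
	}
}

int main(void)
{
	uint16_t lnksta = 0x0043;	/* sample value: Gen3 (CLS = 3), x4 (NLW = 4) */
	unsigned int speed = lnksta & LNKSTA_CLS_MASK;
	unsigned int width = (lnksta & LNKSTA_NLW_MASK) >> LNKSTA_NLW_SHIFT;

	/* The driver requests width * per-lane bandwidth through icc_set_bw(). */
	printf("x%u at Gen%u -> %u MB/s\n", width, speed, width * per_lane_mbps(speed));
	return 0;
}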
// SPDX-License-Identifier: GPL-2.0 /* * pci-j721e - PCIe controller driver for TI's J721E SoCs * * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/io.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include "../../pci.h" #include "pcie-cadence.h" #define ENABLE_REG_SYS_2 0x108 #define STATUS_REG_SYS_2 0x508 #define STATUS_CLR_REG_SYS_2 0x708 #define LINK_DOWN BIT(1) #define J7200_LINK_DOWN BIT(10) #define J721E_PCIE_USER_CMD_STATUS 0x4 #define LINK_TRAINING_ENABLE BIT(0) #define J721E_PCIE_USER_LINKSTATUS 0x14 #define LINK_STATUS GENMASK(1, 0) enum link_status { NO_RECEIVERS_DETECTED, LINK_TRAINING_IN_PROGRESS, LINK_UP_DL_IN_PROGRESS, LINK_UP_DL_COMPLETED, }; #define J721E_MODE_RC BIT(7) #define LANE_COUNT_MASK BIT(8) #define LANE_COUNT(n) ((n) << 8) #define GENERATION_SEL_MASK GENMASK(1, 0) #define MAX_LANES 2 struct j721e_pcie { struct cdns_pcie *cdns_pcie; struct clk *refclk; u32 mode; u32 num_lanes; void __iomem *user_cfg_base; void __iomem *intd_cfg_base; u32 linkdown_irq_regfield; }; enum j721e_pcie_mode { PCI_MODE_RC, PCI_MODE_EP, }; struct j721e_pcie_data { enum j721e_pcie_mode mode; unsigned int quirk_retrain_flag:1; unsigned int quirk_detect_quiet_flag:1; unsigned int quirk_disable_flr:1; u32 linkdown_irq_regfield; unsigned int byte_access_allowed:1; }; static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset) { return readl(pcie->user_cfg_base + offset); } static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset, u32 value) { writel(value, pcie->user_cfg_base + offset); } static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset) { return readl(pcie->intd_cfg_base + offset); } static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset, u32 value) { writel(value, pcie->intd_cfg_base + offset); } static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv) { struct j721e_pcie *pcie = priv; struct device *dev = pcie->cdns_pcie->dev; u32 reg; reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2); if (!(reg & pcie->linkdown_irq_regfield)) return IRQ_NONE; dev_err(dev, "LINK DOWN!\n"); j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield); return IRQ_HANDLED; } static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie) { u32 reg; reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2); reg |= pcie->linkdown_irq_regfield; j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg); } static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie) { struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); u32 reg; reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS); reg |= LINK_TRAINING_ENABLE; j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); return 0; } static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie) { struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); u32 reg; reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS); reg &= ~LINK_TRAINING_ENABLE; j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); } static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie) { struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); u32 reg; reg = j721e_pcie_user_readl(pcie, 
J721E_PCIE_USER_LINKSTATUS); reg &= LINK_STATUS; if (reg == LINK_UP_DL_COMPLETED) return true; return false; } static const struct cdns_pcie_ops j721e_pcie_ops = { .start_link = j721e_pcie_start_link, .stop_link = j721e_pcie_stop_link, .link_up = j721e_pcie_link_up, }; static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon, unsigned int offset) { struct device *dev = pcie->cdns_pcie->dev; u32 mask = J721E_MODE_RC; u32 mode = pcie->mode; u32 val = 0; int ret = 0; if (mode == PCI_MODE_RC) val = J721E_MODE_RC; ret = regmap_update_bits(syscon, offset, mask, val); if (ret) dev_err(dev, "failed to set pcie mode\n"); return ret; } static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie, struct regmap *syscon, unsigned int offset) { struct device *dev = pcie->cdns_pcie->dev; struct device_node *np = dev->of_node; int link_speed; u32 val = 0; int ret; link_speed = of_pci_get_max_link_speed(np); if (link_speed < 2) link_speed = 2; val = link_speed - 1; ret = regmap_update_bits(syscon, offset, GENERATION_SEL_MASK, val); if (ret) dev_err(dev, "failed to set link speed\n"); return ret; } static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, struct regmap *syscon, unsigned int offset) { struct device *dev = pcie->cdns_pcie->dev; u32 lanes = pcie->num_lanes; u32 val = 0; int ret; val = LANE_COUNT(lanes - 1); ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val); if (ret) dev_err(dev, "failed to set link count\n"); return ret; } static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) { struct device *dev = pcie->cdns_pcie->dev; struct device_node *node = dev->of_node; struct of_phandle_args args; unsigned int offset = 0; struct regmap *syscon; int ret; syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl"); if (IS_ERR(syscon)) { dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n"); return PTR_ERR(syscon); } /* Do not error out to maintain old DT compatibility */ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-pcie-ctrl", 1, 0, &args); if (!ret) offset = args.args[0]; ret = j721e_pcie_set_mode(pcie, syscon, offset); if (ret < 0) { dev_err(dev, "Failed to set pci mode\n"); return ret; } ret = j721e_pcie_set_link_speed(pcie, syscon, offset); if (ret < 0) { dev_err(dev, "Failed to set link speed\n"); return ret; } ret = j721e_pcie_set_lane_count(pcie, syscon, offset); if (ret < 0) { dev_err(dev, "Failed to set num-lanes\n"); return ret; } return 0; } static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { if (pci_is_root_bus(bus)) return pci_generic_config_read32(bus, devfn, where, size, value); return pci_generic_config_read(bus, devfn, where, size, value); } static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { if (pci_is_root_bus(bus)) return pci_generic_config_write32(bus, devfn, where, size, value); return pci_generic_config_write(bus, devfn, where, size, value); } static struct pci_ops cdns_ti_pcie_host_ops = { .map_bus = cdns_pci_map_bus, .read = cdns_ti_pcie_config_read, .write = cdns_ti_pcie_config_write, }; static const struct j721e_pcie_data j721e_pcie_rc_data = { .mode = PCI_MODE_RC, .quirk_retrain_flag = true, .byte_access_allowed = false, .linkdown_irq_regfield = LINK_DOWN, }; static const struct j721e_pcie_data j721e_pcie_ep_data = { .mode = PCI_MODE_EP, .linkdown_irq_regfield = LINK_DOWN, }; static const struct j721e_pcie_data j7200_pcie_rc_data = { .mode = PCI_MODE_RC, .quirk_detect_quiet_flag = true, 
.linkdown_irq_regfield = J7200_LINK_DOWN, .byte_access_allowed = true, }; static const struct j721e_pcie_data j7200_pcie_ep_data = { .mode = PCI_MODE_EP, .quirk_detect_quiet_flag = true, .quirk_disable_flr = true, }; static const struct j721e_pcie_data am64_pcie_rc_data = { .mode = PCI_MODE_RC, .linkdown_irq_regfield = J7200_LINK_DOWN, .byte_access_allowed = true, }; static const struct j721e_pcie_data am64_pcie_ep_data = { .mode = PCI_MODE_EP, .linkdown_irq_regfield = J7200_LINK_DOWN, }; static const struct of_device_id of_j721e_pcie_match[] = { { .compatible = "ti,j721e-pcie-host", .data = &j721e_pcie_rc_data, }, { .compatible = "ti,j721e-pcie-ep", .data = &j721e_pcie_ep_data, }, { .compatible = "ti,j7200-pcie-host", .data = &j7200_pcie_rc_data, }, { .compatible = "ti,j7200-pcie-ep", .data = &j7200_pcie_ep_data, }, { .compatible = "ti,am64-pcie-host", .data = &am64_pcie_rc_data, }, { .compatible = "ti,am64-pcie-ep", .data = &am64_pcie_ep_data, }, {}, }; static int j721e_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; const struct j721e_pcie_data *data; struct cdns_pcie *cdns_pcie; struct j721e_pcie *pcie; struct cdns_pcie_rc *rc = NULL; struct cdns_pcie_ep *ep = NULL; struct gpio_desc *gpiod; void __iomem *base; struct clk *clk; u32 num_lanes; u32 mode; int ret; int irq; data = of_device_get_match_data(dev); if (!data) return -EINVAL; mode = (u32)data->mode; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; switch (mode) { case PCI_MODE_RC: if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); if (!bridge) return -ENOMEM; if (!data->byte_access_allowed) bridge->ops = &cdns_ti_pcie_host_ops; rc = pci_host_bridge_priv(bridge); rc->quirk_retrain_flag = data->quirk_retrain_flag; rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; cdns_pcie = &rc->pcie; cdns_pcie->dev = dev; cdns_pcie->ops = &j721e_pcie_ops; pcie->cdns_pcie = cdns_pcie; break; case PCI_MODE_EP: if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) return -ENODEV; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; ep->quirk_disable_flr = data->quirk_disable_flr; cdns_pcie = &ep->pcie; cdns_pcie->dev = dev; cdns_pcie->ops = &j721e_pcie_ops; pcie->cdns_pcie = cdns_pcie; break; default: dev_err(dev, "INVALID device type %d\n", mode); return 0; } pcie->mode = mode; pcie->linkdown_irq_regfield = data->linkdown_irq_regfield; base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg"); if (IS_ERR(base)) return PTR_ERR(base); pcie->intd_cfg_base = base; base = devm_platform_ioremap_resource_byname(pdev, "user_cfg"); if (IS_ERR(base)) return PTR_ERR(base); pcie->user_cfg_base = base; ret = of_property_read_u32(node, "num-lanes", &num_lanes); if (ret || num_lanes > MAX_LANES) num_lanes = 1; pcie->num_lanes = num_lanes; if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) return -EINVAL; irq = platform_get_irq_byname(pdev, "link_state"); if (irq < 0) return irq; dev_set_drvdata(dev, pcie); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync failed\n"); goto err_get_sync; } ret = j721e_pcie_ctrl_init(pcie); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync failed\n"); goto err_get_sync; } ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0, "j721e-pcie-link-down-irq", pcie); if (ret < 0) { dev_err(dev, 
"failed to request link state IRQ %d\n", irq); goto err_get_sync; } j721e_pcie_config_link_irq(pcie); switch (mode) { case PCI_MODE_RC: gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { ret = PTR_ERR(gpiod); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get reset GPIO\n"); goto err_get_sync; } ret = cdns_pcie_init_phy(dev, cdns_pcie); if (ret) { dev_err(dev, "Failed to init phy\n"); goto err_get_sync; } clk = devm_clk_get_optional(dev, "pcie_refclk"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); dev_err(dev, "failed to get pcie_refclk\n"); goto err_pcie_setup; } ret = clk_prepare_enable(clk); if (ret) { dev_err(dev, "failed to enable pcie_refclk\n"); goto err_pcie_setup; } pcie->refclk = clk; /* * "Power Sequencing and Reset Signal Timings" table in * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0 * indicates PERST# should be deasserted after minimum of 100us * once REFCLK is stable. The REFCLK to the connector in RC * mode is selected while enabling the PHY. So deassert PERST# * after 100 us. */ if (gpiod) { usleep_range(100, 200); gpiod_set_value_cansleep(gpiod, 1); } ret = cdns_pcie_host_setup(rc); if (ret < 0) { clk_disable_unprepare(pcie->refclk); goto err_pcie_setup; } break; case PCI_MODE_EP: ret = cdns_pcie_init_phy(dev, cdns_pcie); if (ret) { dev_err(dev, "Failed to init phy\n"); goto err_get_sync; } ret = cdns_pcie_ep_setup(ep); if (ret < 0) goto err_pcie_setup; break; } return 0; err_pcie_setup: cdns_pcie_disable_phy(cdns_pcie); err_get_sync: pm_runtime_put(dev); pm_runtime_disable(dev); return ret; } static void j721e_pcie_remove(struct platform_device *pdev) { struct j721e_pcie *pcie = platform_get_drvdata(pdev); struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; struct device *dev = &pdev->dev; clk_disable_unprepare(pcie->refclk); cdns_pcie_disable_phy(cdns_pcie); pm_runtime_put(dev); pm_runtime_disable(dev); } static struct platform_driver j721e_pcie_driver = { .probe = j721e_pcie_probe, .remove_new = j721e_pcie_remove, .driver = { .name = "j721e-pcie", .of_match_table = of_j721e_pcie_match, .suppress_bind_attrs = true, }, }; builtin_platform_driver(j721e_pcie_driver);
linux-master
drivers/pci/controller/cadence/pci-j721e.c
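As a quick illustration of how the J721E glue above programs its syscon control register, the sketch below composes the mode, lane-count and generation fields that j721e_pcie_set_mode(), j721e_pcie_set_lane_count() and j721e_pcie_set_link_speed() write via regmap_update_bits(). The bit positions come from the driver's J721E_MODE_RC, LANE_COUNT() and GENERATION_SEL_MASK definitions; composing one value in a single helper (rather than three masked updates) and the main() example are only for demonstration.

#include <stdio.h>
#include <stdint.h>

#define MODE_RC		(1u << 7)		/* J721E_MODE_RC */
#define LANE_COUNT(n)	((uint32_t)(n) << 8)	/* field holds lanes - 1 */
#define GEN_SEL_MASK	0x3u			/* GENERATION_SEL_MASK, holds max speed - 1 */

static uint32_t j721e_ctrl_value(int is_rc, unsigned int lanes, unsigned int max_gen)
{
	uint32_t val = 0;

	if (is_rc)
		val |= MODE_RC;
	val |= LANE_COUNT(lanes - 1);
	val |= (max_gen - 1) & GEN_SEL_MASK;
	return val;
}

int main(void)
{
	/* e.g. root-complex mode, 2 lanes, Gen3 -> 0x00000182 */
	printf("ctrl = 0x%08x\n", j721e_ctrl_value(1, 2, 3));
	return 0;
}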
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2017 Cadence // Cadence PCIe host controller driver. // Author: Cyrille Pitchen <[email protected]> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/list_sort.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/platform_device.h> #include "pcie-cadence.h" #define LINK_RETRAIN_TIMEOUT HZ static u64 bar_max_size[] = { [RP_BAR0] = _ULL(128 * SZ_2G), [RP_BAR1] = SZ_2G, [RP_NO_BAR] = _BITULL(63), }; static u8 bar_aperture_mask[] = { [RP_BAR0] = 0x1F, [RP_BAR1] = 0xF, }; void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct pci_host_bridge *bridge = pci_find_host_bridge(bus); struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); struct cdns_pcie *pcie = &rc->pcie; unsigned int busn = bus->number; u32 addr0, desc0; if (pci_is_root_bus(bus)) { /* * Only the root port (devfn == 0) is connected to this bus. * All other PCI devices are behind some bridge hence on another * bus. */ if (devfn) return NULL; return pcie->reg_base + (where & 0xfff); } /* Check that the link is up */ if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1)) return NULL; /* Clear AXI link-down status */ cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0); /* Update Output registers for AXI region 0. */ addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) | CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) | CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0); /* Configuration Type 0 or Type 1 access. */ desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); /* * The bus number was already set once for all in desc1 by * cdns_pcie_host_init_address_translation(). */ if (busn == bridge->busnr + 1) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; else desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0); return rc->cfg_base + (where & 0xfff); } static struct pci_ops cdns_pcie_host_ops = { .map_bus = cdns_pci_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie) { u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; unsigned long end_jiffies; u16 lnk_stat; /* Wait for link training to complete. Exit after timeout. */ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; do { lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) break; usleep_range(0, 1000); } while (time_before(jiffies, end_jiffies)); if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) return 0; return -ETIMEDOUT; } static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie) { struct device *dev = pcie->dev; int retries; /* Check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { if (cdns_pcie_link_up(pcie)) { dev_info(dev, "Link up\n"); return 0; } usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } return -ETIMEDOUT; } static int cdns_pcie_retrain(struct cdns_pcie *pcie) { u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; u16 lnk_stat, lnk_ctl; int ret = 0; /* * Set retrain bit if current speed is 2.5 GB/s, * but the PCIe root port support is > 2.5 GB/s. 
*/ lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off + PCI_EXP_LNKCAP)); if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) return ret; lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { lnk_ctl = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKCTL); lnk_ctl |= PCI_EXP_LNKCTL_RL; cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL, lnk_ctl); ret = cdns_pcie_host_training_complete(pcie); if (ret) return ret; ret = cdns_pcie_host_wait_for_link(pcie); } return ret; } static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie) { u32 val; val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL); cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN); } static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; int ret; ret = cdns_pcie_host_wait_for_link(pcie); /* * Retrain link for Gen2 training defect * if quirk flag is set. */ if (!ret && rc->quirk_retrain_flag) ret = cdns_pcie_retrain(pcie); return ret; } static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; u32 id; /* * Set the root complex BAR configuration register: * - disable both BAR0 and BAR1. * - enable Prefetchable Memory Base and Limit registers in type 1 * config space (64 bits). * - enable IO Base and Limit registers in type 1 config * space (32 bits). */ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE | CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS; cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ if (rc->vendor_id != 0xffff) { id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); } if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0); cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0); cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); return 0; } static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, enum cdns_pcie_rp_bar bar, u64 cpu_addr, u64 size, unsigned long flags) { struct cdns_pcie *pcie = &rc->pcie; u32 addr0, addr1, aperture, value; if (!rc->avail_ib_bar[bar]) return -EBUSY; rc->avail_ib_bar[bar] = false; aperture = ilog2(size); addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1); if (bar == RP_NO_BAR) return 0; value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG); value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2)); if (size + cpu_addr >= SZ_4G) { if (!(flags & IORESOURCE_PREFETCH)) value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar); value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar); } else { if (!(flags & IORESOURCE_PREFETCH)) value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar); value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar); } value |= LM_RC_BAR_CFG_APERTURE(bar, 
aperture); cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); return 0; } static enum cdns_pcie_rp_bar cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size) { enum cdns_pcie_rp_bar bar, sel_bar; sel_bar = RP_BAR_UNDEFINED; for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { if (!rc->avail_ib_bar[bar]) continue; if (size <= bar_max_size[bar]) { if (sel_bar == RP_BAR_UNDEFINED) { sel_bar = bar; continue; } if (bar_max_size[bar] < bar_max_size[sel_bar]) sel_bar = bar; } } return sel_bar; } static enum cdns_pcie_rp_bar cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size) { enum cdns_pcie_rp_bar bar, sel_bar; sel_bar = RP_BAR_UNDEFINED; for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { if (!rc->avail_ib_bar[bar]) continue; if (size >= bar_max_size[bar]) { if (sel_bar == RP_BAR_UNDEFINED) { sel_bar = bar; continue; } if (bar_max_size[bar] > bar_max_size[sel_bar]) sel_bar = bar; } } return sel_bar; } static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, struct resource_entry *entry) { u64 cpu_addr, pci_addr, size, winsize; struct cdns_pcie *pcie = &rc->pcie; struct device *dev = pcie->dev; enum cdns_pcie_rp_bar bar; unsigned long flags; int ret; cpu_addr = entry->res->start; pci_addr = entry->res->start - entry->offset; flags = entry->res->flags; size = resource_size(entry->res); if (entry->offset) { dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n", pci_addr, cpu_addr); return -EINVAL; } while (size > 0) { /* * Try to find a minimum BAR whose size is greater than * or equal to the remaining resource_entry size. This will * fail if the size of each of the available BARs is less than * the remaining resource_entry size. * If a minimum BAR is found, IB ATU will be configured and * exited. */ bar = cdns_pcie_host_find_min_bar(rc, size); if (bar != RP_BAR_UNDEFINED) { ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, size, flags); if (ret) dev_err(dev, "IB BAR: %d config failed\n", bar); return ret; } /* * If the control reaches here, it would mean the remaining * resource_entry size cannot be fitted in a single BAR. So we * find a maximum BAR whose size is less than or equal to the * remaining resource_entry size and split the resource entry * so that part of resource entry is fitted inside the maximum * BAR. The remaining size would be fitted during the next * iteration of the loop. * If a maximum BAR is not found, there is no way we can fit * this resource_entry, so we error out. 
*/ bar = cdns_pcie_host_find_max_bar(rc, size); if (bar == RP_BAR_UNDEFINED) { dev_err(dev, "No free BAR to map cpu_addr %llx\n", cpu_addr); return -EINVAL; } winsize = bar_max_size[bar]; ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize, flags); if (ret) { dev_err(dev, "IB BAR: %d config failed\n", bar); return ret; } size -= winsize; cpu_addr += winsize; } return 0; } static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a, const struct list_head *b) { struct resource_entry *entry1, *entry2; entry1 = container_of(a, struct resource_entry, node); entry2 = container_of(b, struct resource_entry, node); return resource_size(entry2->res) - resource_size(entry1->res); } static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; struct device *dev = pcie->dev; struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; struct resource_entry *entry; u32 no_bar_nbits = 32; int err; bridge = pci_host_bridge_from_priv(rc); if (!bridge) return -ENOMEM; if (list_empty(&bridge->dma_ranges)) { of_property_read_u32(np, "cdns,no-bar-match-nbits", &no_bar_nbits); err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0, (u64)1 << no_bar_nbits, 0); if (err) dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); return err; } list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); resource_list_for_each_entry(entry, &bridge->dma_ranges) { err = cdns_pcie_host_bar_config(rc, entry); if (err) { dev_err(dev, "Fail to configure IB using dma-ranges\n"); return err; } } return 0; } static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc); struct resource *cfg_res = rc->cfg_res; struct resource_entry *entry; u64 cpu_addr = cfg_res->start; u32 addr0, addr1, desc1; int r, busnr = 0; entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); if (entry) busnr = entry->res->start; /* * Reserve region 0 for PCI configure space accesses: * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by * cdns_pci_map_bus(), other region registers are set here once for all. */ addr1 = 0; /* Should be programmed to zero. 
*/ desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); if (pcie->ops->cpu_addr_fixup) cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); r = 1; resource_list_for_each_entry(entry, &bridge->windows) { struct resource *res = entry->res; u64 pci_addr = res->start - entry->offset; if (resource_type(res) == IORESOURCE_IO) cdns_pcie_set_outbound_region(pcie, busnr, 0, r, true, pci_pio_to_address(res->start), pci_addr, resource_size(res)); else cdns_pcie_set_outbound_region(pcie, busnr, 0, r, false, res->start, pci_addr, resource_size(res)); r++; } return cdns_pcie_host_map_dma_ranges(rc); } static int cdns_pcie_host_init(struct device *dev, struct cdns_pcie_rc *rc) { int err; err = cdns_pcie_host_init_root_port(rc); if (err) return err; return cdns_pcie_host_init_address_translation(rc); } int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { struct device *dev = rc->pcie.dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; enum cdns_pcie_rp_bar bar; struct cdns_pcie *pcie; struct resource *res; int ret; bridge = pci_host_bridge_from_priv(rc); if (!bridge) return -ENOMEM; pcie = &rc->pcie; pcie->is_rc = true; rc->vendor_id = 0xffff; of_property_read_u32(np, "vendor-id", &rc->vendor_id); rc->device_id = 0xffff; of_property_read_u32(np, "device-id", &rc->device_id); pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); if (IS_ERR(pcie->reg_base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(pcie->reg_base); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(rc->cfg_base)) return PTR_ERR(rc->cfg_base); rc->cfg_res = res; if (rc->quirk_detect_quiet_flag) cdns_pcie_detect_quiet_min_delay_set(&rc->pcie); cdns_pcie_host_enable_ptm_response(pcie); ret = cdns_pcie_start_link(pcie); if (ret) { dev_err(dev, "Failed to start link\n"); return ret; } ret = cdns_pcie_host_start_link(rc); if (ret) dev_dbg(dev, "PCIe link never came up\n"); for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) rc->avail_ib_bar[bar] = true; ret = cdns_pcie_host_init(dev, rc); if (ret) return ret; if (!bridge->ops) bridge->ops = &cdns_pcie_host_ops; ret = pci_host_probe(bridge); if (ret < 0) goto err_init; return 0; err_init: pm_runtime_put_sync(dev); return ret; }
linux-master
drivers/pci/controller/cadence/pcie-cadence-host.c
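The inbound-BAR mapping loop in cdns_pcie_host_bar_config() above is easier to follow in isolation: pick the smallest free root-port BAR that covers the remaining dma-range; otherwise take the largest free BAR that fits entirely inside it and split. Below is a standalone sketch of that selection logic using the driver's bar_max_size[] limits; the availability state and range size in main() are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum rp_bar { RP_BAR0, RP_BAR1, RP_NO_BAR, RP_NUM_BARS, RP_BAR_UNDEFINED = -1 };

static const uint64_t bar_max_size[RP_NUM_BARS] = {
	[RP_BAR0]   = 128ULL << 31,	/* 128 * 2 GiB */
	[RP_BAR1]   = 1ULL << 31,	/* 2 GiB */
	[RP_NO_BAR] = 1ULL << 63,
};

/* Smallest free BAR whose maximum size still covers the remaining range. */
static int find_min_bar(const bool *avail, uint64_t size)
{
	int bar, sel = RP_BAR_UNDEFINED;

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		if (avail[bar] && size <= bar_max_size[bar] &&
		    (sel == RP_BAR_UNDEFINED || bar_max_size[bar] < bar_max_size[sel]))
			sel = bar;
	return sel;
}

/* Largest free BAR that fits entirely inside the remaining range. */
static int find_max_bar(const bool *avail, uint64_t size)
{
	int bar, sel = RP_BAR_UNDEFINED;

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		if (avail[bar] && size >= bar_max_size[bar] &&
		    (sel == RP_BAR_UNDEFINED || bar_max_size[bar] > bar_max_size[sel]))
			sel = bar;
	return sel;
}

int main(void)
{
	/* Pretend BAR0 and the "no BAR" window are already taken. */
	bool avail[RP_NUM_BARS] = { false, true, false };
	uint64_t size = 3ULL << 30;	/* 3 GiB dma-range */

	while (size > 0) {
		int bar = find_min_bar(avail, size);
		uint64_t win;

		if (bar != RP_BAR_UNDEFINED) {
			printf("map %llu bytes in BAR %d\n", (unsigned long long)size, bar);
			avail[bar] = false;
			break;
		}
		bar = find_max_bar(avail, size);
		if (bar == RP_BAR_UNDEFINED) {
			/* The driver returns -EINVAL here: the range cannot be mapped. */
			printf("no free BAR for remaining %llu bytes\n",
			       (unsigned long long)size);
			break;
		}
		win = bar_max_size[bar];
		printf("split: map %llu bytes in BAR %d\n", (unsigned long long)win, bar);
		avail[bar] = false;
		size -= win;
	}
	return 0;
}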
// SPDX-License-Identifier: GPL-2.0 /* * Cadence PCIe platform driver. * * Copyright (c) 2019, Cadence Design Systems * Author: Tom Joseph <[email protected]> */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "pcie-cadence.h" #define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF /** * struct cdns_plat_pcie - private data for this PCIe platform driver * @pcie: Cadence PCIe controller * @is_rc: Set to 1 indicates the PCIe controller mode is Root Complex, * if 0 it is in Endpoint mode. */ struct cdns_plat_pcie { struct cdns_pcie *pcie; bool is_rc; }; struct cdns_plat_pcie_of_data { bool is_rc; }; static const struct of_device_id cdns_plat_pcie_of_match[]; static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr) { return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR; } static const struct cdns_pcie_ops cdns_plat_ops = { .cpu_addr_fixup = cdns_plat_cpu_addr_fixup, }; static int cdns_plat_pcie_probe(struct platform_device *pdev) { const struct cdns_plat_pcie_of_data *data; struct cdns_plat_pcie *cdns_plat_pcie; struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct cdns_pcie_ep *ep; struct cdns_pcie_rc *rc; int phy_count; bool is_rc; int ret; data = of_device_get_match_data(dev); if (!data) return -EINVAL; is_rc = data->is_rc; pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc); cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL); if (!cdns_plat_pcie) return -ENOMEM; platform_set_drvdata(pdev, cdns_plat_pcie); if (is_rc) { if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST)) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); if (!bridge) return -ENOMEM; rc = pci_host_bridge_priv(bridge); rc->pcie.dev = dev; rc->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &rc->pcie; cdns_plat_pcie->is_rc = is_rc; ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie); if (ret) { dev_err(dev, "failed to init phy\n"); return ret; } pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync() failed\n"); goto err_get_sync; } ret = cdns_pcie_host_setup(rc); if (ret) goto err_init; } else { if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP)) return -ENODEV; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; ep->pcie.dev = dev; ep->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &ep->pcie; cdns_plat_pcie->is_rc = is_rc; ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie); if (ret) { dev_err(dev, "failed to init phy\n"); return ret; } pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync() failed\n"); goto err_get_sync; } ret = cdns_pcie_ep_setup(ep); if (ret) goto err_init; } return 0; err_init: err_get_sync: pm_runtime_put_sync(dev); pm_runtime_disable(dev); cdns_pcie_disable_phy(cdns_plat_pcie->pcie); phy_count = cdns_plat_pcie->pcie->phy_count; while (phy_count--) device_link_del(cdns_plat_pcie->pcie->link[phy_count]); return 0; } static void cdns_plat_pcie_shutdown(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct cdns_pcie *pcie = dev_get_drvdata(dev); int ret; ret = pm_runtime_put_sync(dev); if (ret < 0) dev_dbg(dev, "pm_runtime_put_sync failed\n"); pm_runtime_disable(dev); cdns_pcie_disable_phy(pcie); } static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = { .is_rc = true, }; static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = { .is_rc = false, }; static const struct of_device_id 
cdns_plat_pcie_of_match[] = { { .compatible = "cdns,cdns-pcie-host", .data = &cdns_plat_pcie_host_of_data, }, { .compatible = "cdns,cdns-pcie-ep", .data = &cdns_plat_pcie_ep_of_data, }, {}, }; static struct platform_driver cdns_plat_pcie_driver = { .driver = { .name = "cdns-pcie", .of_match_table = cdns_plat_pcie_of_match, .pm = &cdns_pcie_pm_ops, }, .probe = cdns_plat_pcie_probe, .shutdown = cdns_plat_pcie_shutdown, }; builtin_platform_driver(cdns_plat_pcie_driver);
linux-master
drivers/pci/controller/cadence/pcie-cadence-plat.c
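The only platform-specific hook in the glue driver above is cdns_plat_cpu_addr_fixup(), which masks the CPU address down to the bus address before it is programmed into the outbound ATU. A minimal standalone sketch of that masking, using the driver's CDNS_PLAT_CPU_TO_BUS_ADDR constant, follows; the example address is made up.

#include <stdio.h>
#include <stdint.h>

#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFFULL

static uint64_t cpu_addr_fixup(uint64_t cpu_addr)
{
	/* Only the low 28 bits are kept as the PCIe bus address. */
	return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR;
}

int main(void)
{
	uint64_t cpu_addr = 0x402345678ULL;	/* made-up outbound window address */

	printf("cpu 0x%llx -> bus 0x%llx\n",
	       (unsigned long long)cpu_addr,
	       (unsigned long long)cpu_addr_fixup(cpu_addr));
	return 0;
}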
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2017 Cadence // Cadence PCIe endpoint controller driver. // Author: Cyrille Pitchen <[email protected]> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include "pcie-cadence.h" #define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */ #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3 static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn) { u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET; u32 first_vf_offset, stride; if (vfn == 0) return fn; first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET); stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE); fn = fn + first_vf_offset + ((vfn - 1) * stride); return fn; } static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_header *hdr) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET; struct cdns_pcie *pcie = &ep->pcie; u32 reg; if (vfn > 1) { dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n"); return -EINVAL; } else if (vfn == 1) { reg = cap + PCI_SRIOV_VF_DID; cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid); return 0; } cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid); cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid); cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code); cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE, hdr->subclass_code | hdr->baseclass_code << 8); cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE, hdr->cache_line_size); cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id); cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin); /* * Vendor ID can only be modified from function 0, all other functions * use the same vendor ID as function 0. */ if (fn == 0) { /* Update the vendor IDs. */ u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) | CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id); cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); } return 0; } static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; dma_addr_t bar_phys = epf_bar->phys_addr; enum pci_barno bar = epf_bar->barno; int flags = epf_bar->flags; u32 addr0, addr1, reg, cfg, b, aperture, ctrl; u64 sz; /* BAR size is 2^(aperture + 7) */ sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE); /* * roundup_pow_of_two() returns an unsigned long, which is not suited * for 64bit values. */ sz = 1ULL << fls64(sz - 1); aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... 
*/ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS; } else { bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); bool is_64bits = sz > SZ_2G; if (is_64bits && (bar & 1)) return -EINVAL; if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; if (is_64bits && is_prefetch) ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; else if (is_prefetch) ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; else if (is_64bits) ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS; else ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS; } addr0 = lower_32_bits(bar_phys); addr1 = upper_32_bits(bar_phys); if (vfn == 1) reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn); else reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn); b = (bar < BAR_4) ? bar : bar - BAR_4; if (vfn == 0 || vfn == 1) { cfg = cdns_pcie_readl(pcie, reg); cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); cdns_pcie_writel(pcie, reg, cfg); } fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), addr1); if (vfn > 0) epf = &epf->epf[vfn - 1]; epf->epf_bar[bar] = epf_bar; return 0; } static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; enum pci_barno bar = epf_bar->barno; u32 reg, cfg, b, ctrl; if (vfn == 1) reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn); else reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn); b = (bar < BAR_4) ? 
bar : bar - BAR_4; if (vfn == 0 || vfn == 1) { ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; cfg = cdns_pcie_readl(pcie, reg); cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); cdns_pcie_writel(pcie, reg, cfg); } fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); if (vfn > 0) epf = &epf->epf[vfn - 1]; epf->epf_bar[bar] = NULL; } static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr, u64 pci_addr, size_t size) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 r; r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); if (r >= ep->max_regions - 1) { dev_err(&epc->dev, "no free outbound region\n"); return -EINVAL; } fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size); set_bit(r, &ep->ob_region_map); ep->ob_addr[r] = addr; return 0; } static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 r; for (r = 0; r < ep->max_regions - 1; r++) if (ep->ob_addr[r] == addr) break; if (r == ep->max_regions - 1) return; cdns_pcie_reset_outbound_region(pcie, r); ep->ob_addr[r] = 0; clear_bit(r, &ep->ob_region_map); } static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; u16 flags; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); /* * Set the Multiple Message Capable bitfield into the Message Control * register. */ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1); flags |= PCI_MSI_FLAGS_64BIT; flags &= ~PCI_MSI_FLAGS_MASKBIT; cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags); return 0; } static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; u16 flags, mme; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); /* Validate that the MSI feature is actually enabled. */ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); if (!(flags & PCI_MSI_FLAGS_ENABLE)) return -EINVAL; /* * Get the Multiple Message Enable bitfield from the Message Control * register. 
*/ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; return mme; } static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; u32 val, reg; func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no); reg = cap + PCI_MSIX_FLAGS; val = cdns_pcie_ep_fn_readw(pcie, func_no, reg); if (!(val & PCI_MSIX_FLAGS_ENABLE)) return -EINVAL; val &= PCI_MSIX_FLAGS_QSIZE; return val; } static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, u16 interrupts, enum pci_barno bir, u32 offset) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; u32 val, reg; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); reg = cap + PCI_MSIX_FLAGS; val = cdns_pcie_ep_fn_readw(pcie, fn, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; cdns_pcie_ep_fn_writew(pcie, fn, reg, val); /* Set MSIX BAR and offset */ reg = cap + PCI_MSIX_TABLE; val = offset | bir; cdns_pcie_ep_fn_writel(pcie, fn, reg, val); /* Set PBA BAR and offset. BAR must match MSIX BAR */ reg = cap + PCI_MSIX_PBA; val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; cdns_pcie_ep_fn_writel(pcie, fn, reg, val); return 0; } static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; unsigned long flags; u32 offset; u16 status; u8 msg_code; intx &= 3; /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || ep->irq_pci_fn != fn)) { /* First region was reserved for IRQ writes. */ cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0, ep->irq_phys_addr); ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; ep->irq_pci_fn = fn; } if (is_asserted) { ep->irq_pending |= BIT(intx); msg_code = MSG_CODE_ASSERT_INTA + intx; } else { ep->irq_pending &= ~BIT(intx); msg_code = MSG_CODE_DEASSERT_INTA + intx; } spin_lock_irqsave(&ep->lock, flags); status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { status ^= PCI_STATUS_INTERRUPT; cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); } spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | CDNS_PCIE_MSG_NO_DATA; writel(0, ep->irq_cpu_addr + offset); } static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, u8 intx) { u16 cmd; cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND); if (cmd & PCI_COMMAND_INTX_DISABLE) return -EINVAL; cdns_pcie_ep_assert_intx(ep, fn, intx, true); /* * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() */ mdelay(1); cdns_pcie_ep_assert_intx(ep, fn, intx, false); return 0; } static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, u8 interrupt_num) { struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; u16 flags, mme, data, data_mask; u8 msi_count; u64 pci_addr, pci_addr_mask = 0xff; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); /* Check whether the MSI feature has been enabled by the PCI host. 
*/ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); if (!(flags & PCI_MSI_FLAGS_ENABLE)) return -EINVAL; /* Get the number of enabled MSIs */ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; msi_count = 1 << mme; if (!interrupt_num || interrupt_num > msi_count) return -EINVAL; /* Compute the data value to be written. */ data_mask = msi_count - 1; data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); /* Get the PCI address where to write the data into. */ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); pci_addr <<= 32; pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); pci_addr &= GENMASK_ULL(63, 2); /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn)) { /* First region was reserved for IRQ writes. */ cdns_pcie_set_outbound_region(pcie, 0, fn, 0, false, ep->irq_phys_addr, pci_addr & ~pci_addr_mask, pci_addr_mask + 1); ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); ep->irq_pci_fn = fn; } writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); return 0; } static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; struct cdns_pcie *pcie = &ep->pcie; u64 pci_addr, pci_addr_mask = 0xff; u16 flags, mme, data, data_mask; u8 msi_count; int ret; int i; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); /* Check whether the MSI feature has been enabled by the PCI host. */ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); if (!(flags & PCI_MSI_FLAGS_ENABLE)) return -EINVAL; /* Get the number of enabled MSIs */ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; msi_count = 1 << mme; if (!interrupt_num || interrupt_num > msi_count) return -EINVAL; /* Compute the data value to be written. */ data_mask = msi_count - 1; data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); data = data & ~data_mask; /* Get the PCI address where to write the data into. */ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); pci_addr <<= 32; pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); pci_addr &= GENMASK_ULL(63, 2); for (i = 0; i < interrupt_num; i++) { ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr, pci_addr & ~pci_addr_mask, entry_size); if (ret) return ret; addr = addr + entry_size; } *msi_data = data; *msi_addr_offset = pci_addr & pci_addr_mask; return 0; } static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, u16 interrupt_num) { u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; u32 tbl_offset, msg_data, reg; struct cdns_pcie *pcie = &ep->pcie; struct pci_epf_msix_tbl *msix_tbl; struct cdns_pcie_epf *epf; u64 pci_addr_mask = 0xff; u64 msg_addr; u16 flags; u8 bir; epf = &ep->epf[fn]; if (vfn > 0) epf = &epf->epf[vfn - 1]; fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); /* Check whether the MSI-X feature has been enabled by the PCI host. 
*/ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); if (!(flags & PCI_MSIX_FLAGS_ENABLE)) return -EINVAL; reg = cap + PCI_MSIX_TABLE; tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg); bir = tbl_offset & PCI_MSIX_TABLE_BIR; tbl_offset &= PCI_MSIX_TABLE_OFFSET; msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; msg_data = msix_tbl[(interrupt_num - 1)].msg_data; /* Set the outbound region if needed. */ if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn) { /* First region was reserved for IRQ writes. */ cdns_pcie_set_outbound_region(pcie, 0, fn, 0, false, ep->irq_phys_addr, msg_addr & ~pci_addr_mask, pci_addr_mask + 1); ep->irq_pci_addr = (msg_addr & ~pci_addr_mask); ep->irq_pci_fn = fn; } writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask)); return 0; } static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, enum pci_epc_irq_type type, u16 interrupt_num) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; struct device *dev = pcie->dev; switch (type) { case PCI_EPC_IRQ_LEGACY: if (vfn > 0) { dev_err(dev, "Cannot raise legacy interrupts for VF\n"); return -EINVAL; } return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0); case PCI_EPC_IRQ_MSI: return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num); case PCI_EPC_IRQ_MSIX: return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num); default: break; } return -EINVAL; } static int cdns_pcie_ep_start(struct pci_epc *epc) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; struct device *dev = pcie->dev; int max_epfs = sizeof(epc->function_num_map) * 8; int ret, value, epf; /* * BIT(0) is hardwired to 1, hence function 0 is always enabled * and can't be disabled anyway. 
*/ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map); if (ep->quirk_disable_flr) { for (epf = 0; epf < max_epfs; epf++) { if (!(epc->function_num_map & BIT(epf))) continue; value = cdns_pcie_ep_fn_readl(pcie, epf, CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET + PCI_EXP_DEVCAP); value &= ~PCI_EXP_DEVCAP_FLR; cdns_pcie_ep_fn_writel(pcie, epf, CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET + PCI_EXP_DEVCAP, value); } } ret = cdns_pcie_start_link(pcie); if (ret) { dev_err(dev, "Failed to start link\n"); return ret; } return 0; } static const struct pci_epc_features cdns_pcie_epc_vf_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, .align = 65536, }; static const struct pci_epc_features cdns_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, .msix_capable = true, .align = 256, }; static const struct pci_epc_features* cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { if (!vfunc_no) return &cdns_pcie_epc_features; return &cdns_pcie_epc_vf_features; } static const struct pci_epc_ops cdns_pcie_epc_ops = { .write_header = cdns_pcie_ep_write_header, .set_bar = cdns_pcie_ep_set_bar, .clear_bar = cdns_pcie_ep_clear_bar, .map_addr = cdns_pcie_ep_map_addr, .unmap_addr = cdns_pcie_ep_unmap_addr, .set_msi = cdns_pcie_ep_set_msi, .get_msi = cdns_pcie_ep_get_msi, .set_msix = cdns_pcie_ep_set_msix, .get_msix = cdns_pcie_ep_get_msix, .raise_irq = cdns_pcie_ep_raise_irq, .map_msi_irq = cdns_pcie_ep_map_msi_irq, .start = cdns_pcie_ep_start, .get_features = cdns_pcie_ep_get_features, }; int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) { struct device *dev = ep->pcie.dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; struct cdns_pcie *pcie = &ep->pcie; struct cdns_pcie_epf *epf; struct resource *res; struct pci_epc *epc; int ret; int i; pcie->is_rc = false; pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); if (IS_ERR(pcie->reg_base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(pcie->reg_base); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); if (!res) { dev_err(dev, "missing \"mem\"\n"); return -EINVAL; } pcie->mem_res = res; ep->max_regions = CDNS_PCIE_MAX_OB; of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions); ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr), GFP_KERNEL); if (!ep->ob_addr) return -ENOMEM; /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). 
*/ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0)); epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); if (IS_ERR(epc)) { dev_err(dev, "failed to create epc device\n"); return PTR_ERR(epc); } epc_set_drvdata(epc, ep); if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) epc->max_functions = 1; ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf), GFP_KERNEL); if (!ep->epf) return -ENOMEM; epc->max_vfs = devm_kcalloc(dev, epc->max_functions, sizeof(*epc->max_vfs), GFP_KERNEL); if (!epc->max_vfs) return -ENOMEM; ret = of_property_read_u8_array(np, "max-virtual-functions", epc->max_vfs, epc->max_functions); if (ret == 0) { for (i = 0; i < epc->max_functions; i++) { epf = &ep->epf[i]; if (epc->max_vfs[i] == 0) continue; epf->epf = devm_kcalloc(dev, epc->max_vfs[i], sizeof(*ep->epf), GFP_KERNEL); if (!epf->epf) return -ENOMEM; } } ret = pci_epc_mem_init(epc, pcie->mem_res->start, resource_size(pcie->mem_res), PAGE_SIZE); if (ret < 0) { dev_err(dev, "failed to initialize the memory space\n"); return ret; } ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, SZ_128K); if (!ep->irq_cpu_addr) { dev_err(dev, "failed to reserve memory space for MSI\n"); ret = -ENOMEM; goto free_epc_mem; } ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; /* Reserve region 0 for IRQs */ set_bit(0, &ep->ob_region_map); if (ep->quirk_detect_quiet_flag) cdns_pcie_detect_quiet_min_delay_set(&ep->pcie); spin_lock_init(&ep->lock); return 0; free_epc_mem: pci_epc_mem_exit(epc); return ret; }
linux-master
drivers/pci/controller/cadence/pcie-cadence-ep.c
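The following is a minimal, hedged sketch of how an endpoint *function* driver might exercise the controller ops defined above through the generic pci_epc_* wrappers (write_header, set_bar, raise_irq). Everything prefixed demo_ is hypothetical and not part of the kernel; the sketch assumes the usual <linux/pci-epc.h>, <linux/pci-epf.h>, <linux/pci_ids.h> and <linux/sizes.h> includes.

/* Hypothetical EPF-side sketch: the pci_epc_* calls below dispatch to the
 * cdns_pcie_ep_* ops registered in cdns_pcie_epc_ops above.
 */
static struct pci_epf_header demo_header = {
	.vendorid	= PCI_ANY_ID,		/* placeholder IDs */
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static int demo_epf_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *bar0 = &epf->bar[BAR_0];
	int ret;

	/* Program the config-space header of this (virtual) function. */
	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   &demo_header);
	if (ret)
		return ret;

	/* Back BAR0 with 64 KiB of coherent memory and advertise it. */
	if (!pci_epf_alloc_space(epf, SZ_64K, BAR_0, SZ_64K,
				 PRIMARY_INTERFACE))
		return -ENOMEM;

	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar0);
	if (ret)
		return ret;

	/* Signal the host with MSI vector 1; this only succeeds once the
	 * host has actually enabled MSI for the function.
	 */
	return pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				 PCI_EPC_IRQ_MSI, 1);
}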
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2017 Cadence // Cadence PCIe controller driver. // Author: Cyrille Pitchen <[email protected]> #include <linux/kernel.h> #include <linux/of.h> #include "pcie-cadence.h" void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie) { u32 delay = 0x3; u32 ltssm_control_cap; /* * Set the LTSSM Detect Quiet state min. delay to 2ms. */ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP); ltssm_control_cap = ((ltssm_control_cap & ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) | CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay)); cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap); } void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, bool is_io, u64 cpu_addr, u64 pci_addr, size_t size) { /* * roundup_pow_of_two() returns an unsigned long, which is not suited * for 64bit values. */ u64 sz = 1ULL << fls64(size - 1); int nbits = ilog2(sz); u32 addr0, addr1, desc0, desc1; if (nbits < 8) nbits = 8; /* Set the PCI address */ addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) | (lower_32_bits(pci_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(pci_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1); /* Set the PCIe header descriptor */ if (is_io) desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO; else desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM; desc1 = 0; /* * Whatever Bit [23] is set or not inside DESC0 register of the outbound * PCIe descriptor, the PCI function number must be set into * Bits [26:24] of DESC0 anyway. * * In Root Complex mode, the function number is always 0 but in Endpoint * mode, the PCIe controller may support more than one function. This * function number needs to be set properly into the outbound PCIe * descriptor. * * Besides, setting Bit [23] is mandatory when in Root Complex mode: * then the driver must provide the bus, resp. device, number in * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function * number, the device number is always 0 in Root Complex mode. * * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence * the PCIe controller will use the captured values for the bus and * device numbers. */ if (pcie->is_rc) { /* The device and function numbers are always 0. */ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); } else { /* * Use captured values for bus and device numbers but still * need to set the function number. */ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); } cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); /* Set the CPU address */ if (pcie->ops->cpu_addr_fixup) cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); } void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, u64 cpu_addr) { u32 addr0, addr1, desc0, desc1; desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG; desc1 = 0; /* See cdns_pcie_set_outbound_region() comments above. 
*/ if (pcie->is_rc) { desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); } else { desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); } /* Set the CPU address */ if (pcie->ops->cpu_addr_fixup) cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); } void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) { cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); } void cdns_pcie_disable_phy(struct cdns_pcie *pcie) { int i = pcie->phy_count; while (i--) { phy_power_off(pcie->phy[i]); phy_exit(pcie->phy[i]); } } int cdns_pcie_enable_phy(struct cdns_pcie *pcie) { int ret; int i; for (i = 0; i < pcie->phy_count; i++) { ret = phy_init(pcie->phy[i]); if (ret < 0) goto err_phy; ret = phy_power_on(pcie->phy[i]); if (ret < 0) { phy_exit(pcie->phy[i]); goto err_phy; } } return 0; err_phy: while (--i >= 0) { phy_power_off(pcie->phy[i]); phy_exit(pcie->phy[i]); } return ret; } int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) { struct device_node *np = dev->of_node; int phy_count; struct phy **phy; struct device_link **link; int i; int ret; const char *name; phy_count = of_property_count_strings(np, "phy-names"); if (phy_count < 1) { dev_err(dev, "no phy-names. PHY will not be initialized\n"); pcie->phy_count = 0; return 0; } phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; for (i = 0; i < phy_count; i++) { of_property_read_string_index(np, "phy-names", i, &name); phy[i] = devm_phy_get(dev, name); if (IS_ERR(phy[i])) { ret = PTR_ERR(phy[i]); goto err_phy; } link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); if (!link[i]) { devm_phy_put(dev, phy[i]); ret = -EINVAL; goto err_phy; } } pcie->phy_count = phy_count; pcie->phy = phy; pcie->link = link; ret = cdns_pcie_enable_phy(pcie); if (ret) goto err_phy; return 0; err_phy: while (--i >= 0) { device_link_del(link[i]); devm_phy_put(dev, phy[i]); } return ret; } static int cdns_pcie_suspend_noirq(struct device *dev) { struct cdns_pcie *pcie = dev_get_drvdata(dev); cdns_pcie_disable_phy(pcie); return 0; } static int cdns_pcie_resume_noirq(struct device *dev) { struct cdns_pcie *pcie = dev_get_drvdata(dev); int ret; ret = cdns_pcie_enable_phy(pcie); if (ret) { dev_err(dev, "failed to enable phy\n"); return ret; } return 0; } const struct dev_pm_ops cdns_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, cdns_pcie_resume_noirq) };
linux-master
drivers/pci/controller/cadence/pcie-cadence.c
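As an aside, the "nbits" value programmed into the outbound-region ADDR0 registers by cdns_pcie_set_outbound_region() is just the log2 of the region size rounded up to a power of two, clamped to 8 (256 bytes). The snippet below is an illustration of that math only, written as ordinary userspace C rather than kernel code; it is not part of the driver.

/* Illustration only: reproduce the window-size math from
 * cdns_pcie_set_outbound_region() in plain C.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int ob_region_nbits(uint64_t size)
{
	unsigned int nbits = 0;

	/* Equivalent to ilog2(1ULL << fls64(size - 1)) in the driver. */
	while ((1ULL << nbits) < size)
		nbits++;

	return nbits < 8 ? 8 : nbits;	/* hardware minimum is 256 bytes */
}

int main(void)
{
	/* 4 KiB -> 12, 100 bytes -> 8 (clamped), 3 MiB -> 22 */
	printf("%u %u %u\n", ob_region_nbits(4096),
	       ob_region_nbits(100), ob_region_nbits(3 << 20));
	return 0;
}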
// SPDX-License-Identifier: GPL-2.0 /* * PCI Endpoint *Function* (EPF) library * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pci-epc.h> #include <linux/pci-epf.h> #include <linux/pci-ep-cfs.h> static DEFINE_MUTEX(pci_epf_mutex); static struct bus_type pci_epf_bus_type; static const struct device_type pci_epf_type; /** * pci_epf_unbind() - Notify the function driver that the binding between the * EPF device and EPC device has been lost * @epf: the EPF device which has lost the binding with the EPC device * * Invoke to notify the function driver that the binding between the EPF device * and EPC device has been lost. */ void pci_epf_unbind(struct pci_epf *epf) { struct pci_epf *epf_vf; if (!epf->driver) { dev_WARN(&epf->dev, "epf device not bound to driver\n"); return; } mutex_lock(&epf->lock); list_for_each_entry(epf_vf, &epf->pci_vepf, list) { if (epf_vf->is_bound) epf_vf->driver->ops->unbind(epf_vf); } if (epf->is_bound) epf->driver->ops->unbind(epf); mutex_unlock(&epf->lock); module_put(epf->driver->owner); } EXPORT_SYMBOL_GPL(pci_epf_unbind); /** * pci_epf_bind() - Notify the function driver that the EPF device has been * bound to a EPC device * @epf: the EPF device which has been bound to the EPC device * * Invoke to notify the function driver that it has been bound to a EPC device */ int pci_epf_bind(struct pci_epf *epf) { struct device *dev = &epf->dev; struct pci_epf *epf_vf; u8 func_no, vfunc_no; struct pci_epc *epc; int ret; if (!epf->driver) { dev_WARN(dev, "epf device not bound to driver\n"); return -EINVAL; } if (!try_module_get(epf->driver->owner)) return -EAGAIN; mutex_lock(&epf->lock); list_for_each_entry(epf_vf, &epf->pci_vepf, list) { vfunc_no = epf_vf->vfunc_no; if (vfunc_no < 1) { dev_err(dev, "Invalid virtual function number\n"); ret = -EINVAL; goto ret; } epc = epf->epc; func_no = epf->func_no; if (!IS_ERR_OR_NULL(epc)) { if (!epc->max_vfs) { dev_err(dev, "No support for virt function\n"); ret = -EINVAL; goto ret; } if (vfunc_no > epc->max_vfs[func_no]) { dev_err(dev, "PF%d: Exceeds max vfunc number\n", func_no); ret = -EINVAL; goto ret; } } epc = epf->sec_epc; func_no = epf->sec_epc_func_no; if (!IS_ERR_OR_NULL(epc)) { if (!epc->max_vfs) { dev_err(dev, "No support for virt function\n"); ret = -EINVAL; goto ret; } if (vfunc_no > epc->max_vfs[func_no]) { dev_err(dev, "PF%d: Exceeds max vfunc number\n", func_no); ret = -EINVAL; goto ret; } } epf_vf->func_no = epf->func_no; epf_vf->sec_epc_func_no = epf->sec_epc_func_no; epf_vf->epc = epf->epc; epf_vf->sec_epc = epf->sec_epc; ret = epf_vf->driver->ops->bind(epf_vf); if (ret) goto ret; epf_vf->is_bound = true; } ret = epf->driver->ops->bind(epf); if (ret) goto ret; epf->is_bound = true; mutex_unlock(&epf->lock); return 0; ret: mutex_unlock(&epf->lock); pci_epf_unbind(epf); return ret; } EXPORT_SYMBOL_GPL(pci_epf_bind); /** * pci_epf_add_vepf() - associate virtual EP function to physical EP function * @epf_pf: the physical EP function to which the virtual EP function should be * associated * @epf_vf: the virtual EP function to be added * * A physical endpoint function can be associated with multiple virtual * endpoint functions. Invoke pci_epf_add_epf() to add a virtual PCI endpoint * function to a physical PCI endpoint function. 
*/ int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf) { u32 vfunc_no; if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf)) return -EINVAL; if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf) return -EBUSY; if (epf_pf->sec_epc || epf_vf->sec_epc) return -EBUSY; mutex_lock(&epf_pf->lock); vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map, BITS_PER_LONG); if (vfunc_no >= BITS_PER_LONG) { mutex_unlock(&epf_pf->lock); return -EINVAL; } set_bit(vfunc_no, &epf_pf->vfunction_num_map); epf_vf->vfunc_no = vfunc_no; epf_vf->epf_pf = epf_pf; epf_vf->is_vf = true; list_add_tail(&epf_vf->list, &epf_pf->pci_vepf); mutex_unlock(&epf_pf->lock); return 0; } EXPORT_SYMBOL_GPL(pci_epf_add_vepf); /** * pci_epf_remove_vepf() - remove virtual EP function from physical EP function * @epf_pf: the physical EP function from which the virtual EP function should * be removed * @epf_vf: the virtual EP function to be removed * * Invoke to remove a virtual endpoint function from the physical endpoint * function. */ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf) { if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf)) return; mutex_lock(&epf_pf->lock); clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map); list_del(&epf_vf->list); mutex_unlock(&epf_pf->lock); } EXPORT_SYMBOL_GPL(pci_epf_remove_vepf); /** * pci_epf_free_space() - free the allocated PCI EPF register space * @epf: the EPF device from whom to free the memory * @addr: the virtual address of the PCI EPF register space * @bar: the BAR number corresponding to the register space * @type: Identifies if the allocated space is for primary EPC or secondary EPC * * Invoke to free the allocated PCI EPF register space. */ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, enum pci_epc_interface_type type) { struct device *dev; struct pci_epf_bar *epf_bar; struct pci_epc *epc; if (!addr) return; if (type == PRIMARY_INTERFACE) { epc = epf->epc; epf_bar = epf->bar; } else { epc = epf->sec_epc; epf_bar = epf->sec_epc_bar; } dev = epc->dev.parent; dma_free_coherent(dev, epf_bar[bar].size, addr, epf_bar[bar].phys_addr); epf_bar[bar].phys_addr = 0; epf_bar[bar].addr = NULL; epf_bar[bar].size = 0; epf_bar[bar].barno = 0; epf_bar[bar].flags = 0; } EXPORT_SYMBOL_GPL(pci_epf_free_space); /** * pci_epf_alloc_space() - allocate memory for the PCI EPF register space * @epf: the EPF device to whom allocate the memory * @size: the size of the memory that has to be allocated * @bar: the BAR number corresponding to the allocated register space * @align: alignment size for the allocation region * @type: Identifies if the allocation is for primary EPC or secondary EPC * * Invoke to allocate memory for the PCI EPF register space. */ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, size_t align, enum pci_epc_interface_type type) { struct pci_epf_bar *epf_bar; dma_addr_t phys_addr; struct pci_epc *epc; struct device *dev; void *space; if (size < 128) size = 128; if (align) size = ALIGN(size, align); else size = roundup_pow_of_two(size); if (type == PRIMARY_INTERFACE) { epc = epf->epc; epf_bar = epf->bar; } else { epc = epf->sec_epc; epf_bar = epf->sec_epc_bar; } dev = epc->dev.parent; space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); if (!space) { dev_err(dev, "failed to allocate mem space\n"); return NULL; } epf_bar[bar].phys_addr = phys_addr; epf_bar[bar].addr = space; epf_bar[bar].size = size; epf_bar[bar].barno = bar; epf_bar[bar].flags |= upper_32_bits(size) ? 
PCI_BASE_ADDRESS_MEM_TYPE_64 : PCI_BASE_ADDRESS_MEM_TYPE_32; return space; } EXPORT_SYMBOL_GPL(pci_epf_alloc_space); static void pci_epf_remove_cfs(struct pci_epf_driver *driver) { struct config_group *group, *tmp; if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS)) return; mutex_lock(&pci_epf_mutex); list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) pci_ep_cfs_remove_epf_group(group); list_del(&driver->epf_group); mutex_unlock(&pci_epf_mutex); } /** * pci_epf_unregister_driver() - unregister the PCI EPF driver * @driver: the PCI EPF driver that has to be unregistered * * Invoke to unregister the PCI EPF driver. */ void pci_epf_unregister_driver(struct pci_epf_driver *driver) { pci_epf_remove_cfs(driver); driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(pci_epf_unregister_driver); static int pci_epf_add_cfs(struct pci_epf_driver *driver) { struct config_group *group; const struct pci_epf_device_id *id; if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS)) return 0; INIT_LIST_HEAD(&driver->epf_group); id = driver->id_table; while (id->name[0]) { group = pci_ep_cfs_add_epf_group(id->name); if (IS_ERR(group)) { pci_epf_remove_cfs(driver); return PTR_ERR(group); } mutex_lock(&pci_epf_mutex); list_add_tail(&group->group_entry, &driver->epf_group); mutex_unlock(&pci_epf_mutex); id++; } return 0; } /** * __pci_epf_register_driver() - register a new PCI EPF driver * @driver: structure representing PCI EPF driver * @owner: the owner of the module that registers the PCI EPF driver * * Invoke to register a new PCI EPF driver. */ int __pci_epf_register_driver(struct pci_epf_driver *driver, struct module *owner) { int ret; if (!driver->ops) return -EINVAL; if (!driver->ops->bind || !driver->ops->unbind) return -EINVAL; driver->driver.bus = &pci_epf_bus_type; driver->driver.owner = owner; ret = driver_register(&driver->driver); if (ret) return ret; pci_epf_add_cfs(driver); return 0; } EXPORT_SYMBOL_GPL(__pci_epf_register_driver); /** * pci_epf_destroy() - destroy the created PCI EPF device * @epf: the PCI EPF device that has to be destroyed. * * Invoke to destroy the PCI EPF device created by invoking pci_epf_create(). */ void pci_epf_destroy(struct pci_epf *epf) { device_unregister(&epf->dev); } EXPORT_SYMBOL_GPL(pci_epf_destroy); /** * pci_epf_create() - create a new PCI EPF device * @name: the name of the PCI EPF device. This name will be used to bind the * EPF device to a EPF driver * * Invoke to create a new PCI EPF device by providing the name of the function * device. */ struct pci_epf *pci_epf_create(const char *name) { int ret; struct pci_epf *epf; struct device *dev; int len; epf = kzalloc(sizeof(*epf), GFP_KERNEL); if (!epf) return ERR_PTR(-ENOMEM); len = strchrnul(name, '.') - name; epf->name = kstrndup(name, len, GFP_KERNEL); if (!epf->name) { kfree(epf); return ERR_PTR(-ENOMEM); } /* VFs are numbered starting with 1. 
So set BIT(0) by default */ epf->vfunction_num_map = 1; INIT_LIST_HEAD(&epf->pci_vepf); dev = &epf->dev; device_initialize(dev); dev->bus = &pci_epf_bus_type; dev->type = &pci_epf_type; mutex_init(&epf->lock); ret = dev_set_name(dev, "%s", name); if (ret) { put_device(dev); return ERR_PTR(ret); } ret = device_add(dev); if (ret) { put_device(dev); return ERR_PTR(ret); } return epf; } EXPORT_SYMBOL_GPL(pci_epf_create); static void pci_epf_dev_release(struct device *dev) { struct pci_epf *epf = to_pci_epf(dev); kfree(epf->name); kfree(epf); } static const struct device_type pci_epf_type = { .release = pci_epf_dev_release, }; static const struct pci_epf_device_id * pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf) { while (id->name[0]) { if (strcmp(epf->name, id->name) == 0) return id; id++; } return NULL; } static int pci_epf_device_match(struct device *dev, struct device_driver *drv) { struct pci_epf *epf = to_pci_epf(dev); struct pci_epf_driver *driver = to_pci_epf_driver(drv); if (driver->id_table) return !!pci_epf_match_id(driver->id_table, epf); return !strcmp(epf->name, drv->name); } static int pci_epf_device_probe(struct device *dev) { struct pci_epf *epf = to_pci_epf(dev); struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver); if (!driver->probe) return -ENODEV; epf->driver = driver; return driver->probe(epf, pci_epf_match_id(driver->id_table, epf)); } static void pci_epf_device_remove(struct device *dev) { struct pci_epf *epf = to_pci_epf(dev); struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver); if (driver->remove) driver->remove(epf); epf->driver = NULL; } static struct bus_type pci_epf_bus_type = { .name = "pci-epf", .match = pci_epf_device_match, .probe = pci_epf_device_probe, .remove = pci_epf_device_remove, }; static int __init pci_epf_init(void) { int ret; ret = bus_register(&pci_epf_bus_type); if (ret) { pr_err("failed to register pci epf bus --> %d\n", ret); return ret; } return 0; } module_init(pci_epf_init); static void __exit pci_epf_exit(void) { bus_unregister(&pci_epf_bus_type); } module_exit(pci_epf_exit); MODULE_DESCRIPTION("PCI EPF Library"); MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
linux-master
drivers/pci/endpoint/pci-epf-core.c
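To show how this library is consumed, here is a minimal, hypothetical function-driver skeleton. Only the pci_epf_* entry points and structures come from the code above; everything prefixed demo_ is made up for illustration and the bind/unbind bodies are intentionally empty sketches.

#include <linux/module.h>
#include <linux/pci-epf.h>

static int demo_epf_bind(struct pci_epf *epf)
{
	/* The controller (epf->epc) is known here: allocate BAR space,
	 * program the header and interrupts via the pci_epc_* API.
	 */
	return 0;
}

static void demo_epf_unbind(struct pci_epf *epf)
{
	/* Tear down whatever bind() set up (clear BARs, free space, ...). */
}

static int demo_epf_probe(struct pci_epf *epf,
			  const struct pci_epf_device_id *id)
{
	return 0;	/* per-device private data would be allocated here */
}

static const struct pci_epf_device_id demo_epf_ids[] = {
	{ .name = "pci_epf_demo" },
	{ },
};

static struct pci_epf_ops demo_epf_ops = {
	.bind	= demo_epf_bind,
	.unbind	= demo_epf_unbind,
};

static struct pci_epf_driver demo_epf_driver = {
	.driver.name	= "pci_epf_demo",
	.probe		= demo_epf_probe,
	.id_table	= demo_epf_ids,
	.ops		= &demo_epf_ops,
};

static int __init demo_epf_init(void)
{
	/* Expands to __pci_epf_register_driver(&demo_epf_driver, THIS_MODULE) */
	return pci_epf_register_driver(&demo_epf_driver);
}
module_init(demo_epf_init);

static void __exit demo_epf_exit(void)
{
	pci_epf_unregister_driver(&demo_epf_driver);
}
module_exit(demo_epf_exit);

MODULE_LICENSE("GPL");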
// SPDX-License-Identifier: GPL-2.0 /* * PCI Endpoint *Controller* Address Space Management * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/io.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci-epc.h> /** * pci_epc_mem_get_order() - determine the allocation order of a memory size * @mem: address space of the endpoint controller * @size: the size for which to get the order * * Reimplement get_order() for mem->page_size since the generic get_order * always gets order with a constant PAGE_SIZE. */ static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size) { int order; unsigned int page_shift = ilog2(mem->window.page_size); size--; size >>= page_shift; #if BITS_PER_LONG == 32 order = fls(size); #else order = fls64(size); #endif return order; } /** * pci_epc_multi_mem_init() - initialize the pci_epc_mem structure * @epc: the EPC device that invoked pci_epc_mem_init * @windows: pointer to windows supported by the device * @num_windows: number of windows device supports * * Invoke to initialize the pci_epc_mem structure used by the * endpoint functions to allocate mapped PCI address. */ int pci_epc_multi_mem_init(struct pci_epc *epc, struct pci_epc_mem_window *windows, unsigned int num_windows) { struct pci_epc_mem *mem = NULL; unsigned long *bitmap = NULL; unsigned int page_shift; size_t page_size; int bitmap_size; int pages; int ret; int i; epc->num_windows = 0; if (!windows || !num_windows) return -EINVAL; epc->windows = kcalloc(num_windows, sizeof(*epc->windows), GFP_KERNEL); if (!epc->windows) return -ENOMEM; for (i = 0; i < num_windows; i++) { page_size = windows[i].page_size; if (page_size < PAGE_SIZE) page_size = PAGE_SIZE; page_shift = ilog2(page_size); pages = windows[i].size >> page_shift; bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) { ret = -ENOMEM; i--; goto err_mem; } bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!bitmap) { ret = -ENOMEM; kfree(mem); i--; goto err_mem; } mem->window.phys_base = windows[i].phys_base; mem->window.size = windows[i].size; mem->window.page_size = page_size; mem->bitmap = bitmap; mem->pages = pages; mutex_init(&mem->lock); epc->windows[i] = mem; } epc->mem = epc->windows[0]; epc->num_windows = num_windows; return 0; err_mem: for (; i >= 0; i--) { mem = epc->windows[i]; kfree(mem->bitmap); kfree(mem); } kfree(epc->windows); return ret; } EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init); /** * pci_epc_mem_init() - Initialize the pci_epc_mem structure * @epc: the EPC device that invoked pci_epc_mem_init * @base: Physical address of the window region * @size: Total Size of the window region * @page_size: Page size of the window region * * Invoke to initialize a single pci_epc_mem structure used by the * endpoint functions to allocate memory for mapping the PCI host memory */ int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base, size_t size, size_t page_size) { struct pci_epc_mem_window mem_window; mem_window.phys_base = base; mem_window.size = size; mem_window.page_size = page_size; return pci_epc_multi_mem_init(epc, &mem_window, 1); } EXPORT_SYMBOL_GPL(pci_epc_mem_init); /** * pci_epc_mem_exit() - cleanup the pci_epc_mem structure * @epc: the EPC device that invoked pci_epc_mem_exit * * Invoke to cleanup the pci_epc_mem structure allocated in * pci_epc_mem_init(). 
*/ void pci_epc_mem_exit(struct pci_epc *epc) { struct pci_epc_mem *mem; int i; if (!epc->num_windows) return; for (i = 0; i < epc->num_windows; i++) { mem = epc->windows[i]; kfree(mem->bitmap); kfree(mem); } kfree(epc->windows); epc->windows = NULL; epc->mem = NULL; epc->num_windows = 0; } EXPORT_SYMBOL_GPL(pci_epc_mem_exit); /** * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space * @epc: the EPC device on which memory has to be allocated * @phys_addr: populate the allocated physical address here * @size: the size of the address space that has to be allocated * * Invoke to allocate memory address from the EPC address space. This * is usually done to map the remote RC address into the local system. */ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size) { void __iomem *virt_addr = NULL; struct pci_epc_mem *mem; unsigned int page_shift; size_t align_size; int pageno; int order; int i; for (i = 0; i < epc->num_windows; i++) { mem = epc->windows[i]; mutex_lock(&mem->lock); align_size = ALIGN(size, mem->window.page_size); order = pci_epc_mem_get_order(mem, align_size); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno >= 0) { page_shift = ilog2(mem->window.page_size); *phys_addr = mem->window.phys_base + ((phys_addr_t)pageno << page_shift); virt_addr = ioremap(*phys_addr, align_size); if (!virt_addr) { bitmap_release_region(mem->bitmap, pageno, order); mutex_unlock(&mem->lock); continue; } mutex_unlock(&mem->lock); return virt_addr; } mutex_unlock(&mem->lock); } return virt_addr; } EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); static struct pci_epc_mem *pci_epc_get_matching_window(struct pci_epc *epc, phys_addr_t phys_addr) { struct pci_epc_mem *mem; int i; for (i = 0; i < epc->num_windows; i++) { mem = epc->windows[i]; if (phys_addr >= mem->window.phys_base && phys_addr < (mem->window.phys_base + mem->window.size)) return mem; } return NULL; } /** * pci_epc_mem_free_addr() - free the allocated memory address * @epc: the EPC device on which memory was allocated * @phys_addr: the allocated physical address * @virt_addr: virtual address of the allocated mem space * @size: the size of the allocated address space * * Invoke to free the memory allocated using pci_epc_mem_alloc_addr. */ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, void __iomem *virt_addr, size_t size) { struct pci_epc_mem *mem; unsigned int page_shift; size_t page_size; int pageno; int order; mem = pci_epc_get_matching_window(epc, phys_addr); if (!mem) { pr_err("failed to get matching window\n"); return; } page_size = mem->window.page_size; page_shift = ilog2(page_size); iounmap(virt_addr); pageno = (phys_addr - mem->window.phys_base) >> page_shift; size = ALIGN(size, page_size); order = pci_epc_mem_get_order(mem, size); mutex_lock(&mem->lock); bitmap_release_region(mem->bitmap, pageno, order); mutex_unlock(&mem->lock); } EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); MODULE_DESCRIPTION("PCI EPC Address Space Management"); MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
linux-master
drivers/pci/endpoint/pci-epc-mem.c
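A short, hedged sketch of how a function driver typically uses this allocator: reserve a chunk of the controller's outbound window, map it to a host address with pci_epc_map_addr(), access it, then undo everything. demo_peek_host_dword() and host_phys are placeholders; the sketch assumes the usual <linux/io.h>, <linux/pci-epc.h>, <linux/pci-epf.h> and <linux/sizes.h> includes.

static int demo_peek_host_dword(struct pci_epf *epf, u64 host_phys, u32 *val)
{
	struct pci_epc *epc = epf->epc;
	phys_addr_t phys;
	void __iomem *win;
	int ret;

	/* Carve 4 KiB out of one of the EPC's address windows. */
	win = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!win)
		return -ENOMEM;

	/* Program an outbound region: local "phys" -> host "host_phys". */
	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys,
			       host_phys, SZ_4K);
	if (ret)
		goto free_addr;

	*val = readl(win);	/* goes out on the link as a memory read */

	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys);
free_addr:
	pci_epc_mem_free_addr(epc, phys, win, SZ_4K);
	return ret;
}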
// SPDX-License-Identifier: GPL-2.0 /* * configfs to configure the PCI endpoint * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/module.h> #include <linux/idr.h> #include <linux/slab.h> #include <linux/pci-epc.h> #include <linux/pci-epf.h> #include <linux/pci-ep-cfs.h> static DEFINE_IDR(functions_idr); static DEFINE_MUTEX(functions_mutex); static struct config_group *functions_group; static struct config_group *controllers_group; struct pci_epf_group { struct config_group group; struct config_group primary_epc_group; struct config_group secondary_epc_group; struct config_group *type_group; struct delayed_work cfs_work; struct pci_epf *epf; int index; }; struct pci_epc_group { struct config_group group; struct pci_epc *epc; bool start; }; static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item) { return container_of(to_config_group(item), struct pci_epf_group, group); } static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item) { return container_of(to_config_group(item), struct pci_epc_group, group); } static int pci_secondary_epc_epf_link(struct config_item *epf_item, struct config_item *epc_item) { int ret; struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); struct pci_epc *epc = epc_group->epc; struct pci_epf *epf = epf_group->epf; ret = pci_epc_add_epf(epc, epf, SECONDARY_INTERFACE); if (ret) return ret; ret = pci_epf_bind(epf); if (ret) { pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE); return ret; } return 0; } static void pci_secondary_epc_epf_unlink(struct config_item *epc_item, struct config_item *epf_item) { struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); struct pci_epc *epc; struct pci_epf *epf; WARN_ON_ONCE(epc_group->start); epc = epc_group->epc; epf = epf_group->epf; pci_epf_unbind(epf); pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE); } static struct configfs_item_operations pci_secondary_epc_item_ops = { .allow_link = pci_secondary_epc_epf_link, .drop_link = pci_secondary_epc_epf_unlink, }; static const struct config_item_type pci_secondary_epc_type = { .ct_item_ops = &pci_secondary_epc_item_ops, .ct_owner = THIS_MODULE, }; static struct config_group *pci_ep_cfs_add_secondary_group(struct pci_epf_group *epf_group) { struct config_group *secondary_epc_group; secondary_epc_group = &epf_group->secondary_epc_group; config_group_init_type_name(secondary_epc_group, "secondary", &pci_secondary_epc_type); configfs_register_group(&epf_group->group, secondary_epc_group); return secondary_epc_group; } static int pci_primary_epc_epf_link(struct config_item *epf_item, struct config_item *epc_item) { int ret; struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); struct pci_epc *epc = epc_group->epc; struct pci_epf *epf = epf_group->epf; ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE); if (ret) return ret; ret = pci_epf_bind(epf); if (ret) { pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE); return ret; } return 0; } static void pci_primary_epc_epf_unlink(struct config_item *epc_item, struct config_item *epf_item) { struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); struct pci_epc *epc; struct pci_epf *epf; WARN_ON_ONCE(epc_group->start); epc = 
epc_group->epc; epf = epf_group->epf; pci_epf_unbind(epf); pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE); } static struct configfs_item_operations pci_primary_epc_item_ops = { .allow_link = pci_primary_epc_epf_link, .drop_link = pci_primary_epc_epf_unlink, }; static const struct config_item_type pci_primary_epc_type = { .ct_item_ops = &pci_primary_epc_item_ops, .ct_owner = THIS_MODULE, }; static struct config_group *pci_ep_cfs_add_primary_group(struct pci_epf_group *epf_group) { struct config_group *primary_epc_group = &epf_group->primary_epc_group; config_group_init_type_name(primary_epc_group, "primary", &pci_primary_epc_type); configfs_register_group(&epf_group->group, primary_epc_group); return primary_epc_group; } static ssize_t pci_epc_start_store(struct config_item *item, const char *page, size_t len) { int ret; bool start; struct pci_epc *epc; struct pci_epc_group *epc_group = to_pci_epc_group(item); epc = epc_group->epc; if (kstrtobool(page, &start) < 0) return -EINVAL; if (start == epc_group->start) return -EALREADY; if (!start) { pci_epc_stop(epc); epc_group->start = 0; return len; } ret = pci_epc_start(epc); if (ret) { dev_err(&epc->dev, "failed to start endpoint controller\n"); return -EINVAL; } epc_group->start = start; return len; } static ssize_t pci_epc_start_show(struct config_item *item, char *page) { return sysfs_emit(page, "%d\n", to_pci_epc_group(item)->start); } CONFIGFS_ATTR(pci_epc_, start); static struct configfs_attribute *pci_epc_attrs[] = { &pci_epc_attr_start, NULL, }; static int pci_epc_epf_link(struct config_item *epc_item, struct config_item *epf_item) { int ret; struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); struct pci_epc *epc = epc_group->epc; struct pci_epf *epf = epf_group->epf; ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE); if (ret) return ret; ret = pci_epf_bind(epf); if (ret) { pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE); return ret; } return 0; } static void pci_epc_epf_unlink(struct config_item *epc_item, struct config_item *epf_item) { struct pci_epc *epc; struct pci_epf *epf; struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); WARN_ON_ONCE(epc_group->start); epc = epc_group->epc; epf = epf_group->epf; pci_epf_unbind(epf); pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE); } static struct configfs_item_operations pci_epc_item_ops = { .allow_link = pci_epc_epf_link, .drop_link = pci_epc_epf_unlink, }; static const struct config_item_type pci_epc_type = { .ct_item_ops = &pci_epc_item_ops, .ct_attrs = pci_epc_attrs, .ct_owner = THIS_MODULE, }; struct config_group *pci_ep_cfs_add_epc_group(const char *name) { int ret; struct pci_epc *epc; struct config_group *group; struct pci_epc_group *epc_group; epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL); if (!epc_group) { ret = -ENOMEM; goto err; } group = &epc_group->group; config_group_init_type_name(group, name, &pci_epc_type); ret = configfs_register_group(controllers_group, group); if (ret) { pr_err("failed to register configfs group for %s\n", name); goto err_register_group; } epc = pci_epc_get(name); if (IS_ERR(epc)) { ret = PTR_ERR(epc); goto err_epc_get; } epc_group->epc = epc; return group; err_epc_get: configfs_unregister_group(group); err_register_group: kfree(epc_group); err: return ERR_PTR(ret); } EXPORT_SYMBOL(pci_ep_cfs_add_epc_group); void pci_ep_cfs_remove_epc_group(struct config_group *group) { struct pci_epc_group *epc_group; if 
(!group) return; epc_group = container_of(group, struct pci_epc_group, group); pci_epc_put(epc_group->epc); configfs_unregister_group(&epc_group->group); kfree(epc_group); } EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group); #define PCI_EPF_HEADER_R(_name) \ static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \ { \ struct pci_epf *epf = to_pci_epf_group(item)->epf; \ if (WARN_ON_ONCE(!epf->header)) \ return -EINVAL; \ return sysfs_emit(page, "0x%04x\n", epf->header->_name); \ } #define PCI_EPF_HEADER_W_u32(_name) \ static ssize_t pci_epf_##_name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ u32 val; \ struct pci_epf *epf = to_pci_epf_group(item)->epf; \ if (WARN_ON_ONCE(!epf->header)) \ return -EINVAL; \ if (kstrtou32(page, 0, &val) < 0) \ return -EINVAL; \ epf->header->_name = val; \ return len; \ } #define PCI_EPF_HEADER_W_u16(_name) \ static ssize_t pci_epf_##_name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ u16 val; \ struct pci_epf *epf = to_pci_epf_group(item)->epf; \ if (WARN_ON_ONCE(!epf->header)) \ return -EINVAL; \ if (kstrtou16(page, 0, &val) < 0) \ return -EINVAL; \ epf->header->_name = val; \ return len; \ } #define PCI_EPF_HEADER_W_u8(_name) \ static ssize_t pci_epf_##_name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ u8 val; \ struct pci_epf *epf = to_pci_epf_group(item)->epf; \ if (WARN_ON_ONCE(!epf->header)) \ return -EINVAL; \ if (kstrtou8(page, 0, &val) < 0) \ return -EINVAL; \ epf->header->_name = val; \ return len; \ } static ssize_t pci_epf_msi_interrupts_store(struct config_item *item, const char *page, size_t len) { u8 val; if (kstrtou8(page, 0, &val) < 0) return -EINVAL; to_pci_epf_group(item)->epf->msi_interrupts = val; return len; } static ssize_t pci_epf_msi_interrupts_show(struct config_item *item, char *page) { return sysfs_emit(page, "%d\n", to_pci_epf_group(item)->epf->msi_interrupts); } static ssize_t pci_epf_msix_interrupts_store(struct config_item *item, const char *page, size_t len) { u16 val; if (kstrtou16(page, 0, &val) < 0) return -EINVAL; to_pci_epf_group(item)->epf->msix_interrupts = val; return len; } static ssize_t pci_epf_msix_interrupts_show(struct config_item *item, char *page) { return sysfs_emit(page, "%d\n", to_pci_epf_group(item)->epf->msix_interrupts); } PCI_EPF_HEADER_R(vendorid) PCI_EPF_HEADER_W_u16(vendorid) PCI_EPF_HEADER_R(deviceid) PCI_EPF_HEADER_W_u16(deviceid) PCI_EPF_HEADER_R(revid) PCI_EPF_HEADER_W_u8(revid) PCI_EPF_HEADER_R(progif_code) PCI_EPF_HEADER_W_u8(progif_code) PCI_EPF_HEADER_R(subclass_code) PCI_EPF_HEADER_W_u8(subclass_code) PCI_EPF_HEADER_R(baseclass_code) PCI_EPF_HEADER_W_u8(baseclass_code) PCI_EPF_HEADER_R(cache_line_size) PCI_EPF_HEADER_W_u8(cache_line_size) PCI_EPF_HEADER_R(subsys_vendor_id) PCI_EPF_HEADER_W_u16(subsys_vendor_id) PCI_EPF_HEADER_R(subsys_id) PCI_EPF_HEADER_W_u16(subsys_id) PCI_EPF_HEADER_R(interrupt_pin) PCI_EPF_HEADER_W_u8(interrupt_pin) CONFIGFS_ATTR(pci_epf_, vendorid); CONFIGFS_ATTR(pci_epf_, deviceid); CONFIGFS_ATTR(pci_epf_, revid); CONFIGFS_ATTR(pci_epf_, progif_code); CONFIGFS_ATTR(pci_epf_, subclass_code); CONFIGFS_ATTR(pci_epf_, baseclass_code); CONFIGFS_ATTR(pci_epf_, cache_line_size); CONFIGFS_ATTR(pci_epf_, subsys_vendor_id); CONFIGFS_ATTR(pci_epf_, subsys_id); CONFIGFS_ATTR(pci_epf_, interrupt_pin); CONFIGFS_ATTR(pci_epf_, msi_interrupts); CONFIGFS_ATTR(pci_epf_, msix_interrupts); static struct configfs_attribute *pci_epf_attrs[] = { &pci_epf_attr_vendorid, &pci_epf_attr_deviceid, 
&pci_epf_attr_revid, &pci_epf_attr_progif_code, &pci_epf_attr_subclass_code, &pci_epf_attr_baseclass_code, &pci_epf_attr_cache_line_size, &pci_epf_attr_subsys_vendor_id, &pci_epf_attr_subsys_id, &pci_epf_attr_interrupt_pin, &pci_epf_attr_msi_interrupts, &pci_epf_attr_msix_interrupts, NULL, }; static int pci_epf_vepf_link(struct config_item *epf_pf_item, struct config_item *epf_vf_item) { struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item); struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item); struct pci_epf *epf_pf = epf_pf_group->epf; struct pci_epf *epf_vf = epf_vf_group->epf; return pci_epf_add_vepf(epf_pf, epf_vf); } static void pci_epf_vepf_unlink(struct config_item *epf_pf_item, struct config_item *epf_vf_item) { struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item); struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item); struct pci_epf *epf_pf = epf_pf_group->epf; struct pci_epf *epf_vf = epf_vf_group->epf; pci_epf_remove_vepf(epf_pf, epf_vf); } static void pci_epf_release(struct config_item *item) { struct pci_epf_group *epf_group = to_pci_epf_group(item); mutex_lock(&functions_mutex); idr_remove(&functions_idr, epf_group->index); mutex_unlock(&functions_mutex); pci_epf_destroy(epf_group->epf); kfree(epf_group); } static struct configfs_item_operations pci_epf_ops = { .allow_link = pci_epf_vepf_link, .drop_link = pci_epf_vepf_unlink, .release = pci_epf_release, }; static const struct config_item_type pci_epf_type = { .ct_item_ops = &pci_epf_ops, .ct_attrs = pci_epf_attrs, .ct_owner = THIS_MODULE, }; /** * pci_epf_type_add_cfs() - Help function drivers to expose function specific * attributes in configfs * @epf: the EPF device that has to be configured using configfs * @group: the parent configfs group (corresponding to entries in * pci_epf_device_id) * * Invoke to expose function specific attributes in configfs. * * Return: A pointer to a config_group structure or NULL if the function driver * does not have anything to expose (attributes configured by user) or if * the function driver does not implement the add_cfs() method. * * Returns an error pointer if this function is called for an unbound EPF device * or if the EPF driver add_cfs() method fails. 
*/ static struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf, struct config_group *group) { struct config_group *epf_type_group; if (!epf->driver) { dev_err(&epf->dev, "epf device not bound to driver\n"); return ERR_PTR(-ENODEV); } if (!epf->driver->ops->add_cfs) return NULL; mutex_lock(&epf->lock); epf_type_group = epf->driver->ops->add_cfs(epf, group); mutex_unlock(&epf->lock); return epf_type_group; } static void pci_ep_cfs_add_type_group(struct pci_epf_group *epf_group) { struct config_group *group; group = pci_epf_type_add_cfs(epf_group->epf, &epf_group->group); if (!group) return; if (IS_ERR(group)) { dev_err(&epf_group->epf->dev, "failed to create epf type specific attributes\n"); return; } configfs_register_group(&epf_group->group, group); } static void pci_epf_cfs_work(struct work_struct *work) { struct pci_epf_group *epf_group; struct config_group *group; epf_group = container_of(work, struct pci_epf_group, cfs_work.work); group = pci_ep_cfs_add_primary_group(epf_group); if (IS_ERR(group)) { pr_err("failed to create 'primary' EPC interface\n"); return; } group = pci_ep_cfs_add_secondary_group(epf_group); if (IS_ERR(group)) { pr_err("failed to create 'secondary' EPC interface\n"); return; } pci_ep_cfs_add_type_group(epf_group); } static struct config_group *pci_epf_make(struct config_group *group, const char *name) { struct pci_epf_group *epf_group; struct pci_epf *epf; char *epf_name; int index, err; epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL); if (!epf_group) return ERR_PTR(-ENOMEM); mutex_lock(&functions_mutex); index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL); mutex_unlock(&functions_mutex); if (index < 0) { err = index; goto free_group; } epf_group->index = index; config_group_init_type_name(&epf_group->group, name, &pci_epf_type); epf_name = kasprintf(GFP_KERNEL, "%s.%d", group->cg_item.ci_name, epf_group->index); if (!epf_name) { err = -ENOMEM; goto remove_idr; } epf = pci_epf_create(epf_name); if (IS_ERR(epf)) { pr_err("failed to create endpoint function device\n"); err = -EINVAL; goto free_name; } epf->group = &epf_group->group; epf_group->epf = epf; kfree(epf_name); INIT_DELAYED_WORK(&epf_group->cfs_work, pci_epf_cfs_work); queue_delayed_work(system_wq, &epf_group->cfs_work, msecs_to_jiffies(1)); return &epf_group->group; free_name: kfree(epf_name); remove_idr: mutex_lock(&functions_mutex); idr_remove(&functions_idr, epf_group->index); mutex_unlock(&functions_mutex); free_group: kfree(epf_group); return ERR_PTR(err); } static void pci_epf_drop(struct config_group *group, struct config_item *item) { config_item_put(item); } static struct configfs_group_operations pci_epf_group_ops = { .make_group = &pci_epf_make, .drop_item = &pci_epf_drop, }; static const struct config_item_type pci_epf_group_type = { .ct_group_ops = &pci_epf_group_ops, .ct_owner = THIS_MODULE, }; struct config_group *pci_ep_cfs_add_epf_group(const char *name) { struct config_group *group; group = configfs_register_default_group(functions_group, name, &pci_epf_group_type); if (IS_ERR(group)) pr_err("failed to register configfs group for %s function\n", name); return group; } EXPORT_SYMBOL(pci_ep_cfs_add_epf_group); void pci_ep_cfs_remove_epf_group(struct config_group *group) { if (IS_ERR_OR_NULL(group)) return; configfs_unregister_default_group(group); } EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); static const struct config_item_type pci_functions_type = { .ct_owner = THIS_MODULE, }; static const struct config_item_type pci_controllers_type = { .ct_owner = THIS_MODULE, }; 
static const struct config_item_type pci_ep_type = { .ct_owner = THIS_MODULE, }; static struct configfs_subsystem pci_ep_cfs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "pci_ep", .ci_type = &pci_ep_type, }, }, .su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex), }; static int __init pci_ep_cfs_init(void) { int ret; struct config_group *root = &pci_ep_cfs_subsys.su_group; config_group_init(root); ret = configfs_register_subsystem(&pci_ep_cfs_subsys); if (ret) { pr_err("Error %d while registering subsystem %s\n", ret, root->cg_item.ci_namebuf); goto err; } functions_group = configfs_register_default_group(root, "functions", &pci_functions_type); if (IS_ERR(functions_group)) { ret = PTR_ERR(functions_group); pr_err("Error %d while registering functions group\n", ret); goto err_functions_group; } controllers_group = configfs_register_default_group(root, "controllers", &pci_controllers_type); if (IS_ERR(controllers_group)) { ret = PTR_ERR(controllers_group); pr_err("Error %d while registering controllers group\n", ret); goto err_controllers_group; } return 0; err_controllers_group: configfs_unregister_default_group(functions_group); err_functions_group: configfs_unregister_subsystem(&pci_ep_cfs_subsys); err: return ret; } module_init(pci_ep_cfs_init); static void __exit pci_ep_cfs_exit(void) { configfs_unregister_default_group(controllers_group); configfs_unregister_default_group(functions_group); configfs_unregister_subsystem(&pci_ep_cfs_subsys); } module_exit(pci_ep_cfs_exit); MODULE_DESCRIPTION("PCI EP CONFIGFS"); MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
linux-master
drivers/pci/endpoint/pci-ep-cfs.c
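Editor's note: the configfs core above delegates per-function attributes to the endpoint function driver via epf->driver->ops->add_cfs, invoked under epf->lock by pci_epf_type_add_cfs(). Below is a minimal, hypothetical sketch (not part of linux-master) of how a function driver could implement that callback; every "foo_" identifier is an assumption for illustration, while the configfs and pci_epf helpers used are the kernel's own.

/*
 * Minimal sketch of an endpoint function driver's ->add_cfs() callback.
 * All "foo_" names are hypothetical; the sketch only shows how the group
 * returned here is registered by pci_ep_cfs_add_type_group() above.
 */
#include <linux/configfs.h>
#include <linux/pci-epf.h>

struct foo_epf {
	struct pci_epf		*epf;
	struct config_group	group;	/* exposed under the function's configfs dir */
};

static const struct config_item_type foo_epf_group_type = {
	.ct_owner = THIS_MODULE,
	/* .ct_attrs would list the driver-specific configfs attributes */
};

/*
 * Called from pci_epf_type_add_cfs() (with epf->lock held) when the user
 * creates a function directory; the returned group is registered as a
 * child of that directory.
 */
static struct config_group *foo_epf_add_cfs(struct pci_epf *epf,
					    struct config_group *group)
{
	struct foo_epf *foo = epf_get_drvdata(epf);

	config_group_init_type_name(&foo->group, dev_name(&epf->dev),
				    &foo_epf_group_type);

	return &foo->group;
	/* wired into the driver's struct pci_epf_ops as .add_cfs */
}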
// SPDX-License-Identifier: GPL-2.0 /* * PCI Endpoint *Controller* (EPC) library * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pci-epc.h> #include <linux/pci-epf.h> #include <linux/pci-ep-cfs.h> static struct class *pci_epc_class; static void devm_pci_epc_release(struct device *dev, void *res) { struct pci_epc *epc = *(struct pci_epc **)res; pci_epc_destroy(epc); } static int devm_pci_epc_match(struct device *dev, void *res, void *match_data) { struct pci_epc **epc = res; return *epc == match_data; } /** * pci_epc_put() - release the PCI endpoint controller * @epc: epc returned by pci_epc_get() * * release the refcount the caller obtained by invoking pci_epc_get() */ void pci_epc_put(struct pci_epc *epc) { if (!epc || IS_ERR(epc)) return; module_put(epc->ops->owner); put_device(&epc->dev); } EXPORT_SYMBOL_GPL(pci_epc_put); /** * pci_epc_get() - get the PCI endpoint controller * @epc_name: device name of the endpoint controller * * Invoke to get struct pci_epc * corresponding to the device name of the * endpoint controller */ struct pci_epc *pci_epc_get(const char *epc_name) { int ret = -EINVAL; struct pci_epc *epc; struct device *dev; struct class_dev_iter iter; class_dev_iter_init(&iter, pci_epc_class, NULL, NULL); while ((dev = class_dev_iter_next(&iter))) { if (strcmp(epc_name, dev_name(dev))) continue; epc = to_pci_epc(dev); if (!try_module_get(epc->ops->owner)) { ret = -EINVAL; goto err; } class_dev_iter_exit(&iter); get_device(&epc->dev); return epc; } err: class_dev_iter_exit(&iter); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(pci_epc_get); /** * pci_epc_get_first_free_bar() - helper to get first unreserved BAR * @epc_features: pci_epc_features structure that holds the reserved bar bitmap * * Invoke to get the first unreserved BAR that can be used by the endpoint * function. For any incorrect value in reserved_bar return '0'. */ enum pci_barno pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features) { return pci_epc_get_next_free_bar(epc_features, BAR_0); } EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar); /** * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar * @epc_features: pci_epc_features structure that holds the reserved bar bitmap * @bar: the starting BAR number from where unreserved BAR should be searched * * Invoke to get the next unreserved BAR starting from @bar that can be used * for endpoint function. For any incorrect value in reserved_bar return '0'. 
*/ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features *epc_features, enum pci_barno bar) { unsigned long free_bar; if (!epc_features) return BAR_0; /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */ if ((epc_features->bar_fixed_64bit << 1) & 1 << bar) bar++; /* Find if the reserved BAR is also a 64-bit BAR */ free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit; /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */ free_bar <<= 1; free_bar |= epc_features->reserved_bar; free_bar = find_next_zero_bit(&free_bar, 6, bar); if (free_bar > 5) return NO_BAR; return free_bar; } EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar); /** * pci_epc_get_features() - get the features supported by EPC * @epc: the features supported by *this* EPC device will be returned * @func_no: the features supported by the EPC device specific to the * endpoint function with func_no will be returned * @vfunc_no: the features supported by the EPC device specific to the * virtual endpoint function with vfunc_no will be returned * * Invoke to get the features provided by the EPC which may be * specific to an endpoint function. Returns pci_epc_features on success * and NULL for any failures. */ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { const struct pci_epc_features *epc_features; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return NULL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return NULL; if (!epc->ops->get_features) return NULL; mutex_lock(&epc->lock); epc_features = epc->ops->get_features(epc, func_no, vfunc_no); mutex_unlock(&epc->lock); return epc_features; } EXPORT_SYMBOL_GPL(pci_epc_get_features); /** * pci_epc_stop() - stop the PCI link * @epc: the link of the EPC device that has to be stopped * * Invoke to stop the PCI link */ void pci_epc_stop(struct pci_epc *epc) { if (IS_ERR(epc) || !epc->ops->stop) return; mutex_lock(&epc->lock); epc->ops->stop(epc); mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_stop); /** * pci_epc_start() - start the PCI link * @epc: the link of *this* EPC device has to be started * * Invoke to start the PCI link */ int pci_epc_start(struct pci_epc *epc) { int ret; if (IS_ERR(epc)) return -EINVAL; if (!epc->ops->start) return 0; mutex_lock(&epc->lock); ret = epc->ops->start(epc); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_start); /** * pci_epc_raise_irq() - interrupt the host system * @epc: the EPC device which has to interrupt the host * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @type: specify the type of interrupt; legacy, MSI or MSI-X * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N) * * Invoke to raise an legacy, MSI or MSI-X interrupt */ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, enum pci_epc_irq_type type, u16 interrupt_num) { int ret; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; if (!epc->ops->raise_irq) return 0; mutex_lock(&epc->lock); ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_raise_irq); /** * pci_epc_map_msi_irq() - Map physical address to MSI address and return * MSI data * @epc: the EPC device which has the MSI capability * @func_no: the physical 
endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @phys_addr: the physical address of the outbound region * @interrupt_num: the MSI interrupt number with range (1-N) * @entry_size: Size of Outbound address region for each interrupt * @msi_data: the data that should be written in order to raise MSI interrupt * with interrupt number as 'interrupt num' * @msi_addr_offset: Offset of MSI address from the aligned outbound address * to which the MSI address is mapped * * Invoke to map physical address to MSI address and return MSI data. The * physical address should be an address in the outbound region. This is * required to implement doorbell functionality of NTB wherein EPC on either * side of the interface (primary and secondary) can directly write to the * physical address (in outbound region) of the other interface to ring * doorbell. */ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset) { int ret; if (IS_ERR_OR_NULL(epc)) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; if (!epc->ops->map_msi_irq) return -EINVAL; mutex_lock(&epc->lock); ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr, interrupt_num, entry_size, msi_data, msi_addr_offset); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq); /** * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated * @epc: the EPC device to which MSI interrupts was requested * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * * Invoke to get the number of MSI interrupts allocated by the RC */ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { int interrupt; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return 0; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return 0; if (!epc->ops->get_msi) return 0; mutex_lock(&epc->lock); interrupt = epc->ops->get_msi(epc, func_no, vfunc_no); mutex_unlock(&epc->lock); if (interrupt < 0) return 0; interrupt = 1 << interrupt; return interrupt; } EXPORT_SYMBOL_GPL(pci_epc_get_msi); /** * pci_epc_set_msi() - set the number of MSI interrupt numbers required * @epc: the EPC device on which MSI has to be configured * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @interrupts: number of MSI interrupts required by the EPF * * Invoke to set the required number of MSI interrupts. 
*/ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) { int ret; u8 encode_int; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || interrupts < 1 || interrupts > 32) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; if (!epc->ops->set_msi) return 0; encode_int = order_base_2(interrupts); mutex_lock(&epc->lock); ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_set_msi); /** * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated * @epc: the EPC device to which MSI-X interrupts was requested * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * * Invoke to get the number of MSI-X interrupts allocated by the RC */ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { int interrupt; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return 0; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return 0; if (!epc->ops->get_msix) return 0; mutex_lock(&epc->lock); interrupt = epc->ops->get_msix(epc, func_no, vfunc_no); mutex_unlock(&epc->lock); if (interrupt < 0) return 0; return interrupt + 1; } EXPORT_SYMBOL_GPL(pci_epc_get_msix); /** * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required * @epc: the EPC device on which MSI-X has to be configured * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @interrupts: number of MSI-X interrupts required by the EPF * @bir: BAR where the MSI-X table resides * @offset: Offset pointing to the start of MSI-X table * * Invoke to set the required number of MSI-X interrupts. */ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 interrupts, enum pci_barno bir, u32 offset) { int ret; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || interrupts < 1 || interrupts > 2048) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; if (!epc->ops->set_msix) return 0; mutex_lock(&epc->lock); ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir, offset); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_set_msix); /** * pci_epc_unmap_addr() - unmap CPU address from PCI address * @epc: the EPC device on which address is allocated * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @phys_addr: physical address of the local system * * Invoke to unmap the CPU address from PCI address. 
*/ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr) { if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return; if (!epc->ops->unmap_addr) return; mutex_lock(&epc->lock); epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr); mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_unmap_addr); /** * pci_epc_map_addr() - map CPU address to PCI address * @epc: the EPC device on which address is allocated * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @phys_addr: physical address of the local system * @pci_addr: PCI address to which the physical address should be mapped * @size: the size of the allocation * * Invoke to map CPU address with PCI address. */ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u64 pci_addr, size_t size) { int ret; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; if (!epc->ops->map_addr) return 0; mutex_lock(&epc->lock); ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr, size); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_map_addr); /** * pci_epc_clear_bar() - reset the BAR * @epc: the EPC device for which the BAR has to be cleared * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @epf_bar: the struct epf_bar that contains the BAR information * * Invoke to reset the BAR of the endpoint device. */ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || (epf_bar->barno == BAR_5 && epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) return; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return; if (!epc->ops->clear_bar) return; mutex_lock(&epc->lock); epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar); mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_clear_bar); /** * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space * @epc: the EPC device on which BAR has to be configured * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @epf_bar: the struct epf_bar that contains the BAR information * * Invoke to configure the BAR of the endpoint device. 
*/ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { int ret; int flags = epf_bar->flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || (epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || (flags & PCI_BASE_ADDRESS_SPACE_IO && flags & PCI_BASE_ADDRESS_IO_MASK) || (upper_32_bits(epf_bar->size) && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; if (!epc->ops->set_bar) return 0; mutex_lock(&epc->lock); ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_set_bar); /** * pci_epc_write_header() - write standard configuration header * @epc: the EPC device to which the configuration header should be written * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function * @header: standard configuration header fields * * Invoke to write the configuration header to the endpoint controller. Every * endpoint controller will have a dedicated location to which the standard * configuration header would be written. The callback function should write * the header fields to this dedicated location. */ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *header) { int ret; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return -EINVAL; if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) return -EINVAL; /* Only Virtual Function #1 has deviceID */ if (vfunc_no > 1) return -EINVAL; if (!epc->ops->write_header) return 0; mutex_lock(&epc->lock); ret = epc->ops->write_header(epc, func_no, vfunc_no, header); mutex_unlock(&epc->lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_write_header); /** * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller * @epc: the EPC device to which the endpoint function should be added * @epf: the endpoint function to be added * @type: Identifies if the EPC is connected to the primary or secondary * interface of EPF * * A PCI endpoint device can have one or more functions. In the case of PCIe, * the specification allows up to 8 PCIe endpoint functions. Invoke * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller. 
*/ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type) { struct list_head *list; u32 func_no; int ret = 0; if (IS_ERR_OR_NULL(epc) || epf->is_vf) return -EINVAL; if (type == PRIMARY_INTERFACE && epf->epc) return -EBUSY; if (type == SECONDARY_INTERFACE && epf->sec_epc) return -EBUSY; mutex_lock(&epc->list_lock); func_no = find_first_zero_bit(&epc->function_num_map, BITS_PER_LONG); if (func_no >= BITS_PER_LONG) { ret = -EINVAL; goto ret; } if (func_no > epc->max_functions - 1) { dev_err(&epc->dev, "Exceeding max supported Function Number\n"); ret = -EINVAL; goto ret; } set_bit(func_no, &epc->function_num_map); if (type == PRIMARY_INTERFACE) { epf->func_no = func_no; epf->epc = epc; list = &epf->list; } else { epf->sec_epc_func_no = func_no; epf->sec_epc = epc; list = &epf->sec_epc_list; } list_add_tail(list, &epc->pci_epf); ret: mutex_unlock(&epc->list_lock); return ret; } EXPORT_SYMBOL_GPL(pci_epc_add_epf); /** * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller * @epc: the EPC device from which the endpoint function should be removed * @epf: the endpoint function to be removed * @type: identifies if the EPC is connected to the primary or secondary * interface of EPF * * Invoke to remove PCI endpoint function from the endpoint controller. */ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type) { struct list_head *list; u32 func_no = 0; if (!epc || IS_ERR(epc) || !epf) return; if (type == PRIMARY_INTERFACE) { func_no = epf->func_no; list = &epf->list; } else { func_no = epf->sec_epc_func_no; list = &epf->sec_epc_list; } mutex_lock(&epc->list_lock); clear_bit(func_no, &epc->function_num_map); list_del(list); epf->epc = NULL; mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); /** * pci_epc_linkup() - Notify the EPF device that EPC device has established a * connection with the Root Complex. * @epc: the EPC device which has established link with the host * * Invoke to Notify the EPF device that the EPC device has established a * connection with the Root Complex. */ void pci_epc_linkup(struct pci_epc *epc) { struct pci_epf *epf; if (!epc || IS_ERR(epc)) return; mutex_lock(&epc->list_lock); list_for_each_entry(epf, &epc->pci_epf, list) { mutex_lock(&epf->lock); if (epf->event_ops && epf->event_ops->link_up) epf->event_ops->link_up(epf); mutex_unlock(&epf->lock); } mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_linkup); /** * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the * connection with the Root Complex. * @epc: the EPC device which has dropped the link with the host * * Invoke to Notify the EPF device that the EPC device has dropped the * connection with the Root Complex. */ void pci_epc_linkdown(struct pci_epc *epc) { struct pci_epf *epf; if (!epc || IS_ERR(epc)) return; mutex_lock(&epc->list_lock); list_for_each_entry(epf, &epc->pci_epf, list) { mutex_lock(&epf->lock); if (epf->event_ops && epf->event_ops->link_down) epf->event_ops->link_down(epf); mutex_unlock(&epf->lock); } mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_linkdown); /** * pci_epc_init_notify() - Notify the EPF device that EPC device's core * initialization is completed. * @epc: the EPC device whose core initialization is completed * * Invoke to Notify the EPF device that the EPC device's initialization * is completed. 
*/ void pci_epc_init_notify(struct pci_epc *epc) { struct pci_epf *epf; if (!epc || IS_ERR(epc)) return; mutex_lock(&epc->list_lock); list_for_each_entry(epf, &epc->pci_epf, list) { mutex_lock(&epf->lock); if (epf->event_ops && epf->event_ops->core_init) epf->event_ops->core_init(epf); mutex_unlock(&epf->lock); } mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_init_notify); /** * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received * the BME event from the Root complex * @epc: the EPC device that received the BME event * * Invoke to Notify the EPF device that the EPC device has received the Bus * Master Enable (BME) event from the Root complex */ void pci_epc_bme_notify(struct pci_epc *epc) { struct pci_epf *epf; if (!epc || IS_ERR(epc)) return; mutex_lock(&epc->list_lock); list_for_each_entry(epf, &epc->pci_epf, list) { mutex_lock(&epf->lock); if (epf->event_ops && epf->event_ops->bme) epf->event_ops->bme(epf); mutex_unlock(&epf->lock); } mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_bme_notify); /** * pci_epc_destroy() - destroy the EPC device * @epc: the EPC device that has to be destroyed * * Invoke to destroy the PCI EPC device */ void pci_epc_destroy(struct pci_epc *epc) { pci_ep_cfs_remove_epc_group(epc->group); device_unregister(&epc->dev); } EXPORT_SYMBOL_GPL(pci_epc_destroy); /** * devm_pci_epc_destroy() - destroy the EPC device * @dev: device that wants to destroy the EPC * @epc: the EPC device that has to be destroyed * * Invoke to destroy the devres associated with this * pci_epc and destroy the EPC device. */ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc) { int r; r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match, epc); dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n"); } EXPORT_SYMBOL_GPL(devm_pci_epc_destroy); static void pci_epc_release(struct device *dev) { kfree(to_pci_epc(dev)); } /** * __pci_epc_create() - create a new endpoint controller (EPC) device * @dev: device that is creating the new EPC * @ops: function pointers for performing EPC operations * @owner: the owner of the module that creates the EPC device * * Invoke to create a new EPC device and add it to pci_epc class. */ struct pci_epc * __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner) { int ret; struct pci_epc *epc; if (WARN_ON(!dev)) { ret = -EINVAL; goto err_ret; } epc = kzalloc(sizeof(*epc), GFP_KERNEL); if (!epc) { ret = -ENOMEM; goto err_ret; } mutex_init(&epc->lock); mutex_init(&epc->list_lock); INIT_LIST_HEAD(&epc->pci_epf); device_initialize(&epc->dev); epc->dev.class = pci_epc_class; epc->dev.parent = dev; epc->dev.release = pci_epc_release; epc->ops = ops; ret = dev_set_name(&epc->dev, "%s", dev_name(dev)); if (ret) goto put_dev; ret = device_add(&epc->dev); if (ret) goto put_dev; epc->group = pci_ep_cfs_add_epc_group(dev_name(dev)); return epc; put_dev: put_device(&epc->dev); kfree(epc); err_ret: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(__pci_epc_create); /** * __devm_pci_epc_create() - create a new endpoint controller (EPC) device * @dev: device that is creating the new EPC * @ops: function pointers for performing EPC operations * @owner: the owner of the module that creates the EPC device * * Invoke to create a new EPC device and add it to pci_epc class. * While at that, it also associates the device with the pci_epc using devres. * On driver detach, release function is invoked on the devres data, * then, devres data is freed. 
*/ struct pci_epc * __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner) { struct pci_epc **ptr, *epc; ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); epc = __pci_epc_create(dev, ops, owner); if (!IS_ERR(epc)) { *ptr = epc; devres_add(dev, ptr); } else { devres_free(ptr); } return epc; } EXPORT_SYMBOL_GPL(__devm_pci_epc_create); static int __init pci_epc_init(void) { pci_epc_class = class_create("pci_epc"); if (IS_ERR(pci_epc_class)) { pr_err("failed to create pci epc class --> %ld\n", PTR_ERR(pci_epc_class)); return PTR_ERR(pci_epc_class); } return 0; } module_init(pci_epc_init); static void __exit pci_epc_exit(void) { class_destroy(pci_epc_class); } module_exit(pci_epc_exit); MODULE_DESCRIPTION("PCI EPC Library"); MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
linux-master
drivers/pci/endpoint/pci-epc-core.c
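Editor's note: a minimal sketch (not part of linux-master) of the order in which an endpoint function driver typically consumes the pci_epc_* APIs exported by pci-epc-core.c during its ->bind() step and afterwards. The "foo_" names, the BAR choice, and the MSI count are hypothetical assumptions; the called functions and the epf fields are the kernel's own.

/*
 * Sketch of an EPF driver using the EPC core API: write the config
 * header, expose a BAR, request MSI vectors, then raise an interrupt
 * once the link is up.
 */
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int foo_epf_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;		/* set by pci_epc_add_epf() */
	struct pci_epf_bar *bar = &epf->bar[BAR_0];
	int ret;

	/* Program the standard configuration header for this function */
	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret)
		return ret;

	/* Expose BAR 0 (backed by memory allocated earlier, e.g. via
	 * pci_epf_alloc_space()) */
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
	if (ret)
		return ret;

	/* Request up to 4 MSI vectors; the host decides how many to grant */
	return pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 4);
}

/* Once pci_epc_linkup() has notified the function, it may ring the host */
static void foo_epf_ring_host(struct pci_epf *epf)
{
	pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
			  PCI_EPC_IRQ_MSI, 1);
}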