python_code: string (length 0 – 1.8M)
repo_name: string (7 distinct values)
file_path: string (length 5 – 99)
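Each row below pairs the full text of one source file (stored in the python_code column, even though these particular files are C) with its repo_name and file_path. A minimal sketch of reading such a dump with the Hugging Face datasets library follows; the dataset identifier is a placeholder, since the excerpt does not name the actual dataset.

from datasets import load_dataset

# "user/kernel-source-dump" is a hypothetical id, not the real dataset name.
ds = load_dataset("user/kernel-source-dump", split="train")

# Each record is a dict with the three columns described above.
for row in ds.select(range(3)):
    print(row["repo_name"], row["file_path"], len(row["python_code"]))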
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2005, 2012 IBM Corporation * * Authors: * Kent Yoder <[email protected]> * Seiji Munetoh <[email protected]> * Stefan Berger <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * Nayna Jain <[email protected]> * * Maintained by: <[email protected]> * * Access to the event log created by a system's firmware / BIOS */ #include <linux/seq_file.h> #include <linux/efi.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/tpm_eventlog.h> #include "../tpm.h" #include "common.h" static const char* tcpa_event_type_strings[] = { "PREBOOT", "POST CODE", "", "NO ACTION", "SEPARATOR", "ACTION", "EVENT TAG", "S-CRTM Contents", "S-CRTM Version", "CPU Microcode", "Platform Config Flags", "Table of Devices", "Compact Hash", "IPL", "IPL Partition Data", "Non-Host Code", "Non-Host Config", "Non-Host Info" }; static const char* tcpa_pc_event_id_strings[] = { "", "SMBIOS", "BIS Certificate", "POST BIOS ", "ESCD ", "CMOS", "NVRAM", "Option ROM", "Option ROM config", "", "Option ROM microcode ", "S-CRTM Version", "S-CRTM Contents ", "POST Contents ", "Table of Devices", }; /* returns pointer to start of pos. entry of tcg log */ static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos) { loff_t i = 0; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *addr = log->bios_event_log; void *limit = log->bios_event_log_end; struct tcpa_event *event; u32 converted_event_size; u32 converted_event_type; /* read over *pos measurements */ do { event = addr; /* check if current entry is valid */ if (addr + sizeof(struct tcpa_event) > limit) return NULL; converted_event_size = do_endian_conversion(event->event_size); converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) || ((addr + sizeof(struct tcpa_event) + converted_event_size) > limit)) return NULL; if (i++ == *pos) break; addr += (sizeof(struct tcpa_event) + converted_event_size); } while (1); return addr; } static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, loff_t *pos) { struct tcpa_event *event = v; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *limit = log->bios_event_log_end; u32 converted_event_size; u32 converted_event_type; (*pos)++; converted_event_size = do_endian_conversion(event->event_size); v += sizeof(struct tcpa_event) + converted_event_size; /* now check if current entry is valid */ if ((v + sizeof(struct tcpa_event)) > limit) return NULL; event = v; converted_event_size = do_endian_conversion(event->event_size); converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) || ((v + sizeof(struct tcpa_event) + converted_event_size) > limit)) return NULL; return v; } static void tpm1_bios_measurements_stop(struct seq_file *m, void *v) { } static int get_event_name(char *dest, struct tcpa_event *event, unsigned char * event_entry) { const char *name = ""; /* 41 so there is room for 40 data and 1 nul */ char data[41] = ""; int i, n_len = 0, d_len = 0; struct tcpa_pc_event *pc_event; switch (do_endian_conversion(event->event_type)) { case PREBOOT: case POST_CODE: case UNUSED: case NO_ACTION: case SCRTM_CONTENTS: case SCRTM_VERSION: case CPU_MICROCODE: case PLATFORM_CONFIG_FLAGS: case TABLE_OF_DEVICES: case COMPACT_HASH: case IPL: case IPL_PARTITION_DATA: 
case NONHOST_CODE: case NONHOST_CONFIG: case NONHOST_INFO: name = tcpa_event_type_strings[do_endian_conversion (event->event_type)]; n_len = strlen(name); break; case SEPARATOR: case ACTION: if (MAX_TEXT_EVENT > do_endian_conversion(event->event_size)) { name = event_entry; n_len = do_endian_conversion(event->event_size); } break; case EVENT_TAG: pc_event = (struct tcpa_pc_event *)event_entry; /* ToDo Row data -> Base64 */ switch (do_endian_conversion(pc_event->event_id)) { case SMBIOS: case BIS_CERT: case CMOS: case NVRAM: case OPTION_ROM_EXEC: case OPTION_ROM_CONFIG: case S_CRTM_VERSION: name = tcpa_pc_event_id_strings[do_endian_conversion (pc_event->event_id)]; n_len = strlen(name); break; /* hash data */ case POST_BIOS_ROM: case ESCD: case OPTION_ROM_MICROCODE: case S_CRTM_CONTENTS: case POST_CONTENTS: name = tcpa_pc_event_id_strings[do_endian_conversion (pc_event->event_id)]; n_len = strlen(name); for (i = 0; i < 20; i++) d_len += sprintf(&data[2*i], "%02x", pc_event->event_data[i]); break; default: break; } break; default: break; } return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]", n_len, name, d_len, data); } static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v) { struct tcpa_event *event = v; struct tcpa_event temp_event; char *temp_ptr; int i; memcpy(&temp_event, event, sizeof(struct tcpa_event)); /* convert raw integers for endianness */ temp_event.pcr_index = do_endian_conversion(event->pcr_index); temp_event.event_type = do_endian_conversion(event->event_type); temp_event.event_size = do_endian_conversion(event->event_size); temp_ptr = (char *) &temp_event; for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++) seq_putc(m, temp_ptr[i]); temp_ptr = (char *) v; for (i = (sizeof(struct tcpa_event) - 1); i < (sizeof(struct tcpa_event) + temp_event.event_size); i++) seq_putc(m, temp_ptr[i]); return 0; } static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v) { char *eventname; struct tcpa_event *event = v; unsigned char *event_entry = (unsigned char *)(v + sizeof(struct tcpa_event)); eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); if (!eventname) { printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", __func__); return -EFAULT; } /* 1st: PCR */ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); /* 2nd: SHA1 */ seq_printf(m, "%20phN", event->pcr_value); /* 3rd: event type identifier */ seq_printf(m, " %02x", do_endian_conversion(event->event_type)); get_event_name(eventname, event, event_entry); /* 4th: eventname <= max + \'0' delimiter */ seq_printf(m, " %s\n", eventname); kfree(eventname); return 0; } const struct seq_operations tpm1_ascii_b_measurements_seqops = { .start = tpm1_bios_measurements_start, .next = tpm1_bios_measurements_next, .stop = tpm1_bios_measurements_stop, .show = tpm1_ascii_bios_measurements_show, }; const struct seq_operations tpm1_binary_b_measurements_seqops = { .start = tpm1_bios_measurements_start, .next = tpm1_bios_measurements_next, .stop = tpm1_bios_measurements_stop, .show = tpm1_binary_bios_measurements_show, };
linux-master
drivers/char/tpm/eventlog/tpm1.c
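The file above walks a TCG 1.2 ("TCPA") binary event log entry by entry in tpm1_bios_measurements_start()/_next(). As a rough illustration, here is a sketch of the same walk in Python; it assumes the conventional fixed-size entry header (u32 pcr_index, u32 event_type, 20-byte SHA-1 digest, u32 event_size, then event_size bytes of data) and a little-endian log, whereas the kernel code uses do_endian_conversion() to cover big-endian firmware as well.

import struct

# pcr_index, event_type, 20-byte digest, event_size (assumed little-endian layout).
TCPA_HEADER = struct.Struct("<II20sI")

def iter_tcpa_events(log: bytes):
    off = 0
    while off + TCPA_HEADER.size <= len(log):
        pcr, etype, digest, size = TCPA_HEADER.unpack_from(log, off)
        # Mirror the C checks: stop on a zeroed entry or when the entry
        # would run past the end of the log buffer.
        if (etype == 0 and size == 0) or off + TCPA_HEADER.size + size > len(log):
            return
        data = log[off + TCPA_HEADER.size:off + TCPA_HEADER.size + size]
        yield pcr, etype, digest, data
        off += TCPA_HEADER.size + size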
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2017 Google * * Authors: * Thiebaud Weksteen <[email protected]> */ #include <linux/device.h> #include <linux/efi.h> #include <linux/tpm_eventlog.h> #include "../tpm.h" #include "common.h" /* read binary bios log from EFI configuration table */ int tpm_read_log_efi(struct tpm_chip *chip) { struct efi_tcg2_final_events_table *final_tbl = NULL; int final_events_log_size = efi_tpm_final_log_size; struct linux_efi_tpm_eventlog *log_tbl; struct tpm_bios_log *log; u32 log_size; u8 tpm_log_version; void *tmp; int ret; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) return -ENODEV; if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) return -ENODEV; log = &chip->log; log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB); if (!log_tbl) { pr_err("Could not map UEFI TPM log table !\n"); return -ENOMEM; } log_size = log_tbl->size; memunmap(log_tbl); if (!log_size) { pr_warn("UEFI TPM log area empty\n"); return -EIO; } log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size, MEMREMAP_WB); if (!log_tbl) { pr_err("Could not map UEFI TPM log table payload!\n"); return -ENOMEM; } /* malloc EventLog space */ log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL); if (!log->bios_event_log) { ret = -ENOMEM; goto out; } log->bios_event_log_end = log->bios_event_log + log_size; tpm_log_version = log_tbl->version; ret = tpm_log_version; if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || final_events_log_size == 0 || tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) goto out; final_tbl = memremap(efi.tpm_final_log, sizeof(*final_tbl) + final_events_log_size, MEMREMAP_WB); if (!final_tbl) { pr_err("Could not map UEFI TPM final log\n"); devm_kfree(&chip->dev, log->bios_event_log); ret = -ENOMEM; goto out; } /* * The 'final events log' size excludes the 'final events preboot log' * at its beginning. */ final_events_log_size -= log_tbl->final_events_preboot_size; /* * Allocate memory for the 'combined log' where we will append the * 'final events log' to. */ tmp = devm_krealloc(&chip->dev, log->bios_event_log, log_size + final_events_log_size, GFP_KERNEL); if (!tmp) { devm_kfree(&chip->dev, log->bios_event_log); ret = -ENOMEM; goto out; } log->bios_event_log = tmp; /* * Append any of the 'final events log' that didn't also end up in the * 'main log'. Events can be logged in both if events are generated * between GetEventLog() and ExitBootServices(). */ memcpy((void *)log->bios_event_log + log_size, final_tbl->events + log_tbl->final_events_preboot_size, final_events_log_size); /* * The size of the 'combined log' is the size of the 'main log' plus * the size of the 'final events log'. */ log->bios_event_log_end = log->bios_event_log + log_size + final_events_log_size; out: memunmap(final_tbl); memunmap(log_tbl); return ret; }
linux-master
drivers/char/tpm/eventlog/efi.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 IBM Corporation * * Author: Ashley Lai <[email protected]> * Nayna Jain <[email protected]> * * Maintained by: <[email protected]> * * Read the event log created by the firmware on PPC64 */ #include <linux/device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_reserved_mem.h> #include <linux/tpm_eventlog.h> #include "../tpm.h" #include "common.h" static int tpm_read_log_memory_region(struct tpm_chip *chip) { struct device_node *node; struct resource res; int rc; node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0); if (!node) return -ENODEV; rc = of_address_to_resource(node, 0, &res); of_node_put(node); if (rc) return rc; chip->log.bios_event_log = devm_memremap(&chip->dev, res.start, resource_size(&res), MEMREMAP_WB); if (IS_ERR(chip->log.bios_event_log)) return -ENOMEM; chip->log.bios_event_log_end = chip->log.bios_event_log + resource_size(&res); return chip->flags & TPM_CHIP_FLAG_TPM2 ? EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 : EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; } int tpm_read_log_of(struct tpm_chip *chip) { struct device_node *np; const u32 *sizep; const u64 *basep; struct tpm_bios_log *log; u32 size; u64 base; log = &chip->log; if (chip->dev.parent && chip->dev.parent->of_node) np = chip->dev.parent->of_node; else return -ENODEV; if (of_property_read_bool(np, "powered-while-suspended")) chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED; sizep = of_get_property(np, "linux,sml-size", NULL); basep = of_get_property(np, "linux,sml-base", NULL); if (sizep == NULL && basep == NULL) return tpm_read_log_memory_region(chip); if (sizep == NULL || basep == NULL) return -EIO; /* * For both vtpm/tpm, firmware has log addr and log size in big * endian format. But in case of vtpm, there is a method called * sml-handover which is run during kernel init even before * device tree is setup. This sml-handover function takes care * of endianness and writes to sml-base and sml-size in little * endian format. For this reason, vtpm doesn't need conversion * but physical tpm needs the conversion. */ if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0 && of_property_match_string(np, "compatible", "IBM,vtpm20") < 0) { size = be32_to_cpup((__force __be32 *)sizep); base = be64_to_cpup((__force __be64 *)basep); } else { size = *sizep; base = *basep; } if (size == 0) { dev_warn(&chip->dev, "%s: Event log area empty\n", __func__); return -EIO; } log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL); if (!log->bios_event_log) return -ENOMEM; log->bios_event_log_end = log->bios_event_log + size; if (chip->flags & TPM_CHIP_FLAG_TPM2) return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; }
linux-master
drivers/char/tpm/eventlog/of.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2005 IBM Corporation * * Authors: * Seiji Munetoh <[email protected]> * Stefan Berger <[email protected]> * Reiner Sailer <[email protected]> * Kylene Hall <[email protected]> * Nayna Jain <[email protected]> * * Maintained by: <[email protected]> * * Access to the event log extended by the TCG BIOS of PC platform */ #include <linux/device.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/tpm_eventlog.h> #include "../tpm.h" #include "common.h" struct acpi_tcpa { struct acpi_table_header hdr; u16 platform_class; union { struct client_hdr { u32 log_max_len __packed; u64 log_start_addr __packed; } client; struct server_hdr { u16 reserved; u64 log_max_len __packed; u64 log_start_addr __packed; } server; }; }; /* Check that the given log is indeed a TPM2 log. */ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len) { struct tcg_efi_specid_event_head *efispecid; struct tcg_pcr_event *event_header; int n; if (len < sizeof(*event_header)) return false; len -= sizeof(*event_header); event_header = bios_event_log; if (len < sizeof(*efispecid)) return false; efispecid = (struct tcg_efi_specid_event_head *)event_header->event; n = memcmp(efispecid->signature, TCG_SPECID_SIG, sizeof(TCG_SPECID_SIG)); return n == 0; } /* read binary bios log */ int tpm_read_log_acpi(struct tpm_chip *chip) { struct acpi_tcpa *buff; acpi_status status; void __iomem *virt; u64 len, start; struct tpm_bios_log *log; struct acpi_table_tpm2 *tbl; struct acpi_tpm2_phy *tpm2_phy; int format; int ret; log = &chip->log; /* Unfortuntely ACPI does not associate the event log with a specific * TPM, like PPI. Thus all ACPI TPMs will read the same log. 
*/ if (!chip->acpi_dev_handle) return -ENODEV; if (chip->flags & TPM_CHIP_FLAG_TPM2) { status = acpi_get_table("TPM2", 1, (struct acpi_table_header **)&tbl); if (ACPI_FAILURE(status)) return -ENODEV; if (tbl->header.length < sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) { acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; } tpm2_phy = (void *)tbl + sizeof(*tbl); len = tpm2_phy->log_area_minimum_length; start = tpm2_phy->log_area_start_address; if (!start || !len) { acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; } acpi_put_table((struct acpi_table_header *)tbl); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; } else { /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ status = acpi_get_table(ACPI_SIG_TCPA, 1, (struct acpi_table_header **)&buff); if (ACPI_FAILURE(status)) return -ENODEV; switch (buff->platform_class) { case BIOS_SERVER: len = buff->server.log_max_len; start = buff->server.log_start_addr; break; case BIOS_CLIENT: default: len = buff->client.log_max_len; start = buff->client.log_start_addr; break; } acpi_put_table((struct acpi_table_header *)buff); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; } if (!len) { dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__); return -EIO; } /* malloc EventLog space */ log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL); if (!log->bios_event_log) return -ENOMEM; log->bios_event_log_end = log->bios_event_log + len; ret = -EIO; virt = acpi_os_map_iomem(start, len); if (!virt) { dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); /* try EFI log next */ ret = -ENODEV; goto err; } memcpy_fromio(log->bios_event_log, virt, len); acpi_os_unmap_iomem(virt, len); if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_tpm2_log(log->bios_event_log, len)) { /* try EFI log next */ ret = -ENODEV; goto err; } return format; err: devm_kfree(&chip->dev, log->bios_event_log); log->bios_event_log = NULL; return ret; }
linux-master
drivers/char/tpm/eventlog/acpi.c
#include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/machvec.h> #include <asm/agp_backend.h> #include "../../../arch/alpha/kernel/pci_impl.h" #include "agp.h" static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf) { alpha_agp_info *agp = agp_bridge->dev_private_data; dma_addr_t dma_addr; unsigned long pa; struct page *page; dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base; pa = agp->ops->translate(agp, dma_addr); if (pa == (unsigned long)-EINVAL) return VM_FAULT_SIGBUS; /* no translation */ /* * Get the page, inc the use count, and return it */ page = virt_to_page(__va(pa)); get_page(page); vmf->page = page; return 0; } static struct aper_size_info_fixed alpha_core_agp_sizes[] = { { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */ }; static const struct vm_operations_struct alpha_core_agp_vm_ops = { .fault = alpha_core_agp_vm_fault, }; static int alpha_core_agp_fetch_size(void) { return alpha_core_agp_sizes[0].size; } static int alpha_core_agp_configure(void) { alpha_agp_info *agp = agp_bridge->dev_private_data; agp_bridge->gart_bus_addr = agp->aperture.bus_base; return 0; } static void alpha_core_agp_cleanup(void) { alpha_agp_info *agp = agp_bridge->dev_private_data; agp->ops->cleanup(agp); } static void alpha_core_agp_tlbflush(struct agp_memory *mem) { alpha_agp_info *agp = agp_bridge->dev_private_data; alpha_mv.mv_pci_tbi(agp->hose, 0, -1); } static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode) { alpha_agp_info *agp = bridge->dev_private_data; agp->mode.lw = agp_collect_device_status(bridge, mode, agp->capability.lw); agp->mode.bits.enable = 1; agp->ops->configure(agp); agp_device_command(agp->mode.lw, false); } static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { alpha_agp_info *agp = agp_bridge->dev_private_data; int num_entries, status; void *temp; if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) return -EINVAL; temp = agp_bridge->current_size; num_entries = A_SIZE_FIX(temp)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; status = agp->ops->bind(agp, pg_start, mem); mb(); alpha_core_agp_tlbflush(mem); return status; } static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { alpha_agp_info *agp = agp_bridge->dev_private_data; int status; status = agp->ops->unbind(agp, pg_start, mem); alpha_core_agp_tlbflush(mem); return status; } static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a) { return 0; } struct agp_bridge_driver alpha_core_agp_driver = { .owner = THIS_MODULE, .aperture_sizes = alpha_core_agp_sizes, .num_aperture_sizes = 1, .size_type = FIXED_APER_SIZE, .cant_use_aperture = true, .masks = NULL, .fetch_size = alpha_core_agp_fetch_size, .configure = alpha_core_agp_configure, .agp_enable = alpha_core_agp_enable, .cleanup = alpha_core_agp_cleanup, .tlb_flush = alpha_core_agp_tlbflush, .mask_memory = agp_generic_mask_memory, .cache_flush = global_cache_flush, .create_gatt_table = alpha_core_agp_create_free_gatt_table, .free_gatt_table = alpha_core_agp_create_free_gatt_table, .insert_memory = alpha_core_agp_insert_memory, .remove_memory = alpha_core_agp_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, 
.agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; struct agp_bridge_data *alpha_bridge; int __init alpha_core_agp_setup(void) { alpha_agp_info *agp = alpha_mv.agp_info(); struct pci_dev *pdev; /* faked */ struct aper_size_info_fixed *aper_size; if (!agp) return -ENODEV; if (agp->ops->setup(agp)) return -ENODEV; /* * Build the aperture size descriptor */ aper_size = alpha_core_agp_sizes; aper_size->size = agp->aperture.size / (1024 * 1024); aper_size->num_entries = agp->aperture.size / PAGE_SIZE; aper_size->page_order = __ffs(aper_size->num_entries / 1024); /* * Build a fake pci_dev struct */ pdev = pci_alloc_dev(NULL); if (!pdev) return -ENOMEM; pdev->vendor = 0xffff; pdev->device = 0xffff; pdev->sysdata = agp->hose; alpha_bridge = agp_alloc_bridge(); if (!alpha_bridge) goto fail; alpha_bridge->driver = &alpha_core_agp_driver; alpha_bridge->vm_ops = &alpha_core_agp_vm_ops; alpha_bridge->current_size = aper_size; /* only 1 size */ alpha_bridge->dev_private_data = agp; alpha_bridge->dev = pdev; alpha_bridge->mode = agp->capability.lw; printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index); return agp_add_bridge(alpha_bridge); fail: kfree(pdev); return -ENOMEM; } static int __init agp_alpha_core_init(void) { if (agp_off) return -EINVAL; if (alpha_mv.agp_info) return alpha_core_agp_setup(); return -ENODEV; } static void __exit agp_alpha_core_cleanup(void) { agp_remove_bridge(alpha_bridge); agp_put_bridge(alpha_bridge); } module_init(agp_alpha_core_init); module_exit(agp_alpha_core_cleanup); MODULE_AUTHOR("Jeff Wiedemeier <[email protected]>"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/alpha-agp.c
/* * AGPGART driver frontend * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2003 Dave Jones * Copyright (C) 1999 Jeff Hartmann * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/pci.h> #include <linux/miscdevice.h> #include <linux/agp_backend.h> #include <linux/agpgart.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/uaccess.h> #include "agp.h" #include "compat_ioctl.h" struct agp_front_data agp_fe; struct agp_memory *agp_find_mem_by_key(int key) { struct agp_memory *curr; if (agp_fe.current_controller == NULL) return NULL; curr = agp_fe.current_controller->pool; while (curr != NULL) { if (curr->key == key) break; curr = curr->next; } DBG("key=%d -> mem=%p", key, curr); return curr; } static void agp_remove_from_pool(struct agp_memory *temp) { struct agp_memory *prev; struct agp_memory *next; /* Check to see if this is even in the memory pool */ DBG("mem=%p", temp); if (agp_find_mem_by_key(temp->key) != NULL) { next = temp->next; prev = temp->prev; if (prev != NULL) { prev->next = next; if (next != NULL) next->prev = prev; } else { /* This is the first item on the list */ if (next != NULL) next->prev = NULL; agp_fe.current_controller->pool = next; } } } /* * Routines for managing each client's segment list - * These routines handle adding and removing segments * to each auth'ed client. 
*/ static struct agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client, unsigned long offset, int size, pgprot_t page_prot) { struct agp_segment_priv *seg; int i; off_t pg_start; size_t pg_count; pg_start = offset / 4096; pg_count = size / 4096; seg = *(client->segments); for (i = 0; i < client->num_segments; i++) { if ((seg[i].pg_start == pg_start) && (seg[i].pg_count == pg_count) && (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { return seg + i; } } return NULL; } static void agp_remove_seg_from_client(struct agp_client *client) { DBG("client=%p", client); if (client->segments != NULL) { if (*(client->segments) != NULL) { DBG("Freeing %p from client %p", *(client->segments), client); kfree(*(client->segments)); } DBG("Freeing %p from client %p", client->segments, client); kfree(client->segments); client->segments = NULL; } } static void agp_add_seg_to_client(struct agp_client *client, struct agp_segment_priv ** seg, int num_segments) { struct agp_segment_priv **prev_seg; prev_seg = client->segments; if (prev_seg != NULL) agp_remove_seg_from_client(client); DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client); client->num_segments = num_segments; client->segments = seg; } static pgprot_t agp_convert_mmap_flags(int prot) { unsigned long prot_bits; prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED; return vm_get_page_prot(prot_bits); } int agp_create_segment(struct agp_client *client, struct agp_region *region) { struct agp_segment_priv **ret_seg; struct agp_segment_priv *seg; struct agp_segment *user_seg; size_t i; seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); if (seg == NULL) { kfree(region->seg_list); region->seg_list = NULL; return -ENOMEM; } user_seg = region->seg_list; for (i = 0; i < region->seg_count; i++) { seg[i].pg_start = user_seg[i].pg_start; seg[i].pg_count = user_seg[i].pg_count; seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); } kfree(region->seg_list); region->seg_list = NULL; ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); if (ret_seg == NULL) { kfree(seg); return -ENOMEM; } *ret_seg = seg; agp_add_seg_to_client(client, ret_seg, region->seg_count); return 0; } /* End - Routines for managing each client's segment list */ /* This function must only be called when current_controller != NULL */ static void agp_insert_into_pool(struct agp_memory * temp) { struct agp_memory *prev; prev = agp_fe.current_controller->pool; if (prev != NULL) { prev->prev = temp; temp->next = prev; } agp_fe.current_controller->pool = temp; } /* File private list routines */ struct agp_file_private *agp_find_private(pid_t pid) { struct agp_file_private *curr; curr = agp_fe.file_priv_list; while (curr != NULL) { if (curr->my_pid == pid) return curr; curr = curr->next; } return NULL; } static void agp_insert_file_private(struct agp_file_private * priv) { struct agp_file_private *prev; prev = agp_fe.file_priv_list; if (prev != NULL) prev->prev = priv; priv->next = prev; agp_fe.file_priv_list = priv; } static void agp_remove_file_private(struct agp_file_private * priv) { struct agp_file_private *next; struct agp_file_private *prev; next = priv->next; prev = priv->prev; if (prev != NULL) { prev->next = next; if (next != NULL) next->prev = prev; } else { if (next != NULL) next->prev = NULL; agp_fe.file_priv_list = next; } } /* End - File flag list routines */ /* * Wrappers for agp_free_memory & agp_allocate_memory * These make sure that internal lists are kept updated. 
*/ void agp_free_memory_wrap(struct agp_memory *memory) { agp_remove_from_pool(memory); agp_free_memory(memory); } struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) { struct agp_memory *memory; memory = agp_allocate_memory(agp_bridge, pg_count, type); if (memory == NULL) return NULL; agp_insert_into_pool(memory); return memory; } /* Routines for managing the list of controllers - * These routines manage the current controller, and the list of * controllers */ static struct agp_controller *agp_find_controller_by_pid(pid_t id) { struct agp_controller *controller; controller = agp_fe.controllers; while (controller != NULL) { if (controller->pid == id) return controller; controller = controller->next; } return NULL; } static struct agp_controller *agp_create_controller(pid_t id) { struct agp_controller *controller; controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL); if (controller == NULL) return NULL; controller->pid = id; return controller; } static int agp_insert_controller(struct agp_controller *controller) { struct agp_controller *prev_controller; prev_controller = agp_fe.controllers; controller->next = prev_controller; if (prev_controller != NULL) prev_controller->prev = controller; agp_fe.controllers = controller; return 0; } static void agp_remove_all_clients(struct agp_controller *controller) { struct agp_client *client; struct agp_client *temp; client = controller->clients; while (client) { struct agp_file_private *priv; temp = client; agp_remove_seg_from_client(temp); priv = agp_find_private(temp->pid); if (priv != NULL) { clear_bit(AGP_FF_IS_VALID, &priv->access_flags); clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); } client = client->next; kfree(temp); } } static void agp_remove_all_memory(struct agp_controller *controller) { struct agp_memory *memory; struct agp_memory *temp; memory = controller->pool; while (memory) { temp = memory; memory = memory->next; agp_free_memory_wrap(temp); } } static int agp_remove_controller(struct agp_controller *controller) { struct agp_controller *prev_controller; struct agp_controller *next_controller; prev_controller = controller->prev; next_controller = controller->next; if (prev_controller != NULL) { prev_controller->next = next_controller; if (next_controller != NULL) next_controller->prev = prev_controller; } else { if (next_controller != NULL) next_controller->prev = NULL; agp_fe.controllers = next_controller; } agp_remove_all_memory(controller); agp_remove_all_clients(controller); if (agp_fe.current_controller == controller) { agp_fe.current_controller = NULL; agp_fe.backend_acquired = false; agp_backend_release(agp_bridge); } kfree(controller); return 0; } static void agp_controller_make_current(struct agp_controller *controller) { struct agp_client *clients; clients = controller->clients; while (clients != NULL) { struct agp_file_private *priv; priv = agp_find_private(clients->pid); if (priv != NULL) { set_bit(AGP_FF_IS_VALID, &priv->access_flags); set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); } clients = clients->next; } agp_fe.current_controller = controller; } static void agp_controller_release_current(struct agp_controller *controller, struct agp_file_private *controller_priv) { struct agp_client *clients; clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); clients = controller->clients; while (clients != NULL) { struct agp_file_private *priv; priv = agp_find_private(clients->pid); if (priv != NULL) clear_bit(AGP_FF_IS_VALID, &priv->access_flags); clients = clients->next; } 
agp_fe.current_controller = NULL; agp_fe.used_by_controller = false; agp_backend_release(agp_bridge); } /* * Routines for managing client lists - * These routines are for managing the list of auth'ed clients. */ static struct agp_client *agp_find_client_in_controller(struct agp_controller *controller, pid_t id) { struct agp_client *client; if (controller == NULL) return NULL; client = controller->clients; while (client != NULL) { if (client->pid == id) return client; client = client->next; } return NULL; } static struct agp_controller *agp_find_controller_for_client(pid_t id) { struct agp_controller *controller; controller = agp_fe.controllers; while (controller != NULL) { if ((agp_find_client_in_controller(controller, id)) != NULL) return controller; controller = controller->next; } return NULL; } struct agp_client *agp_find_client_by_pid(pid_t id) { struct agp_client *temp; if (agp_fe.current_controller == NULL) return NULL; temp = agp_find_client_in_controller(agp_fe.current_controller, id); return temp; } static void agp_insert_client(struct agp_client *client) { struct agp_client *prev_client; prev_client = agp_fe.current_controller->clients; client->next = prev_client; if (prev_client != NULL) prev_client->prev = client; agp_fe.current_controller->clients = client; agp_fe.current_controller->num_clients++; } struct agp_client *agp_create_client(pid_t id) { struct agp_client *new_client; new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL); if (new_client == NULL) return NULL; new_client->pid = id; agp_insert_client(new_client); return new_client; } int agp_remove_client(pid_t id) { struct agp_client *client; struct agp_client *prev_client; struct agp_client *next_client; struct agp_controller *controller; controller = agp_find_controller_for_client(id); if (controller == NULL) return -EINVAL; client = agp_find_client_in_controller(controller, id); if (client == NULL) return -EINVAL; prev_client = client->prev; next_client = client->next; if (prev_client != NULL) { prev_client->next = next_client; if (next_client != NULL) next_client->prev = prev_client; } else { if (next_client != NULL) next_client->prev = NULL; controller->clients = next_client; } controller->num_clients--; agp_remove_seg_from_client(client); kfree(client); return 0; } /* End - Routines for managing client lists */ /* File Operations */ static int agp_mmap(struct file *file, struct vm_area_struct *vma) { unsigned int size, current_size; unsigned long offset; struct agp_client *client; struct agp_file_private *priv = file->private_data; struct agp_kern_info kerninfo; mutex_lock(&(agp_fe.agp_mutex)); if (agp_fe.backend_acquired != true) goto out_eperm; if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) goto out_eperm; agp_copy_info(agp_bridge, &kerninfo); size = vma->vm_end - vma->vm_start; current_size = kerninfo.aper_size; current_size = current_size * 0x100000; offset = vma->vm_pgoff << PAGE_SHIFT; DBG("%lx:%lx", offset, offset+size); if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { if ((size + offset) > current_size) goto out_inval; client = agp_find_client_by_pid(current->pid); if (client == NULL) goto out_eperm; if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) goto out_inval; DBG("client vm_ops=%p", kerninfo.vm_ops); if (kerninfo.vm_ops) { vma->vm_ops = kerninfo.vm_ops; } else if (io_remap_pfn_range(vma, vma->vm_start, (kerninfo.aper_base + offset) >> PAGE_SHIFT, size, pgprot_writecombine(vma->vm_page_prot))) { goto out_again; } mutex_unlock(&(agp_fe.agp_mutex)); return 0; } 
if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { if (size != current_size) goto out_inval; DBG("controller vm_ops=%p", kerninfo.vm_ops); if (kerninfo.vm_ops) { vma->vm_ops = kerninfo.vm_ops; } else if (io_remap_pfn_range(vma, vma->vm_start, kerninfo.aper_base >> PAGE_SHIFT, size, pgprot_writecombine(vma->vm_page_prot))) { goto out_again; } mutex_unlock(&(agp_fe.agp_mutex)); return 0; } out_eperm: mutex_unlock(&(agp_fe.agp_mutex)); return -EPERM; out_inval: mutex_unlock(&(agp_fe.agp_mutex)); return -EINVAL; out_again: mutex_unlock(&(agp_fe.agp_mutex)); return -EAGAIN; } static int agp_release(struct inode *inode, struct file *file) { struct agp_file_private *priv = file->private_data; mutex_lock(&(agp_fe.agp_mutex)); DBG("priv=%p", priv); if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { struct agp_controller *controller; controller = agp_find_controller_by_pid(priv->my_pid); if (controller != NULL) { if (controller == agp_fe.current_controller) agp_controller_release_current(controller, priv); agp_remove_controller(controller); controller = NULL; } } if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) agp_remove_client(priv->my_pid); agp_remove_file_private(priv); kfree(priv); file->private_data = NULL; mutex_unlock(&(agp_fe.agp_mutex)); return 0; } static int agp_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct agp_file_private *priv; struct agp_client *client; if (minor != AGPGART_MINOR) return -ENXIO; mutex_lock(&(agp_fe.agp_mutex)); priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL); if (priv == NULL) { mutex_unlock(&(agp_fe.agp_mutex)); return -ENOMEM; } set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); priv->my_pid = current->pid; if (capable(CAP_SYS_RAWIO)) /* Root priv, can be controller */ set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); client = agp_find_client_by_pid(current->pid); if (client != NULL) { set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); set_bit(AGP_FF_IS_VALID, &priv->access_flags); } file->private_data = (void *) priv; agp_insert_file_private(priv); DBG("private=%p, client=%p", priv, client); mutex_unlock(&(agp_fe.agp_mutex)); return 0; } static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_info userinfo; struct agp_kern_info kerninfo; agp_copy_info(agp_bridge, &kerninfo); memset(&userinfo, 0, sizeof(userinfo)); userinfo.version.major = kerninfo.version.major; userinfo.version.minor = kerninfo.version.minor; userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16); userinfo.agp_mode = kerninfo.mode; userinfo.aper_base = kerninfo.aper_base; userinfo.aper_size = kerninfo.aper_size; userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; userinfo.pg_used = kerninfo.current_memory; if (copy_to_user(arg, &userinfo, sizeof(struct agp_info))) return -EFAULT; return 0; } int agpioc_acquire_wrap(struct agp_file_private *priv) { struct agp_controller *controller; DBG(""); if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) return -EPERM; if (agp_fe.current_controller != NULL) return -EBUSY; if (!agp_bridge) return -ENODEV; if (atomic_read(&agp_bridge->agp_in_use)) return -EBUSY; atomic_inc(&agp_bridge->agp_in_use); agp_fe.backend_acquired = true; controller = agp_find_controller_by_pid(priv->my_pid); if (controller != NULL) { agp_controller_make_current(controller); } else { controller = agp_create_controller(priv->my_pid); if (controller == NULL) { agp_fe.backend_acquired = false; agp_backend_release(agp_bridge); return -ENOMEM; } 
agp_insert_controller(controller); agp_controller_make_current(controller); } set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); set_bit(AGP_FF_IS_VALID, &priv->access_flags); return 0; } int agpioc_release_wrap(struct agp_file_private *priv) { DBG(""); agp_controller_release_current(agp_fe.current_controller, priv); return 0; } int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_setup mode; DBG(""); if (copy_from_user(&mode, arg, sizeof(struct agp_setup))) return -EFAULT; agp_enable(agp_bridge, mode.agp_mode); return 0; } static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_region reserve; struct agp_client *client; struct agp_file_private *client_priv; DBG(""); if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) return -EFAULT; if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) return -EFAULT; client = agp_find_client_by_pid(reserve.pid); if (reserve.seg_count == 0) { /* remove a client */ client_priv = agp_find_private(reserve.pid); if (client_priv != NULL) { set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); } if (client == NULL) { /* client is already removed */ return 0; } return agp_remove_client(reserve.pid); } else { struct agp_segment *segment; if (reserve.seg_count >= 16384) return -EINVAL; segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count), GFP_KERNEL); if (segment == NULL) return -ENOMEM; if (copy_from_user(segment, (void __user *) reserve.seg_list, sizeof(struct agp_segment) * reserve.seg_count)) { kfree(segment); return -EFAULT; } reserve.seg_list = segment; if (client == NULL) { /* Create the client and add the segment */ client = agp_create_client(reserve.pid); if (client == NULL) { kfree(segment); return -ENOMEM; } client_priv = agp_find_private(reserve.pid); if (client_priv != NULL) { set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); } } return agp_create_segment(client, &reserve); } /* Will never really happen */ return -EINVAL; } int agpioc_protect_wrap(struct agp_file_private *priv) { DBG(""); /* This function is not currently implemented */ return -EINVAL; } static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_memory *memory; struct agp_allocate alloc; DBG(""); if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate))) return -EFAULT; if (alloc.type >= AGP_USER_TYPES) return -EINVAL; memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); if (memory == NULL) return -ENOMEM; alloc.key = memory->key; alloc.physical = memory->physical; if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) { agp_free_memory_wrap(memory); return -EFAULT; } return 0; } int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg) { struct agp_memory *memory; DBG(""); memory = agp_find_mem_by_key(arg); if (memory == NULL) return -EINVAL; agp_free_memory_wrap(memory); return 0; } static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_bind bind_info; struct agp_memory *memory; DBG(""); if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind))) return -EFAULT; memory = agp_find_mem_by_key(bind_info.key); if (memory == NULL) return -EINVAL; return agp_bind_memory(memory, bind_info.pg_start); } static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_memory *memory; struct agp_unbind unbind; DBG(""); if (copy_from_user(&unbind, arg, 
sizeof(struct agp_unbind))) return -EFAULT; memory = agp_find_mem_by_key(unbind.key); if (memory == NULL) return -EINVAL; return agp_unbind_memory(memory); } static long agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct agp_file_private *curr_priv = file->private_data; int ret_val = -ENOTTY; DBG("priv=%p, cmd=%x", curr_priv, cmd); mutex_lock(&(agp_fe.agp_mutex)); if ((agp_fe.current_controller == NULL) && (cmd != AGPIOC_ACQUIRE)) { ret_val = -EINVAL; goto ioctl_out; } if ((agp_fe.backend_acquired != true) && (cmd != AGPIOC_ACQUIRE)) { ret_val = -EBUSY; goto ioctl_out; } if (cmd != AGPIOC_ACQUIRE) { if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { ret_val = -EPERM; goto ioctl_out; } /* Use the original pid of the controller, * in case it's threaded */ if (agp_fe.current_controller->pid != curr_priv->my_pid) { ret_val = -EBUSY; goto ioctl_out; } } switch (cmd) { case AGPIOC_INFO: ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_ACQUIRE: ret_val = agpioc_acquire_wrap(curr_priv); break; case AGPIOC_RELEASE: ret_val = agpioc_release_wrap(curr_priv); break; case AGPIOC_SETUP: ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_RESERVE: ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_PROTECT: ret_val = agpioc_protect_wrap(curr_priv); break; case AGPIOC_ALLOCATE: ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_DEALLOCATE: ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); break; case AGPIOC_BIND: ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_UNBIND: ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_CHIPSET_FLUSH: break; } ioctl_out: DBG("ioctl returns %d\n", ret_val); mutex_unlock(&(agp_fe.agp_mutex)); return ret_val; } static const struct file_operations agp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = agp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_agp_ioctl, #endif .mmap = agp_mmap, .open = agp_open, .release = agp_release, }; static struct miscdevice agp_miscdev = { .minor = AGPGART_MINOR, .name = "agpgart", .fops = &agp_fops }; int agp_frontend_initialize(void) { memset(&agp_fe, 0, sizeof(struct agp_front_data)); mutex_init(&(agp_fe.agp_mutex)); if (misc_register(&agp_miscdev)) { printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR); return -EIO; } return 0; } void agp_frontend_cleanup(void) { misc_deregister(&agp_miscdev); }
linux-master
drivers/char/agp/frontend.c
/* * AGPGART driver frontend compatibility ioctls * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2003 Dave Jones * Copyright (C) 1999 Jeff Hartmann * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/fs.h> #include <linux/agpgart.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "agp.h" #include "compat_ioctl.h" static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_info32 userinfo; struct agp_kern_info kerninfo; agp_copy_info(agp_bridge, &kerninfo); userinfo.version.major = kerninfo.version.major; userinfo.version.minor = kerninfo.version.minor; userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16); userinfo.agp_mode = kerninfo.mode; userinfo.aper_base = (compat_long_t)kerninfo.aper_base; userinfo.aper_size = kerninfo.aper_size; userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; userinfo.pg_used = kerninfo.current_memory; if (copy_to_user(arg, &userinfo, sizeof(userinfo))) return -EFAULT; return 0; } static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_region32 ureserve; struct agp_region kreserve; struct agp_client *client; struct agp_file_private *client_priv; DBG(""); if (copy_from_user(&ureserve, arg, sizeof(ureserve))) return -EFAULT; if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32)) return -EFAULT; kreserve.pid = ureserve.pid; kreserve.seg_count = ureserve.seg_count; client = agp_find_client_by_pid(kreserve.pid); if (kreserve.seg_count == 0) { /* remove a client */ client_priv = agp_find_private(kreserve.pid); if (client_priv != NULL) { set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); } if (client == NULL) { /* client is already removed */ return 0; } return agp_remove_client(kreserve.pid); } else { struct agp_segment32 *usegment; struct agp_segment *ksegment; int seg; if (ureserve.seg_count >= 16384) return -EINVAL; usegment = kmalloc_array(ureserve.seg_count, sizeof(*usegment), GFP_KERNEL); if (!usegment) return -ENOMEM; ksegment = kmalloc_array(kreserve.seg_count, sizeof(*ksegment), GFP_KERNEL); if (!ksegment) { kfree(usegment); return -ENOMEM; } if (copy_from_user(usegment, (void __user *) ureserve.seg_list, sizeof(*usegment) * ureserve.seg_count)) { kfree(usegment); kfree(ksegment); return -EFAULT; } 
for (seg = 0; seg < ureserve.seg_count; seg++) { ksegment[seg].pg_start = usegment[seg].pg_start; ksegment[seg].pg_count = usegment[seg].pg_count; ksegment[seg].prot = usegment[seg].prot; } kfree(usegment); kreserve.seg_list = ksegment; if (client == NULL) { /* Create the client and add the segment */ client = agp_create_client(kreserve.pid); if (client == NULL) { kfree(ksegment); return -ENOMEM; } client_priv = agp_find_private(kreserve.pid); if (client_priv != NULL) { set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); } } return agp_create_segment(client, &kreserve); } /* Will never really happen */ return -EINVAL; } static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_memory *memory; struct agp_allocate32 alloc; DBG(""); if (copy_from_user(&alloc, arg, sizeof(alloc))) return -EFAULT; memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); if (memory == NULL) return -ENOMEM; alloc.key = memory->key; alloc.physical = memory->physical; if (copy_to_user(arg, &alloc, sizeof(alloc))) { agp_free_memory_wrap(memory); return -EFAULT; } return 0; } static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_bind32 bind_info; struct agp_memory *memory; DBG(""); if (copy_from_user(&bind_info, arg, sizeof(bind_info))) return -EFAULT; memory = agp_find_mem_by_key(bind_info.key); if (memory == NULL) return -EINVAL; return agp_bind_memory(memory, bind_info.pg_start); } static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_memory *memory; struct agp_unbind32 unbind; DBG(""); if (copy_from_user(&unbind, arg, sizeof(unbind))) return -EFAULT; memory = agp_find_mem_by_key(unbind.key); if (memory == NULL) return -EINVAL; return agp_unbind_memory(memory); } long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct agp_file_private *curr_priv = file->private_data; int ret_val = -ENOTTY; mutex_lock(&(agp_fe.agp_mutex)); if ((agp_fe.current_controller == NULL) && (cmd != AGPIOC_ACQUIRE32)) { ret_val = -EINVAL; goto ioctl_out; } if ((agp_fe.backend_acquired != true) && (cmd != AGPIOC_ACQUIRE32)) { ret_val = -EBUSY; goto ioctl_out; } if (cmd != AGPIOC_ACQUIRE32) { if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { ret_val = -EPERM; goto ioctl_out; } /* Use the original pid of the controller, * in case it's threaded */ if (agp_fe.current_controller->pid != curr_priv->my_pid) { ret_val = -EBUSY; goto ioctl_out; } } switch (cmd) { case AGPIOC_INFO32: ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_ACQUIRE32: ret_val = agpioc_acquire_wrap(curr_priv); break; case AGPIOC_RELEASE32: ret_val = agpioc_release_wrap(curr_priv); break; case AGPIOC_SETUP32: ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_RESERVE32: ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_PROTECT32: ret_val = agpioc_protect_wrap(curr_priv); break; case AGPIOC_ALLOCATE32: ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_DEALLOCATE32: ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); break; case AGPIOC_BIND32: ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_UNBIND32: ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_CHIPSET_FLUSH32: break; } ioctl_out: DBG("ioctl returns %d\n", ret_val); 
mutex_unlock(&(agp_fe.agp_mutex)); return ret_val; }
linux-master
drivers/char/agp/compat_ioctl.c
// SPDX-License-Identifier: GPL-2.0-only /* * VIA AGPGART routines. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include "agp.h" static const struct pci_device_id agp_via_pci_table[]; #define VIA_GARTCTRL 0x80 #define VIA_APSIZE 0x84 #define VIA_ATTBASE 0x88 #define VIA_AGP3_GARTCTRL 0x90 #define VIA_AGP3_APSIZE 0x94 #define VIA_AGP3_ATTBASE 0x98 #define VIA_AGPSEL 0xfd static int via_fetch_size(void) { int i; u8 temp; struct aper_size_info_8 *values; values = A_SIZE_8(agp_bridge->driver->aperture_sizes); pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp); return 0; } static int via_configure(void) { struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* GART control register */ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f); /* attbase - aperture GATT base */ pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE, (agp_bridge->gatt_bus_addr & 0xfffff000) | 3); return 0; } static void via_cleanup(void) { struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge->previous_size); pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value); /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up * during reinitialization. */ } static void via_tlbflush(struct agp_memory *mem) { u32 temp; pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp); temp |= (1<<7); pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp); temp &= ~(1<<7); pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp); } static const struct aper_size_info_8 via_generic_sizes[9] = { {256, 65536, 6, 0}, {128, 32768, 5, 128}, {64, 16384, 4, 192}, {32, 8192, 3, 224}, {16, 4096, 2, 240}, {8, 2048, 1, 248}, {4, 1024, 0, 252}, {2, 512, 0, 254}, {1, 256, 0, 255} }; static int via_fetch_size_agp3(void) { int i; u16 temp; struct aper_size_info_16 *values; values = A_SIZE_16(agp_bridge->driver->aperture_sizes); pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp); temp &= 0xfff; for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static int via_configure_agp3(void) { u32 temp; /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture GATT base */ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE, agp_bridge->gatt_bus_addr & 0xfffff000); /* 1. Enable GTLB in RX90<7>, all AGP aperture access needs to fetch * translation table first. * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the * graphics AGP aperture for the AGP3.0 port. 
*/ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp); pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7)); return 0; } static void via_cleanup_agp3(void) { struct aper_size_info_16 *previous_size; previous_size = A_SIZE_16(agp_bridge->previous_size); pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value); } static void via_tlbflush_agp3(struct agp_memory *mem) { u32 temp; pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp); pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7)); pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp); } static const struct agp_bridge_driver via_agp3_driver = { .owner = THIS_MODULE, .aperture_sizes = agp3_generic_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 10, .needs_scratch_page = true, .configure = via_configure_agp3, .fetch_size = via_fetch_size_agp3, .cleanup = via_cleanup_agp3, .tlb_flush = via_tlbflush_agp3, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver via_driver = { .owner = THIS_MODULE, .aperture_sizes = via_generic_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 9, .needs_scratch_page = true, .configure = via_configure, .fetch_size = via_fetch_size, .cleanup = via_cleanup, .tlb_flush = via_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static struct agp_device_ids via_agp_device_ids[] = { { .device_id = PCI_DEVICE_ID_VIA_82C597_0, .chipset_name = "Apollo VP3", }, { .device_id = PCI_DEVICE_ID_VIA_82C598_0, .chipset_name = "Apollo MVP3", }, { .device_id = PCI_DEVICE_ID_VIA_8501_0, .chipset_name = "Apollo MVP4", }, /* VT8601 */ { .device_id = PCI_DEVICE_ID_VIA_8601_0, .chipset_name = "Apollo ProMedia/PLE133Ta", }, /* VT82C693A / VT28C694T */ { .device_id = PCI_DEVICE_ID_VIA_82C691_0, .chipset_name = "Apollo Pro 133", }, { .device_id = PCI_DEVICE_ID_VIA_8371_0, .chipset_name = "KX133", }, /* VT8633 */ { .device_id = PCI_DEVICE_ID_VIA_8633_0, .chipset_name = "Pro 266", }, { .device_id = PCI_DEVICE_ID_VIA_XN266, .chipset_name = "Apollo Pro266", }, /* VT8361 */ { .device_id = PCI_DEVICE_ID_VIA_8361, .chipset_name = "KLE133", }, /* VT8365 / VT8362 */ { .device_id = PCI_DEVICE_ID_VIA_8363_0, .chipset_name = "Twister-K/KT133x/KM133", }, /* VT8753A */ { .device_id = PCI_DEVICE_ID_VIA_8753_0, .chipset_name = "P4X266", }, /* VT8366 */ 
{ .device_id = PCI_DEVICE_ID_VIA_8367_0, .chipset_name = "KT266/KY266x/KT333", }, /* VT8633 (for CuMine/ Celeron) */ { .device_id = PCI_DEVICE_ID_VIA_8653_0, .chipset_name = "Pro266T", }, /* KM266 / PM266 */ { .device_id = PCI_DEVICE_ID_VIA_XM266, .chipset_name = "PM266/KM266", }, /* CLE266 */ { .device_id = PCI_DEVICE_ID_VIA_862X_0, .chipset_name = "CLE266", }, { .device_id = PCI_DEVICE_ID_VIA_8377_0, .chipset_name = "KT400/KT400A/KT600", }, /* VT8604 / VT8605 / VT8603 * (Apollo Pro133A chipset with S3 Savage4) */ { .device_id = PCI_DEVICE_ID_VIA_8605_0, .chipset_name = "ProSavage PM133/PL133/PN133" }, /* P4M266x/P4N266 */ { .device_id = PCI_DEVICE_ID_VIA_8703_51_0, .chipset_name = "P4M266x/P4N266", }, /* VT8754 */ { .device_id = PCI_DEVICE_ID_VIA_8754C_0, .chipset_name = "PT800", }, /* P4X600 */ { .device_id = PCI_DEVICE_ID_VIA_8763_0, .chipset_name = "P4X600" }, /* KM400 */ { .device_id = PCI_DEVICE_ID_VIA_8378_0, .chipset_name = "KM400/KM400A", }, /* PT880 */ { .device_id = PCI_DEVICE_ID_VIA_PT880, .chipset_name = "PT880", }, /* PT880 Ultra */ { .device_id = PCI_DEVICE_ID_VIA_PT880ULTRA, .chipset_name = "PT880 Ultra", }, /* PT890 */ { .device_id = PCI_DEVICE_ID_VIA_8783_0, .chipset_name = "PT890", }, /* PM800/PN800/PM880/PN880 */ { .device_id = PCI_DEVICE_ID_VIA_PX8X0_0, .chipset_name = "PM800/PN800/PM880/PN880", }, /* KT880 */ { .device_id = PCI_DEVICE_ID_VIA_3269_0, .chipset_name = "KT880", }, /* KTxxx/Px8xx */ { .device_id = PCI_DEVICE_ID_VIA_83_87XX_1, .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx", }, /* P4M800 */ { .device_id = PCI_DEVICE_ID_VIA_3296_0, .chipset_name = "P4M800", }, /* P4M800CE */ { .device_id = PCI_DEVICE_ID_VIA_P4M800CE, .chipset_name = "VT3314", }, /* VT3324 / CX700 */ { .device_id = PCI_DEVICE_ID_VIA_VT3324, .chipset_name = "CX700", }, /* VT3336 - this is a chipset for AMD Athlon/K8 CPU. Due to K8's unique * architecture, the AGP resource and behavior are different from * the traditional AGP which resides only in chipset. AGP is used * by 3D driver which wasn't available for the VT3336 and VT3364 * generation until now. Unfortunately, by testing, VT3364 works * but VT3336 doesn't. - explanation from via, just leave this as * as a placeholder to avoid future patches adding it back in. */ #if 0 { .device_id = PCI_DEVICE_ID_VIA_VT3336, .chipset_name = "VT3336", }, #endif /* P4M890 */ { .device_id = PCI_DEVICE_ID_VIA_P4M890, .chipset_name = "P4M890", }, /* P4M900 */ { .device_id = PCI_DEVICE_ID_VIA_VT3364, .chipset_name = "P4M900", }, { }, /* dummy final entry, always present */ }; /* * VIA's AGP3 chipsets do magick to put the AGP bridge compliant * with the same standards version as the graphics card. */ static void check_via_agp3 (struct agp_bridge_data *bridge) { u8 reg; pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg); /* Check AGP 2.0 compatibility mode. */ if ((reg & (1<<1))==0) bridge->driver = &via_agp3_driver; } static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_device_ids *devs = via_agp_device_ids; struct agp_bridge_data *bridge; int j = 0; u8 cap_ptr; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; j = ent - agp_via_pci_table; printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->dev = pdev; bridge->capndx = cap_ptr; bridge->driver = &via_driver; /* * Garg, there are KT400s with KT266 IDs. */ if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) { /* Is there a KT400 subsystem ? 
*/ if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) { printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n"); check_via_agp3(bridge); } } /* If this is an AGP3 bridge, check which mode its in and adjust. */ get_agp_version(bridge); if (bridge->major_version >= 3) check_via_agp3(bridge); /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_via_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static int agp_via_resume(struct device *dev) { struct agp_bridge_data *bridge = dev_get_drvdata(dev); if (bridge->driver == &via_agp3_driver) return via_configure_agp3(); else if (bridge->driver == &via_driver) return via_configure(); return 0; } /* must be the same order as name table above */ static const struct pci_device_id agp_via_pci_table[] = { #define ID(x) \ { \ .class = (PCI_CLASS_BRIDGE_HOST << 8), \ .class_mask = ~0, \ .vendor = PCI_VENDOR_ID_VIA, \ .device = x, \ .subvendor = PCI_ANY_ID, \ .subdevice = PCI_ANY_ID, \ } ID(PCI_DEVICE_ID_VIA_82C597_0), ID(PCI_DEVICE_ID_VIA_82C598_0), ID(PCI_DEVICE_ID_VIA_8501_0), ID(PCI_DEVICE_ID_VIA_8601_0), ID(PCI_DEVICE_ID_VIA_82C691_0), ID(PCI_DEVICE_ID_VIA_8371_0), ID(PCI_DEVICE_ID_VIA_8633_0), ID(PCI_DEVICE_ID_VIA_XN266), ID(PCI_DEVICE_ID_VIA_8361), ID(PCI_DEVICE_ID_VIA_8363_0), ID(PCI_DEVICE_ID_VIA_8753_0), ID(PCI_DEVICE_ID_VIA_8367_0), ID(PCI_DEVICE_ID_VIA_8653_0), ID(PCI_DEVICE_ID_VIA_XM266), ID(PCI_DEVICE_ID_VIA_862X_0), ID(PCI_DEVICE_ID_VIA_8377_0), ID(PCI_DEVICE_ID_VIA_8605_0), ID(PCI_DEVICE_ID_VIA_8703_51_0), ID(PCI_DEVICE_ID_VIA_8754C_0), ID(PCI_DEVICE_ID_VIA_8763_0), ID(PCI_DEVICE_ID_VIA_8378_0), ID(PCI_DEVICE_ID_VIA_PT880), ID(PCI_DEVICE_ID_VIA_PT880ULTRA), ID(PCI_DEVICE_ID_VIA_8783_0), ID(PCI_DEVICE_ID_VIA_PX8X0_0), ID(PCI_DEVICE_ID_VIA_3269_0), ID(PCI_DEVICE_ID_VIA_83_87XX_1), ID(PCI_DEVICE_ID_VIA_3296_0), ID(PCI_DEVICE_ID_VIA_P4M800CE), ID(PCI_DEVICE_ID_VIA_VT3324), ID(PCI_DEVICE_ID_VIA_P4M890), ID(PCI_DEVICE_ID_VIA_VT3364), { } }; MODULE_DEVICE_TABLE(pci, agp_via_pci_table); static DEFINE_SIMPLE_DEV_PM_OPS(agp_via_pm_ops, NULL, agp_via_resume); static struct pci_driver agp_via_pci_driver = { .name = "agpgart-via", .id_table = agp_via_pci_table, .probe = agp_via_probe, .remove = agp_via_remove, .driver.pm = &agp_via_pm_ops, }; static int __init agp_via_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_via_pci_driver); } static void __exit agp_via_cleanup(void) { pci_unregister_driver(&agp_via_pci_driver); } module_init(agp_via_init); module_exit(agp_via_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dave Jones");
linux-master
drivers/char/agp/via-agp.c
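/*
 * Illustrative sketch (not part of the driver source above): agp_via_probe()
 * switches to via_agp3_driver only when the bridge advertises AGP 3.x and is
 * not strapped into AGP 2.0 compatibility mode.  The helper below restates
 * the version check in isolation; read_agp_major_version() is a hypothetical
 * name, and the register layout (major version in the high nibble of the
 * byte at capptr + PCI_AGP_VERSION) is assumed from include/uapi/linux/pci_regs.h.
 */
#include <linux/pci.h>

static int read_agp_major_version(struct pci_dev *pdev)
{
	u8 cap_ptr, ver;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;		/* no AGP capability: not an AGP bridge */

	/* AGP version register: minor in bits 3:0, major in bits 7:4. */
	pci_read_config_byte(pdev, cap_ptr + PCI_AGP_VERSION, &ver);
	return ver >> 4;
}

/*
 * An AGP 3.x result would then still be cross-checked against the bridge's
 * compatibility strap, as check_via_agp3() does with the VIA_AGPSEL register.
 */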
/* * SiS AGPGART routines. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/delay.h> #include "agp.h" #define SIS_ATTBASE 0x90 #define SIS_APSIZE 0x94 #define SIS_TLBCNTRL 0x97 #define SIS_TLBFLUSH 0x98 #define PCI_DEVICE_ID_SI_662 0x0662 #define PCI_DEVICE_ID_SI_671 0x0671 static bool agp_sis_force_delay = 0; static int agp_sis_agp_spec = -1; static int sis_fetch_size(void) { u8 temp_size; int i; struct aper_size_info_8 *values; pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size); values = A_SIZE_8(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if ((temp_size == values[i].size_value) || ((temp_size & ~(0x07)) == (values[i].size_value & ~(0x07)))) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void sis_tlbflush(struct agp_memory *mem) { pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02); } static int sis_configure(void) { struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05); agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE, agp_bridge->gatt_bus_addr); pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, current_size->size_value); return 0; } static void sis_cleanup(void) { struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge->previous_size); pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, (previous_size->size_value & ~(0x03))); } static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) { struct pci_dev *device = NULL; u32 command; int rate; dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", agp_bridge->major_version, agp_bridge->minor_version); pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(bridge, mode, command); command |= AGPSTAT_AGP_ENABLE; rate = (command & 0x7) << 2; for_each_pci_dev(device) { u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); if (!agp) continue; dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n", pci_name(device), rate); pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); /* * Weird: on some sis chipsets any rate change in the target * command register triggers a 5ms screwup during which the master * cannot be configured */ if (device->device == bridge->dev->device) { dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n"); msleep(10); } } } static const struct aper_size_info_8 sis_generic_sizes[7] = { {256, 65536, 6, 99}, {128, 32768, 5, 83}, {64, 16384, 4, 67}, {32, 8192, 3, 51}, {16, 4096, 2, 35}, {8, 2048, 1, 19}, {4, 1024, 0, 3} }; static struct agp_bridge_driver sis_driver = { .owner = THIS_MODULE, .aperture_sizes = sis_generic_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = sis_configure, .fetch_size = sis_fetch_size, .cleanup = sis_cleanup, .tlb_flush = sis_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type 
= agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; // chipsets that require the 'delay hack' static int sis_broken_chipsets[] = { PCI_DEVICE_ID_SI_648, PCI_DEVICE_ID_SI_746, 0 // terminator }; static void sis_get_driver(struct agp_bridge_data *bridge) { int i; for (i=0; sis_broken_chipsets[i]!=0; ++i) if (bridge->dev->device==sis_broken_chipsets[i]) break; if (sis_broken_chipsets[i] || agp_sis_force_delay) sis_driver.agp_enable=sis_delayed_enable; // sis chipsets that indicate less than agp3.5 // are not actually fully agp3 compliant if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5 && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) { sis_driver.aperture_sizes = agp3_generic_sizes; sis_driver.size_type = U16_APER_SIZE; sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES; sis_driver.configure = agp3_generic_configure; sis_driver.fetch_size = agp3_generic_fetch_size; sis_driver.cleanup = agp3_generic_cleanup; sis_driver.tlb_flush = agp3_generic_tlbflush; } } static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n", pdev->vendor, pdev->device); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &sis_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; get_agp_version(bridge); /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); sis_get_driver(bridge); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_sis_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static int agp_sis_resume(__attribute__((unused)) struct device *dev) { return sis_driver.configure(); } static const struct pci_device_id agp_sis_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_5591, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_530, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_540, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_550, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_620, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_630, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_635, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_645, .subvendor = 
PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_646, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_648, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_650, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_651, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_655, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_661, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_662, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_671, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_730, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_735, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_740, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_741, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_745, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_746, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); static DEFINE_SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, NULL, agp_sis_resume); static struct pci_driver agp_sis_pci_driver = { .name = "agpgart-sis", .id_table = agp_sis_pci_table, .probe = agp_sis_probe, .remove = agp_sis_remove, .driver.pm = &agp_sis_pm_ops, }; static int __init agp_sis_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_sis_pci_driver); } static void __exit agp_sis_cleanup(void) { pci_unregister_driver(&agp_sis_pci_driver); } module_init(agp_sis_init); module_exit(agp_sis_cleanup); module_param(agp_sis_force_delay, bool, 0); MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack"); module_param(agp_sis_agp_spec, int, 0); MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/sis-agp.c
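/*
 * Illustrative sketch (not part of the driver source above): sis_get_driver()
 * walks a zero-terminated list of device IDs to decide whether the "delay
 * hack" is required.  The same pattern in isolation, with needs_sis_delay()
 * as a hypothetical helper name:
 */
#include <linux/pci.h>

static const unsigned short sis_delay_ids[] = {
	PCI_DEVICE_ID_SI_648,
	PCI_DEVICE_ID_SI_746,
	0	/* terminator */
};

static bool needs_sis_delay(const struct pci_dev *pdev, bool force)
{
	int i;

	for (i = 0; sis_delay_ids[i] != 0; i++)
		if (pdev->device == sis_delay_ids[i])
			return true;

	return force;	/* mirrors the agp_sis_force_delay override */
}

/*
 * The same behaviour can be requested at module load time, e.g.
 * "modprobe sis-agp agp_sis_force_delay=1", matching the module_param()
 * declarations at the end of the driver.
 */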
// SPDX-License-Identifier: GPL-2.0-only /* * UniNorth AGPGART routines. */ #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> #include <linux/delay.h> #include <linux/vmalloc.h> #include <asm/uninorth.h> #include <asm/prom.h> #include <asm/pmac_feature.h> #include "agp.h" /* * NOTES for uninorth3 (G5 AGP) supports : * * There maybe also possibility to have bigger cache line size for * agp (see pmac_pci.c and look for cache line). Need to be investigated * by someone. * * PAGE size are hardcoded but this may change, see asm/page.h. * * Jerome Glisse <[email protected]> */ static int uninorth_rev; static int is_u3; static u32 scratch_value; #define DEFAULT_APERTURE_SIZE 256 #define DEFAULT_APERTURE_STRING "256" static char *aperture = NULL; static int uninorth_fetch_size(void) { int i, size = 0; struct aper_size_info_32 *values = A_SIZE_32(agp_bridge->driver->aperture_sizes); if (aperture) { char *save = aperture; size = memparse(aperture, &aperture) >> 20; aperture = save; for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) if (size == values[i].size) break; if (i == agp_bridge->driver->num_aperture_sizes) { dev_err(&agp_bridge->dev->dev, "invalid aperture size, " "using default\n"); size = 0; aperture = NULL; } } if (!size) { for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) if (values[i].size == DEFAULT_APERTURE_SIZE) break; } agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } static void uninorth_tlbflush(struct agp_memory *mem) { u32 ctrl = UNI_N_CFG_GART_ENABLE; if (is_u3) ctrl |= U3_N_CFG_GART_PERFRD; pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl | UNI_N_CFG_GART_INVAL); pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl); if (!mem && uninorth_rev <= 0x30) { pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl | UNI_N_CFG_GART_2xRESET); pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl); } } static void uninorth_cleanup(void) { u32 tmp; pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp); if (!(tmp & UNI_N_CFG_GART_ENABLE)) return; tmp |= UNI_N_CFG_GART_INVAL; pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp); pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0); if (uninorth_rev <= 0x30) { pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, UNI_N_CFG_GART_2xRESET); pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0); } } static int uninorth_configure(void) { struct aper_size_info_32 *current_size; current_size = A_SIZE_32(agp_bridge->current_size); dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n", current_size->size_value); /* aperture size and gatt addr */ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_BASE, (agp_bridge->gatt_bus_addr & 0xfffff000) | current_size->size_value); /* HACK ALERT * UniNorth seem to be buggy enough not to handle properly when * the AGP aperture isn't mapped at bus physical address 0 */ agp_bridge->gart_bus_addr = 0; #ifdef CONFIG_PPC64 /* Assume U3 or later on PPC64 systems */ /* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE, (agp_bridge->gatt_bus_addr >> 32) & 0xf); #else pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr); #endif if (is_u3) { 
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_DUMMY_PAGE, page_to_phys(agp_bridge->scratch_page_page) >> 12); } return 0; } static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, num_entries; void *temp; u32 *gp; int mask_type; if (type != mem->type) return -EINVAL; mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); if (mask_type != 0) { /* We know nothing of memory types */ return -EINVAL; } if (mem->page_count == 0) return 0; temp = agp_bridge->current_size; num_entries = A_SIZE_32(temp)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; gp = (u32 *) &agp_bridge->gatt_table[pg_start]; for (i = 0; i < mem->page_count; ++i) { if (gp[i] != scratch_value) { dev_info(&agp_bridge->dev->dev, "uninorth_insert_memory: entry 0x%x occupied (%x)\n", i, gp[i]); return -EBUSY; } } for (i = 0; i < mem->page_count; i++) { if (is_u3) gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; else gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL); flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); } mb(); uninorth_tlbflush(mem); return 0; } static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; u32 *gp; int mask_type; if (type != mem->type) return -EINVAL; mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); if (mask_type != 0) { /* We know nothing of memory types */ return -EINVAL; } if (mem->page_count == 0) return 0; gp = (u32 *) &agp_bridge->gatt_table[pg_start]; for (i = 0; i < mem->page_count; ++i) { gp[i] = scratch_value; } mb(); uninorth_tlbflush(mem); return 0; } static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode) { u32 command, scratch, status; int timeout; pci_read_config_dword(bridge->dev, bridge->capndx + PCI_AGP_STATUS, &status); command = agp_collect_device_status(bridge, mode, status); command |= PCI_AGP_COMMAND_AGP; if (uninorth_rev == 0x21) { /* * Darwin disable AGP 4x on this revision, thus we * may assume it's broken. This is an AGP2 controller. */ command &= ~AGPSTAT2_4X; } if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) { /* * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1, * 2.2 and 2.3, Darwin do so. */ if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7) command = (command & ~AGPSTAT_RQ_DEPTH) | (7 << AGPSTAT_RQ_DEPTH_SHIFT); } uninorth_tlbflush(NULL); timeout = 0; do { pci_write_config_dword(bridge->dev, bridge->capndx + PCI_AGP_COMMAND, command); pci_read_config_dword(bridge->dev, bridge->capndx + PCI_AGP_COMMAND, &scratch); } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000); if ((scratch & PCI_AGP_COMMAND_AGP) == 0) dev_err(&bridge->dev->dev, "can't write UniNorth AGP " "command register\n"); if (uninorth_rev >= 0x30) { /* This is an AGP V3 */ agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0); } else { /* AGP V2 */ agp_device_command(command, false); } uninorth_tlbflush(NULL); } #ifdef CONFIG_PM /* * These Power Management routines are _not_ called by the normal PCI PM layer, * but directly by the video driver through function pointers in the device * tree. 
*/ static int agp_uninorth_suspend(struct pci_dev *pdev) { struct agp_bridge_data *bridge; u32 cmd; u8 agp; struct pci_dev *device = NULL; bridge = agp_find_bridge(pdev); if (bridge == NULL) return -ENODEV; /* Only one suspend supported */ if (bridge->dev_private_data) return 0; /* turn off AGP on the video chip, if it was enabled */ for_each_pci_dev(device) { /* Don't touch the bridge yet, device first */ if (device == pdev) continue; /* Only deal with devices on the same bus here, no Mac has a P2P * bridge on the AGP port, and mucking around the entire PCI * tree is source of problems on some machines because of a bug * in some versions of pci_find_capability() when hitting a dead * device */ if (device->bus != pdev->bus) continue; agp = pci_find_capability(device, PCI_CAP_ID_AGP); if (!agp) continue; pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd); if (!(cmd & PCI_AGP_COMMAND_AGP)) continue; dev_info(&pdev->dev, "disabling AGP on device %s\n", pci_name(device)); cmd &= ~PCI_AGP_COMMAND_AGP; pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd); } /* turn off AGP on the bridge */ agp = pci_find_capability(pdev, PCI_CAP_ID_AGP); pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd); bridge->dev_private_data = (void *)(long)cmd; if (cmd & PCI_AGP_COMMAND_AGP) { dev_info(&pdev->dev, "disabling AGP on bridge\n"); cmd &= ~PCI_AGP_COMMAND_AGP; pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd); } /* turn off the GART */ uninorth_cleanup(); return 0; } static int agp_uninorth_resume(struct pci_dev *pdev) { struct agp_bridge_data *bridge; u32 command; bridge = agp_find_bridge(pdev); if (bridge == NULL) return -ENODEV; command = (long)bridge->dev_private_data; bridge->dev_private_data = NULL; if (!(command & PCI_AGP_COMMAND_AGP)) return 0; uninorth_agp_enable(bridge, command); return 0; } #endif /* CONFIG_PM */ static struct { struct page **pages_arr; } uninorth_priv; static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) { char *table; char *table_end; int size; int page_order; int num_entries; int i; void *temp; struct page *page; /* We can't handle 2 level gatt's */ if (bridge->driver->size_type == LVL2_APER_SIZE) return -EINVAL; table = NULL; i = bridge->aperture_size_idx; temp = bridge->current_size; size = page_order = num_entries = 0; do { size = A_SIZE_32(temp)->size; page_order = A_SIZE_32(temp)->page_order; num_entries = A_SIZE_32(temp)->num_entries; table = (char *) __get_free_pages(GFP_KERNEL, page_order); if (table == NULL) { i++; bridge->current_size = A_IDX32(bridge); } else { bridge->aperture_size_idx = i; } } while (!table && (i < bridge->driver->num_aperture_sizes)); if (table == NULL) return -ENOMEM; uninorth_priv.pages_arr = kmalloc_array(1 << page_order, sizeof(struct page *), GFP_KERNEL); if (uninorth_priv.pages_arr == NULL) goto enomem; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end); page++, i++) { SetPageReserved(page); uninorth_priv.pages_arr[i] = page; } bridge->gatt_table_real = (u32 *) table; /* Need to clear out any dirty data still sitting in caches */ flush_dcache_range((unsigned long)table, (unsigned long)table_end + 1); bridge->gatt_table = vmap(uninorth_priv.pages_arr, (1 << page_order), 0, PAGE_KERNEL_NCG); if (bridge->gatt_table == NULL) goto enomem; bridge->gatt_bus_addr = virt_to_phys(table); if (is_u3) scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL; else scratch_value = 
cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) | 0x1UL); for (i = 0; i < num_entries; i++) bridge->gatt_table[i] = scratch_value; return 0; enomem: kfree(uninorth_priv.pages_arr); if (table) free_pages((unsigned long)table, page_order); return -ENOMEM; } static int uninorth_free_gatt_table(struct agp_bridge_data *bridge) { int page_order; char *table, *table_end; void *temp; struct page *page; temp = bridge->current_size; page_order = A_SIZE_32(temp)->page_order; /* Do not worry about freeing memory, because if this is * called, then all agp memory is deallocated and removed * from the table. */ vunmap(bridge->gatt_table); kfree(uninorth_priv.pages_arr); table = (char *) bridge->gatt_table_real; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); free_pages((unsigned long) bridge->gatt_table_real, page_order); return 0; } static void null_cache_flush(void) { mb(); } /* Setup function */ static const struct aper_size_info_32 uninorth_sizes[] = { {256, 65536, 6, 64}, {128, 32768, 5, 32}, {64, 16384, 4, 16}, {32, 8192, 3, 8}, {16, 4096, 2, 4}, {8, 2048, 1, 2}, {4, 1024, 0, 1} }; /* * Not sure that u3 supports that high aperture sizes but it * would strange if it did not :) */ static const struct aper_size_info_32 u3_sizes[] = { {512, 131072, 7, 128}, {256, 65536, 6, 64}, {128, 32768, 5, 32}, {64, 16384, 4, 16}, {32, 8192, 3, 8}, {16, 4096, 2, 4}, {8, 2048, 1, 2}, {4, 1024, 0, 1} }; const struct agp_bridge_driver uninorth_agp_driver = { .owner = THIS_MODULE, .aperture_sizes = (void *)uninorth_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = ARRAY_SIZE(uninorth_sizes), .configure = uninorth_configure, .fetch_size = uninorth_fetch_size, .cleanup = uninorth_cleanup, .tlb_flush = uninorth_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .cache_flush = null_cache_flush, .agp_enable = uninorth_agp_enable, .create_gatt_table = uninorth_create_gatt_table, .free_gatt_table = uninorth_free_gatt_table, .insert_memory = uninorth_insert_memory, .remove_memory = uninorth_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, .cant_use_aperture = true, .needs_scratch_page = true, }; const struct agp_bridge_driver u3_agp_driver = { .owner = THIS_MODULE, .aperture_sizes = (void *)u3_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = ARRAY_SIZE(u3_sizes), .configure = uninorth_configure, .fetch_size = uninorth_fetch_size, .cleanup = uninorth_cleanup, .tlb_flush = uninorth_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .cache_flush = null_cache_flush, .agp_enable = uninorth_agp_enable, .create_gatt_table = uninorth_create_gatt_table, .free_gatt_table = uninorth_free_gatt_table, .insert_memory = uninorth_insert_memory, .remove_memory = uninorth_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, .cant_use_aperture = true, .needs_scratch_page = true, }; static struct agp_device_ids 
uninorth_agp_device_ids[] = { { .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP, .chipset_name = "UniNorth", }, { .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP_P, .chipset_name = "UniNorth/Pangea", }, { .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP15, .chipset_name = "UniNorth 1.5", }, { .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP2, .chipset_name = "UniNorth 2", }, { .device_id = PCI_DEVICE_ID_APPLE_U3_AGP, .chipset_name = "U3", }, { .device_id = PCI_DEVICE_ID_APPLE_U3L_AGP, .chipset_name = "U3L", }, { .device_id = PCI_DEVICE_ID_APPLE_U3H_AGP, .chipset_name = "U3H", }, { .device_id = PCI_DEVICE_ID_APPLE_IPID2_AGP, .chipset_name = "UniNorth/Intrepid2", }, }; static int agp_uninorth_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_device_ids *devs = uninorth_agp_device_ids; struct agp_bridge_data *bridge; struct device_node *uninorth_node; u8 cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (cap_ptr == 0) return -ENODEV; /* probe for known chipsets */ for (j = 0; devs[j].chipset_name != NULL; ++j) { if (pdev->device == devs[j].device_id) { dev_info(&pdev->dev, "Apple %s chipset\n", devs[j].chipset_name); goto found; } } dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n", pdev->vendor, pdev->device); return -ENODEV; found: /* Set revision to 0 if we could not read it. */ uninorth_rev = 0; is_u3 = 0; /* Locate core99 Uni-N */ uninorth_node = of_find_node_by_name(NULL, "uni-n"); /* Locate G5 u3 */ if (uninorth_node == NULL) { is_u3 = 1; uninorth_node = of_find_node_by_name(NULL, "u3"); } if (uninorth_node) { const int *revprop = of_get_property(uninorth_node, "device-rev", NULL); if (revprop != NULL) uninorth_rev = *revprop & 0x3f; of_node_put(uninorth_node); } #ifdef CONFIG_PM /* Inform platform of our suspend/resume caps */ pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume); #endif /* Allocate & setup our driver */ bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; if (is_u3) bridge->driver = &u3_agp_driver; else bridge->driver = &uninorth_agp_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; bridge->flags = AGP_ERRATA_FASTWRITES; /* Fill in the mode register */ pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_uninorth_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); #ifdef CONFIG_PM /* Inform platform of our suspend/resume caps */ pmac_register_agp_pm(pdev, NULL, NULL); #endif agp_remove_bridge(bridge); agp_put_bridge(bridge); } static const struct pci_device_id agp_uninorth_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_APPLE, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table); static struct pci_driver agp_uninorth_pci_driver = { .name = "agpgart-uninorth", .id_table = agp_uninorth_pci_table, .probe = agp_uninorth_probe, .remove = agp_uninorth_remove, }; static int __init agp_uninorth_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_uninorth_pci_driver); } static void __exit agp_uninorth_cleanup(void) { pci_unregister_driver(&agp_uninorth_pci_driver); } module_init(agp_uninorth_init); module_exit(agp_uninorth_cleanup); module_param(aperture, charp, 0); MODULE_PARM_DESC(aperture, "Aperture size, must be power of two between 4MB and an\n" "\t\tupper limit specific to the UniNorth revision.\n" "\t\tDefault: " 
DEFAULT_APERTURE_STRING "M"); MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras"); MODULE_LICENSE("GPL");
linux-master
drivers/char/agp/uninorth-agp.c
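/*
 * Illustrative sketch (not part of the driver source above): the GATT entry
 * format written by uninorth_insert_memory() differs between U3 and earlier
 * UniNorth revisions.  uninorth_gatt_entry() is a hypothetical helper that
 * restates the two encodings seen in the driver: U3 stores the physical page
 * frame number with bit 31 set, older bridges store the little-endian page
 * address with the valid bit (bit 0) set.
 */
#include <linux/types.h>
#include <asm/page.h>
#include <asm/byteorder.h>

static u32 uninorth_gatt_entry(phys_addr_t paddr, bool u3)
{
	if (u3)
		return (paddr >> PAGE_SHIFT) | 0x80000000UL;

	/* Cast mirrors the driver, which keeps GATT entries in a u32 table. */
	return (u32)cpu_to_le32((paddr & 0xFFFFF000UL) | 0x1UL);
}

/*
 * The aperture size can also be forced at load time, e.g.
 * "modprobe uninorth-agp aperture=256M"; uninorth_fetch_size() parses the
 * string with memparse() before falling back to the 256MB default.
 */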
// SPDX-License-Identifier: GPL-2.0-only /* * HP Quicksilver AGP GART routines * * Copyright (c) 2006, Kyle McMartin <[email protected]> * * Based on drivers/char/agpgart/hp-agp.c which is * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <[email protected]> */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/klist.h> #include <linux/agp_backend.h> #include <linux/log2.h> #include <linux/slab.h> #include <asm/parisc-device.h> #include <asm/ropes.h> #include "agp.h" #define DRVNAME "quicksilver" #define DRVPFX DRVNAME ": " #define AGP8X_MODE_BIT 3 #define AGP8X_MODE (1 << AGP8X_MODE_BIT) static unsigned long parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, int type); static struct _parisc_agp_info { void __iomem *ioc_regs; void __iomem *lba_regs; int lba_cap_offset; u64 *gatt; u64 gatt_entries; u64 gart_base; u64 gart_size; int io_page_size; int io_pages_per_kpage; } parisc_agp_info; static struct gatt_mask parisc_agp_masks[] = { { .mask = SBA_PDIR_VALID_BIT, .type = 0 } }; static struct aper_size_info_fixed parisc_agp_sizes[] = { {0, 0, 0}, /* filled in by parisc_agp_fetch_size() */ }; static int parisc_agp_fetch_size(void) { int size; size = parisc_agp_info.gart_size / MB(1); parisc_agp_sizes[0].size = size; agp_bridge->current_size = (void *) &parisc_agp_sizes[0]; return size; } static int parisc_agp_configure(void) { struct _parisc_agp_info *info = &parisc_agp_info; agp_bridge->gart_bus_addr = info->gart_base; agp_bridge->capndx = info->lba_cap_offset; agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS); return 0; } static void parisc_agp_tlbflush(struct agp_memory *mem) { struct _parisc_agp_info *info = &parisc_agp_info; /* force fdc ops to be visible to IOMMU */ asm_io_sync(); writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM); readq(info->ioc_regs+IOC_PCOM); /* flush */ } static int parisc_agp_create_gatt_table(struct agp_bridge_data *bridge) { struct _parisc_agp_info *info = &parisc_agp_info; int i; for (i = 0; i < info->gatt_entries; i++) { info->gatt[i] = (unsigned long)agp_bridge->scratch_page; } return 0; } static int parisc_agp_free_gatt_table(struct agp_bridge_data *bridge) { struct _parisc_agp_info *info = &parisc_agp_info; info->gatt[0] = SBA_AGPGART_COOKIE; return 0; } static int parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { struct _parisc_agp_info *info = &parisc_agp_info; int i, k; off_t j, io_pg_start; int io_pg_count; if (type != mem->type || agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { return -EINVAL; } io_pg_start = info->io_pages_per_kpage * pg_start; io_pg_count = info->io_pages_per_kpage * mem->page_count; if ((io_pg_start + io_pg_count) > info->gatt_entries) { return -EINVAL; } j = io_pg_start; while (j < (io_pg_start + io_pg_count)) { if (info->gatt[j]) return -EBUSY; j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = io_pg_start; i < mem->page_count; i++) { unsigned long paddr; paddr = page_to_phys(mem->pages[i]); for (k = 0; k < info->io_pages_per_kpage; k++, j++, paddr += info->io_page_size) { info->gatt[j] = parisc_agp_mask_memory(agp_bridge, paddr, type); asm_io_fdc(&info->gatt[j]); } } agp_bridge->driver->tlb_flush(mem); return 0; } static int parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { struct _parisc_agp_info *info = &parisc_agp_info; int i, io_pg_start, io_pg_count; if (type != mem->type 
|| agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { return -EINVAL; } io_pg_start = info->io_pages_per_kpage * pg_start; io_pg_count = info->io_pages_per_kpage * mem->page_count; for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { info->gatt[i] = agp_bridge->scratch_page; } agp_bridge->driver->tlb_flush(mem); return 0; } static unsigned long parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, int type) { unsigned ci; /* coherent index */ dma_addr_t pa; pa = addr & IOVP_MASK; asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa))); pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ return cpu_to_le64(pa); } static void parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode) { struct _parisc_agp_info *info = &parisc_agp_info; u32 command; command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS); command = agp_collect_device_status(bridge, mode, command); command |= 0x00000100; writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND); agp_device_command(command, (mode & AGP8X_MODE) != 0); } static const struct agp_bridge_driver parisc_agp_driver = { .owner = THIS_MODULE, .size_type = FIXED_APER_SIZE, .configure = parisc_agp_configure, .fetch_size = parisc_agp_fetch_size, .tlb_flush = parisc_agp_tlbflush, .mask_memory = parisc_agp_mask_memory, .masks = parisc_agp_masks, .agp_enable = parisc_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = parisc_agp_create_gatt_table, .free_gatt_table = parisc_agp_free_gatt_table, .insert_memory = parisc_agp_insert_memory, .remove_memory = parisc_agp_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, .cant_use_aperture = true, }; static int __init agp_ioc_init(void __iomem *ioc_regs) { struct _parisc_agp_info *info = &parisc_agp_info; u64 iova_base, *io_pdir, io_tlb_ps; int io_tlb_shift; printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n"); info->ioc_regs = ioc_regs; io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG); switch (io_tlb_ps) { case 0: io_tlb_shift = 12; break; case 1: io_tlb_shift = 13; break; case 2: io_tlb_shift = 14; break; case 3: io_tlb_shift = 16; break; default: printk(KERN_ERR DRVPFX "Invalid IOTLB page size " "configuration 0x%llx\n", io_tlb_ps); info->gatt = NULL; info->gatt_entries = 0; return -ENODEV; } info->io_page_size = 1 << io_tlb_shift; info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size; iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1; info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE; info->gart_size = PLUTO_GART_SIZE; info->gatt_entries = info->gart_size / info->io_page_size; io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE)); info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT]; if (info->gatt[0] != SBA_AGPGART_COOKIE) { info->gatt = NULL; info->gatt_entries = 0; printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; " "GART disabled\n"); return -ENODEV; } return 0; } static int __init lba_find_capability(int cap) { struct _parisc_agp_info *info = &parisc_agp_info; u16 status; u8 pos, id; int ttl = 48; status = readw(info->lba_regs + PCI_STATUS); if (!(status & PCI_STATUS_CAP_LIST)) return 0; pos = readb(info->lba_regs + PCI_CAPABILITY_LIST); 
while (ttl-- && pos >= 0x40) { pos &= ~3; id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID); if (id == 0xff) break; if (id == cap) return pos; pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT); } return 0; } static int __init agp_lba_init(void __iomem *lba_hpa) { struct _parisc_agp_info *info = &parisc_agp_info; int cap; info->lba_regs = lba_hpa; info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP); cap = readl(lba_hpa + info->lba_cap_offset) & 0xff; if (cap != PCI_CAP_ID_AGP) { printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n", cap, info->lba_cap_offset); return -ENODEV; } return 0; } static int __init parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa) { struct pci_dev *fake_bridge_dev = NULL; struct agp_bridge_data *bridge; int error = 0; fake_bridge_dev = pci_alloc_dev(NULL); if (!fake_bridge_dev) { error = -ENOMEM; goto fail; } error = agp_ioc_init(ioc_hpa); if (error) goto fail; error = agp_lba_init(lba_hpa); if (error) goto fail; bridge = agp_alloc_bridge(); if (!bridge) { error = -ENOMEM; goto fail; } bridge->driver = &parisc_agp_driver; fake_bridge_dev->vendor = PCI_VENDOR_ID_HP; fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA; bridge->dev = fake_bridge_dev; error = agp_add_bridge(bridge); if (error) goto fail; return 0; fail: kfree(fake_bridge_dev); return error; } static int __init find_quicksilver(struct device *dev, void *data) { struct parisc_device **lba = data; struct parisc_device *padev = to_parisc_device(dev); if (IS_QUICKSILVER(padev)) *lba = padev; return 0; } static int __init parisc_agp_init(void) { int err = -1; struct parisc_device *sba = NULL, *lba = NULL; struct lba_device *lbadev = NULL; if (!sba_list) goto out; /* Find our parent Pluto */ sba = sba_list->dev; if (!IS_PLUTO(sba)) { printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n"); goto out; } /* Now search our Pluto for our precious AGP device... */ device_for_each_child(&sba->dev, &lba, find_quicksilver); if (!lba) { printk(KERN_INFO DRVPFX "No AGP devices found.\n"); goto out; } lbadev = parisc_get_drvdata(lba); /* w00t, let's go find our cookies... */ parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr); return 0; out: return err; } module_init(parisc_agp_init); MODULE_AUTHOR("Kyle McMartin <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/char/agp/parisc-agp.c
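/*
 * Illustrative sketch (not part of the driver source above): agp_ioc_init()
 * maps the IOC_TCNFG page-size code onto a shift and then derives how many
 * IOMMU pages fit in one kernel page.  iotlb_shift_from_tcnfg() is a
 * hypothetical helper restating that table (0 -> 4KB, 1 -> 8KB, 2 -> 16KB,
 * 3 -> 64KB).
 */
#include <linux/types.h>

static int iotlb_shift_from_tcnfg(u64 tcnfg)
{
	switch (tcnfg) {
	case 0: return 12;	/*  4KB IO page */
	case 1: return 13;	/*  8KB IO page */
	case 2: return 14;	/* 16KB IO page */
	case 3: return 16;	/* 64KB IO page */
	default: return -1;	/* invalid configuration, driver bails out */
	}
}

/*
 * With a 4KB kernel PAGE_SIZE and a 4KB IO page, io_pages_per_kpage is 1,
 * so each inserted AGP page consumes exactly one GATT (IO PDIR) entry.
 */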
/* * ATi AGPGART routines. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/agp_backend.h> #include <asm/agp.h> #include <asm/set_memory.h> #include "agp.h" #define ATI_GART_MMBASE_BAR 1 #define ATI_RS100_APSIZE 0xac #define ATI_RS100_IG_AGPMODE 0xb0 #define ATI_RS300_APSIZE 0xf8 #define ATI_RS300_IG_AGPMODE 0xfc #define ATI_GART_FEATURE_ID 0x00 #define ATI_GART_BASE 0x04 #define ATI_GART_CACHE_SZBASE 0x08 #define ATI_GART_CACHE_CNTRL 0x0c #define ATI_GART_CACHE_ENTRY_CNTRL 0x10 static const struct aper_size_info_lvl2 ati_generic_sizes[7] = { {2048, 524288, 0x0000000c}, {1024, 262144, 0x0000000a}, {512, 131072, 0x00000008}, {256, 65536, 0x00000006}, {128, 32768, 0x00000004}, {64, 16384, 0x00000002}, {32, 8192, 0x00000000} }; static struct gatt_mask ati_generic_masks[] = { { .mask = 1, .type = 0} }; struct ati_page_map { unsigned long *real; unsigned long __iomem *remapped; }; static struct _ati_generic_private { volatile u8 __iomem *registers; struct ati_page_map **gatt_pages; int num_tables; } ati_generic_private; static int ati_create_page_map(struct ati_page_map *page_map) { int i, err; page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) return -ENOMEM; set_memory_uc((unsigned long)page_map->real, 1); err = map_page_into_agp(virt_to_page(page_map->real)); if (err) { free_page((unsigned long)page_map->real); return err; } page_map->remapped = page_map->real; for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { writel(agp_bridge->scratch_page, page_map->remapped+i); readl(page_map->remapped+i); /* PCI Posting. */ } return 0; } static void ati_free_page_map(struct ati_page_map *page_map) { unmap_page_from_agp(virt_to_page(page_map->real)); set_memory_wb((unsigned long)page_map->real, 1); free_page((unsigned long) page_map->real); } static void ati_free_gatt_pages(void) { int i; struct ati_page_map **tables; struct ati_page_map *entry; tables = ati_generic_private.gatt_pages; for (i = 0; i < ati_generic_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) ati_free_page_map(entry); kfree(entry); } } kfree(tables); } static int ati_create_gatt_pages(int nr_tables) { struct ati_page_map **tables; struct ati_page_map *entry; int retval = 0; int i; tables = kcalloc(nr_tables + 1, sizeof(struct ati_page_map *), GFP_KERNEL); if (tables == NULL) return -ENOMEM; for (i = 0; i < nr_tables; i++) { entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL); tables[i] = entry; if (entry == NULL) { retval = -ENOMEM; break; } retval = ati_create_page_map(entry); if (retval != 0) break; } ati_generic_private.num_tables = i; ati_generic_private.gatt_pages = tables; if (retval != 0) ati_free_gatt_pages(); return retval; } static int is_r200(void) { if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) || (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) || (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) || (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250)) return 1; return 0; } static int ati_fetch_size(void) { int i; u32 temp; struct aper_size_info_lvl2 *values; if (is_r200()) pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); else pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = (temp & 0x0000000e); values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { 
agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void ati_tlbflush(struct agp_memory * mem) { writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL); readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */ } static void ati_cleanup(void) { struct aper_size_info_lvl2 *previous_size; u32 temp; previous_size = A_SIZE_LVL2(agp_bridge->previous_size); /* Write back the previous size and disable gart translation */ if (is_r200()) { pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); temp = ((temp & ~(0x0000000f)) | previous_size->size_value); pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); } else { pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = ((temp & ~(0x0000000f)) | previous_size->size_value); pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); } iounmap((volatile u8 __iomem *)ati_generic_private.registers); } static int ati_configure(void) { phys_addr_t reg; u32 temp; /* Get the memory mapped registers */ reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR); ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096); if (!ati_generic_private.registers) return -ENOMEM; if (is_r200()) pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000); else pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000); /* address to map to */ /* agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev, AGP_APERTURE_BAR); printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr); */ writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID); readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/ /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */ pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp); pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14)); /* Write out the address of the gatt table */ writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE); readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. 
*/ return 0; } static int agp_ati_resume(struct device *dev) { return ati_configure(); } /* *Since we don't need contiguous memory we just try * to get the gatt table once */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #undef GET_GATT #define GET_GATT(addr) (ati_generic_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) static int ati_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, j, num_entries; unsigned long __iomem *cur_gatt; unsigned long addr; int mask_type; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; mask_type = agp_generic_type_to_mask_type(mem->bridge, type); if (mask_type != 0 || type != mem->type) return -EINVAL; if (mem->page_count == 0) return 0; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; j = pg_start; while (j < (pg_start + mem->page_count)) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr)))) return -EBUSY; j++; } if (!mem->is_flushed) { /*CACHE_FLUSH(); */ global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mem->type), cur_gatt+GET_GATT_OFF(addr)); } readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ agp_bridge->driver->tlb_flush(mem); return 0; } static int ati_remove_memory(struct agp_memory * mem, off_t pg_start, int type) { int i; unsigned long __iomem *cur_gatt; unsigned long addr; int mask_type; mask_type = agp_generic_type_to_mask_type(mem->bridge, type); if (mask_type != 0 || type != mem->type) return -EINVAL; if (mem->page_count == 0) return 0; for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ agp_bridge->driver->tlb_flush(mem); return 0; } static int ati_create_gatt_table(struct agp_bridge_data *bridge) { struct aper_size_info_lvl2 *value; struct ati_page_map page_dir; unsigned long __iomem *cur_gatt; unsigned long addr; int retval; u32 temp; int i; struct aper_size_info_lvl2 *current_size; value = A_SIZE_LVL2(agp_bridge->current_size); retval = ati_create_page_map(&page_dir); if (retval != 0) return retval; retval = ati_create_gatt_pages(value->num_entries / 1024); if (retval != 0) { ati_free_page_map(&page_dir); return retval; } agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Write out the size register */ current_size = A_SIZE_LVL2(agp_bridge->current_size); if (is_r200()) { pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); } else { pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); pci_read_config_dword(agp_bridge->dev, 
ATI_RS300_APSIZE, &temp); } /* * Get the address for the gart region. * This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); agp_bridge->gart_bus_addr = addr; /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1, page_dir.remapped+GET_PAGE_DIR_OFF(addr)); readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ } for (i = 0; i < value->num_entries; i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } return 0; } static int ati_free_gatt_table(struct agp_bridge_data *bridge) { struct ati_page_map page_dir; page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; ati_free_gatt_pages(); ati_free_page_map(&page_dir); return 0; } static const struct agp_bridge_driver ati_generic_bridge = { .owner = THIS_MODULE, .aperture_sizes = ati_generic_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = ati_configure, .fetch_size = ati_fetch_size, .cleanup = ati_cleanup, .tlb_flush = ati_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = ati_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = ati_create_gatt_table, .free_gatt_table = ati_free_gatt_table, .insert_memory = ati_insert_memory, .remove_memory = ati_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static struct agp_device_ids ati_agp_device_ids[] = { { .device_id = PCI_DEVICE_ID_ATI_RS100, .chipset_name = "IGP320/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS200, .chipset_name = "IGP330/340/345/350/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS200_B, .chipset_name = "IGP345M", }, { .device_id = PCI_DEVICE_ID_ATI_RS250, .chipset_name = "IGP7000/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_100, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_133, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_166, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_200, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS350_133, .chipset_name = "IGP9000/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS350_200, .chipset_name = "IGP9100/M", }, { }, /* dummy final entry, always present */ }; static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_device_ids *devs = ati_agp_device_ids; struct agp_bridge_data *bridge; u8 cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* probe for known chipsets */ for (j = 0; devs[j].chipset_name; j++) { if (pdev->device == devs[j].device_id) goto found; } dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n", pdev->vendor, pdev->device); return -ENODEV; found: bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->dev = pdev; bridge->capndx = cap_ptr; bridge->driver = &ati_generic_bridge; dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name); /* Fill in the mode 
register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_ati_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static const struct pci_device_id agp_ati_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_ATI, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); static DEFINE_SIMPLE_DEV_PM_OPS(agp_ati_pm_ops, NULL, agp_ati_resume); static struct pci_driver agp_ati_pci_driver = { .name = "agpgart-ati", .id_table = agp_ati_pci_table, .probe = agp_ati_probe, .remove = agp_ati_remove, .driver.pm = &agp_ati_pm_ops, }; static int __init agp_ati_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_ati_pci_driver); } static void __exit agp_ati_cleanup(void) { pci_unregister_driver(&agp_ati_pci_driver); } module_init(agp_ati_init); module_exit(agp_ati_cleanup); MODULE_AUTHOR("Dave Jones"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/ati-agp.c
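The ATI driver above (like the AMD K7 driver further down) programs a two-level GATT: a page directory whose slots point at 4 KiB GATT pages, addressed through the GET_PAGE_DIR_OFF and GET_GATT_OFF macros. The following standalone sketch (not driver code; the aperture base and helper names are made up for illustration) shows how those macros decompose a GART bus address into a directory slot and a GATT entry index.

/*
 * Illustrative sketch only: bits 31..22 of a GART bus address select the
 * page-directory slot, bits 21..12 select the entry within that GATT page,
 * mirroring GET_PAGE_DIR_OFF() and GET_GATT_OFF() in the driver.
 */
#include <stdio.h>

static unsigned long page_dir_off(unsigned long addr) { return addr >> 22; }
static unsigned long gatt_off(unsigned long addr)     { return (addr & 0x003ff000) >> 12; }

int main(void)
{
	unsigned long gart_base = 0xe0000000UL;        /* hypothetical aperture base */
	unsigned long addr = gart_base + 5 * 4096;     /* sixth page of the aperture */

	printf("page-dir slot %lu (relative %lu), gatt entry %lu\n",
	       page_dir_off(addr),
	       page_dir_off(addr) - page_dir_off(gart_base),
	       gatt_off(addr));
	return 0;
}

Relative to the aperture base, the sixth page lands in directory slot 0, entry 5, which is exactly the index the driver's GET_GATT()/GET_GATT_OFF() pair would write.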
/* * ALi AGPGART routines. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <asm/page.h> /* PAGE_SIZE */ #include "agp.h" #define ALI_AGPCTRL 0xb8 #define ALI_ATTBASE 0xbc #define ALI_TLBCTRL 0xc0 #define ALI_TAGCTRL 0xc4 #define ALI_CACHE_FLUSH_CTRL 0xD0 #define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000 #define ALI_CACHE_FLUSH_EN 0x100 static int ali_fetch_size(void) { int i; u32 temp; struct aper_size_info_32 *values; pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); temp &= ~(0xfffffff0); values = A_SIZE_32(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void ali_tlbflush(struct agp_memory *mem) { u32 temp; pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); temp &= 0xfffffff0; temp |= (1<<0 | 1<<1); pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp); } static void ali_cleanup(void) { struct aper_size_info_32 *previous_size; u32 temp; previous_size = A_SIZE_32(agp_bridge->previous_size); pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); // clear tag pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, ((temp & 0xffffff00) | 0x00000001|0x00000002)); pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, ((temp & 0x00000ff0) | previous_size->size_value)); } static int ali_configure(void) { u32 temp; struct aper_size_info_32 *current_size; current_size = A_SIZE_32(agp_bridge->current_size); /* aperture size and gatt addr */ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000)) | (current_size->size_value & 0xf)); pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp); /* tlb control */ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); #if 0 if (agp_bridge->type == ALI_M1541) { u32 nlvm_addr = 0; switch (current_size->size_value) { case 0: break; case 1: nlvm_addr = 0x100000;break; case 2: nlvm_addr = 0x200000;break; case 3: nlvm_addr = 0x400000;break; case 4: nlvm_addr = 0x800000;break; case 6: nlvm_addr = 0x1000000;break; case 7: nlvm_addr = 0x2000000;break; case 8: nlvm_addr = 0x4000000;break; case 9: nlvm_addr = 0x8000000;break; case 10: nlvm_addr = 0x10000000;break; default: break; } nlvm_addr--; nlvm_addr&=0xfff00000; nlvm_addr+= agp_bridge->gart_bus_addr; nlvm_addr|=(agp_bridge->gart_bus_addr>>12); dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n", nlvm_addr); } #endif pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); temp &= 0xffffff7f; //enable TLB pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp); return 0; } static void m1541_cache_flush(void) { int i, page_count; u32 temp; global_cache_flush(); page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | (agp_bridge->gatt_bus_addr + i)) | ALI_CACHE_FLUSH_EN)); } } static 
struct page *m1541_alloc_page(struct agp_bridge_data *bridge) { struct page *page = agp_generic_alloc_page(agp_bridge); u32 temp; if (!page) return NULL; pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | page_to_phys(page)) | ALI_CACHE_FLUSH_EN )); return page; } static void ali_destroy_page(struct page *page, int flags) { if (page) { if (flags & AGP_PAGE_DESTROY_UNMAP) { global_cache_flush(); /* is this really needed? --hch */ agp_generic_destroy_page(page, flags); } else agp_generic_destroy_page(page, flags); } } static void m1541_destroy_page(struct page *page, int flags) { u32 temp; if (page == NULL) return; if (flags & AGP_PAGE_DESTROY_UNMAP) { global_cache_flush(); pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | page_to_phys(page)) | ALI_CACHE_FLUSH_EN)); } agp_generic_destroy_page(page, flags); } /* Setup function */ static const struct aper_size_info_32 ali_generic_sizes[7] = { {256, 65536, 6, 10}, {128, 32768, 5, 9}, {64, 16384, 4, 8}, {32, 8192, 3, 7}, {16, 4096, 2, 6}, {8, 2048, 1, 4}, {4, 1024, 0, 3} }; static const struct agp_bridge_driver ali_generic_bridge = { .owner = THIS_MODULE, .aperture_sizes = ali_generic_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = ali_configure, .fetch_size = ali_fetch_size, .cleanup = ali_cleanup, .tlb_flush = ali_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_destroy_page = ali_destroy_page, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver ali_m1541_bridge = { .owner = THIS_MODULE, .aperture_sizes = ali_generic_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = 7, .configure = ali_configure, .fetch_size = ali_fetch_size, .cleanup = ali_cleanup, .tlb_flush = ali_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = m1541_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = m1541_alloc_page, .agp_destroy_page = m1541_destroy_page, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static struct agp_device_ids ali_agp_device_ids[] = { { .device_id = PCI_DEVICE_ID_AL_M1541, .chipset_name = "M1541", }, { .device_id = PCI_DEVICE_ID_AL_M1621, .chipset_name = "M1621", }, { .device_id = PCI_DEVICE_ID_AL_M1631, .chipset_name = "M1631", }, { .device_id = PCI_DEVICE_ID_AL_M1632, .chipset_name = "M1632", }, { .device_id = PCI_DEVICE_ID_AL_M1641, .chipset_name = "M1641", }, { .device_id = PCI_DEVICE_ID_AL_M1644, .chipset_name = "M1644", }, { .device_id = PCI_DEVICE_ID_AL_M1647, .chipset_name = "M1647", }, { .device_id = PCI_DEVICE_ID_AL_M1651, .chipset_name = "M1651", }, { .device_id = 
PCI_DEVICE_ID_AL_M1671, .chipset_name = "M1671", }, { .device_id = PCI_DEVICE_ID_AL_M1681, .chipset_name = "M1681", }, { .device_id = PCI_DEVICE_ID_AL_M1683, .chipset_name = "M1683", }, { }, /* dummy final entry, always present */ }; static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_device_ids *devs = ali_agp_device_ids; struct agp_bridge_data *bridge; u8 hidden_1621_id, cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* probe for known chipsets */ for (j = 0; devs[j].chipset_name; j++) { if (pdev->device == devs[j].device_id) goto found; } dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n", pdev->vendor, pdev->device); return -ENODEV; found: bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->dev = pdev; bridge->capndx = cap_ptr; switch (pdev->device) { case PCI_DEVICE_ID_AL_M1541: bridge->driver = &ali_m1541_bridge; break; case PCI_DEVICE_ID_AL_M1621: pci_read_config_byte(pdev, 0xFB, &hidden_1621_id); switch (hidden_1621_id) { case 0x31: devs[j].chipset_name = "M1631"; break; case 0x32: devs[j].chipset_name = "M1632"; break; case 0x41: devs[j].chipset_name = "M1641"; break; case 0x43: devs[j].chipset_name = "M1621"; break; case 0x47: devs[j].chipset_name = "M1647"; break; case 0x51: devs[j].chipset_name = "M1651"; break; default: break; } fallthrough; default: bridge->driver = &ali_generic_bridge; } dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name); /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_ali_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static const struct pci_device_id agp_ali_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_ali_pci_table); static struct pci_driver agp_ali_pci_driver = { .name = "agpgart-ali", .id_table = agp_ali_pci_table, .probe = agp_ali_probe, .remove = agp_ali_remove, }; static int __init agp_ali_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_ali_pci_driver); } static void __exit agp_ali_cleanup(void) { pci_unregister_driver(&agp_ali_pci_driver); } module_init(agp_ali_init); module_exit(agp_ali_cleanup); MODULE_AUTHOR("Dave Jones"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/ali-agp.c
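ali_fetch_size() above follows the common fetch_size pattern: read the bridge's aperture register, mask out the size field, and match it against the driver's aperture_sizes table, caching the hit as current_size. A minimal sketch of that lookup, with an invented register value and a shortened table (the real ALi table is ali_generic_sizes), is shown below.

/*
 * Sketch of the fetch_size table-lookup pattern; register value, mask and
 * table entries here are illustrative, not taken from real hardware.
 */
#include <stdio.h>

struct aper_size { int size_mb; unsigned int size_value; };

static const struct aper_size sizes[] = {
	{256, 0x0}, {128, 0x1}, {64, 0x2}, {32, 0x3},
};

static int fetch_size(unsigned int reg, unsigned int mask)
{
	unsigned int field = reg & mask;

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (field == sizes[i].size_value)
			return sizes[i].size_mb;   /* the driver also records current_size here */
	return 0;                                  /* unknown field: report no usable aperture */
}

int main(void)
{
	printf("aperture: %d MB\n", fetch_size(0xabc2, 0xf));   /* field 0x2 -> 64 MB */
	return 0;
}

Returning 0 on an unmatched value is what lets agp_add_bridge() reject a bridge whose aperture register holds a size the driver does not know about.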
/* * AMD K7 AGPGART routines. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/page-flags.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/set_memory.h> #include "agp.h" #define AMD_MMBASE_BAR 1 #define AMD_APSIZE 0xac #define AMD_MODECNTL 0xb0 #define AMD_MODECNTL2 0xb2 #define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ #define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ #define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ #define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ static const struct pci_device_id agp_amdk7_pci_table[]; struct amd_page_map { unsigned long *real; unsigned long __iomem *remapped; }; static struct _amd_irongate_private { volatile u8 __iomem *registers; struct amd_page_map **gatt_pages; int num_tables; } amd_irongate_private; static int amd_create_page_map(struct amd_page_map *page_map) { int i; page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) return -ENOMEM; set_memory_uc((unsigned long)page_map->real, 1); page_map->remapped = page_map->real; for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { writel(agp_bridge->scratch_page, page_map->remapped+i); readl(page_map->remapped+i); /* PCI Posting. */ } return 0; } static void amd_free_page_map(struct amd_page_map *page_map) { set_memory_wb((unsigned long)page_map->real, 1); free_page((unsigned long) page_map->real); } static void amd_free_gatt_pages(void) { int i; struct amd_page_map **tables; struct amd_page_map *entry; tables = amd_irongate_private.gatt_pages; for (i = 0; i < amd_irongate_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) amd_free_page_map(entry); kfree(entry); } } kfree(tables); amd_irongate_private.gatt_pages = NULL; } static int amd_create_gatt_pages(int nr_tables) { struct amd_page_map **tables; struct amd_page_map *entry; int retval = 0; int i; tables = kcalloc(nr_tables + 1, sizeof(struct amd_page_map *), GFP_KERNEL); if (tables == NULL) return -ENOMEM; for (i = 0; i < nr_tables; i++) { entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL); tables[i] = entry; if (entry == NULL) { retval = -ENOMEM; break; } retval = amd_create_page_map(entry); if (retval != 0) break; } amd_irongate_private.num_tables = i; amd_irongate_private.gatt_pages = tables; if (retval != 0) amd_free_gatt_pages(); return retval; } /* Since we don't need contiguous memory we just try * to get the gatt table once */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #define GET_GATT(addr) (amd_irongate_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) static int amd_create_gatt_table(struct agp_bridge_data *bridge) { struct aper_size_info_lvl2 *value; struct amd_page_map page_dir; unsigned long __iomem *cur_gatt; unsigned long addr; int retval; int i; value = A_SIZE_LVL2(agp_bridge->current_size); retval = amd_create_page_map(&page_dir); if (retval != 0) return retval; retval = amd_create_gatt_pages(value->num_entries / 1024); if (retval != 0) { amd_free_page_map(&page_dir); return retval; } agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Get the address for the gart region. 
* This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); agp_bridge->gart_bus_addr = addr; /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1, page_dir.remapped+GET_PAGE_DIR_OFF(addr)); readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ } for (i = 0; i < value->num_entries; i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ } return 0; } static int amd_free_gatt_table(struct agp_bridge_data *bridge) { struct amd_page_map page_dir; page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; amd_free_gatt_pages(); amd_free_page_map(&page_dir); return 0; } static int amd_irongate_fetch_size(void) { int i; u32 temp; struct aper_size_info_lvl2 *values; pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); temp = (temp & 0x0000000e); values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static int amd_irongate_configure(void) { struct aper_size_info_lvl2 *current_size; phys_addr_t reg; u32 temp; u16 enable_reg; current_size = A_SIZE_LVL2(agp_bridge->current_size); if (!amd_irongate_private.registers) { /* Get the memory mapped registers */ reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR); amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096); if (!amd_irongate_private.registers) return -ENOMEM; } /* Write out the address of the gatt table */ writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE); readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */ /* Write the Sync register */ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80); /* Set indexing mode */ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00); /* Write the enable register */ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE); enable_reg = (enable_reg | 0x0004); writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE); readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */ /* Write out the size register */ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1); pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp); /* Flush the tlb */ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH); readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/ return 0; } static void amd_irongate_cleanup(void) { struct aper_size_info_lvl2 *previous_size; u32 temp; u16 enable_reg; previous_size = A_SIZE_LVL2(agp_bridge->previous_size); enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE); enable_reg = (enable_reg & ~(0x0004)); writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE); readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. 
*/ /* Write back the previous size and disable gart translation */ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); temp = ((temp & ~(0x0000000f)) | previous_size->size_value); pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp); iounmap((void __iomem *) amd_irongate_private.registers); } /* * This routine could be implemented by taking the addresses * written to the GATT, and flushing them individually. However * currently it just flushes the whole table. Which is probably * more efficient, since agp_memory blocks can be a large number of * entries. */ static void amd_irongate_tlbflush(struct agp_memory *temp) { writel(1, amd_irongate_private.registers+AMD_TLBFLUSH); readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */ } static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, j, num_entries; unsigned long __iomem *cur_gatt; unsigned long addr; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if (type != mem->type || agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) return -EINVAL; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; j = pg_start; while (j < (pg_start + mem->page_count)) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) return -EBUSY; j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_generic_mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mem->type), cur_gatt+GET_GATT_OFF(addr)); readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ } amd_irongate_tlbflush(mem); return 0; } static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { int i; unsigned long __iomem *cur_gatt; unsigned long addr; if (type != mem->type || agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) return -EINVAL; for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. 
*/ } amd_irongate_tlbflush(mem); return 0; } static const struct aper_size_info_lvl2 amd_irongate_sizes[7] = { {2048, 524288, 0x0000000c}, {1024, 262144, 0x0000000a}, {512, 131072, 0x00000008}, {256, 65536, 0x00000006}, {128, 32768, 0x00000004}, {64, 16384, 0x00000002}, {32, 8192, 0x00000000} }; static const struct gatt_mask amd_irongate_masks[] = { {.mask = 1, .type = 0} }; static const struct agp_bridge_driver amd_irongate_driver = { .owner = THIS_MODULE, .aperture_sizes = amd_irongate_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = amd_irongate_configure, .fetch_size = amd_irongate_fetch_size, .cleanup = amd_irongate_cleanup, .tlb_flush = amd_irongate_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = amd_irongate_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = amd_create_gatt_table, .free_gatt_table = amd_free_gatt_table, .insert_memory = amd_insert_memory, .remove_memory = amd_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static struct agp_device_ids amd_agp_device_ids[] = { { .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006, .chipset_name = "Irongate", }, { .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E, .chipset_name = "761", }, { .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C, .chipset_name = "760MP", }, { }, /* dummy final entry, always present */ }; static int agp_amdk7_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; j = ent - agp_amdk7_pci_table; dev_info(&pdev->dev, "AMD %s chipset\n", amd_agp_device_ids[j].chipset_name); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &amd_irongate_driver; bridge->dev_private_data = &amd_irongate_private; bridge->dev = pdev; bridge->capndx = cap_ptr; /* 751 Errata (22564_B-1.PDF) erratum 20: strobe glitch with Nvidia NV10 GeForce cards. system controller may experience noise due to strong drive strengths */ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) { struct pci_dev *gfxcard=NULL; cap_ptr = 0; while (!cap_ptr) { gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); if (!gfxcard) { dev_info(&pdev->dev, "no AGP VGA controller\n"); return -ENODEV; } cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); } /* With so many variants of NVidia cards, it's simpler just to blacklist them all, and then whitelist them as needed (if necessary at all). */ if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { agp_bridge->flags |= AGP_ERRATA_1X; dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n"); } pci_dev_put(gfxcard); } /* 761 Errata (23613_F.pdf) * Revisions B0/B1 were a disaster. * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X * erratum 45: Timing problem prevents fast writes -- Disable fast write. * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing. * With this lot disabled, we should prevent lockups. 
*/ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) { if (pdev->revision == 0x10 || pdev->revision == 0x11) { agp_bridge->flags = AGP_ERRATA_FASTWRITES; agp_bridge->flags |= AGP_ERRATA_SBA; agp_bridge->flags |= AGP_ERRATA_1X; dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n"); } } /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_amdk7_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static int agp_amdk7_resume(struct device *dev) { return amd_irongate_driver.configure(); } /* must be the same order as name table above */ static const struct pci_device_id agp_amdk7_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_FE_GATE_7006, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_FE_GATE_700E, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_FE_GATE_700C, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table); static DEFINE_SIMPLE_DEV_PM_OPS(agp_amdk7_pm_ops, NULL, agp_amdk7_resume); static struct pci_driver agp_amdk7_pci_driver = { .name = "agpgart-amdk7", .id_table = agp_amdk7_pci_table, .probe = agp_amdk7_probe, .remove = agp_amdk7_remove, .driver.pm = &agp_amdk7_pm_ops, }; static int __init agp_amdk7_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_amdk7_pci_driver); } static void __exit agp_amdk7_cleanup(void) { pci_unregister_driver(&agp_amdk7_pci_driver); } module_init(agp_amdk7_init); module_exit(agp_amdk7_cleanup); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/amd-k7-agp.c
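amd_create_gatt_table() above allocates one GATT page per 1024 entries and steps the page-directory loop by 0x00400000, because each GATT page holds 1024 four-byte entries and therefore maps 4 MiB of aperture. The sketch below just works through that arithmetic for a few of the amd_irongate_sizes entries; it is an illustration, not driver code.

/*
 * Arithmetic behind the GATT page-directory setup: entries per page * page
 * size = 1024 * 4 KiB = 4 MiB of aperture per directory slot.
 */
#include <stdio.h>

int main(void)
{
	static const struct { int size_mb; int num_entries; } apertures[] = {
		{2048, 524288}, {256, 65536}, {32, 8192},
	};

	for (int i = 0; i < 3; i++) {
		int gatt_pages = apertures[i].num_entries / 1024;   /* directory slots used */

		printf("%4d MB aperture -> %3d GATT pages, %d MB each\n",
		       apertures[i].size_mb, gatt_pages,
		       apertures[i].size_mb / gatt_pages);
	}
	return 0;
}

Every row works out to 4 MB per GATT page, which is why the same 0x00400000 stride appears in both the AMD and ATI create_gatt_table loops.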
/* * AGPGART driver. * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2005 Dave Jones. * Copyright (C) 1999 Jeff Hartmann. * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * TODO: * - Allocate more than order 0 pages to avoid too much linear map splitting. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/pagemap.h> #include <linux/miscdevice.h> #include <linux/pm.h> #include <linux/agp_backend.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/io.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> #endif #include "agp.h" __u32 *agp_gatt_table; int agp_memory_reserved; /* * Needed by the Nforce GART driver for the time being. Would be * nice to do this some other way instead of needing this export. */ EXPORT_SYMBOL_GPL(agp_memory_reserved); /* * Generic routines for handling agp_memory structures - * They use the basic page allocation routines to do the brunt of the work. */ void agp_free_key(int key) { if (key < 0) return; if (key < MAXKEY) clear_bit(key, agp_bridge->key_list); } EXPORT_SYMBOL(agp_free_key); static int agp_get_key(void) { int bit; bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY); if (bit < MAXKEY) { set_bit(bit, agp_bridge->key_list); return bit; } return -1; } /* * Use kmalloc if possible for the page list. Otherwise fall back to * vmalloc. This speeds things up and also saves memory for small AGP * regions. 
*/ void agp_alloc_page_array(size_t size, struct agp_memory *mem) { mem->pages = kvmalloc(size, GFP_KERNEL); } EXPORT_SYMBOL(agp_alloc_page_array); static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) { struct agp_memory *new; unsigned long alloc_size = num_agp_pages*sizeof(struct page *); if (INT_MAX/sizeof(struct page *) < num_agp_pages) return NULL; new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); if (new == NULL) return NULL; new->key = agp_get_key(); if (new->key < 0) { kfree(new); return NULL; } agp_alloc_page_array(alloc_size, new); if (new->pages == NULL) { agp_free_key(new->key); kfree(new); return NULL; } new->num_scratch_pages = 0; return new; } struct agp_memory *agp_create_memory(int scratch_pages) { struct agp_memory *new; new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); if (new == NULL) return NULL; new->key = agp_get_key(); if (new->key < 0) { kfree(new); return NULL; } agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); if (new->pages == NULL) { agp_free_key(new->key); kfree(new); return NULL; } new->num_scratch_pages = scratch_pages; new->type = AGP_NORMAL_MEMORY; return new; } EXPORT_SYMBOL(agp_create_memory); /** * agp_free_memory - free memory associated with an agp_memory pointer. * * @curr: agp_memory pointer to be freed. * * It is the only function that can be called when the backend is not owned * by the caller. (So it can free memory on client death.) */ void agp_free_memory(struct agp_memory *curr) { size_t i; if (curr == NULL) return; if (curr->is_bound) agp_unbind_memory(curr); if (curr->type >= AGP_USER_TYPES) { agp_generic_free_by_type(curr); return; } if (curr->type != 0) { curr->bridge->driver->free_by_type(curr); return; } if (curr->page_count != 0) { if (curr->bridge->driver->agp_destroy_pages) { curr->bridge->driver->agp_destroy_pages(curr); } else { for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page( curr->pages[i], AGP_PAGE_DESTROY_UNMAP); } for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page( curr->pages[i], AGP_PAGE_DESTROY_FREE); } } } agp_free_key(curr->key); agp_free_page_array(curr); kfree(curr); } EXPORT_SYMBOL(agp_free_memory); #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) /** * agp_allocate_memory - allocate a group of pages of a certain type. * * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. * @page_count: size_t argument of the number of pages * @type: u32 argument of the type of memory to be allocated. * * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which * maps to physical ram. Any other type is device dependent. * * It returns NULL whenever memory is unavailable. 
*/ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, size_t page_count, u32 type) { int scratch_pages; struct agp_memory *new; size_t i; int cur_memory; if (!bridge) return NULL; cur_memory = atomic_read(&bridge->current_memory_agp); if ((cur_memory + page_count > bridge->max_memory_agp) || (cur_memory + page_count < page_count)) return NULL; if (type >= AGP_USER_TYPES) { new = agp_generic_alloc_user(page_count, type); if (new) new->bridge = bridge; return new; } if (type != 0) { new = bridge->driver->alloc_by_type(page_count, type); if (new) new->bridge = bridge; return new; } scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_memory(scratch_pages); if (new == NULL) return NULL; if (bridge->driver->agp_alloc_pages) { if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { agp_free_memory(new); return NULL; } new->bridge = bridge; return new; } for (i = 0; i < page_count; i++) { struct page *page = bridge->driver->agp_alloc_page(bridge); if (page == NULL) { agp_free_memory(new); return NULL; } new->pages[i] = page; new->page_count++; } new->bridge = bridge; return new; } EXPORT_SYMBOL(agp_allocate_memory); /* End - Generic routines for handling agp_memory structures */ static int agp_return_size(void) { int current_size; void *temp; temp = agp_bridge->current_size; switch (agp_bridge->driver->size_type) { case U8_APER_SIZE: current_size = A_SIZE_8(temp)->size; break; case U16_APER_SIZE: current_size = A_SIZE_16(temp)->size; break; case U32_APER_SIZE: current_size = A_SIZE_32(temp)->size; break; case LVL2_APER_SIZE: current_size = A_SIZE_LVL2(temp)->size; break; case FIXED_APER_SIZE: current_size = A_SIZE_FIX(temp)->size; break; default: current_size = 0; break; } current_size -= (agp_memory_reserved / (1024*1024)); if (current_size <0) current_size = 0; return current_size; } int agp_num_entries(void) { int num_entries; void *temp; temp = agp_bridge->current_size; switch (agp_bridge->driver->size_type) { case U8_APER_SIZE: num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: num_entries = A_SIZE_32(temp)->num_entries; break; case LVL2_APER_SIZE: num_entries = A_SIZE_LVL2(temp)->num_entries; break; case FIXED_APER_SIZE: num_entries = A_SIZE_FIX(temp)->num_entries; break; default: num_entries = 0; break; } num_entries -= agp_memory_reserved>>PAGE_SHIFT; if (num_entries<0) num_entries = 0; return num_entries; } EXPORT_SYMBOL_GPL(agp_num_entries); /** * agp_copy_info - copy bridge state information * * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. * @info: agp_kern_info pointer. The caller should insure that this pointer is valid. * * This function copies information about the agp bridge device and the state of * the agp backend into an agp_kern_info pointer. 
*/ int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info) { memset(info, 0, sizeof(struct agp_kern_info)); if (!bridge) { info->chipset = NOT_SUPPORTED; return -EIO; } info->version.major = bridge->version->major; info->version.minor = bridge->version->minor; info->chipset = SUPPORTED; info->device = bridge->dev; if (bridge->mode & AGPSTAT_MODE_3_0) info->mode = bridge->mode & ~AGP3_RESERVED_MASK; else info->mode = bridge->mode & ~AGP2_RESERVED_MASK; info->aper_base = bridge->gart_bus_addr; info->aper_size = agp_return_size(); info->max_memory = bridge->max_memory_agp; info->current_memory = atomic_read(&bridge->current_memory_agp); info->cant_use_aperture = bridge->driver->cant_use_aperture; info->vm_ops = bridge->vm_ops; info->page_mask = ~0UL; return 0; } EXPORT_SYMBOL(agp_copy_info); /* End - Routine to copy over information structure */ /* * Routines for handling swapping of agp_memory into the GATT - * These routines take agp_memory and insert them into the GATT. * They call device specific routines to actually write to the GATT. */ /** * agp_bind_memory - Bind an agp_memory structure into the GATT. * * @curr: agp_memory pointer * @pg_start: an offset into the graphics aperture translation table * * It returns -EINVAL if the pointer == NULL. * It returns -EBUSY if the area of the table requested is already in use. */ int agp_bind_memory(struct agp_memory *curr, off_t pg_start) { int ret_val; if (curr == NULL) return -EINVAL; if (curr->is_bound) { printk(KERN_INFO PFX "memory %p is already bound!\n", curr); return -EINVAL; } if (!curr->is_flushed) { curr->bridge->driver->cache_flush(); curr->is_flushed = true; } ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); if (ret_val != 0) return ret_val; curr->is_bound = true; curr->pg_start = pg_start; spin_lock(&agp_bridge->mapped_lock); list_add(&curr->mapped_list, &agp_bridge->mapped_list); spin_unlock(&agp_bridge->mapped_lock); return 0; } EXPORT_SYMBOL(agp_bind_memory); /** * agp_unbind_memory - Removes an agp_memory structure from the GATT * * @curr: agp_memory pointer to be removed from the GATT. * * It returns -EINVAL if this piece of agp_memory is not currently bound to * the graphics aperture translation table or if the agp_memory pointer == NULL */ int agp_unbind_memory(struct agp_memory *curr) { int ret_val; if (curr == NULL) return -EINVAL; if (!curr->is_bound) { printk(KERN_INFO PFX "memory %p was not bound!\n", curr); return -EINVAL; } ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); if (ret_val != 0) return ret_val; curr->is_bound = false; curr->pg_start = 0; spin_lock(&curr->bridge->mapped_lock); list_del(&curr->mapped_list); spin_unlock(&curr->bridge->mapped_lock); return 0; } EXPORT_SYMBOL(agp_unbind_memory); /* End - Routines for handling swapping of agp_memory into the GATT */ /* Generic Agp routines - Start */ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) { u32 tmp; if (*requested_mode & AGP2_RESERVED_MASK) { printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", *requested_mode & AGP2_RESERVED_MASK, *requested_mode); *requested_mode &= ~AGP2_RESERVED_MASK; } /* * Some dumb bridges are programmed to disobey the AGP2 spec. * This is likely a BIOS misprogramming rather than poweron default, or * it would be a lot more common. * https://bugs.freedesktop.org/show_bug.cgi?id=8816 * AGPv2 spec 6.1.9 states: * The RATE field indicates the data transfer rates supported by this * device. A.G.P. 
devices must report all that apply. * Fix them up as best we can. */ switch (*bridge_agpstat & 7) { case 4: *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X); printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. " "Fixing up support for x2 & x1\n"); break; case 2: *bridge_agpstat |= AGPSTAT2_1X; printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. " "Fixing up support for x1\n"); break; default: break; } /* Check the speed bits make sense. Only one should be set. */ tmp = *requested_mode & 7; switch (tmp) { case 0: printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm); *requested_mode |= AGPSTAT2_1X; break; case 1: case 2: break; case 3: *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */ break; case 4: break; case 5: case 6: case 7: *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/ break; } /* disable SBA if it's not supported */ if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA))) *bridge_agpstat &= ~AGPSTAT_SBA; /* Set rate */ if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X))) *bridge_agpstat &= ~AGPSTAT2_4X; if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X))) *bridge_agpstat &= ~AGPSTAT2_2X; if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X))) *bridge_agpstat &= ~AGPSTAT2_1X; /* Now we know what mode it should be, clear out the unwanted bits. */ if (*bridge_agpstat & AGPSTAT2_4X) *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */ if (*bridge_agpstat & AGPSTAT2_2X) *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */ if (*bridge_agpstat & AGPSTAT2_1X) *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */ /* Apply any errata. */ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) *bridge_agpstat &= ~AGPSTAT_FW; if (agp_bridge->flags & AGP_ERRATA_SBA) *bridge_agpstat &= ~AGPSTAT_SBA; if (agp_bridge->flags & AGP_ERRATA_1X) { *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); *bridge_agpstat |= AGPSTAT2_1X; } /* If we've dropped down to 1X, disable fast writes. */ if (*bridge_agpstat & AGPSTAT2_1X) *bridge_agpstat &= ~AGPSTAT_FW; } /* * requested_mode = Mode requested by (typically) X. * bridge_agpstat = PCI_AGP_STATUS from agp bridge. * vga_agpstat = PCI_AGP_STATUS from graphic card. */ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) { u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat; u32 tmp; if (*requested_mode & AGP3_RESERVED_MASK) { printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", *requested_mode & AGP3_RESERVED_MASK, *requested_mode); *requested_mode &= ~AGP3_RESERVED_MASK; } /* Check the speed bits make sense. */ tmp = *requested_mode & 7; if (tmp == 0) { printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm); *requested_mode |= AGPSTAT3_4X; } if (tmp >= 3) { printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4); *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X; } /* ARQSZ - Set the value to the maximum one. * Don't allow the mode register to override values. */ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) | max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ))); /* Calibration cycle. * Don't allow the mode register to override values. 
*/ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) | min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK))); /* SBA *must* be supported for AGP v3 */ *bridge_agpstat |= AGPSTAT_SBA; /* * Set speed. * Check for invalid speeds. This can happen when applications * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware */ if (*requested_mode & AGPSTAT_MODE_3_0) { /* * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode, * have been passed a 3.0 mode, but with 2.x speed bits set. * AGP2.x 4x -> AGP3.0 4x. */ if (*requested_mode & AGPSTAT2_4X) { printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n", current->comm, *requested_mode); *requested_mode &= ~AGPSTAT2_4X; *requested_mode |= AGPSTAT3_4X; } } else { /* * The caller doesn't know what they are doing. We are in 3.0 mode, * but have been passed an AGP 2.x mode. * Convert AGP 1x,2x,4x -> AGP 3.0 4x. */ printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n", current->comm, *requested_mode); *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X); *requested_mode |= AGPSTAT3_4X; } if (*requested_mode & AGPSTAT3_8X) { if (!(*bridge_agpstat & AGPSTAT3_8X)) { *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm); return; } if (!(*vga_agpstat & AGPSTAT3_8X)) { *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm); return; } /* All set, bridge & device can do AGP x8*/ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); goto done; } else if (*requested_mode & AGPSTAT3_4X) { *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; goto done; } else { /* * If we didn't specify an AGP mode, we see if both * the graphics card, and the bridge can do x8, and use if so. * If not, we fall back to x4 mode. */ if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) { printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode " "supported by bridge & card (x8).\n"); *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); } else { printk(KERN_INFO PFX "Fell back to AGPx4 mode because "); if (!(*bridge_agpstat & AGPSTAT3_8X)) { printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", *bridge_agpstat, origbridge); *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; } if (!(*vga_agpstat & AGPSTAT3_8X)) { printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", *vga_agpstat, origvga); *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *vga_agpstat |= AGPSTAT3_4X; } } } done: /* Apply any errata. */ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) *bridge_agpstat &= ~AGPSTAT_FW; if (agp_bridge->flags & AGP_ERRATA_SBA) *bridge_agpstat &= ~AGPSTAT_SBA; if (agp_bridge->flags & AGP_ERRATA_1X) { *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); *bridge_agpstat |= AGPSTAT2_1X; } } /** * agp_collect_device_status - determine correct agp_cmd from various agp_stat's * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. * @requested_mode: requested agp_stat from userspace (Typically from X) * @bridge_agpstat: current agp_stat from AGP bridge. 
* * This function will hunt for an AGP graphics card, and try to match * the requested mode to the capabilities of both the bridge and the card. */ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat) { struct pci_dev *device = NULL; u32 vga_agpstat; u8 cap_ptr; for (;;) { device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device); if (!device) { printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); return 0; } cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); if (cap_ptr) break; } /* * Ok, here we have a AGP device. Disable impossible * settings, and adjust the readqueue to the minimum. */ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat); /* adjust RQ depth */ bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) | min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH), min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH)))); /* disable FW if it's not supported */ if (!((bridge_agpstat & AGPSTAT_FW) && (vga_agpstat & AGPSTAT_FW) && (requested_mode & AGPSTAT_FW))) bridge_agpstat &= ~AGPSTAT_FW; /* Check to see if we are operating in 3.0 mode */ if (agp_bridge->mode & AGPSTAT_MODE_3_0) agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); else agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); pci_dev_put(device); return bridge_agpstat; } EXPORT_SYMBOL(agp_collect_device_status); void agp_device_command(u32 bridge_agpstat, bool agp_v3) { struct pci_dev *device = NULL; int mode; mode = bridge_agpstat & 0x7; if (agp_v3) mode *= 4; for_each_pci_dev(device) { u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); if (!agp) continue; dev_info(&device->dev, "putting AGP V%d device into %dx mode\n", agp_v3 ? 3 : 2, mode); pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); } } EXPORT_SYMBOL(agp_device_command); void get_agp_version(struct agp_bridge_data *bridge) { u32 ncapid; /* Exit early if already set by errata workarounds. */ if (bridge->major_version != 0) return; pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid); bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf; } EXPORT_SYMBOL(get_agp_version); void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) { u32 bridge_agpstat, temp; get_agp_version(agp_bridge); dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", agp_bridge->major_version, agp_bridge->minor_version); pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat); if (bridge_agpstat == 0) /* Something bad happened. FIXME: Return error code? */ return; bridge_agpstat |= AGPSTAT_AGP_ENABLE; /* Do AGP version specific frobbing. */ if (bridge->major_version >= 3) { if (bridge->mode & AGPSTAT_MODE_3_0) { /* If we have 3.5, we can do the isoch stuff. 
*/ if (bridge->minor_version >= 5) agp_3_5_enable(bridge); agp_device_command(bridge_agpstat, true); return; } else { /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/ bridge_agpstat &= ~(7<<10) ; pci_read_config_dword(bridge->dev, bridge->capndx+AGPCTRL, &temp); temp |= (1<<9); pci_write_config_dword(bridge->dev, bridge->capndx+AGPCTRL, temp); dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n"); } } /* AGP v<3 */ agp_device_command(bridge_agpstat, false); } EXPORT_SYMBOL(agp_generic_enable); int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) { char *table; char *table_end; int page_order; int num_entries; int i; void *temp; struct page *page; /* The generic routines can't handle 2 level gatt's */ if (bridge->driver->size_type == LVL2_APER_SIZE) return -EINVAL; table = NULL; i = bridge->aperture_size_idx; temp = bridge->current_size; page_order = num_entries = 0; if (bridge->driver->size_type != FIXED_APER_SIZE) { do { switch (bridge->driver->size_type) { case U8_APER_SIZE: page_order = A_SIZE_8(temp)->page_order; num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: page_order = A_SIZE_16(temp)->page_order; num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: page_order = A_SIZE_32(temp)->page_order; num_entries = A_SIZE_32(temp)->num_entries; break; /* This case will never really happen. */ case FIXED_APER_SIZE: case LVL2_APER_SIZE: default: page_order = num_entries = 0; break; } table = alloc_gatt_pages(page_order); if (table == NULL) { i++; switch (bridge->driver->size_type) { case U8_APER_SIZE: bridge->current_size = A_IDX8(bridge); break; case U16_APER_SIZE: bridge->current_size = A_IDX16(bridge); break; case U32_APER_SIZE: bridge->current_size = A_IDX32(bridge); break; /* These cases will never really happen. */ case FIXED_APER_SIZE: case LVL2_APER_SIZE: default: break; } temp = bridge->current_size; } else { bridge->aperture_size_idx = i; } } while (!table && (i < bridge->driver->num_aperture_sizes)); } else { page_order = ((struct aper_size_info_fixed *) temp)->page_order; num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; table = alloc_gatt_pages(page_order); } if (table == NULL) return -ENOMEM; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); bridge->gatt_table_real = (u32 *) table; agp_gatt_table = (void *)table; bridge->driver->cache_flush(); #ifdef CONFIG_X86 if (set_memory_uc((unsigned long)table, 1 << page_order)) printk(KERN_WARNING "Could not set GATT table memory to UC!\n"); bridge->gatt_table = (u32 __iomem *)table; #else bridge->gatt_table = ioremap(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); bridge->driver->cache_flush(); #endif if (bridge->gatt_table == NULL) { for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); free_gatt_pages(table, page_order); return -ENOMEM; } bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); /* AK: bogus, should encode addresses > 4GB */ for (i = 0; i < num_entries; i++) { writel(bridge->scratch_page, bridge->gatt_table+i); readl(bridge->gatt_table+i); /* PCI Posting. 
*/ } return 0; } EXPORT_SYMBOL(agp_generic_create_gatt_table); int agp_generic_free_gatt_table(struct agp_bridge_data *bridge) { int page_order; char *table, *table_end; void *temp; struct page *page; temp = bridge->current_size; switch (bridge->driver->size_type) { case U8_APER_SIZE: page_order = A_SIZE_8(temp)->page_order; break; case U16_APER_SIZE: page_order = A_SIZE_16(temp)->page_order; break; case U32_APER_SIZE: page_order = A_SIZE_32(temp)->page_order; break; case FIXED_APER_SIZE: page_order = A_SIZE_FIX(temp)->page_order; break; case LVL2_APER_SIZE: /* The generic routines can't deal with 2 level gatt's */ return -EINVAL; default: page_order = 0; break; } /* Do not worry about freeing memory, because if this is * called, then all agp memory is deallocated and removed * from the table. */ #ifdef CONFIG_X86 set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order); #else iounmap(bridge->gatt_table); #endif table = (char *) bridge->gatt_table_real; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); free_gatt_pages(bridge->gatt_table_real, page_order); agp_gatt_table = NULL; bridge->gatt_table = NULL; bridge->gatt_table_real = NULL; bridge->gatt_bus_addr = 0; return 0; } EXPORT_SYMBOL(agp_generic_free_gatt_table); int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int num_entries; size_t i; off_t j; void *temp; struct agp_bridge_data *bridge; int mask_type; bridge = mem->bridge; if (!bridge) return -EINVAL; if (mem->page_count == 0) return 0; temp = bridge->current_size; switch (bridge->driver->size_type) { case U8_APER_SIZE: num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: num_entries = A_SIZE_32(temp)->num_entries; break; case FIXED_APER_SIZE: num_entries = A_SIZE_FIX(temp)->num_entries; break; case LVL2_APER_SIZE: /* The generic routines can't deal with 2 level gatt's */ return -EINVAL; default: num_entries = 0; break; } num_entries -= agp_memory_reserved/PAGE_SIZE; if (num_entries < 0) num_entries = 0; if (type != mem->type) return -EINVAL; mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) { /* The generic routines know nothing of memory types */ return -EINVAL; } if (((pg_start + mem->page_count) > num_entries) || ((pg_start + mem->page_count) < pg_start)) return -EINVAL; j = pg_start; while (j < (pg_start + mem->page_count)) { if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j))) return -EBUSY; j++; } if (!mem->is_flushed) { bridge->driver->cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(bridge->driver->mask_memory(bridge, page_to_phys(mem->pages[i]), mask_type), bridge->gatt_table+j); } readl(bridge->gatt_table+j-1); /* PCI Posting. 
*/ bridge->driver->tlb_flush(mem); return 0; } EXPORT_SYMBOL(agp_generic_insert_memory); int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; struct agp_bridge_data *bridge; int mask_type, num_entries; bridge = mem->bridge; if (!bridge) return -EINVAL; if (mem->page_count == 0) return 0; if (type != mem->type) return -EINVAL; num_entries = agp_num_entries(); if (((pg_start + mem->page_count) > num_entries) || ((pg_start + mem->page_count) < pg_start)) return -EINVAL; mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) { /* The generic routines know nothing of memory types */ return -EINVAL; } /* AK: bogus, should encode addresses > 4GB */ for (i = pg_start; i < (mem->page_count + pg_start); i++) { writel(bridge->scratch_page, bridge->gatt_table+i); } readl(bridge->gatt_table+i-1); /* PCI Posting. */ bridge->driver->tlb_flush(mem); return 0; } EXPORT_SYMBOL(agp_generic_remove_memory); struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) { return NULL; } EXPORT_SYMBOL(agp_generic_alloc_by_type); void agp_generic_free_by_type(struct agp_memory *curr) { agp_free_page_array(curr); agp_free_key(curr->key); kfree(curr); } EXPORT_SYMBOL(agp_generic_free_by_type); struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) { struct agp_memory *new; int i; int pages; pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_user_memory(page_count); if (new == NULL) return NULL; for (i = 0; i < page_count; i++) new->pages[i] = NULL; new->page_count = 0; new->type = type; new->num_scratch_pages = pages; return new; } EXPORT_SYMBOL(agp_generic_alloc_user); /* * Basic Page Allocation Routines - * These routines handle page allocation and by default they reserve the allocated * memory. They also handle incrementing the current_memory_agp value, Which is checked * against a maximum value. 
*/ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages) { struct page * page; int i, ret = -ENOMEM; for (i = 0; i < num_pages; i++) { page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); /* agp_free_memory() needs gart address */ if (page == NULL) goto out; #ifndef CONFIG_X86 map_page_into_agp(page); #endif get_page(page); atomic_inc(&agp_bridge->current_memory_agp); mem->pages[i] = page; mem->page_count++; } #ifdef CONFIG_X86 set_pages_array_uc(mem->pages, num_pages); #endif ret = 0; out: return ret; } EXPORT_SYMBOL(agp_generic_alloc_pages); struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge) { struct page * page; page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (page == NULL) return NULL; map_page_into_agp(page); get_page(page); atomic_inc(&agp_bridge->current_memory_agp); return page; } EXPORT_SYMBOL(agp_generic_alloc_page); void agp_generic_destroy_pages(struct agp_memory *mem) { int i; struct page *page; if (!mem) return; #ifdef CONFIG_X86 set_pages_array_wb(mem->pages, mem->page_count); #endif for (i = 0; i < mem->page_count; i++) { page = mem->pages[i]; #ifndef CONFIG_X86 unmap_page_from_agp(page); #endif put_page(page); __free_page(page); atomic_dec(&agp_bridge->current_memory_agp); mem->pages[i] = NULL; } } EXPORT_SYMBOL(agp_generic_destroy_pages); void agp_generic_destroy_page(struct page *page, int flags) { if (page == NULL) return; if (flags & AGP_PAGE_DESTROY_UNMAP) unmap_page_from_agp(page); if (flags & AGP_PAGE_DESTROY_FREE) { put_page(page); __free_page(page); atomic_dec(&agp_bridge->current_memory_agp); } } EXPORT_SYMBOL(agp_generic_destroy_page); /* End Basic Page Allocation Routines */ /** * agp_enable - initialise the agp point-to-point connection. * * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. * @mode: agp mode register value to configure with. */ void agp_enable(struct agp_bridge_data *bridge, u32 mode) { if (!bridge) return; bridge->driver->agp_enable(bridge, mode); } EXPORT_SYMBOL(agp_enable); /* When we remove the global variable agp_bridge from all drivers * then agp_alloc_bridge and agp_generic_find_bridge need to be updated */ struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev) { if (list_empty(&agp_bridges)) return NULL; return agp_bridge; } static void ipi_handler(void *null) { flush_agp_cache(); } void global_cache_flush(void) { on_each_cpu(ipi_handler, NULL, 1); } EXPORT_SYMBOL(global_cache_flush); unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, int type) { /* memory type is ignored in the generic routine */ if (bridge->driver->masks) return addr | bridge->driver->masks[0].mask; else return addr; } EXPORT_SYMBOL(agp_generic_mask_memory); int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, int type) { if (type >= AGP_USER_TYPES) return 0; return type; } EXPORT_SYMBOL(agp_generic_type_to_mask_type); /* * These functions are implemented according to the AGPv3 spec, * which covers implementation details that had previously been * left open. 
*/ int agp3_generic_fetch_size(void) { u16 temp_size; int i; struct aper_size_info_16 *values; pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size); values = A_SIZE_16(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp_size == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } EXPORT_SYMBOL(agp3_generic_fetch_size); void agp3_generic_tlbflush(struct agp_memory *mem) { u32 ctrl; pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl); } EXPORT_SYMBOL(agp3_generic_tlbflush); int agp3_generic_configure(void) { u32 temp; struct aper_size_info_16 *current_size; current_size = A_SIZE_16(agp_bridge->current_size); agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* set aperture size */ pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); /* set gart pointer */ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr); /* enable aperture and GTLB */ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN); return 0; } EXPORT_SYMBOL(agp3_generic_configure); void agp3_generic_cleanup(void) { u32 ctrl; pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB); } EXPORT_SYMBOL(agp3_generic_cleanup); const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] = { {4096, 1048576, 10,0x000}, {2048, 524288, 9, 0x800}, {1024, 262144, 8, 0xc00}, { 512, 131072, 7, 0xe00}, { 256, 65536, 6, 0xf00}, { 128, 32768, 5, 0xf20}, { 64, 16384, 4, 0xf30}, { 32, 8192, 3, 0xf38}, { 16, 4096, 2, 0xf3c}, { 8, 2048, 1, 0xf3e}, { 4, 1024, 0, 0xf3f} }; EXPORT_SYMBOL(agp3_generic_sizes);
linux-master
drivers/char/agp/generic.c
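The aperture plumbing in generic.c is almost entirely table driven: agp3_generic_fetch_size() reads the AGPAPSIZE word and walks agp3_generic_sizes[] until the register encoding matches, and agp3_generic_configure() later writes the chosen size_value back. The following sketch is plain userspace C, not kernel code; it replays that table walk with the same table values, and the register values passed in main() are only examples.

/*
 * Standalone sketch (not kernel code): mirrors the lookup that
 * agp3_generic_fetch_size() performs against agp3_generic_sizes[],
 * mapping an AGPAPSIZE register encoding to an aperture size in MB.
 */
#include <stdio.h>

struct aper_size_sketch {
	int size;			/* aperture size in MB */
	int num_entries;		/* GATT entries */
	int page_order;
	unsigned short size_value;	/* encoding found in AGPAPSIZE */
};

static const struct aper_size_sketch agp3_sizes[] = {
	{4096, 1048576, 10, 0x000}, {2048, 524288, 9, 0x800},
	{1024, 262144, 8, 0xc00},   { 512, 131072, 7, 0xe00},
	{ 256, 65536, 6, 0xf00},    { 128, 32768, 5, 0xf20},
	{  64, 16384, 4, 0xf30},    {  32, 8192, 3, 0xf38},
	{  16, 4096, 2, 0xf3c},     {   8, 2048, 1, 0xf3e},
	{   4, 1024, 0, 0xf3f}
};

static int fetch_size_mb(unsigned short reg_value)
{
	unsigned int i;

	for (i = 0; i < sizeof(agp3_sizes) / sizeof(agp3_sizes[0]); i++)
		if (reg_value == agp3_sizes[i].size_value)
			return agp3_sizes[i].size;
	return 0;	/* unknown encoding, as in the kernel routine */
}

int main(void)
{
	printf("APSIZE 0xf00 -> %d MB\n", fetch_size_mb(0xf00));	/* 256 */
	printf("APSIZE 0xf3f -> %d MB\n", fetch_size_mb(0xf3f));	/*   4 */
	return 0;
}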
// SPDX-License-Identifier: GPL-2.0 /* * Setup routines for AGP 3.5 compliant bridges. */ #include <linux/list.h> #include <linux/pci.h> #include <linux/agp_backend.h> #include <linux/module.h> #include <linux/slab.h> #include "agp.h" /* Generic AGP 3.5 enabling routines */ struct agp_3_5_dev { struct list_head list; u8 capndx; u32 maxbw; struct pci_dev *dev; }; static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new) { struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list); struct list_head *pos; list_for_each(pos, head) { cur = list_entry(pos, struct agp_3_5_dev, list); if (cur->maxbw > n->maxbw) break; } list_add_tail(new, pos); } static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs) { struct agp_3_5_dev *cur; struct pci_dev *dev; struct list_head *pos, *tmp, *head = &list->list, *start = head->next; u32 nistat; INIT_LIST_HEAD(head); for (pos=start; pos!=head; ) { cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat); cur->maxbw = (nistat >> 16) & 0xff; tmp = pos; pos = pos->next; agp_3_5_dev_list_insert(head, tmp); } } /* * Initialize all isochronous transfer parameters for an AGP 3.0 * node (i.e. a host bridge in combination with the adapters * lying behind it...) */ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, struct agp_3_5_dev *dev_list, unsigned int ndevs) { /* * Convenience structure to make the calculations clearer * here. The field names come straight from the AGP 3.0 spec. */ struct isoch_data { u32 maxbw; u32 n; u32 y; u32 l; u32 rq; struct agp_3_5_dev *dev; }; struct pci_dev *td = bridge->dev, *dev; struct list_head *head = &dev_list->list, *pos; struct agp_3_5_dev *cur; struct isoch_data *master, target; unsigned int cdev = 0; u32 mnistat, tnistat, tstatus, mcmd; u16 tnicmd, mnicmd; u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; u32 step, rem, rem_isoch, rem_async; int ret = 0; /* * We'll work with an array of isoch_data's (one for each * device in dev_list) throughout this function. */ master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL); if (master == NULL) { ret = -ENOMEM; goto get_out; } /* * Sort the device list by maxbw. We need to do this because the * spec suggests that the devices with the smallest requirements * have their resources allocated first, with all remaining resources * falling to the device with the largest requirement. * * We don't exactly do this, we divide target resources by ndevs * and split them amongst the AGP 3.0 devices. The remainder of such * division operations are dropped on the last device, sort of like * the spec mentions it should be done. * * We can't do this sort when we initially construct the dev_list * because we don't know until this function whether isochronous * transfers are enabled and consequently whether maxbw will mean * anything. */ agp_3_5_dev_list_sort(dev_list, ndevs); pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat); pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus); /* Extract power-on defaults from the target */ target.maxbw = (tnistat >> 16) & 0xff; target.n = (tnistat >> 8) & 0xff; target.y = (tnistat >> 6) & 0x3; target.l = (tnistat >> 3) & 0x7; target.rq = (tstatus >> 24) & 0xff; y_max = target.y; /* * Extract power-on defaults for each device in dev_list. Along * the way, calculate the total isochronous bandwidth required * by these devices and the largest requested payload size. 
*/ list_for_each(pos, head) { cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); master[cdev].maxbw = (mnistat >> 16) & 0xff; master[cdev].n = (mnistat >> 8) & 0xff; master[cdev].y = (mnistat >> 6) & 0x3; master[cdev].dev = cur; tot_bw += master[cdev].maxbw; y_max = max(y_max, master[cdev].y); cdev++; } /* Check if this configuration has any chance of working */ if (tot_bw > target.maxbw) { dev_err(&td->dev, "isochronous bandwidth required " "by AGP 3.0 devices exceeds that which is supported by " "the AGP 3.0 bridge!\n"); ret = -ENODEV; goto free_and_exit; } target.y = y_max; /* * Write the calculated payload size into the target's NICMD * register. Doing this directly effects the ISOCH_N value * in the target's NISTAT register, so we need to do this now * to get an accurate value for ISOCH_N later. */ pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd); tnicmd &= ~(0x3 << 6); tnicmd |= target.y << 6; pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd); /* Reread the target's ISOCH_N */ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat); target.n = (tnistat >> 8) & 0xff; /* Calculate the minimum ISOCH_N needed by each master */ for (cdev=0; cdev<ndevs; cdev++) { master[cdev].y = target.y; master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1); tot_n += master[cdev].n; } /* Exit if the minimal ISOCH_N allocation among the masters is more * than the target can handle. */ if (tot_n > target.n) { dev_err(&td->dev, "number of isochronous " "transactions per period required by AGP 3.0 devices " "exceeds that which is supported by the AGP 3.0 " "bridge!\n"); ret = -ENODEV; goto free_and_exit; } /* Calculate left over ISOCH_N capability in the target. We'll give * this to the hungriest device (as per the spec) */ rem = target.n - tot_n; /* * Calculate the minimum isochronous RQ depth needed by each master. * Along the way, distribute the extra ISOCH_N capability calculated * above. */ for (cdev=0; cdev<ndevs; cdev++) { /* * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y * byte isochronous writes will be broken into 64B pieces. * This means we need to budget more RQ depth to account for * these kind of writes (each isochronous write is actually * many writes on the AGP bus). */ master[cdev].rq = master[cdev].n; if (master[cdev].y > 0x1) master[cdev].rq *= (1 << (master[cdev].y - 1)); tot_rq += master[cdev].rq; } master[ndevs-1].n += rem; /* Figure the number of isochronous and asynchronous RQ slots the * target is providing. */ rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n; rq_async = target.rq - rq_isoch; /* Exit if the minimal RQ needs of the masters exceeds what the target * can provide. */ if (tot_rq > rq_isoch) { dev_err(&td->dev, "number of request queue slots " "required by the isochronous bandwidth requested by " "AGP 3.0 devices exceeds the number provided by the " "AGP 3.0 bridge!\n"); ret = -ENODEV; goto free_and_exit; } /* Calculate asynchronous RQ capability in the target (per master) as * well as the total number of leftover isochronous RQ slots. */ step = rq_async / ndevs; rem_async = step + (rq_async % ndevs); rem_isoch = rq_isoch - tot_rq; /* Distribute the extra RQ slots calculated above and write our * isochronous settings out to the actual devices. */ for (cdev=0; cdev<ndevs; cdev++) { cur = master[cdev].dev; dev = cur->dev; master[cdev].rq += (cdev == ndevs - 1) ? 
(rem_async + rem_isoch) : step; pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd); pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd); mnicmd &= ~(0xff << 8); mnicmd &= ~(0x3 << 6); mcmd &= ~(0xff << 24); mnicmd |= master[cdev].n << 8; mnicmd |= master[cdev].y << 6; mcmd |= master[cdev].rq << 24; pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd); pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd); } free_and_exit: kfree(master); get_out: return ret; } /* * This function basically allocates request queue slots among the * AGP 3.0 systems in nonisochronous nodes. The algorithm is * pretty stupid, divide the total number of RQ slots provided by the * target by ndevs. Distribute this many slots to each AGP 3.0 device, * giving any left over slots to the last device in dev_list. */ static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge, struct agp_3_5_dev *dev_list, unsigned int ndevs) { struct agp_3_5_dev *cur; struct list_head *head = &dev_list->list, *pos; u32 tstatus, mcmd; u32 trq, mrq, rem; unsigned int cdev = 0; pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus); trq = (tstatus >> 24) & 0xff; mrq = trq / ndevs; rem = mrq + (trq % ndevs); for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) { cur = list_entry(pos, struct agp_3_5_dev, list); pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd); mcmd &= ~(0xff << 24); mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24; pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd); } } /* * Fully configure and enable an AGP 3.0 host bridge and all the devices * lying behind it. */ int agp_3_5_enable(struct agp_bridge_data *bridge) { struct pci_dev *td = bridge->dev, *dev = NULL; u8 mcapndx; u32 isoch; u32 tstatus, mstatus, ncapid; u32 mmajor; u16 mpstat; struct agp_3_5_dev *dev_list, *cur; struct list_head *head, *pos; unsigned int ndevs = 0; int ret = 0; /* Extract some power-on defaults from the target */ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus); isoch = (tstatus >> 17) & 0x1; if (isoch == 0) /* isoch xfers not available, bail out. */ return -ENODEV; /* * Allocate a head for our AGP 3.5 device list * (multiple AGP v3 devices are allowed behind a single bridge). */ if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) { ret = -ENOMEM; goto get_out; } head = &dev_list->list; INIT_LIST_HEAD(head); /* Find all AGP devices, and add them to dev_list. */ for_each_pci_dev(dev) { mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP); if (mcapndx == 0) continue; switch ((dev->class >>8) & 0xff00) { case 0x0600: /* Bridge */ /* Skip bridges. We should call this function for each one. */ continue; case 0x0001: /* Unclassified device */ /* Don't know what this is, but log it for investigation. */ if (mcapndx != 0) { dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n", pci_name(dev), dev->vendor, dev->device); } continue; case 0x0300: /* Display controller */ case 0x0400: /* Multimedia controller */ if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) { ret = -ENOMEM; goto free_and_exit; } cur->dev = dev; pos = &cur->list; list_add(pos, head); ndevs++; continue; default: continue; } } /* * Take an initial pass through the devices lying behind our host * bridge. Make sure each one is actually an AGP 3.0 device, otherwise * exit with an error message. 
Along the way store the AGP 3.0 * cap_ptr for each device */ list_for_each(pos, head) { cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; pci_read_config_word(dev, PCI_STATUS, &mpstat); if ((mpstat & PCI_STATUS_CAP_LIST) == 0) continue; pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx); if (mcapndx != 0) { do { pci_read_config_dword(dev, mcapndx, &ncapid); if ((ncapid & 0xff) != 2) mcapndx = (ncapid >> 8) & 0xff; } while (((ncapid & 0xff) != 2) && (mcapndx != 0)); } if (mcapndx == 0) { dev_err(&td->dev, "woah! Non-AGP device %s on " "secondary bus of AGP 3.5 bridge!\n", pci_name(dev)); ret = -ENODEV; goto free_and_exit; } mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; if (mmajor < 3) { dev_err(&td->dev, "woah! AGP 2.0 device %s on " "secondary bus of AGP 3.5 bridge operating " "with AGP 3.0 electricals!\n", pci_name(dev)); ret = -ENODEV; goto free_and_exit; } cur->capndx = mcapndx; pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); if (((mstatus >> 3) & 0x1) == 0) { dev_err(&td->dev, "woah! AGP 3.x device %s not " "operating in AGP 3.x mode on secondary bus " "of AGP 3.5 bridge operating with AGP 3.0 " "electricals!\n", pci_name(dev)); ret = -ENODEV; goto free_and_exit; } } /* * Call functions to divide target resources amongst the AGP 3.0 * masters. This process is dramatically different depending on * whether isochronous transfers are supported. */ if (isoch) { ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); if (ret) { dev_info(&td->dev, "something bad happened setting " "up isochronous xfers; falling back to " "non-isochronous xfer mode\n"); } else { goto free_and_exit; } } agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs); free_and_exit: /* Be sure to free the dev_list */ for (pos=head->next; pos!=head; ) { cur = list_entry(pos, struct agp_3_5_dev, list); pos = pos->next; kfree(cur); } kfree(dev_list); get_out: return ret; }
linux-master
drivers/char/agp/isoch.c
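The heart of isoch.c is the budgeting in agp_3_5_isochronous_node_enable(): pick the largest requested ISOCH_Y, derive each master's minimum ISOCH_N and RQ depth from its maxbw, check that the totals fit the target, then hand any leftover N and RQ slots to the hungriest (last) master. The standalone sketch below is not kernel code; it replays that arithmetic for one made-up target and two made-up masters. The numeric inputs are illustrative only, and the NISTAT re-read that normally follows programming ISOCH_Y is skipped because there is no hardware here.

/*
 * Standalone sketch (not kernel code): the ISOCH_N / RQ budgeting from
 * agp_3_5_isochronous_node_enable(), with invented example numbers.
 */
#include <stdio.h>

struct isoch { unsigned int maxbw, n, y, rq; };

int main(void)
{
	struct isoch target = { .maxbw = 32, .n = 16, .y = 1, .rq = 40 };
	struct isoch master[2] = {
		{ .maxbw = 8,  .y = 1 },	/* list is sorted by maxbw, smallest first */
		{ .maxbw = 16, .y = 2 },
	};
	unsigned int ndevs = 2, tot_n = 0, tot_rq = 0, y_max, i;

	/* payload size: everyone runs at the largest requested ISOCH_Y */
	y_max = target.y;
	for (i = 0; i < ndevs; i++)
		if (master[i].y > y_max)
			y_max = master[i].y;
	target.y = y_max;	/* kernel would also re-read target.n from NISTAT here */

	for (i = 0; i < ndevs; i++) {
		master[i].y = target.y;
		master[i].n = master[i].maxbw / (master[i].y + 1);
		/* >64B isochronous writes are split into 64B pieces -> more RQ slots */
		master[i].rq = master[i].n;
		if (master[i].y > 1)
			master[i].rq *= 1u << (master[i].y - 1);
		tot_n += master[i].n;
		tot_rq += master[i].rq;
	}

	unsigned int rq_isoch = (target.y > 1) ?
		target.n * (1u << (target.y - 1)) : target.n;

	/* the driver gives up (-ENODEV) when the minimum budgets do not fit */
	if (tot_n > target.n || tot_rq > rq_isoch)
		return 1;

	unsigned int rq_async = target.rq - rq_isoch;
	unsigned int step = rq_async / ndevs;

	/* leftovers go to the last (hungriest) master, as in the driver */
	master[ndevs - 1].n += target.n - tot_n;
	for (i = 0; i < ndevs; i++)
		master[i].rq += (i == ndevs - 1) ?
			(step + rq_async % ndevs + (rq_isoch - tot_rq)) : step;

	for (i = 0; i < ndevs; i++)
		printf("master %u: ISOCH_N=%u ISOCH_Y=%u RQ=%u\n",
		       i, master[i].n, master[i].y, master[i].rq);
	return 0;
}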
/* * Intel AGPGART routines. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" #include <drm/intel-gtt.h> static int intel_fetch_size(void) { int i; u16 temp; struct aper_size_info_16 *values; pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); values = A_SIZE_16(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static int __intel_8xx_fetch_size(u8 temp) { int i; struct aper_size_info_8 *values; values = A_SIZE_8(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static int intel_8xx_fetch_size(void) { u8 temp; pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); return __intel_8xx_fetch_size(temp); } static int intel_815_fetch_size(void) { u8 temp; /* Intel 815 chipsets have a _weird_ APSIZE register with only * one non-reserved bit, so mask the others out ... */ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); temp &= (1 << 3); return __intel_8xx_fetch_size(temp); } static void intel_tlbflush(struct agp_memory *mem) { pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); } static void intel_8xx_tlbflush(struct agp_memory *mem) { u32 temp; pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7)); pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7)); } static void intel_cleanup(void) { u16 temp; struct aper_size_info_16 *previous_size; previous_size = A_SIZE_16(agp_bridge->previous_size); pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } static void intel_8xx_cleanup(void) { u16 temp; struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge->previous_size); pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } static int intel_configure(void) { u16 temp2; struct aper_size_info_16 *current_size; current_size = A_SIZE_16(agp_bridge->current_size); /* aperture size */ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); /* paccfg/nbxcfg */ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9)); /* clear any possible error conditions */ 
pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); return 0; } static int intel_815_configure(void) { u32 addr; u8 temp2; struct aper_size_info_8 *current_size; /* attbase - aperture base */ /* the Intel 815 chipset spec. says that bits 29-31 in the * ATTBASE register are reserved -> try not to write them */ if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high"); return -EINVAL; } current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr); addr &= INTEL_815_ATTBASE_MASK; addr |= agp_bridge->gatt_bus_addr; pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* apcont */ pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2); pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1)); /* clear any possible error conditions */ /* Oddness : this chipset seems to have no ERRSTS register ! */ return 0; } static void intel_820_tlbflush(struct agp_memory *mem) { return; } static void intel_820_cleanup(void) { u8 temp; struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge->previous_size); pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp); pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp & ~(1 << 1)); pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } static int intel_820_configure(void) { u8 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* global enable aperture access */ /* This flag is not accessed through MCHCFG register as in */ /* i850 chipset. 
*/ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2); pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1)); /* clear any possible AGP-related error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c); return 0; } static int intel_840_configure(void) { u16 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9)); /* clear any possible error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000); return 0; } static int intel_845_configure(void) { u8 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); if (agp_bridge->apbase_config != 0) { pci_write_config_dword(agp_bridge->dev, AGP_APBASE, agp_bridge->apbase_config); } else { /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); agp_bridge->apbase_config = agp_bridge->gart_bus_addr; } /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* agpm */ pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2); pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); /* clear any possible error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); return 0; } static int intel_850_configure(void) { u16 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c); return 0; } static int intel_860_configure(void) { u16 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2); pci_write_config_word(agp_bridge->dev, 
INTEL_I860_MCHCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700); return 0; } static int intel_830mp_configure(void) { u16 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* gmch */ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c); return 0; } static int intel_7505_configure(void) { u16 temp2; struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* attbase - aperture base */ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mchcfg */ pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9)); return 0; } /* Setup function */ static const struct gatt_mask intel_generic_masks[] = { {.mask = 0x00000017, .type = 0} }; static const struct aper_size_info_8 intel_815_sizes[2] = { {64, 16384, 4, 0}, {32, 8192, 3, 8}, }; static const struct aper_size_info_8 intel_8xx_sizes[7] = { {256, 65536, 6, 0}, {128, 32768, 5, 32}, {64, 16384, 4, 48}, {32, 8192, 3, 56}, {16, 4096, 2, 60}, {8, 2048, 1, 62}, {4, 1024, 0, 63} }; static const struct aper_size_info_16 intel_generic_sizes[7] = { {256, 65536, 6, 0}, {128, 32768, 5, 32}, {64, 16384, 4, 48}, {32, 8192, 3, 56}, {16, 4096, 2, 60}, {8, 2048, 1, 62}, {4, 1024, 0, 63} }; static const struct aper_size_info_8 intel_830mp_sizes[4] = { {256, 65536, 6, 0}, {128, 32768, 5, 32}, {64, 16384, 4, 48}, {32, 8192, 3, 56} }; static const struct agp_bridge_driver intel_generic_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_generic_sizes, .size_type = U16_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_configure, .fetch_size = intel_fetch_size, .cleanup = intel_cleanup, .tlb_flush = intel_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_815_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_815_sizes, .size_type = 
U8_APER_SIZE, .num_aperture_sizes = 2, .needs_scratch_page = true, .configure = intel_815_configure, .fetch_size = intel_815_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_820_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_8xx_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_820_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_820_cleanup, .tlb_flush = intel_820_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_830mp_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_830mp_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 4, .needs_scratch_page = true, .configure = intel_830mp_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_840_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_8xx_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_840_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = 
agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_845_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_8xx_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_845_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_850_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_8xx_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_850_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_860_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_8xx_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_860_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_7505_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_8xx_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = intel_7505_configure, .fetch_size = intel_8xx_fetch_size, .cleanup = intel_8xx_cleanup, .tlb_flush = intel_8xx_tlbflush, .mask_memory = 
agp_generic_mask_memory, .masks = intel_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of * driver and gmch_driver must be non-null, and find_gmch will determine * which one should be used if a gmch_chip_id is present. */ static const struct intel_agp_driver_description { unsigned int chip_id; char *name; const struct agp_bridge_driver *driver; } intel_agp_chipsets[] = { { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver }, { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver }, { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver }, { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver }, { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver }, { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver }, { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver }, { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver }, { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver }, { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver }, { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver }, { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver }, { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver }, { 0, NULL, NULL } }; static int agp_intel_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr = 0; struct resource *r; int i, err; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->capndx = cap_ptr; if (intel_gmch_probe(pdev, NULL, bridge)) goto found_gmch; for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { /* In case that multiple models of gfx chip may stand on same host bridge type, this can be sure we detect the right IGD. */ if (pdev->device == intel_agp_chipsets[i].chip_id) { bridge->driver = intel_agp_chipsets[i].driver; break; } } if (!bridge->driver) { if (cap_ptr) dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n", pdev->vendor, pdev->device); agp_put_bridge(bridge); return -ENODEV; } bridge->dev = pdev; bridge->dev_private_data = NULL; dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. 
* 20030610 - [email protected] * This happens before pci_enable_device() intentionally; * calling pci_enable_device() before assigning the resource * will result in the GART being disabled on machines with such * BIOSs (the GART ends up with a BAR starting at 0, which * conflicts a lot of other devices). */ r = &pdev->resource[0]; if (!r->start && r->end) { if (pci_assign_resource(pdev, 0)) { dev_err(&pdev->dev, "can't assign resource 0\n"); agp_put_bridge(bridge); return -ENODEV; } } /* * If the device has not been properly setup, the following will catch * the problem and should stop the system from crashing. * 20030610 - [email protected] */ if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "can't enable PCI device\n"); agp_put_bridge(bridge); return -ENODEV; } /* Fill in the mode register */ if (cap_ptr) { pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); } found_gmch: pci_set_drvdata(pdev, bridge); err = agp_add_bridge(bridge); return err; } static void agp_intel_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); intel_gmch_remove(); agp_put_bridge(bridge); } static int agp_intel_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct agp_bridge_data *bridge = pci_get_drvdata(pdev); bridge->driver->configure(); return 0; } static const struct pci_device_id agp_intel_pci_table[] = { #define ID(x) \ { \ .class = (PCI_CLASS_BRIDGE_HOST << 8), \ .class_mask = ~0, \ .vendor = PCI_VENDOR_ID_INTEL, \ .device = x, \ .subvendor = PCI_ANY_ID, \ .subdevice = PCI_ANY_ID, \ } ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */ ID(PCI_DEVICE_ID_INTEL_82443LX_0), ID(PCI_DEVICE_ID_INTEL_82443BX_0), ID(PCI_DEVICE_ID_INTEL_82443GX_0), ID(PCI_DEVICE_ID_INTEL_82810_MC1), ID(PCI_DEVICE_ID_INTEL_82810_MC3), ID(PCI_DEVICE_ID_INTEL_82810E_MC), ID(PCI_DEVICE_ID_INTEL_82815_MC), ID(PCI_DEVICE_ID_INTEL_82820_HB), ID(PCI_DEVICE_ID_INTEL_82820_UP_HB), ID(PCI_DEVICE_ID_INTEL_82830_HB), ID(PCI_DEVICE_ID_INTEL_82840_HB), ID(PCI_DEVICE_ID_INTEL_82845_HB), ID(PCI_DEVICE_ID_INTEL_82845G_HB), ID(PCI_DEVICE_ID_INTEL_82850_HB), ID(PCI_DEVICE_ID_INTEL_82854_HB), ID(PCI_DEVICE_ID_INTEL_82855PM_HB), ID(PCI_DEVICE_ID_INTEL_82855GM_HB), ID(PCI_DEVICE_ID_INTEL_82860_HB), ID(PCI_DEVICE_ID_INTEL_82865_HB), ID(PCI_DEVICE_ID_INTEL_82875_HB), ID(PCI_DEVICE_ID_INTEL_7505_0), ID(PCI_DEVICE_ID_INTEL_7205_0), ID(PCI_DEVICE_ID_INTEL_E7221_HB), ID(PCI_DEVICE_ID_INTEL_82915G_HB), ID(PCI_DEVICE_ID_INTEL_82915GM_HB), ID(PCI_DEVICE_ID_INTEL_82945G_HB), ID(PCI_DEVICE_ID_INTEL_82945GM_HB), ID(PCI_DEVICE_ID_INTEL_82945GME_HB), ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB), ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB), ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), ID(PCI_DEVICE_ID_INTEL_82G35_HB), ID(PCI_DEVICE_ID_INTEL_82965Q_HB), ID(PCI_DEVICE_ID_INTEL_82965G_HB), ID(PCI_DEVICE_ID_INTEL_82965GM_HB), ID(PCI_DEVICE_ID_INTEL_82965GME_HB), ID(PCI_DEVICE_ID_INTEL_G33_HB), ID(PCI_DEVICE_ID_INTEL_Q35_HB), ID(PCI_DEVICE_ID_INTEL_Q33_HB), ID(PCI_DEVICE_ID_INTEL_GM45_HB), ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB), ID(PCI_DEVICE_ID_INTEL_Q45_HB), ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), ID(PCI_DEVICE_ID_INTEL_B43_HB), ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), { } }; MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); static DEFINE_SIMPLE_DEV_PM_OPS(agp_intel_pm_ops, NULL, 
agp_intel_resume); static struct pci_driver agp_intel_pci_driver = { .name = "agpgart-intel", .id_table = agp_intel_pci_table, .probe = agp_intel_probe, .remove = agp_intel_remove, .driver.pm = &agp_intel_pm_ops, }; static int __init agp_intel_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_intel_pci_driver); } static void __exit agp_intel_cleanup(void) { pci_unregister_driver(&agp_intel_pci_driver); } module_init(agp_intel_init); module_exit(agp_intel_cleanup); MODULE_AUTHOR("Dave Jones, Various @Intel"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/intel-agp.c
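Driver selection in agp_intel_probe() is a linear search of intel_agp_chipsets[] keyed on pdev->device, falling back to an "unsupported Intel chipset" warning when nothing matches and intel_gmch_probe() did not claim the device. The sketch below is not kernel code; it reduces that step to a userspace table walk. The two numeric IDs are quoted from memory as stand-ins for the PCI_DEVICE_ID_* constants (pci_ids.h is authoritative), and 0x1234 is a deliberately bogus ID used to exercise the failure path.

/*
 * Standalone sketch (not kernel code): the chipset-table walk that
 * agp_intel_probe() uses to pick a driver for a host bridge device ID.
 */
#include <stdio.h>

struct chipset { unsigned int chip_id; const char *name; };

static const struct chipset chipsets[] = {
	{ 0x7190, "440BX" },	/* stands in for PCI_DEVICE_ID_INTEL_82443BX_0 */
	{ 0x2570, "865"   },	/* stands in for PCI_DEVICE_ID_INTEL_82865_HB  */
	{ 0, NULL }
};

static const char *pick_driver(unsigned int device)
{
	int i;

	for (i = 0; chipsets[i].name != NULL; i++)
		if (device == chipsets[i].chip_id)
			return chipsets[i].name;
	return NULL;	/* probe would warn and return -ENODEV */
}

int main(void)
{
	unsigned int ids[] = { 0x7190, 0x1234 };
	int i;

	for (i = 0; i < 2; i++) {
		const char *name = pick_driver(ids[i]);

		printf("device %04x -> %s\n", ids[i],
		       name ? name : "unsupported");
	}
	return 0;
}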
/* * Nvidia AGPGART routines. * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up * to work in 2.5 by Dave Jones. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/page-flags.h> #include <linux/mm.h> #include <linux/jiffies.h> #include "agp.h" /* NVIDIA registers */ #define NVIDIA_0_APSIZE 0x80 #define NVIDIA_1_WBC 0xf0 #define NVIDIA_2_GARTCTRL 0xd0 #define NVIDIA_2_APBASE 0xd8 #define NVIDIA_2_APLIMIT 0xdc #define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4) #define NVIDIA_3_APBASE 0x50 #define NVIDIA_3_APLIMIT 0x54 static struct _nvidia_private { struct pci_dev *dev_1; struct pci_dev *dev_2; struct pci_dev *dev_3; volatile u32 __iomem *aperture; int num_active_entries; off_t pg_offset; u32 wbc_mask; } nvidia_private; static int nvidia_fetch_size(void) { int i; u8 size_value; struct aper_size_info_8 *values; pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value); size_value &= 0x0f; values = A_SIZE_8(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (size_value == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } #define SYSCFG 0xC0010010 #define IORR_BASE0 0xC0010016 #define IORR_MASK0 0xC0010017 #define AMD_K7_NUM_IORR 2 static int nvidia_init_iorr(u32 base, u32 size) { u32 base_hi, base_lo; u32 mask_hi, mask_lo; u32 sys_hi, sys_lo; u32 iorr_addr, free_iorr_addr; /* Find the iorr that is already used for the base */ /* If not found, determine the uppermost available iorr */ free_iorr_addr = AMD_K7_NUM_IORR; for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) { rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); if ((base_lo & 0xfffff000) == (base & 0xfffff000)) break; if ((mask_lo & 0x00000800) == 0) free_iorr_addr = iorr_addr; } if (iorr_addr >= AMD_K7_NUM_IORR) { iorr_addr = free_iorr_addr; if (iorr_addr >= AMD_K7_NUM_IORR) return -EINVAL; } base_hi = 0x0; base_lo = (base & ~0xfff) | 0x18; mask_hi = 0xf; mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800; wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); rdmsr(SYSCFG, sys_lo, sys_hi); sys_lo |= 0x00100000; wrmsr(SYSCFG, sys_lo, sys_hi); return 0; } static int nvidia_configure(void) { int i, rc, num_dirs; u32 apbase, aplimit; phys_addr_t apbase_phys; struct aper_size_info_8 *current_size; u32 temp; current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, current_size->size_value); /* address to map to */ apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); agp_bridge->gart_bus_addr = apbase; aplimit = apbase + (current_size->size * 1024 * 1024) - 1; pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase); pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit); pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase); pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit); if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024))) return rc; /* directory size is 64k */ num_dirs = current_size->size / 64; nvidia_private.num_active_entries = current_size->num_entries; nvidia_private.pg_offset = 0; if (num_dirs == 0) { num_dirs = 1; nvidia_private.num_active_entries /= (64 / current_size->size); 
nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) & ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE; } /* attbase */ for (i = 0; i < 8; i++) { pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i), (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1); } /* gtlb control */ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11); /* gart control */ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100); /* map aperture */ apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR); nvidia_private.aperture = (volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE); if (!nvidia_private.aperture) return -ENOMEM; return 0; } static void nvidia_cleanup(void) { struct aper_size_info_8 *previous_size; u32 temp; /* gart control */ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100)); /* gtlb control */ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11)); /* unmap aperture */ iounmap((void __iomem *) nvidia_private.aperture); /* restore previous aperture size */ previous_size = A_SIZE_8(agp_bridge->previous_size); pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, previous_size->size_value); /* restore iorr for previous aperture size */ nvidia_init_iorr(agp_bridge->gart_bus_addr, previous_size->size * 1024 * 1024); } /* * Note we can't use the generic routines, even though they are 99% the same. * Aperture sizes <64M still requires a full 64k GART directory, but * only use the portion of the TLB entries that correspond to the apertures * alignment inside the surrounding 64M block. */ extern int agp_memory_reserved; static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, j; int mask_type; mask_type = agp_generic_type_to_mask_type(mem->bridge, type); if (mask_type != 0 || type != mem->type) return -EINVAL; if (mem->page_count == 0) return 0; if ((pg_start + mem->page_count) > (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE)) return -EINVAL; for (j = pg_start; j < (pg_start + mem->page_count); j++) { if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j))) return -EBUSY; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mask_type), agp_bridge->gatt_table+nvidia_private.pg_offset+j); } /* PCI Posting. 
*/ readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1); agp_bridge->driver->tlb_flush(mem); return 0; } static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { int i; int mask_type; mask_type = agp_generic_type_to_mask_type(mem->bridge, type); if (mask_type != 0 || type != mem->type) return -EINVAL; if (mem->page_count == 0) return 0; for (i = pg_start; i < (mem->page_count + pg_start); i++) writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i); agp_bridge->driver->tlb_flush(mem); return 0; } static void nvidia_tlbflush(struct agp_memory *mem) { unsigned long end; u32 wbc_reg; u32 __maybe_unused temp; int i; /* flush chipset */ if (nvidia_private.wbc_mask) { pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); wbc_reg |= nvidia_private.wbc_mask; pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg); end = jiffies + 3*HZ; do { pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); if (time_before_eq(end, jiffies)) { printk(KERN_ERR PFX "TLB flush took more than 3 seconds.\n"); } } while (wbc_reg & nvidia_private.wbc_mask); } /* flush TLB entries */ for (i = 0; i < 32 + 1; i++) temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); for (i = 0; i < 32 + 1; i++) temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); } static const struct aper_size_info_8 nvidia_generic_sizes[5] = { {512, 131072, 7, 0}, {256, 65536, 6, 8}, {128, 32768, 5, 12}, {64, 16384, 4, 14}, /* The 32M mode still requires a 64k gatt */ {32, 16384, 4, 15} }; static const struct gatt_mask nvidia_generic_masks[] = { { .mask = 1, .type = 0} }; static const struct agp_bridge_driver nvidia_driver = { .owner = THIS_MODULE, .aperture_sizes = nvidia_generic_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 5, .needs_scratch_page = true, .configure = nvidia_configure, .fetch_size = nvidia_fetch_size, .cleanup = nvidia_cleanup, .tlb_flush = nvidia_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = nvidia_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = nvidia_insert_memory, .remove_memory = nvidia_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static int agp_nvidia_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; nvidia_private.dev_1 = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), (unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); nvidia_private.dev_2 = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), (unsigned int)pdev->bus->number, PCI_DEVFN(0, 2)); nvidia_private.dev_3 = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), (unsigned int)pdev->bus->number, PCI_DEVFN(30, 0)); if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) { printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 " "chipset, but could not find the secondary devices.\n"); return -ENODEV; } cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; switch (pdev->device) { case PCI_DEVICE_ID_NVIDIA_NFORCE: printk(KERN_INFO PFX "Detected NVIDIA nForce 
chipset\n"); nvidia_private.wbc_mask = 0x00010000; break; case PCI_DEVICE_ID_NVIDIA_NFORCE2: printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n"); nvidia_private.wbc_mask = 0x80000000; break; default: printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n", pdev->device); return -ENODEV; } bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &nvidia_driver; bridge->dev_private_data = &nvidia_private; bridge->dev = pdev; bridge->capndx = cap_ptr; /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_nvidia_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static int agp_nvidia_resume(struct device *dev) { /* reconfigure AGP hardware again */ nvidia_configure(); return 0; } static const struct pci_device_id agp_nvidia_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NFORCE, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NFORCE2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); static DEFINE_SIMPLE_DEV_PM_OPS(agp_nvidia_pm_ops, NULL, agp_nvidia_resume); static struct pci_driver agp_nvidia_pci_driver = { .name = "agpgart-nvidia", .id_table = agp_nvidia_pci_table, .probe = agp_nvidia_probe, .remove = agp_nvidia_remove, .driver.pm = &agp_nvidia_pm_ops, }; static int __init agp_nvidia_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_nvidia_pci_driver); } static void __exit agp_nvidia_cleanup(void) { pci_unregister_driver(&agp_nvidia_pci_driver); pci_dev_put(nvidia_private.dev_1); pci_dev_put(nvidia_private.dev_2); pci_dev_put(nvidia_private.dev_3); } module_init(agp_nvidia_init); module_exit(agp_nvidia_cleanup); MODULE_LICENSE("GPL and additional rights"); MODULE_AUTHOR("NVIDIA Corporation");
linux-master
drivers/char/agp/nvidia-agp.c
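nvidia_configure() carries the one piece of non-obvious arithmetic in this driver: apertures smaller than 64MB still program a full 64KB GART directory, so the code shrinks num_active_entries and derives pg_offset from the aperture base's alignment inside its surrounding 64MB block. The standalone sketch below is not kernel code; it reproduces that calculation for the 32MB aperture entry from nvidia_generic_sizes[], and the base address is just an example value.

/*
 * Standalone sketch (not kernel code): the directory / offset arithmetic
 * from nvidia_configure() for an aperture smaller than 64MB.
 */
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL

int main(void)
{
	unsigned long size_mb = 32;		/* from nvidia_generic_sizes[] */
	unsigned long num_entries = 16384;	/* matching table entry */
	unsigned long apbase = 0xd2000000UL;	/* example aperture bus address */

	unsigned long num_dirs = size_mb / 64;
	unsigned long num_active_entries = num_entries;
	unsigned long pg_offset = 0;

	if (num_dirs == 0) {
		num_dirs = 1;
		/* a 32MB aperture only uses half of the 64MB directory */
		num_active_entries /= 64 / size_mb;
		pg_offset = (apbase & (64UL * 1024 * 1024 - 1) &
			     ~(size_mb * 1024 * 1024 - 1)) / PAGE_SIZE_SKETCH;
	}

	printf("num_dirs=%lu active_entries=%lu pg_offset=%lu\n",
	       num_dirs, num_active_entries, pg_offset);
	return 0;
}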
/* * Transmeta's Efficeon AGPGART driver. * * Based upon a diff by Linus around November '02. * * Ported to the 2.6 kernel by Carlos Puchol <[email protected]> * and H. Peter Anvin <[email protected]>. */ /* * NOTE-cpg-040217: * * - when compiled as a module, after loading the module, * it will refuse to unload, indicating it is in use, * when it is not. * - no s3 (suspend to ram) testing. * - tested on the efficeon integrated northbridge for tens * of iterations of starting x and glxgears. * - tested with radeon 9000 and radeon mobility m9 cards * - tested with c3/c4 enabled (with the mobility m9 card) */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/gfp.h> #include <linux/page-flags.h> #include <linux/mm.h> #include "agp.h" #include "intel-agp.h" /* * The real differences from the generic AGP code are * in the GART mappings - a two-level setup with the * first level being an on-chip 64-entry table. * * The page array is filled through the ATTPAGE register * (Aperture Translation Table Page Register) at 0xB8. Bits: * 31:20: physical page address * 11:9: Page Attribute Table Index (PATI) * must match the PAT index for the * mapped pages (the 2nd level page table pages * themselves should be just regular WB-cacheable, * so this is normally zero.) * 8: Present * 7:6: reserved, write as zero * 5:0: GATT directory index: which 1st-level entry * * The Efficeon AGP spec requires pages to be WB-cacheable * but to be explicitly CLFLUSH'd after any changes. */ #define EFFICEON_ATTPAGE 0xb8 #define EFFICEON_L1_SIZE 64 /* Number of PDE pages */ #define EFFICEON_PATI (0 << 9) #define EFFICEON_PRESENT (1 << 8) static struct _efficeon_private { unsigned long l1_table[EFFICEON_L1_SIZE]; } efficeon_private; static const struct gatt_mask efficeon_generic_masks[] = { {.mask = 0x00000001, .type = 0} }; /* This function does the same thing as mask_memory() for this chipset... */ static inline unsigned long efficeon_mask_memory(struct page *page) { unsigned long addr = page_to_phys(page); return addr | 0x00000001; } static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] = { {256, 65536, 0}, {128, 32768, 32}, {64, 16384, 48}, {32, 8192, 56} }; /* * Control interfaces are largely identical to * the legacy Intel 440BX..
*/ static int efficeon_fetch_size(void) { int i; u16 temp; struct aper_size_info_lvl2 *values; pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void efficeon_tlbflush(struct agp_memory * mem) { printk(KERN_DEBUG PFX "efficeon_tlbflush()\n"); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); } static void efficeon_cleanup(void) { u16 temp; struct aper_size_info_lvl2 *previous_size; printk(KERN_DEBUG PFX "efficeon_cleanup()\n"); previous_size = A_SIZE_LVL2(agp_bridge->previous_size); pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } static int efficeon_configure(void) { u16 temp2; struct aper_size_info_lvl2 *current_size; printk(KERN_DEBUG PFX "efficeon_configure()\n"); current_size = A_SIZE_LVL2(agp_bridge->current_size); /* aperture size */ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); /* paccfg/nbxcfg */ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11)); /* clear any possible error conditions */ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); return 0; } static int efficeon_free_gatt_table(struct agp_bridge_data *bridge) { int index, freed = 0; for (index = 0; index < EFFICEON_L1_SIZE; index++) { unsigned long page = efficeon_private.l1_table[index]; if (page) { efficeon_private.l1_table[index] = 0; free_page(page); freed++; } printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n", agp_bridge->dev, EFFICEON_ATTPAGE, index); pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, index); } printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed); return 0; } /* * Since we don't need contiguous memory we just try * to get the gatt table once */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #undef GET_GATT #define GET_GATT(addr) (efficeon_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) { int index; const int pati = EFFICEON_PATI; const int present = EFFICEON_PRESENT; const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; int num_entries, l1_pages; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries); /* There are 2^10 PTE pages per PDE page */ BUG_ON(num_entries & 0x3ff); l1_pages = num_entries >> 10; for (index = 0 ; index < l1_pages ; index++) { int offset; unsigned long page; unsigned long value; page = efficeon_private.l1_table[index]; BUG_ON(page); page = get_zeroed_page(GFP_KERNEL); if (!page) { efficeon_free_gatt_table(agp_bridge); return 
-ENOMEM; } for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) clflush((char *)page+offset); efficeon_private.l1_table[index] = page; value = virt_to_phys((unsigned long *)page) | pati | present | index; pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, value); } return 0; } static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, count = mem->page_count, num_entries; unsigned int *page, *last_page; const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; const unsigned long clflush_mask = ~(clflush_chunk-1); printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count); num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; if (type != 0 || mem->type != 0) return -EINVAL; if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } last_page = NULL; for (i = 0; i < count; i++) { int index = pg_start + i; unsigned long insert = efficeon_mask_memory(mem->pages[i]); page = (unsigned int *) efficeon_private.l1_table[index >> 10]; if (!page) continue; page += (index & 0x3ff); *page = insert; /* clflush is slow, so don't clflush until we have to */ if (last_page && (((unsigned long)page^(unsigned long)last_page) & clflush_mask)) clflush(last_page); last_page = page; } if ( last_page ) clflush(last_page); agp_bridge->driver->tlb_flush(mem); return 0; } static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, count = mem->page_count, num_entries; printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count); num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; if (type != 0 || mem->type != 0) return -EINVAL; for (i = 0; i < count; i++) { int index = pg_start + i; unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10]; if (!page) continue; page += (index & 0x3ff); *page = 0; } agp_bridge->driver->tlb_flush(mem); return 0; } static const struct agp_bridge_driver efficeon_driver = { .owner = THIS_MODULE, .aperture_sizes = efficeon_generic_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 4, .configure = efficeon_configure, .fetch_size = efficeon_fetch_size, .cleanup = efficeon_cleanup, .tlb_flush = efficeon_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = efficeon_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, // Efficeon-specific GATT table setup / populate / teardown .create_gatt_table = efficeon_create_gatt_table, .free_gatt_table = efficeon_free_gatt_table, .insert_memory = efficeon_insert_memory, .remove_memory = efficeon_remove_memory, .cant_use_aperture = false, // true might be faster? 
// Generic .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static int agp_efficeon_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; struct resource *r; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* Probe for Efficeon controller */ if (pdev->device != PCI_DEVICE_ID_EFFICEON) { printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n", pdev->device); return -ENODEV; } printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n"); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &efficeon_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; /* * If the device has not been properly setup, the following will catch * the problem and should stop the system from crashing. * 20030610 - [email protected] */ if (pci_enable_device(pdev)) { printk(KERN_ERR PFX "Unable to Enable PCI device\n"); agp_put_bridge(bridge); return -ENODEV; } /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. * 20030610 - [email protected] */ r = &pdev->resource[0]; if (!r->start && r->end) { if (pci_assign_resource(pdev, 0)) { printk(KERN_ERR PFX "could not assign resource 0\n"); agp_put_bridge(bridge); return -ENODEV; } } /* Fill in the mode register */ if (cap_ptr) { pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); } pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_efficeon_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static int agp_efficeon_resume(struct device *dev) { printk(KERN_DEBUG PFX "agp_efficeon_resume()\n"); return efficeon_configure(); } static const struct pci_device_id agp_efficeon_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_TRANSMETA, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; static DEFINE_SIMPLE_DEV_PM_OPS(agp_efficeon_pm_ops, NULL, agp_efficeon_resume); MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table); static struct pci_driver agp_efficeon_pci_driver = { .name = "agpgart-efficeon", .id_table = agp_efficeon_pci_table, .probe = agp_efficeon_probe, .remove = agp_efficeon_remove, .driver.pm = &agp_efficeon_pm_ops, }; static int __init agp_efficeon_init(void) { static int agp_initialised=0; if (agp_off) return -EINVAL; if (agp_initialised == 1) return 0; agp_initialised=1; return pci_register_driver(&agp_efficeon_pci_driver); } static void __exit agp_efficeon_cleanup(void) { pci_unregister_driver(&agp_efficeon_pci_driver); } module_init(agp_efficeon_init); module_exit(agp_efficeon_cleanup); MODULE_AUTHOR("Carlos Puchol <[email protected]>"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/efficeon-agp.c
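The insert path in efficeon_insert_memory() above resolves a GART page index against the two-level table with index >> 10 (first-level slot) and index & 0x3ff (entry within the 4 KB second-level page), then stores the physical address with the present bit set, as efficeon_mask_memory() does. The following is a minimal user-space sketch of that arithmetic only; the helper name and sample addresses are illustrative, not part of the driver.

/*
 * Minimal user-space sketch (not driver code) of the index math in
 * efficeon_insert_memory(): a second-level GATT page holds 1024
 * 32-bit entries, so a GART page index splits into an L1 slot
 * (index >> 10) and an offset inside that page (index & 0x3ff).
 * The entry itself is the physical address with bit 0 set.
 */
#include <stdio.h>
#include <stdint.h>

static void show_mapping(uint32_t gart_index, uint64_t phys)
{
	uint32_t l1_slot = gart_index >> 10;	/* which second-level page */
	uint32_t l2_off  = gart_index & 0x3ff;	/* entry within that page */
	uint32_t pte     = (uint32_t)phys | 0x00000001;	/* present bit */

	printf("index %4u -> l1_table[%u], entry[%u] = 0x%08x\n",
	       gart_index, l1_slot, l2_off, pte);
}

int main(void)
{
	show_mapping(0,    0x10000000ULL);
	show_mapping(1023, 0x103ff000ULL);
	show_mapping(1024, 0x20000000ULL);	/* rolls over to the next L1 slot */
	return 0;
}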
/* * AGPGART driver backend routines. * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2003 Dave Jones. * Copyright (C) 1999 Jeff Hartmann. * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * TODO: * - Allocate more than order 0 pages to avoid too much linear map splitting. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/miscdevice.h> #include <linux/pm.h> #include <linux/agp_backend.h> #include <linux/agpgart.h> #include <linux/vmalloc.h> #include <asm/io.h> #include "agp.h" /* Due to XFree86 brain-damage, we can't go to 1.0 until they * fix some real stupidity. It's only by chance we can bump * past 0.99 at all due to some boolean logic error. */ #define AGPGART_VERSION_MAJOR 0 #define AGPGART_VERSION_MINOR 103 static const struct agp_version agp_current_version = { .major = AGPGART_VERSION_MAJOR, .minor = AGPGART_VERSION_MINOR, }; struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) = &agp_generic_find_bridge; struct agp_bridge_data *agp_bridge; LIST_HEAD(agp_bridges); EXPORT_SYMBOL(agp_bridge); EXPORT_SYMBOL(agp_bridges); EXPORT_SYMBOL(agp_find_bridge); /** * agp_backend_acquire - attempt to acquire an agp backend. * @pdev: the PCI device * */ struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev) { struct agp_bridge_data *bridge; bridge = agp_find_bridge(pdev); if (!bridge) return NULL; if (atomic_read(&bridge->agp_in_use)) return NULL; atomic_inc(&bridge->agp_in_use); return bridge; } EXPORT_SYMBOL(agp_backend_acquire); /** * agp_backend_release - release the lock on the agp backend. * @bridge: the AGP backend to release * * The caller must ensure that the graphics aperture translation table * is ready for use by another entity. * * (Ensure that all memory it bound is unbound.)
*/ void agp_backend_release(struct agp_bridge_data *bridge) { if (bridge) atomic_dec(&bridge->agp_in_use); } EXPORT_SYMBOL(agp_backend_release); static const struct { int mem, agp; } maxes_table[] = { {0, 0}, {32, 4}, {64, 28}, {128, 96}, {256, 204}, {512, 440}, {1024, 942}, {2048, 1920}, {4096, 3932} }; static int agp_find_max(void) { long memory, index, result; #if PAGE_SHIFT < 20 memory = totalram_pages() >> (20 - PAGE_SHIFT); #else memory = totalram_pages() << (PAGE_SHIFT - 20); #endif index = 1; while ((memory > maxes_table[index].mem) && (index < 8)) index++; result = maxes_table[index - 1].agp + ( (memory - maxes_table[index - 1].mem) * (maxes_table[index].agp - maxes_table[index - 1].agp)) / (maxes_table[index].mem - maxes_table[index - 1].mem); result = result << (20 - PAGE_SHIFT); return result; } static int agp_backend_initialize(struct agp_bridge_data *bridge) { int size_value, rc, got_gatt=0, got_keylist=0; bridge->max_memory_agp = agp_find_max(); bridge->version = &agp_current_version; if (bridge->driver->needs_scratch_page) { struct page *page = bridge->driver->agp_alloc_page(bridge); if (!page) { dev_err(&bridge->dev->dev, "can't get memory for scratch page\n"); return -ENOMEM; } bridge->scratch_page_page = page; bridge->scratch_page_dma = page_to_phys(page); bridge->scratch_page = bridge->driver->mask_memory(bridge, bridge->scratch_page_dma, 0); } size_value = bridge->driver->fetch_size(); if (size_value == 0) { dev_err(&bridge->dev->dev, "can't determine aperture size\n"); rc = -EINVAL; goto err_out; } if (bridge->driver->create_gatt_table(bridge)) { dev_err(&bridge->dev->dev, "can't get memory for graphics translation table\n"); rc = -ENOMEM; goto err_out; } got_gatt = 1; bridge->key_list = vzalloc(PAGE_SIZE * 4); if (bridge->key_list == NULL) { dev_err(&bridge->dev->dev, "can't allocate memory for key lists\n"); rc = -ENOMEM; goto err_out; } got_keylist = 1; /* FIXME vmalloc'd memory not guaranteed contiguous */ if (bridge->driver->configure()) { dev_err(&bridge->dev->dev, "error configuring host chipset\n"); rc = -EINVAL; goto err_out; } INIT_LIST_HEAD(&bridge->mapped_list); spin_lock_init(&bridge->mapped_lock); return 0; err_out: if (bridge->driver->needs_scratch_page) { struct page *page = bridge->scratch_page_page; bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } if (got_gatt) bridge->driver->free_gatt_table(bridge); if (got_keylist) { vfree(bridge->key_list); bridge->key_list = NULL; } return rc; } /* cannot be __exit b/c as it could be called from __init code */ static void agp_backend_cleanup(struct agp_bridge_data *bridge) { if (bridge->driver->cleanup) bridge->driver->cleanup(); if (bridge->driver->free_gatt_table) bridge->driver->free_gatt_table(bridge); vfree(bridge->key_list); bridge->key_list = NULL; if (bridge->driver->agp_destroy_page && bridge->driver->needs_scratch_page) { struct page *page = bridge->scratch_page_page; bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } } /* When we remove the global variable agp_bridge from all drivers * then agp_alloc_bridge and agp_generic_find_bridge need to be updated */ struct agp_bridge_data *agp_alloc_bridge(void) { struct agp_bridge_data *bridge; bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) return NULL; atomic_set(&bridge->agp_in_use, 0); atomic_set(&bridge->current_memory_agp, 0); if (list_empty(&agp_bridges)) agp_bridge = bridge; return 
bridge; } EXPORT_SYMBOL(agp_alloc_bridge); void agp_put_bridge(struct agp_bridge_data *bridge) { kfree(bridge); if (list_empty(&agp_bridges)) agp_bridge = NULL; } EXPORT_SYMBOL(agp_put_bridge); int agp_add_bridge(struct agp_bridge_data *bridge) { int error; if (agp_off) { error = -ENODEV; goto err_put_bridge; } if (!bridge->dev) { printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); error = -EINVAL; goto err_put_bridge; } /* Grab reference on the chipset driver. */ if (!try_module_get(bridge->driver->owner)) { dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); error = -EINVAL; goto err_put_bridge; } error = agp_backend_initialize(bridge); if (error) { dev_info(&bridge->dev->dev, "agp_backend_initialize() failed\n"); goto err_out; } if (list_empty(&agp_bridges)) { error = agp_frontend_initialize(); if (error) { dev_info(&bridge->dev->dev, "agp_frontend_initialize() failed\n"); goto frontend_err; } dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", bridge->driver->fetch_size(), bridge->gart_bus_addr); } list_add(&bridge->list, &agp_bridges); return 0; frontend_err: agp_backend_cleanup(bridge); err_out: module_put(bridge->driver->owner); err_put_bridge: agp_put_bridge(bridge); return error; } EXPORT_SYMBOL_GPL(agp_add_bridge); void agp_remove_bridge(struct agp_bridge_data *bridge) { agp_backend_cleanup(bridge); list_del(&bridge->list); if (list_empty(&agp_bridges)) agp_frontend_cleanup(); module_put(bridge->driver->owner); } EXPORT_SYMBOL_GPL(agp_remove_bridge); int agp_off; int agp_try_unsupported_boot; EXPORT_SYMBOL(agp_off); EXPORT_SYMBOL(agp_try_unsupported_boot); static int __init agp_init(void) { if (!agp_off) printk(KERN_INFO "Linux agpgart interface v%d.%d\n", AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); return 0; } static void __exit agp_exit(void) { } #ifndef MODULE static __init int agp_setup(char *s) { if (!strcmp(s,"off")) agp_off = 1; if (!strcmp(s,"try_unsupported")) agp_try_unsupported_boot = 1; return 1; } __setup("agp=", agp_setup); #endif MODULE_AUTHOR("Dave Jones, Jeff Hartmann"); MODULE_DESCRIPTION("AGP GART driver"); MODULE_LICENSE("GPL and additional rights"); MODULE_ALIAS_MISCDEV(AGPGART_MINOR); module_init(agp_init); module_exit(agp_exit);
linux-master
drivers/char/agp/backend.c
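agp_find_max() in the file above converts totalram_pages() to megabytes, walks maxes_table to find the bracketing rows, linearly interpolates the suggested AGP maximum between them, and finally converts the result back to pages. Below is a minimal user-space sketch of that interpolation under the assumption that everything stays in megabytes; the agp_max_mb() helper name and the sample sizes are illustrative only.

/*
 * User-space sketch of the agp_find_max() interpolation in backend.c:
 * given system memory in MB, linearly interpolate between rows of
 * maxes_table to get the suggested AGP maximum (also in MB here).
 */
#include <stdio.h>

static const struct { long mem, agp; } maxes_table[9] = {
	{0, 0}, {32, 4}, {64, 28}, {128, 96}, {256, 204},
	{512, 440}, {1024, 942}, {2048, 1920}, {4096, 3932}
};

static long agp_max_mb(long memory_mb)
{
	int index = 1;

	/* find the first table row whose mem column is >= memory_mb */
	while (memory_mb > maxes_table[index].mem && index < 8)
		index++;

	/* interpolate between the previous row and this one */
	return maxes_table[index - 1].agp +
	       (memory_mb - maxes_table[index - 1].mem) *
	       (maxes_table[index].agp - maxes_table[index - 1].agp) /
	       (maxes_table[index].mem - maxes_table[index - 1].mem);
}

int main(void)
{
	long mb;

	for (mb = 256; mb <= 4096; mb *= 2)
		printf("%ld MB RAM -> %ld MB AGP max\n", mb, agp_max_mb(mb));
	return 0;
}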
/* * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of * the "Intel 460GTX Chipset Software Developer's Manual": * http://www.intel.com/design/archives/itanium/downloads/248704.htm */ /* * 460GX support by Chris Ahna <[email protected]> * Clean up & simplification by David Mosberger-Tang <[email protected]> */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/agp_backend.h> #include <linux/log2.h> #include "agp.h" #define INTEL_I460_BAPBASE 0x98 #define INTEL_I460_GXBCTL 0xa0 #define INTEL_I460_AGPSIZ 0xa2 #define INTEL_I460_ATTBASE 0xfe200000 #define INTEL_I460_GATT_VALID (1UL << 24) #define INTEL_I460_GATT_COHERENT (1UL << 25) /* * The i460 can operate with large (4MB) pages, but there is no sane way to support this * within the current kernel/DRM environment, so we disable the relevant code for now. * See also comments in ia64_alloc_page()... */ #define I460_LARGE_IO_PAGES 0 #if I460_LARGE_IO_PAGES # define I460_IO_PAGE_SHIFT i460.io_page_shift #else # define I460_IO_PAGE_SHIFT 12 #endif #define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT) #define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT)) #define I460_SRAM_IO_DISABLE (1 << 4) #define I460_BAPBASE_ENABLE (1 << 3) #define I460_AGPSIZ_MASK 0x7 #define I460_4M_PS (1 << 1) /* Control bits for Out-Of-GART coherency and Burst Write Combining */ #define I460_GXBCTL_OOG (1UL << 0) #define I460_GXBCTL_BWC (1UL << 2) /* * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the * gatt_table and gatt_table_real pointers a "void *"... */ #define RD_GATT(index) readl((u32 *) i460.gatt + (index)) #define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index)) /* * The 460 spec says we have to read the last location written to make sure that all * writes have taken effect */ #define WR_FLUSH_GATT(index) RD_GATT(index) static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type); static struct { void *gatt; /* ioremap'd GATT area */ /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */ u8 io_page_shift; /* BIOS configures chipset to one of 2 possible apbase values: */ u8 dynamic_apbase; /* structure for tracking partial use of 4MB GART pages: */ struct lp_desc { unsigned long *alloced_map; /* bitmap of kernel-pages in use */ int refcount; /* number of kernel pages using the large page */ u64 paddr; /* physical address of large page */ struct page *page; /* page pointer */ } *lp_desc; } i460; static const struct aper_size_info_8 i460_sizes[3] = { /* * The 32GB aperture is only available with a 4M GART page size. Due to the * dynamic GART page size, we can't figure out page_order or num_entries until * runtime. */ {32768, 0, 0, 4}, {1024, 0, 0, 2}, {256, 0, 0, 1} }; static struct gatt_mask i460_masks[] = { { .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, .type = 0 } }; static int i460_fetch_size (void) { int i; u8 temp; struct aper_size_info_8 *values; /* Determine the GART page size */ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp); i460.io_page_shift = (temp & I460_4M_PS) ? 
22 : 12; pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift); if (i460.io_page_shift != I460_IO_PAGE_SHIFT) { printk(KERN_ERR PFX "I/O (GART) page-size %luKB doesn't match expected " "size %luKB\n", 1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT)); return 0; } values = A_SIZE_8(agp_bridge->driver->aperture_sizes); pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); /* Exit now if the IO drivers for the GART SRAMS are turned off */ if (temp & I460_SRAM_IO_DISABLE) { printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); printk(KERN_ERR PFX "AGPGART operation not possible\n"); return 0; } /* Make sure we don't try to create an 2 ^ 23 entry GATT */ if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); return 0; } /* Determine the proper APBASE register */ if (temp & I460_BAPBASE_ENABLE) i460.dynamic_apbase = INTEL_I460_BAPBASE; else i460.dynamic_apbase = AGP_APBASE; for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { /* * Dynamically calculate the proper num_entries and page_order values for * the define aperture sizes. Take care not to shift off the end of * values[i].size. */ values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12); values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); } for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { /* Neglect control bits when matching up size_value */ if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } /* There isn't anything to do here since 460 has no GART TLB. */ static void i460_tlb_flush (struct agp_memory *mem) { return; } /* * This utility function is needed to prevent corruption of the control bits * which are stored along with the aperture size in 460's AGPSIZ register */ static void i460_write_agpsiz (u8 size_value) { u8 temp; pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, ((temp & ~I460_AGPSIZ_MASK) | size_value)); } static void i460_cleanup (void) { struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge->previous_size); i460_write_agpsiz(previous_size->size_value); if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) kfree(i460.lp_desc); } static int i460_configure (void) { union { u32 small[2]; u64 large; } temp; size_t size; u8 scratch; struct aper_size_info_8 *current_size; temp.large = 0; current_size = A_SIZE_8(agp_bridge->current_size); i460_write_agpsiz(current_size->size_value); /* * Do the necessary rigmarole to read all eight bytes of APBASE. * This has to be done since the AGP aperture can be above 4GB on * 460 based systems. */ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0])); pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1])); /* Clear BAR control bits */ agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1); pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch); pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); /* * Initialize partial allocation trackers if a GART page is bigger than a kernel * page. 
*/ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { size = current_size->num_entries * sizeof(i460.lp_desc[0]); i460.lp_desc = kzalloc(size, GFP_KERNEL); if (!i460.lp_desc) return -ENOMEM; } return 0; } static int i460_create_gatt_table (struct agp_bridge_data *bridge) { int page_order, num_entries, i; void *temp; /* * Load up the fixed address of the GART SRAMS which hold our GATT table. */ temp = agp_bridge->current_size; page_order = A_SIZE_8(temp)->page_order; num_entries = A_SIZE_8(temp)->num_entries; i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order); if (!i460.gatt) { printk(KERN_ERR PFX "ioremap failed\n"); return -ENOMEM; } /* These are no good, they should be removed from the agp_bridge structure... */ agp_bridge->gatt_table_real = NULL; agp_bridge->gatt_table = NULL; agp_bridge->gatt_bus_addr = 0; for (i = 0; i < num_entries; ++i) WR_GATT(i, 0); WR_FLUSH_GATT(i - 1); return 0; } static int i460_free_gatt_table (struct agp_bridge_data *bridge) { int num_entries, i; void *temp; temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; for (i = 0; i < num_entries; ++i) WR_GATT(i, 0); WR_FLUSH_GATT(num_entries - 1); iounmap(i460.gatt); return 0; } /* * The following functions are called when the I/O (GART) page size is smaller than * PAGE_SIZE. */ static int i460_insert_memory_small_io_page (struct agp_memory *mem, off_t pg_start, int type) { unsigned long paddr, io_pg_start, io_page_size; int i, j, k, num_entries; void *temp; pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", mem, pg_start, type, page_to_phys(mem->pages[0])); if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) return -EINVAL; io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); return -EINVAL; } j = io_pg_start; while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) { pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n", j, RD_GATT(j)); return -EBUSY; } j++; } io_page_size = 1UL << I460_IO_PAGE_SHIFT; for (i = 0, j = io_pg_start; i < mem->page_count; i++) { paddr = page_to_phys(mem->pages[i]); for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size) WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type)); } WR_FLUSH_GATT(j - 1); return 0; } static int i460_remove_memory_small_io_page(struct agp_memory *mem, off_t pg_start, int type) { int i; pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n", mem, pg_start, type); pg_start = I460_IOPAGES_PER_KPAGE * pg_start; for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) WR_GATT(i, 0); WR_FLUSH_GATT(i - 1); return 0; } #if I460_LARGE_IO_PAGES /* * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE. * * This situation is interesting since AGP memory allocations that are smaller than a * single GART page are possible. The i460.lp_desc array tracks partial allocation of the * large GART pages to work around this issue. * * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
*/ static int i460_alloc_large_page (struct lp_desc *lp) { unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; size_t map_size; lp->page = alloc_pages(GFP_KERNEL, order); if (!lp->page) { printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); return -ENOMEM; } map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; lp->alloced_map = kzalloc(map_size, GFP_KERNEL); if (!lp->alloced_map) { __free_pages(lp->page, order); printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); return -ENOMEM; } lp->paddr = page_to_phys(lp->page); lp->refcount = 0; atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); return 0; } static void i460_free_large_page (struct lp_desc *lp) { kfree(lp->alloced_map); lp->alloced_map = NULL; __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT); atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); } static int i460_insert_memory_large_io_page (struct agp_memory *mem, off_t pg_start, int type) { int i, start_offset, end_offset, idx, pg, num_entries; struct lp_desc *start, *end, *lp; void *temp; if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) return -EINVAL; temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; start_offset = pg_start % I460_KPAGES_PER_IOPAGE; end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; if (end > i460.lp_desc + num_entries) { printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); return -EINVAL; } /* Check if the requested region of the aperture is free */ for (lp = start; lp <= end; ++lp) { if (!lp->alloced_map) continue; /* OK, the entire large page is available... */ for (idx = ((lp == start) ? start_offset : 0); idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++) { if (test_bit(idx, lp->alloced_map)) return -EBUSY; } } for (lp = start, i = 0; lp <= end; ++lp) { if (!lp->alloced_map) { /* Allocate new GART pages... */ if (i460_alloc_large_page(lp) < 0) return -ENOMEM; pg = lp - i460.lp_desc; WR_GATT(pg, i460_mask_memory(agp_bridge, lp->paddr, 0)); WR_FLUSH_GATT(pg); } for (idx = ((lp == start) ? start_offset : 0); idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++, i++) { mem->pages[i] = lp->page; __set_bit(idx, lp->alloced_map); ++lp->refcount; } } return 0; } static int i460_remove_memory_large_io_page (struct agp_memory *mem, off_t pg_start, int type) { int i, pg, start_offset, end_offset, idx, num_entries; struct lp_desc *start, *end, *lp; void *temp; temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; start_offset = pg_start % I460_KPAGES_PER_IOPAGE; end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; for (i = 0, lp = start; lp <= end; ++lp) { for (idx = ((lp == start) ? start_offset : 0); idx < ((lp == end) ? 
(end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++, i++) { mem->pages[i] = NULL; __clear_bit(idx, lp->alloced_map); --lp->refcount; } /* Free GART pages if they are unused */ if (lp->refcount == 0) { pg = lp - i460.lp_desc; WR_GATT(pg, 0); WR_FLUSH_GATT(pg); i460_free_large_page(lp); } } return 0; } /* Wrapper routines to call the approriate {small_io_page,large_io_page} function */ static int i460_insert_memory (struct agp_memory *mem, off_t pg_start, int type) { if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) return i460_insert_memory_small_io_page(mem, pg_start, type); else return i460_insert_memory_large_io_page(mem, pg_start, type); } static int i460_remove_memory (struct agp_memory *mem, off_t pg_start, int type) { if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) return i460_remove_memory_small_io_page(mem, pg_start, type); else return i460_remove_memory_large_io_page(mem, pg_start, type); } /* * If the I/O (GART) page size is bigger than the kernel page size, we don't want to * allocate memory until we know where it is to be bound in the aperture (a * multi-kernel-page alloc might fit inside of an already allocated GART page). * * Let's just hope nobody counts on the allocated AGP memory being there before bind time * (I don't think current drivers do)... */ static struct page *i460_alloc_page (struct agp_bridge_data *bridge) { void *page; if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { page = agp_generic_alloc_page(agp_bridge); } else /* Returning NULL would cause problems */ /* AK: really dubious code. */ page = (void *)~0UL; return page; } static void i460_destroy_page (struct page *page, int flags) { if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { agp_generic_destroy_page(page, flags); } } #endif /* I460_LARGE_IO_PAGES */ static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type) { /* Make sure the returned address is a valid GATT entry */ return bridge->driver->masks[0].mask | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12); } const struct agp_bridge_driver intel_i460_driver = { .owner = THIS_MODULE, .aperture_sizes = i460_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 3, .configure = i460_configure, .fetch_size = i460_fetch_size, .cleanup = i460_cleanup, .tlb_flush = i460_tlb_flush, .mask_memory = i460_mask_memory, .masks = i460_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = i460_create_gatt_table, .free_gatt_table = i460_free_gatt_table, #if I460_LARGE_IO_PAGES .insert_memory = i460_insert_memory, .remove_memory = i460_remove_memory, .agp_alloc_page = i460_alloc_page, .agp_destroy_page = i460_destroy_page, #else .insert_memory = i460_insert_memory_small_io_page, .remove_memory = i460_remove_memory_small_io_page, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, #endif .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_type_to_mask_type = agp_generic_type_to_mask_type, .cant_use_aperture = true, }; static int agp_intel_i460_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &intel_i460_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; printk(KERN_INFO PFX "Detected Intel 460GX chipset\n"); pci_set_drvdata(pdev, bridge); 
return agp_add_bridge(bridge); } static void agp_intel_i460_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static struct pci_device_id agp_intel_i460_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_84460GX, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table); static struct pci_driver agp_intel_i460_pci_driver = { .name = "agpgart-intel-i460", .id_table = agp_intel_i460_pci_table, .probe = agp_intel_i460_probe, .remove = agp_intel_i460_remove, }; static int __init agp_intel_i460_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_intel_i460_pci_driver); } static void __exit agp_intel_i460_cleanup(void) { pci_unregister_driver(&agp_intel_i460_pci_driver); } module_init(agp_intel_i460_init); module_exit(agp_intel_i460_cleanup); MODULE_AUTHOR("Chris Ahna <[email protected]>"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/i460-agp.c
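i460_mask_memory() in the file above packs a physical address into a 32-bit GATT entry by truncating the address to the configured GART page size, keeping bits 31:12 shifted down by 12, and OR-ing in the VALID and COHERENT control bits from i460_masks[]. A small user-space sketch of that encoding follows; the i460_gatt_entry() name and the sample addresses are illustrative, not part of the driver.

/*
 * User-space sketch of how i460_mask_memory() builds a 32-bit GATT
 * entry: mask the address down to a GART-page boundary, keep bits
 * 31:12 shifted right by 12, and OR in the VALID/COHERENT bits.
 */
#include <stdio.h>
#include <stdint.h>

#define GATT_VALID	(1UL << 24)
#define GATT_COHERENT	(1UL << 25)

static uint32_t i460_gatt_entry(uint64_t addr, unsigned int io_page_shift)
{
	uint64_t page_mask = ~((1ULL << io_page_shift) - 1);

	return (uint32_t)(GATT_VALID | GATT_COHERENT |
			  (((addr & page_mask) & 0xfffff000ULL) >> 12));
}

int main(void)
{
	/* 4 KB GART pages (io_page_shift = 12), the small-page path */
	printf("0x%08x\n", i460_gatt_entry(0x12345000ULL, 12));
	/* 4 MB GART pages (io_page_shift = 22), the large-page configuration */
	printf("0x%08x\n", i460_gatt_entry(0x12400000ULL, 22));
	return 0;
}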
/* * Intel GTT (Graphics Translation Table) routines * * Caveat: This driver implements the linux agp interface, but this is far from * an agp driver! GTT support ended up here for purely historical reasons: The * old userspace intel graphics drivers needed an interface to map memory into * the GTT. And the drm provides a default interface for graphic devices sitting * on an agp port. So it made sense to fake the GTT support as an agp port to * avoid having to create a new api. * * With gem this does not make much sense anymore, just needlessly complicates * the code. But as long as the old graphics stack is still supported, it's stuck * here. * * /fairy-tale-mode off */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> #include <linux/iommu.h> #include <linux/delay.h> #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" #include <drm/intel-gtt.h> #include <asm/set_memory.h> /* * If we have Intel graphics, we're not going to have anything other than * an Intel IOMMU. So make the correct use of the PCI DMA API contingent * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). * Only newer chipsets need to bother with this, of course. */ #ifdef CONFIG_INTEL_IOMMU #define USE_PCI_DMA_API 1 #else #define USE_PCI_DMA_API 0 #endif struct intel_gtt_driver { unsigned int gen : 8; unsigned int is_g33 : 1; unsigned int is_pineview : 1; unsigned int is_ironlake : 1; unsigned int has_pgtbl_enable : 1; unsigned int dma_mask_size : 8; /* Chipset specific GTT setup */ int (*setup)(void); /* This should undo anything done in ->setup() save the unmapping * of the mmio register file, that's done in the generic code. */ void (*cleanup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! */ bool (*check_flags)(unsigned int flags); void (*chipset_flush)(void); }; static struct _intel_private { const struct intel_gtt_driver *driver; struct pci_dev *pcidev; /* device one */ struct pci_dev *bridge_dev; u8 __iomem *registers; phys_addr_t gtt_phys_addr; u32 PGETBL_save; u32 __iomem *gtt; /* I915G */ bool clear_fake_agp; /* on first access via agp, fill with scratch */ int num_dcache_entries; void __iomem *i9xx_flush_page; char *i81x_gtt_table; struct resource ifp_resource; int resource_valid; struct page *scratch_page; phys_addr_t scratch_page_dma; int refcount; /* Whether i915 needs to use the dmar apis or not. */ unsigned int needs_dmar : 1; phys_addr_t gma_bus_addr; /* Size of memory reserved for graphics by the BIOS */ resource_size_t stolen_size; /* Total number of gtt entries. */ unsigned int gtt_total_entries; /* Part of the gtt that is mappable by the cpu, for those chips where * this is not the full gtt.
*/ unsigned int gtt_mappable_entries; } intel_private; #define INTEL_GTT_GEN intel_private.driver->gen #define IS_G33 intel_private.driver->is_g33 #define IS_PINEVIEW intel_private.driver->is_pineview #define IS_IRONLAKE intel_private.driver->is_ironlake #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable #if IS_ENABLED(CONFIG_AGP_INTEL) static int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, struct sg_table *st) { struct scatterlist *sg; int i; DBG("try mapping %lu pages\n", (unsigned long)num_entries); if (sg_alloc_table(st, num_entries, GFP_KERNEL)) goto err; for_each_sg(st->sgl, sg, num_entries, i) sg_set_page(sg, pages[i], PAGE_SIZE, 0); if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents, DMA_BIDIRECTIONAL)) goto err; return 0; err: sg_free_table(st); return -ENOMEM; } static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) { struct sg_table st; DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg, DMA_BIDIRECTIONAL); st.sgl = sg_list; st.orig_nents = st.nents = num_sg; sg_free_table(&st); } static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { return; } /* Exists to support ARGB cursors */ static struct page *i8xx_alloc_pages(void) { struct page *page; page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); if (page == NULL) return NULL; if (set_pages_uc(page, 4) < 0) { set_pages_wb(page, 4); __free_pages(page, 2); return NULL; } atomic_inc(&agp_bridge->current_memory_agp); return page; } static void i8xx_destroy_pages(struct page *page) { if (page == NULL) return; set_pages_wb(page, 4); __free_pages(page, 2); atomic_dec(&agp_bridge->current_memory_agp); } #endif #define I810_GTT_ORDER 4 static int i810_setup(void) { phys_addr_t reg_addr; char *gtt_table; /* i81x does not preallocate the gtt. It's always 64kb in size. */ gtt_table = alloc_gatt_pages(I810_GTT_ORDER); if (gtt_table == NULL) return -ENOMEM; intel_private.i81x_gtt_table = gtt_table; reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); intel_private.registers = ioremap(reg_addr, KB(64)); if (!intel_private.registers) return -ENOMEM; writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; if ((readl(intel_private.registers+I810_DRAM_CTL) & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { dev_info(&intel_private.pcidev->dev, "detected 4MB dedicated video ram\n"); intel_private.num_dcache_entries = 1024; } return 0; } static void i810_cleanup(void) { writel(0, intel_private.registers+I810_PGETBL_CTL); free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); } #if IS_ENABLED(CONFIG_AGP_INTEL) static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, int type) { int i; if ((pg_start + mem->page_count) > intel_private.num_dcache_entries) return -EINVAL; if (!mem->is_flushed) global_cache_flush(); for (i = pg_start; i < (pg_start + mem->page_count); i++) { dma_addr_t addr = i << PAGE_SHIFT; intel_private.driver->write_entry(addr, i, type); } wmb(); return 0; } /* * The i810/i830 requires a physical address to program its mouse * pointer into hardware. * However the Xserver still writes to it through the agp aperture. 
*/ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) { struct agp_memory *new; struct page *page; switch (pg_count) { case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); break; case 4: /* kludge to get 4 physical pages for ARGB cursor */ page = i8xx_alloc_pages(); break; default: return NULL; } if (page == NULL) return NULL; new = agp_create_memory(pg_count); if (new == NULL) return NULL; new->pages[0] = page; if (pg_count == 4) { /* kludge to get 4 physical pages for ARGB cursor */ new->pages[1] = new->pages[0] + 1; new->pages[2] = new->pages[1] + 1; new->pages[3] = new->pages[2] + 1; } new->page_count = pg_count; new->num_scratch_pages = pg_count; new->type = AGP_PHYS_MEMORY; new->physical = page_to_phys(new->pages[0]); return new; } static void intel_i810_free_by_type(struct agp_memory *curr) { agp_free_key(curr->key); if (curr->type == AGP_PHYS_MEMORY) { if (curr->page_count == 4) i8xx_destroy_pages(curr->pages[0]); else { agp_bridge->driver->agp_destroy_page(curr->pages[0], AGP_PAGE_DESTROY_UNMAP); agp_bridge->driver->agp_destroy_page(curr->pages[0], AGP_PAGE_DESTROY_FREE); } agp_free_page_array(curr); } kfree(curr); } #endif static int intel_gtt_setup_scratch_page(void) { struct page *page; dma_addr_t dma_addr; page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (page == NULL) return -ENOMEM; set_pages_uc(page, 1); if (intel_private.needs_dmar) { dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) { __free_page(page); return -EINVAL; } intel_private.scratch_page_dma = dma_addr; } else intel_private.scratch_page_dma = page_to_phys(page); intel_private.scratch_page = page; return 0; } static void i810_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags = I810_PTE_VALID; switch (flags) { case AGP_DCACHE_MEMORY: pte_flags |= I810_PTE_LOCAL; break; case AGP_USER_CACHED_MEMORY: pte_flags |= I830_PTE_SYSTEM_CACHED; break; } writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } static resource_size_t intel_gtt_stolen_size(void) { u16 gmch_ctrl; u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; resource_size_t stolen_size = 0; if (INTEL_GTT_GEN == 1) return 0; /* no stolen mem on i81x */ pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { switch (gmch_ctrl & I830_GMCH_GMS_MASK) { case I830_GMCH_GMS_STOLEN_512: stolen_size = KB(512); break; case I830_GMCH_GMS_STOLEN_1024: stolen_size = MB(1); break; case I830_GMCH_GMS_STOLEN_8192: stolen_size = MB(8); break; case I830_GMCH_GMS_LOCAL: rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); stolen_size = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]); local = 1; break; default: stolen_size = 0; break; } } else { switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: stolen_size = MB(1); break; case I855_GMCH_GMS_STOLEN_4M: stolen_size = MB(4); break; case I855_GMCH_GMS_STOLEN_8M: stolen_size = MB(8); break; case I855_GMCH_GMS_STOLEN_16M: stolen_size = MB(16); break; case I855_GMCH_GMS_STOLEN_32M: stolen_size = MB(32); break; case I915_GMCH_GMS_STOLEN_48M: stolen_size = MB(48); break; case I915_GMCH_GMS_STOLEN_64M: stolen_size = MB(64); break; case G33_GMCH_GMS_STOLEN_128M: stolen_size = MB(128); break; case G33_GMCH_GMS_STOLEN_256M: stolen_size = 
MB(256); break; case INTEL_GMCH_GMS_STOLEN_96M: stolen_size = MB(96); break; case INTEL_GMCH_GMS_STOLEN_160M: stolen_size = MB(160); break; case INTEL_GMCH_GMS_STOLEN_224M: stolen_size = MB(224); break; case INTEL_GMCH_GMS_STOLEN_352M: stolen_size = MB(352); break; default: stolen_size = 0; break; } } if (stolen_size > 0) { dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n", (u64)stolen_size / KB(1), local ? "local" : "stolen"); } else { dev_info(&intel_private.bridge_dev->dev, "no pre-allocated video memory detected\n"); stolen_size = 0; } return stolen_size; } static void i965_adjust_pgetbl_size(unsigned int size_flag) { u32 pgetbl_ctl, pgetbl_ctl2; /* ensure that ppgtt is disabled */ pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); /* write the new ggtt size */ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; pgetbl_ctl |= size_flag; writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); } static unsigned int i965_gtt_total_entries(void) { int size; u32 pgetbl_ctl; u16 gmch_ctl; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctl); if (INTEL_GTT_GEN == 5) { switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { case G4x_GMCH_SIZE_1M: case G4x_GMCH_SIZE_VT_1M: i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); break; case G4x_GMCH_SIZE_VT_1_5M: i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); break; case G4x_GMCH_SIZE_2M: case G4x_GMCH_SIZE_VT_2M: i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); break; } } pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { case I965_PGETBL_SIZE_128KB: size = KB(128); break; case I965_PGETBL_SIZE_256KB: size = KB(256); break; case I965_PGETBL_SIZE_512KB: size = KB(512); break; /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ case I965_PGETBL_SIZE_1MB: size = KB(1024); break; case I965_PGETBL_SIZE_2MB: size = KB(2048); break; case I965_PGETBL_SIZE_1_5MB: size = KB(1024 + 512); break; default: dev_info(&intel_private.pcidev->dev, "unknown page table size, assuming 512KB\n"); size = KB(512); } return size/4; } static unsigned int intel_gtt_total_entries(void) { if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) return i965_gtt_total_entries(); else { /* On previous hardware, the GTT size was just what was * required to map the aperture. 
*/ return intel_private.gtt_mappable_entries; } } static unsigned int intel_gtt_mappable_entries(void) { unsigned int aperture_size; if (INTEL_GTT_GEN == 1) { u32 smram_miscc; pci_read_config_dword(intel_private.bridge_dev, I810_SMRAM_MISCC, &smram_miscc); if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) aperture_size = MB(32); else aperture_size = MB(64); } else if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) aperture_size = MB(64); else aperture_size = MB(128); } else { /* 9xx supports large sizes, just look at the length */ aperture_size = pci_resource_len(intel_private.pcidev, 2); } return aperture_size >> PAGE_SHIFT; } static void intel_gtt_teardown_scratch_page(void) { set_pages_wb(intel_private.scratch_page, 1); if (intel_private.needs_dmar) dma_unmap_page(&intel_private.pcidev->dev, intel_private.scratch_page_dma, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(intel_private.scratch_page); } static void intel_gtt_cleanup(void) { intel_private.driver->cleanup(); iounmap(intel_private.gtt); iounmap(intel_private.registers); intel_gtt_teardown_scratch_page(); } /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. */ static inline int needs_ilk_vtd_wa(void) { const unsigned short gpu_devid = intel_private.pcidev->device; /* * Query iommu subsystem to see if we need the workaround. Presumably * that was loaded first. */ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG || gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && device_iommu_mapped(&intel_private.pcidev->dev)); } static bool intel_gtt_can_wc(void) { if (INTEL_GTT_GEN <= 2) return false; if (INTEL_GTT_GEN >= 6) return false; /* Reports of major corruption with ILK vt'd enabled */ if (needs_ilk_vtd_wa()) return false; return true; } static int intel_gtt_init(void) { u32 gtt_map_size; int ret, bar; ret = intel_private.driver->setup(); if (ret != 0) return ret; intel_private.gtt_mappable_entries = intel_gtt_mappable_entries(); intel_private.gtt_total_entries = intel_gtt_total_entries(); /* save the PGETBL reg for resume */ intel_private.PGETBL_save = readl(intel_private.registers+I810_PGETBL_CTL) & ~I810_PGETBL_ENABLED; /* we only ever restore the register when enabling the PGTBL... */ if (HAS_PGTBL_EN) intel_private.PGETBL_save |= I810_PGETBL_ENABLED; dev_info(&intel_private.bridge_dev->dev, "detected gtt size: %dK total, %dK mappable\n", intel_private.gtt_total_entries * 4, intel_private.gtt_mappable_entries * 4); gtt_map_size = intel_private.gtt_total_entries * 4; intel_private.gtt = NULL; if (intel_gtt_can_wc()) intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr, gtt_map_size); if (intel_private.gtt == NULL) intel_private.gtt = ioremap(intel_private.gtt_phys_addr, gtt_map_size); if (intel_private.gtt == NULL) { intel_private.driver->cleanup(); iounmap(intel_private.registers); return -ENOMEM; } #if IS_ENABLED(CONFIG_AGP_INTEL) global_cache_flush(); /* FIXME: ? 
*/ #endif intel_private.stolen_size = intel_gtt_stolen_size(); intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; ret = intel_gtt_setup_scratch_page(); if (ret != 0) { intel_gtt_cleanup(); return ret; } if (INTEL_GTT_GEN <= 2) bar = I810_GMADR_BAR; else bar = I915_GMADR_BAR; intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar); return 0; } #if IS_ENABLED(CONFIG_AGP_INTEL) static const struct aper_size_info_fixed intel_fake_agp_sizes[] = { {32, 8192, 3}, {64, 16384, 4}, {128, 32768, 5}, {256, 65536, 6}, {512, 131072, 7}, }; static int intel_fake_agp_fetch_size(void) { int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); unsigned int aper_size; int i; aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1); for (i = 0; i < num_sizes; i++) { if (aper_size == intel_fake_agp_sizes[i].size) { agp_bridge->current_size = (void *) (intel_fake_agp_sizes + i); return aper_size; } } return 0; } #endif static void i830_cleanup(void) { } /* The chipset_flush interface needs to get data that has already been * flushed out of the CPU all the way out to main memory, because the GPU * doesn't snoop those buffers. * * The 8xx series doesn't have the same lovely interface for flushing the * chipset write buffers that the later chips do. According to the 865 * specs, it's 64 octwords, or 1KB. So, to get those previous things in * that buffer out, we just fill 1KB and clflush it out, on the assumption * that it'll push whatever was in there out. It appears to work. */ static void i830_chipset_flush(void) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); /* Forcibly evict everything from the CPU write buffers. * clflush appears to be insufficient. */ wbinvd_on_all_cpus(); /* Now we've only seen documents for this magic bit on 855GM, * we hope it exists for the other gen2 chipsets... * * Also works as advertised on my 845G. */ writel(readl(intel_private.registers+I830_HIC) | (1<<31), intel_private.registers+I830_HIC); while (readl(intel_private.registers+I830_HIC) & (1<<31)) { if (time_after(jiffies, timeout)) break; udelay(50); } } static void i830_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags = I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte_flags |= I830_PTE_SYSTEM_CACHED; writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } bool intel_gmch_enable_gtt(void) { u8 __iomem *reg; if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); gmch_ctrl |= I830_GMCH_ENABLED; pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { dev_err(&intel_private.pcidev->dev, "failed to enable the GTT: GMCH_CTRL=%x\n", gmch_ctrl); return false; } } /* On the resume path we may be adjusting the PGTBL value, so * be paranoid and flush all chipset write buffers... 
*/ if (INTEL_GTT_GEN >= 3) writel(0, intel_private.registers+GFX_FLSH_CNTL); reg = intel_private.registers+I810_PGETBL_CTL; writel(intel_private.PGETBL_save, reg); if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { dev_err(&intel_private.pcidev->dev, "failed to enable the GTT: PGETBL=%x [expected %x]\n", readl(reg), intel_private.PGETBL_save); return false; } if (INTEL_GTT_GEN >= 3) writel(0, intel_private.registers+GFX_FLSH_CNTL); return true; } EXPORT_SYMBOL(intel_gmch_enable_gtt); static int i830_setup(void) { phys_addr_t reg_addr; reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); intel_private.registers = ioremap(reg_addr, KB(64)); if (!intel_private.registers) return -ENOMEM; intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; return 0; } #if IS_ENABLED(CONFIG_AGP_INTEL) static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) { agp_bridge->gatt_table_real = NULL; agp_bridge->gatt_table = NULL; agp_bridge->gatt_bus_addr = 0; return 0; } static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) { return 0; } static int intel_fake_agp_configure(void) { if (!intel_gmch_enable_gtt()) return -EIO; intel_private.clear_fake_agp = true; agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; return 0; } #endif static bool i830_check_flags(unsigned int flags) { switch (flags) { case 0: case AGP_PHYS_MEMORY: case AGP_USER_CACHED_MEMORY: case AGP_USER_MEMORY: return true; } return false; } void intel_gmch_gtt_insert_page(dma_addr_t addr, unsigned int pg, unsigned int flags) { intel_private.driver->write_entry(addr, pg, flags); readl(intel_private.gtt + pg); if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gmch_gtt_insert_page); void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, unsigned int pg_start, unsigned int flags) { struct scatterlist *sg; unsigned int len, m; int i, j; j = pg_start; /* sg may merge pages, but we have to separate * per-page addr for GTT */ for_each_sg(st->sgl, sg, st->nents, i) { len = sg_dma_len(sg) >> PAGE_SHIFT; for (m = 0; m < len; m++) { dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); intel_private.driver->write_entry(addr, j, flags); j++; } } readl(intel_private.gtt + j - 1); if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries); #if IS_ENABLED(CONFIG_AGP_INTEL) static void intel_gmch_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, struct page **pages, unsigned int flags) { int i, j; for (i = 0, j = first_entry; i < num_entries; i++, j++) { dma_addr_t addr = page_to_phys(pages[i]); intel_private.driver->write_entry(addr, j, flags); } wmb(); } static int intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { int ret = -EINVAL; if (intel_private.clear_fake_agp) { int start = intel_private.stolen_size / PAGE_SIZE; int end = intel_private.gtt_mappable_entries; intel_gmch_gtt_clear_range(start, end - start); intel_private.clear_fake_agp = false; } if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) return i810_insert_dcache_entries(mem, pg_start, type); if (mem->page_count == 0) goto out; if (pg_start + mem->page_count > intel_private.gtt_total_entries) goto out_err; if (type != mem->type) goto out_err; if (!intel_private.driver->check_flags(type)) goto out_err; if (!mem->is_flushed) global_cache_flush(); if (intel_private.needs_dmar) { struct sg_table st; ret = intel_gtt_map_memory(mem->pages, mem->page_count, 
&st); if (ret != 0) return ret; intel_gmch_gtt_insert_sg_entries(&st, pg_start, type); mem->sg_list = st.sgl; mem->num_sg = st.nents; } else intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages, type); out: ret = 0; out_err: mem->is_flushed = true; return ret; } #endif void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) { unsigned int i; for (i = first_entry; i < (first_entry + num_entries); i++) { intel_private.driver->write_entry(intel_private.scratch_page_dma, i, 0); } wmb(); } EXPORT_SYMBOL(intel_gmch_gtt_clear_range); #if IS_ENABLED(CONFIG_AGP_INTEL) static int intel_fake_agp_remove_entries(struct agp_memory *mem, off_t pg_start, int type) { if (mem->page_count == 0) return 0; intel_gmch_gtt_clear_range(pg_start, mem->page_count); if (intel_private.needs_dmar) { intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); mem->sg_list = NULL; mem->num_sg = 0; } return 0; } static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, int type) { struct agp_memory *new; if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) { if (pg_count != intel_private.num_dcache_entries) return NULL; new = agp_create_memory(1); if (new == NULL) return NULL; new->type = AGP_DCACHE_MEMORY; new->page_count = pg_count; new->num_scratch_pages = 0; agp_free_page_array(new); return new; } if (type == AGP_PHYS_MEMORY) return alloc_agpphysmem_i8xx(pg_count, type); /* always return NULL for other allocation types for now */ return NULL; } #endif static int intel_alloc_chipset_flush_resource(void) { int ret; ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE, PAGE_SIZE, PCIBIOS_MIN_MEM, 0, pcibios_align_resource, intel_private.bridge_dev); return ret; } static void intel_i915_setup_chipset_flush(void) { int ret; u32 temp; pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp); if (!(temp & 0x1)) { intel_alloc_chipset_flush_resource(); intel_private.resource_valid = 1; pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); } else { temp &= ~1; intel_private.resource_valid = 1; intel_private.ifp_resource.start = temp; intel_private.ifp_resource.end = temp + PAGE_SIZE; ret = request_resource(&iomem_resource, &intel_private.ifp_resource); /* some BIOSes reserve this area in a pnp some don't */ if (ret) intel_private.resource_valid = 0; } } static void intel_i965_g33_setup_chipset_flush(void) { u32 temp_hi, temp_lo; int ret; pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi); pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo); if (!(temp_lo & 0x1)) { intel_alloc_chipset_flush_resource(); intel_private.resource_valid = 1; pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, upper_32_bits(intel_private.ifp_resource.start)); pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); } else { u64 l64; temp_lo &= ~0x1; l64 = ((u64)temp_hi << 32) | temp_lo; intel_private.resource_valid = 1; intel_private.ifp_resource.start = l64; intel_private.ifp_resource.end = l64 + PAGE_SIZE; ret = request_resource(&iomem_resource, &intel_private.ifp_resource); /* some BIOSes reserve this area in a pnp some don't */ if (ret) intel_private.resource_valid = 0; } } static void intel_i9xx_setup_flush(void) { /* return if already configured */ if (intel_private.ifp_resource.start) return; if (INTEL_GTT_GEN == 6) return; /* setup a resource for this object 
*/ intel_private.ifp_resource.name = "Intel Flush Page"; intel_private.ifp_resource.flags = IORESOURCE_MEM; /* Setup chipset flush for 915 */ if (IS_G33 || INTEL_GTT_GEN >= 4) { intel_i965_g33_setup_chipset_flush(); } else { intel_i915_setup_chipset_flush(); } if (intel_private.ifp_resource.start) intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE); if (!intel_private.i9xx_flush_page) dev_err(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n"); } static void i9xx_cleanup(void) { if (intel_private.i9xx_flush_page) iounmap(intel_private.i9xx_flush_page); if (intel_private.resource_valid) release_resource(&intel_private.ifp_resource); intel_private.ifp_resource.start = 0; intel_private.resource_valid = 0; } static void i9xx_chipset_flush(void) { wmb(); if (intel_private.i9xx_flush_page) writel(1, intel_private.i9xx_flush_page); } static void i965_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags; pte_flags = I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte_flags |= I830_PTE_SYSTEM_CACHED; /* Shift high bits down */ addr |= (addr >> 28) & 0xf0; writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } static int i9xx_setup(void) { phys_addr_t reg_addr; int size = KB(512); reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR); intel_private.registers = ioremap(reg_addr, size); if (!intel_private.registers) return -ENOMEM; switch (INTEL_GTT_GEN) { case 3: intel_private.gtt_phys_addr = pci_resource_start(intel_private.pcidev, I915_PTE_BAR); break; case 5: intel_private.gtt_phys_addr = reg_addr + MB(2); break; default: intel_private.gtt_phys_addr = reg_addr + KB(512); break; } intel_i9xx_setup_flush(); return 0; } #if IS_ENABLED(CONFIG_AGP_INTEL) static const struct agp_bridge_driver intel_fake_agp_driver = { .owner = THIS_MODULE, .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_fake_agp_insert_entries, .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, }; #endif static const struct intel_gtt_driver i81x_gtt_driver = { .gen = 1, .has_pgtbl_enable = 1, .dma_mask_size = 32, .setup = i810_setup, .cleanup = i810_cleanup, .check_flags = i830_check_flags, .write_entry = i810_write_entry, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, .has_pgtbl_enable = 1, .setup = i830_setup, .cleanup = i830_cleanup, .write_entry = i830_write_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i830_chipset_flush, }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, .has_pgtbl_enable = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ .write_entry = i830_write_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, .is_g33 = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, .is_pineview = 1, .is_g33 = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .has_pgtbl_enable = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, .is_ironlake = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of * driver and gmch_driver must be non-null, and find_gmch will determine * which one should be used if a gmch_chip_id is present. */ static const struct intel_gtt_driver_description { unsigned int gmch_chip_id; char *name; const struct intel_gtt_driver *gtt_driver; } intel_gtt_chipsets[] = { { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82845G_IG, "845G", &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82854_IG, "854", &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82865_IG, "865", &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_G33_IG, "G33", &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q45_IG, 
"Q45/Q43", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_IG, "B43", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G41_IG, "G41", &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", &ironlake_gtt_driver }, { 0, NULL, NULL } }; static int find_gmch(u16 device) { struct pci_dev *gmch_device; gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, gmch_device); } if (!gmch_device) return 0; intel_private.pcidev = gmch_device; return 1; } int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge) { int i, mask; for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { if (gpu_pdev) { if (gpu_pdev->device == intel_gtt_chipsets[i].gmch_chip_id) { intel_private.pcidev = pci_dev_get(gpu_pdev); intel_private.driver = intel_gtt_chipsets[i].gtt_driver; break; } } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { intel_private.driver = intel_gtt_chipsets[i].gtt_driver; break; } } if (!intel_private.driver) return 0; #if IS_ENABLED(CONFIG_AGP_INTEL) if (bridge) { if (INTEL_GTT_GEN > 1) return 0; bridge->driver = &intel_fake_agp_driver; bridge->dev_private_data = &intel_private; bridge->dev = bridge_pdev; } #endif /* * Can be called from the fake agp driver but also directly from * drm/i915.ko. Hence we need to check whether everything is set up * already. */ if (intel_private.refcount++) return 1; intel_private.bridge_dev = pci_dev_get(bridge_pdev); dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); if (bridge) { mask = intel_private.driver->dma_mask_size; if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask))) dev_err(&intel_private.pcidev->dev, "set gfx device dma mask %d-bit failed!\n", mask); else dma_set_coherent_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)); } if (intel_gtt_init() != 0) { intel_gmch_remove(); return 0; } return 1; } EXPORT_SYMBOL(intel_gmch_probe); void intel_gmch_gtt_get(u64 *gtt_total, phys_addr_t *mappable_base, resource_size_t *mappable_end) { *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; *mappable_base = intel_private.gma_bus_addr; *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; } EXPORT_SYMBOL(intel_gmch_gtt_get); void intel_gmch_gtt_flush(void) { if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gmch_gtt_flush); void intel_gmch_remove(void) { if (--intel_private.refcount) return; if (intel_private.scratch_page) intel_gtt_teardown_scratch_page(); if (intel_private.pcidev) pci_dev_put(intel_private.pcidev); if (intel_private.bridge_dev) pci_dev_put(intel_private.bridge_dev); intel_private.driver = NULL; } EXPORT_SYMBOL(intel_gmch_remove); MODULE_AUTHOR("Dave Jones, Various @Intel"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/intel-gtt.c
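Note on the record above: intel_gmch_probe() is a plain table walk over intel_gtt_chipsets[] — it scans the array until the GPU's PCI device ID matches gmch_chip_id, then latches the matching gtt_driver ops and stops at the NULL sentinel. The following standalone sketch shows the same table-driven dispatch pattern; it is not kernel code, and the device IDs and the fake driver struct are purely illustrative.

/* Minimal sketch of a device-id table walk as used by the probe above.
 * Hypothetical types and IDs; a NULL name terminates the table. */
#include <stdio.h>
#include <stdint.h>

struct fake_gtt_driver {
	int gen;	/* stands in for the per-generation callback set */
};

static const struct fake_gtt_driver i915_like = { 3 };
static const struct fake_gtt_driver g4x_like  = { 5 };

static const struct {
	uint16_t chip_id;
	const char *name;
	const struct fake_gtt_driver *drv;
} chipsets[] = {
	{ 0x2582, "915G",    &i915_like },
	{ 0x2e22, "G45/G43", &g4x_like  },
	{ 0, NULL, NULL }	/* sentinel ends the walk */
};

static const struct fake_gtt_driver *probe(uint16_t device)
{
	for (int i = 0; chipsets[i].name; i++)
		if (chipsets[i].chip_id == device) {
			printf("matched %s (gen %d)\n",
			       chipsets[i].name, chipsets[i].drv->gen);
			return chipsets[i].drv;
		}
	return NULL;	/* unknown device: decline the probe */
}

int main(void)
{
	probe(0x2e22);
	return 0;
}

The sentinel-terminated table keeps the probe logic independent of how many chipsets are supported; adding a device is a one-line table entry.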
/* * Serverworks AGPGART routines. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/agp_backend.h> #include <asm/set_memory.h> #include "agp.h" #define SVWRKS_COMMAND 0x04 #define SVWRKS_APSIZE 0x10 #define SVWRKS_MMBASE 0x14 #define SVWRKS_CACHING 0x4b #define SVWRKS_AGP_ENABLE 0x60 #define SVWRKS_FEATURE 0x68 #define SVWRKS_SIZE_MASK 0xfe000000 /* Memory mapped registers */ #define SVWRKS_GART_CACHE 0x02 #define SVWRKS_GATTBASE 0x04 #define SVWRKS_TLBFLUSH 0x10 #define SVWRKS_POSTFLUSH 0x14 #define SVWRKS_DIRFLUSH 0x0c struct serverworks_page_map { unsigned long *real; unsigned long __iomem *remapped; }; static struct _serverworks_private { struct pci_dev *svrwrks_dev; /* device one */ volatile u8 __iomem *registers; struct serverworks_page_map **gatt_pages; int num_tables; struct serverworks_page_map scratch_dir; int gart_addr_ofs; int mm_addr_ofs; } serverworks_private; static int serverworks_create_page_map(struct serverworks_page_map *page_map) { int i; page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) { return -ENOMEM; } set_memory_uc((unsigned long)page_map->real, 1); page_map->remapped = page_map->real; for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) writel(agp_bridge->scratch_page, page_map->remapped+i); /* Red Pen: Everyone else does pci posting flush here */ return 0; } static void serverworks_free_page_map(struct serverworks_page_map *page_map) { set_memory_wb((unsigned long)page_map->real, 1); free_page((unsigned long) page_map->real); } static void serverworks_free_gatt_pages(void) { int i; struct serverworks_page_map **tables; struct serverworks_page_map *entry; tables = serverworks_private.gatt_pages; for (i = 0; i < serverworks_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) { serverworks_free_page_map(entry); } kfree(entry); } } kfree(tables); } static int serverworks_create_gatt_pages(int nr_tables) { struct serverworks_page_map **tables; struct serverworks_page_map *entry; int retval = 0; int i; tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *), GFP_KERNEL); if (tables == NULL) return -ENOMEM; for (i = 0; i < nr_tables; i++) { entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL); if (entry == NULL) { retval = -ENOMEM; break; } tables[i] = entry; retval = serverworks_create_page_map(entry); if (retval != 0) break; } serverworks_private.num_tables = nr_tables; serverworks_private.gatt_pages = tables; if (retval != 0) serverworks_free_gatt_pages(); return retval; } #define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) #ifndef GET_PAGE_DIR_OFF #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #endif #ifndef GET_PAGE_DIR_IDX #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #endif #ifndef GET_GATT_OFF #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #endif static int serverworks_create_gatt_table(struct agp_bridge_data *bridge) { struct aper_size_info_lvl2 *value; struct serverworks_page_map page_dir; int retval; u32 temp; int i; value = A_SIZE_LVL2(agp_bridge->current_size); retval = serverworks_create_page_map(&page_dir); if (retval != 0) { return retval; } retval = serverworks_create_page_map(&serverworks_private.scratch_dir); if (retval != 0) { serverworks_free_page_map(&page_dir); return retval; } /* Create a fake scratch 
directory */ for (i = 0; i < 1024; i++) { writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); } retval = serverworks_create_gatt_pages(value->num_entries / 1024); if (retval != 0) { serverworks_free_page_map(&page_dir); serverworks_free_page_map(&serverworks_private.scratch_dir); return retval; } agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Get the address for the gart region. * This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++) writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); return 0; } static int serverworks_free_gatt_table(struct agp_bridge_data *bridge) { struct serverworks_page_map page_dir; page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; serverworks_free_gatt_pages(); serverworks_free_page_map(&page_dir); serverworks_free_page_map(&serverworks_private.scratch_dir); return 0; } static int serverworks_fetch_size(void) { int i; u32 temp; u32 temp2; struct aper_size_info_lvl2 *values; values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs, SVWRKS_SIZE_MASK); pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2); pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp); temp2 &= SVWRKS_SIZE_MASK; for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp2 == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } /* * This routine could be implemented by taking the addresses * written to the GATT, and flushing them individually. However * currently it just flushes the whole table. Which is probably * more efficient, since agp_memory blocks can be a large number of * entries. 
*/ static void serverworks_tlbflush(struct agp_memory *temp) { unsigned long timeout; writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH); timeout = jiffies + 3*HZ; while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(&serverworks_private.svrwrks_dev->dev, "TLB post flush took more than 3 seconds\n"); break; } } writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH); timeout = jiffies + 3*HZ; while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(&serverworks_private.svrwrks_dev->dev, "TLB Dir flush took more than 3 seconds\n"); break; } } } static int serverworks_configure(void) { u32 temp; u8 enable_reg; u16 cap_reg; /* Get the memory mapped registers */ pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp); temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); if (!serverworks_private.registers) { dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp); return -ENOMEM; } writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE); readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */ writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE); readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */ cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND); cap_reg &= ~0x0007; cap_reg |= 0x4; writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND); readw(serverworks_private.registers+SVWRKS_COMMAND); pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg); enable_reg |= 0x1; /* Agp Enable bit */ pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg); serverworks_tlbflush(NULL); agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP); /* Fill in the mode register */ pci_read_config_dword(serverworks_private.svrwrks_dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg); enable_reg &= ~0x3; pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg); pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg); enable_reg |= (1<<6); pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg); return 0; } static void serverworks_cleanup(void) { iounmap((void __iomem *) serverworks_private.registers); } static int serverworks_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, j, num_entries; unsigned long __iomem *cur_gatt; unsigned long addr; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if (type != 0 || mem->type != 0) { return -EINVAL; } if ((pg_start + mem->page_count) > num_entries) { return -EINVAL; } j = pg_start; while (j < (pg_start + mem->page_count)) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) return -EBUSY; j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); writel(agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mem->type), cur_gatt+GET_GATT_OFF(addr)); } serverworks_tlbflush(mem); return 0; } static int serverworks_remove_memory(struct 
agp_memory *mem, off_t pg_start, int type) { int i; unsigned long __iomem *cur_gatt; unsigned long addr; if (type != 0 || mem->type != 0) { return -EINVAL; } global_cache_flush(); serverworks_tlbflush(mem); for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } serverworks_tlbflush(mem); return 0; } static const struct gatt_mask serverworks_masks[] = { {.mask = 1, .type = 0} }; static const struct aper_size_info_lvl2 serverworks_sizes[7] = { {2048, 524288, 0x80000000}, {1024, 262144, 0xc0000000}, {512, 131072, 0xe0000000}, {256, 65536, 0xf0000000}, {128, 32768, 0xf8000000}, {64, 16384, 0xfc000000}, {32, 8192, 0xfe000000} }; static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode) { u32 command; pci_read_config_dword(serverworks_private.svrwrks_dev, bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(bridge, mode, command); command &= ~0x10; /* disable FW */ command &= ~0x08; command |= 0x100; pci_write_config_dword(serverworks_private.svrwrks_dev, bridge->capndx + PCI_AGP_COMMAND, command); agp_device_command(command, false); } static const struct agp_bridge_driver sworks_driver = { .owner = THIS_MODULE, .aperture_sizes = serverworks_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 7, .configure = serverworks_configure, .fetch_size = serverworks_fetch_size, .cleanup = serverworks_cleanup, .tlb_flush = serverworks_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = serverworks_masks, .agp_enable = serverworks_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = serverworks_create_gatt_table, .free_gatt_table = serverworks_free_gatt_table, .insert_memory = serverworks_insert_memory, .remove_memory = serverworks_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static int agp_serverworks_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; struct pci_dev *bridge_dev; u32 temp, temp2; u8 cap_ptr = 0; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); switch (pdev->device) { case 0x0006: dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n"); return -ENODEV; case PCI_DEVICE_ID_SERVERWORKS_HE: case PCI_DEVICE_ID_SERVERWORKS_LE: case 0x0007: break; default: if (cap_ptr) dev_err(&pdev->dev, "unsupported Serverworks chipset " "[%04x/%04x]\n", pdev->vendor, pdev->device); return -ENODEV; } /* Everything is on func 1 here so we are hardcoding function one */ bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), (unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); if (!bridge_dev) { dev_info(&pdev->dev, "can't find secondary device\n"); return -ENODEV; } serverworks_private.svrwrks_dev = bridge_dev; serverworks_private.gart_addr_ofs = 0x10; pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp); if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); if (temp2 != 0) { dev_info(&pdev->dev, "64 bit aperture address, " "but top bits are not zero; disabling AGP\n"); return -ENODEV; } serverworks_private.mm_addr_ofs = 0x18; } else serverworks_private.mm_addr_ofs = 
0x14; pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp); if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs + 4, &temp2); if (temp2 != 0) { dev_info(&pdev->dev, "64 bit MMIO address, but top " "bits are not zero; disabling AGP\n"); return -ENODEV; } } bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &sworks_driver; bridge->dev_private_data = &serverworks_private; bridge->dev = pci_dev_get(pdev); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_serverworks_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); pci_dev_put(bridge->dev); agp_remove_bridge(bridge); agp_put_bridge(bridge); pci_dev_put(serverworks_private.svrwrks_dev); serverworks_private.svrwrks_dev = NULL; } static struct pci_device_id agp_serverworks_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SERVERWORKS, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table); static struct pci_driver agp_serverworks_pci_driver = { .name = "agpgart-serverworks", .id_table = agp_serverworks_pci_table, .probe = agp_serverworks_probe, .remove = agp_serverworks_remove, }; static int __init agp_serverworks_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_serverworks_pci_driver); } static void __exit agp_serverworks_cleanup(void) { pci_unregister_driver(&agp_serverworks_pci_driver); } module_init(agp_serverworks_init); module_exit(agp_serverworks_cleanup); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/sworks-agp.c
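Note on the record above: the ServerWorks GATT is two-level, like a classic 32-bit page table — GET_PAGE_DIR_OFF takes bits 31:22 of a GART bus address to pick a 1024-entry GATT page (relative to the aperture base), and GET_GATT_OFF takes bits 21:12 to pick the entry inside it. A minimal standalone sketch of that address decomposition follows; the aperture base is a hypothetical value, not taken from hardware.

/* Sketch of the two-level GART address split used by the driver above. */
#include <stdio.h>

#define PAGE_DIR_OFF(addr)	((addr) >> 22)			/* which 4 MB slice */
#define GATT_OFF(addr)		(((addr) & 0x003ff000) >> 12)	/* entry within the slice */

int main(void)
{
	unsigned long gart_bus_addr = 0xe0000000;	/* hypothetical aperture base */
	unsigned long addr = gart_bus_addr + 5 * (4UL << 20) + 37 * 4096;

	unsigned long dir_idx = PAGE_DIR_OFF(addr) - PAGE_DIR_OFF(gart_bus_addr);
	unsigned long gatt_off = GATT_OFF(addr);

	/* Expect directory index 5 and GATT offset 37 for the address above. */
	printf("dir_idx=%lu gatt_off=%lu\n", dir_idx, gatt_off);
	return 0;
}

Splitting the table this way lets the driver allocate the GATT as many single pages instead of one large physically contiguous block.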
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2001-2003 SuSE Labs. * Distributed under the GNU public license, v2. * * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge. * It also includes support for the AMD 8151 AGP bridge, * although it doesn't actually do much, as all the real * work is done in the northbridge(s). */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820/api.h> #include <asm/amd_nb.h> #include <asm/gart.h> #include "agp.h" /* NVIDIA K8 registers */ #define NVIDIA_X86_64_0_APBASE 0x10 #define NVIDIA_X86_64_1_APBASE1 0x50 #define NVIDIA_X86_64_1_APLIMIT1 0x54 #define NVIDIA_X86_64_1_APSIZE 0xa8 #define NVIDIA_X86_64_1_APBASE2 0xd8 #define NVIDIA_X86_64_1_APLIMIT2 0xdc /* ULi K8 registers */ #define ULI_X86_64_BASE_ADDR 0x10 #define ULI_X86_64_HTT_FEA_REG 0x50 #define ULI_X86_64_ENU_SCR_REG 0x54 static struct resource *aperture_resource; static bool __initdata agp_try_unsupported = 1; static int agp_bridges_found; static void amd64_tlbflush(struct agp_memory *temp) { amd_flush_garts(); } static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { int i, j, num_entries; long long tmp; int mask_type; struct agp_bridge_data *bridge = mem->bridge; u32 pte; num_entries = agp_num_entries(); if (type != mem->type) return -EINVAL; mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) return -EINVAL; /* Make sure we can fit the range in the gatt table. */ /* FIXME: could wrap */ if (((unsigned long)pg_start + mem->page_count) > num_entries) return -EINVAL; j = pg_start; /* gatt table should be empty. */ while (j < (pg_start + mem->page_count)) { if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) return -EBUSY; j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { tmp = agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mask_type); BUG_ON(tmp & 0xffffff0000000ffcULL); pte = (tmp & 0x000000ff00000000ULL) >> 28; pte |=(tmp & 0x00000000fffff000ULL); pte |= GPTE_VALID | GPTE_COHERENT; writel(pte, agp_bridge->gatt_table+j); readl(agp_bridge->gatt_table+j); /* PCI Posting. */ } amd64_tlbflush(mem); return 0; } /* * This hack alters the order element according * to the size of a long. It sucks. I totally disown this, even * though it does appear to work for the most part. */ static struct aper_size_info_32 amd64_aperture_sizes[7] = { {32, 8192, 3+(sizeof(long)/8), 0 }, {64, 16384, 4+(sizeof(long)/8), 1<<1 }, {128, 32768, 5+(sizeof(long)/8), 1<<2 }, {256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 }, {512, 131072, 7+(sizeof(long)/8), 1<<3 }, {1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3}, {2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3} }; /* * Get the current Aperture size from the x86-64. * Note, that there may be multiple x86-64's, but we just return * the value from the first one we find. The set_size functions * keep the rest coherent anyway. Or at least should do. 
*/ static int amd64_fetch_size(void) { struct pci_dev *dev; int i; u32 temp; struct aper_size_info_32 *values; dev = node_to_amd_nb(0)->misc; if (dev==NULL) return 0; pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp); temp = (temp & 0xe); values = A_SIZE_32(amd64_aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } /* * In a multiprocessor x86-64 system, this function gets * called once for each CPU. */ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table) { u64 aperturebase; u32 tmp; u64 aper_base; /* Address to map to */ pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); aperturebase = (u64)tmp << 25; aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); enable_gart_translation(hammer, gatt_table); return aper_base; } static const struct aper_size_info_32 amd_8151_sizes[7] = { {2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */ {1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */ {512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */ {256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */ {128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */ {64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */ {32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */ }; static int amd_8151_configure(void) { unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); int i; if (!amd_nb_has_feature(AMD_NB_GART)) return 0; /* Configure AGP regs in each x86-64 host bridge. */ for (i = 0; i < amd_nb_num(); i++) { agp_bridge->gart_bus_addr = amd64_configure(node_to_amd_nb(i)->misc, gatt_bus); } amd_flush_garts(); return 0; } static void amd64_cleanup(void) { u32 tmp; int i; if (!amd_nb_has_feature(AMD_NB_GART)) return; for (i = 0; i < amd_nb_num(); i++) { struct pci_dev *dev = node_to_amd_nb(i)->misc; /* disable gart translation */ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); tmp &= ~GARTEN; pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp); } } static const struct agp_bridge_driver amd_8151_driver = { .owner = THIS_MODULE, .aperture_sizes = amd_8151_sizes, .size_type = U32_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = amd_8151_configure, .fetch_size = amd64_fetch_size, .cleanup = amd64_cleanup, .tlb_flush = amd64_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = amd64_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; /* Some basic sanity checks for the aperture. */ static int agp_aperture_valid(u64 aper, u32 size) { if (!aperture_valid(aper, size, 32*1024*1024)) return 0; /* Request the Aperture. This catches cases when someone else already put a mapping in there - happens with some very broken BIOS Maybe better to use pci_assign_resource/pci_enable_device instead trusting the bridges? 
*/ if (!aperture_resource && !(aperture_resource = request_mem_region(aper, size, "aperture"))) { printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n"); return 0; } return 1; } /* * W*s centric BIOS sometimes only set up the aperture in the AGP * bridge, not the northbridge. On AMD64 this is handled early * in aperture.c, but when IOMMU is not enabled or we run * on a 32bit kernel this needs to be redone. * Unfortunately it is impossible to fix the aperture here because it's too late * to allocate that much memory. But at least error out cleanly instead of * crashing. */ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) { u64 aper, nb_aper; int order = 0; u32 nb_order, nb_base; u16 apsize; pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); nb_order = (nb_order >> 1) & 7; pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); nb_aper = (u64)nb_base << 25; /* Northbridge seems to contain crap. Try the AGP bridge. */ pci_read_config_word(agp, cap+0x14, &apsize); if (apsize == 0xffff) { if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) return 0; return -1; } apsize &= 0xfff; /* Some BIOS use weird encodings not in the AGPv3 table. */ if (apsize & 0xff) apsize |= 0xf00; order = 7 - hweight16(apsize); aper = pci_bus_address(agp, AGP_APERTURE_BAR); /* * On some sick chips APSIZE is 0. This means it wants 4G * so let double check that order, and lets trust the AMD NB settings */ if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n", 32 << order); order = nb_order; } if (nb_order >= order) { if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) return 0; } dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) return -1; gart_set_size_and_enable(nb, order); pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25); return 0; } static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr) { int i; if (!amd_nb_num()) return -ENODEV; if (!amd_nb_has_feature(AMD_NB_GART)) return -ENODEV; i = 0; for (i = 0; i < amd_nb_num(); i++) { struct pci_dev *dev = node_to_amd_nb(i)->misc; if (fix_northbridge(dev, pdev, cap_ptr) < 0) { dev_err(&dev->dev, "no usable aperture found\n"); #ifdef __x86_64__ /* should port this to i386 */ dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n"); #endif return -1; } } return 0; } /* Handle AMD 8151 quirks */ static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge) { char *revstring; switch (pdev->revision) { case 0x01: revstring="A0"; break; case 0x02: revstring="A1"; break; case 0x11: revstring="B0"; break; case 0x12: revstring="B1"; break; case 0x13: revstring="B2"; break; case 0x14: revstring="B3"; break; default: revstring="??"; break; } dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring); /* * Work around errata. 
* Chips before B2 stepping incorrectly reporting v3.5 */ if (pdev->revision < 0x13) { dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n"); bridge->major_version = 3; bridge->minor_version = 0; } } static const struct aper_size_info_32 uli_sizes[7] = { {256, 65536, 6, 10}, {128, 32768, 5, 9}, {64, 16384, 4, 8}, {32, 8192, 3, 7}, {16, 4096, 2, 6}, {8, 2048, 1, 4}, {4, 1024, 0, 3} }; static int uli_agp_init(struct pci_dev *pdev) { u32 httfea,baseaddr,enuscr; struct pci_dev *dev1; int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up ULi AGP\n"); dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); if (dev1 == NULL) { dev_info(&pdev->dev, "can't find ULi secondary device\n"); return -ENODEV; } for (i = 0; i < ARRAY_SIZE(uli_sizes); i++) if (uli_sizes[i].size == size) break; if (i == ARRAY_SIZE(uli_sizes)) { dev_info(&pdev->dev, "no ULi size found for %d\n", size); ret = -ENODEV; goto put; } /* shadow x86-64 registers into ULi registers */ pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ if ((httfea & 0x7fff) >> (32 - 25)) { ret = -ENODEV; goto put; } httfea = (httfea& 0x7fff) << 25; pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr); baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK; baseaddr|= httfea; pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr); enuscr= httfea+ (size * 1024 * 1024) - 1; pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); ret = 0; put: pci_dev_put(dev1); return ret; } static const struct aper_size_info_32 nforce3_sizes[5] = { {512, 131072, 7, 0x00000000 }, {256, 65536, 6, 0x00000008 }, {128, 32768, 5, 0x0000000C }, {64, 16384, 4, 0x0000000E }, {32, 8192, 3, 0x0000000F } }; /* Handle shadow device of the Nvidia NForce3 */ /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. 
*/ static int nforce3_agp_init(struct pci_dev *pdev) { u32 tmp, apbase, apbar, aplimit; struct pci_dev *dev1; int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); if (dev1 == NULL) { dev_info(&pdev->dev, "can't find Nforce3 secondary device\n"); return -ENODEV; } for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++) if (nforce3_sizes[i].size == size) break; if (i == ARRAY_SIZE(nforce3_sizes)) { dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); ret = -ENODEV; goto put; } pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); tmp &= ~(0xf); tmp |= nforce3_sizes[i].size_value; pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); /* shadow x86-64 registers into NVIDIA registers */ pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE, &apbase); /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { dev_info(&pdev->dev, "aperture base > 4G\n"); ret = -ENODEV; goto put; } apbase = (apbase & 0x7fff) << 25; pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar); apbar &= ~PCI_BASE_ADDRESS_MEM_MASK; apbar |= apbase; pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar); aplimit = apbase + (size * 1024 * 1024) - 1; pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); ret = 0; put: pci_dev_put(dev1); return ret; } static int agp_amd64_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; int err; /* The Highlander principle */ if (agp_bridges_found) return -ENODEV; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* Could check for AGPv3 here */ bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_8151_0) { amd8151_init(pdev, bridge); } else { dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n", pdev->vendor, pdev->device); } bridge->driver = &amd_8151_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); if (cache_nbs(pdev, cap_ptr) == -1) { agp_put_bridge(bridge); return -ENODEV; } if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { int ret = nforce3_agp_init(pdev); if (ret) { agp_put_bridge(bridge); return ret; } } if (pdev->vendor == PCI_VENDOR_ID_AL) { int ret = uli_agp_init(pdev); if (ret) { agp_put_bridge(bridge); return ret; } } pci_set_drvdata(pdev, bridge); err = agp_add_bridge(bridge); if (err < 0) return err; agp_bridges_found++; return 0; } static void agp_amd64_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); release_mem_region(virt_to_phys(bridge->gatt_table_real), amd64_aperture_sizes[bridge->aperture_size_idx].size); agp_remove_bridge(bridge); agp_put_bridge(bridge); agp_bridges_found--; } static int agp_amd64_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) nforce3_agp_init(pdev); return amd_8151_configure(); } static const struct pci_device_id agp_amd64_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_8151_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* ULi 
M1689 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AL, .device = PCI_DEVICE_ID_AL_M1689, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T800Pro */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_K8T800PRO_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T800 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_8385_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8M800 / K8N800 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_8380_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8M890 / K8N890 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_VT3336, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T890 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_3238_0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* VIA K8T800/K8M800/K8N800 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_VIA, .device = PCI_DEVICE_ID_VIA_838X_1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* NForce3 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NFORCE3, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_NVIDIA, .device = PCI_DEVICE_ID_NVIDIA_NFORCE3S, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* SIS 755 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_755, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* SIS 760 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_760, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* ALI/ULI M1695 */ { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AL, .device = 0x1695, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); static const struct pci_device_id agp_amd64_pci_promisc_table[] = { { PCI_DEVICE_CLASS(0, 0) }, { } }; static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume); static struct pci_driver agp_amd64_pci_driver = { .name = "agpgart-amd64", .id_table = agp_amd64_pci_table, .probe = agp_amd64_probe, .remove = agp_amd64_remove, .driver.pm = &agp_amd64_pm_ops, }; /* Not static due to IOMMU code calling it early. 
*/ int __init agp_amd64_init(void) { int err = 0; if (agp_off) return -EINVAL; err = pci_register_driver(&agp_amd64_pci_driver); if (err < 0) return err; if (agp_bridges_found == 0) { if (!agp_try_unsupported && !agp_try_unsupported_boot) { printk(KERN_INFO PFX "No supported AGP bridge found.\n"); #ifdef MODULE printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n"); #else printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); #endif pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } /* First check that we have at least one AMD64 NB */ if (!amd_nb_num()) { pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } /* Look for any AGP bridge */ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; err = driver_attach(&agp_amd64_pci_driver.driver); if (err == 0 && agp_bridges_found == 0) { pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; } } return err; } static int __init agp_amd64_mod_init(void) { #ifndef MODULE if (gart_iommu_aperture) return agp_bridges_found ? 0 : -ENODEV; #endif return agp_amd64_init(); } static void __exit agp_amd64_cleanup(void) { #ifndef MODULE if (gart_iommu_aperture) return; #endif if (aperture_resource) release_resource(aperture_resource); pci_unregister_driver(&agp_amd64_pci_driver); } module_init(agp_amd64_mod_init); module_exit(agp_amd64_cleanup); MODULE_AUTHOR("Dave Jones, Andi Kleen"); module_param(agp_try_unsupported, bool, 0); MODULE_LICENSE("GPL");
linux-master
drivers/char/agp/amd64-agp.c
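Note on the record above: amd64_insert_memory() packs a 40-bit physical page address into a 32-bit GART PTE by moving address bits 39:32 down into PTE bits 11:4 while bits 31:12 stay in place, then ORs in the valid/coherent flags. The sketch below reproduces that packing and its inverse as a standalone program; the GPTE_VALID/GPTE_COHERENT values are illustrative stand-ins, not taken from the kernel headers.

/* Sketch of the 40-bit-physical-address-in-32-bit-PTE packing above. */
#include <stdio.h>
#include <stdint.h>

#define GPTE_VALID	1u	/* illustrative flag values */
#define GPTE_COHERENT	2u

static uint32_t gart_pack(uint64_t phys)
{
	uint32_t pte;

	pte  = (uint32_t)((phys & 0x000000ff00000000ULL) >> 28); /* bits 39:32 -> 11:4 */
	pte |= (uint32_t)(phys & 0x00000000fffff000ULL);         /* bits 31:12 unchanged */
	return pte | GPTE_VALID | GPTE_COHERENT;
}

static uint64_t gart_unpack(uint32_t pte)
{
	uint64_t phys;

	phys  = (uint64_t)(pte & 0xff0) << 28;	/* bits 11:4 -> 39:32 */
	phys |= pte & 0xfffff000u;		/* bits 31:12 */
	return phys;
}

int main(void)
{
	uint64_t phys = 0x000000abcdef1000ULL;	/* a page-aligned 40-bit address */
	uint32_t pte = gart_pack(phys);

	printf("phys=%#llx pte=%#x back=%#llx\n",
	       (unsigned long long)phys, pte,
	       (unsigned long long)gart_unpack(pte));
	return 0;
}

This is why the driver's BUG_ON rejects any bits above bit 39 or below bit 12: they simply have no slot in the 32-bit entry.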
// SPDX-License-Identifier: GPL-2.0-only /* * HP zx1 AGPGART routines. * * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <[email protected]> */ #include <linux/acpi.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/log2.h> #include <linux/slab.h> #include <asm/acpi-ext.h> #include "agp.h" #define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ /* HP ZX1 IOC registers */ #define HP_ZX1_IBASE 0x300 #define HP_ZX1_IMASK 0x308 #define HP_ZX1_PCOM 0x310 #define HP_ZX1_TCNFG 0x318 #define HP_ZX1_PDIR_BASE 0x320 #define HP_ZX1_IOVA_BASE GB(1UL) #define HP_ZX1_IOVA_SIZE GB(1UL) #define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) #define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL #define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL #define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift) #define AGP8X_MODE_BIT 3 #define AGP8X_MODE (1 << AGP8X_MODE_BIT) /* AGP bridge need not be PCI device, but DRM thinks it is. */ static struct pci_dev fake_bridge_dev; static int hp_zx1_gart_found; static struct aper_size_info_fixed hp_zx1_sizes[] = { {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ }; static struct gatt_mask hp_zx1_masks[] = { {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0} }; static struct _hp_private { volatile u8 __iomem *ioc_regs; volatile u8 __iomem *lba_regs; int lba_cap_offset; u64 *io_pdir; // PDIR for entire IOVA u64 *gatt; // PDIR just for GART (subset of above) u64 gatt_entries; u64 iova_base; u64 gart_base; u64 gart_size; u64 io_pdir_size; int io_pdir_owner; // do we own it, or share it with sba_iommu? int io_page_size; int io_tlb_shift; int io_tlb_ps; // IOC ps config int io_pages_per_kpage; } hp_private; static int __init hp_zx1_ioc_shared(void) { struct _hp_private *hp = &hp_private; printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); /* * IOC already configured by sba_iommu module; just use * its setup. We assume: * - IOVA space is 1Gb in size * - first 512Mb is IOMMU, second 512Mb is GART */ hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG); switch (hp->io_tlb_ps) { case 0: hp->io_tlb_shift = 12; break; case 1: hp->io_tlb_shift = 13; break; case 2: hp->io_tlb_shift = 14; break; case 3: hp->io_tlb_shift = 16; break; default: printk(KERN_ERR PFX "Invalid IOTLB page size " "configuration 0x%x\n", hp->io_tlb_ps); hp->gatt = NULL; hp->gatt_entries = 0; return -ENODEV; } hp->io_page_size = 1 << hp->io_tlb_shift; hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1; hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; hp->gart_size = HP_ZX1_GART_SIZE; hp->gatt_entries = hp->gart_size / hp->io_page_size; hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { /* Normal case when no AGP device in system */ hp->gatt = NULL; hp->gatt_entries = 0; printk(KERN_ERR PFX "No reserved IO PDIR entry found; " "GART disabled\n"); return -ENODEV; } return 0; } static int __init hp_zx1_ioc_owner (void) { struct _hp_private *hp = &hp_private; printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); /* * Select an IOV page size no larger than system page size. 
*/ if (PAGE_SIZE >= KB(64)) { hp->io_tlb_shift = 16; hp->io_tlb_ps = 3; } else if (PAGE_SIZE >= KB(16)) { hp->io_tlb_shift = 14; hp->io_tlb_ps = 2; } else if (PAGE_SIZE >= KB(8)) { hp->io_tlb_shift = 13; hp->io_tlb_ps = 1; } else { hp->io_tlb_shift = 12; hp->io_tlb_ps = 0; } hp->io_page_size = 1 << hp->io_tlb_shift; hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; hp->iova_base = HP_ZX1_IOVA_BASE; hp->gart_size = HP_ZX1_GART_SIZE; hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; hp->gatt_entries = hp->gart_size / hp->io_page_size; hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); return 0; } static int __init hp_zx1_ioc_init (u64 hpa) { struct _hp_private *hp = &hp_private; hp->ioc_regs = ioremap(hpa, 1024); if (!hp->ioc_regs) return -ENOMEM; /* * If the IOTLB is currently disabled, we can take it over. * Otherwise, we have to share with sba_iommu. */ hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0; if (hp->io_pdir_owner) return hp_zx1_ioc_owner(); return hp_zx1_ioc_shared(); } static int hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap) { u16 status; u8 pos, id; int ttl = 48; status = readw(hpa+PCI_STATUS); if (!(status & PCI_STATUS_CAP_LIST)) return 0; pos = readb(hpa+PCI_CAPABILITY_LIST); while (ttl-- && pos >= 0x40) { pos &= ~3; id = readb(hpa+pos+PCI_CAP_LIST_ID); if (id == 0xff) break; if (id == cap) return pos; pos = readb(hpa+pos+PCI_CAP_LIST_NEXT); } return 0; } static int __init hp_zx1_lba_init (u64 hpa) { struct _hp_private *hp = &hp_private; int cap; hp->lba_regs = ioremap(hpa, 256); if (!hp->lba_regs) return -ENOMEM; hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP); cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff; if (cap != PCI_CAP_ID_AGP) { printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n", cap, hp->lba_cap_offset); iounmap(hp->lba_regs); return -ENODEV; } return 0; } static int hp_zx1_fetch_size(void) { int size; size = hp_private.gart_size / MB(1); hp_zx1_sizes[0].size = size; agp_bridge->current_size = (void *) &hp_zx1_sizes[0]; return size; } static int hp_zx1_configure (void) { struct _hp_private *hp = &hp_private; agp_bridge->gart_bus_addr = hp->gart_base; agp_bridge->capndx = hp->lba_cap_offset; agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); if (hp->io_pdir_owner) { writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); readl(hp->ioc_regs+HP_ZX1_TCNFG); writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK); readl(hp->ioc_regs+HP_ZX1_IMASK); writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE); readl(hp->ioc_regs+HP_ZX1_IBASE); writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM); readl(hp->ioc_regs+HP_ZX1_PCOM); } return 0; } static void hp_zx1_cleanup (void) { struct _hp_private *hp = &hp_private; if (hp->ioc_regs) { if (hp->io_pdir_owner) { writeq(0, hp->ioc_regs+HP_ZX1_IBASE); readq(hp->ioc_regs+HP_ZX1_IBASE); } iounmap(hp->ioc_regs); } if (hp->lba_regs) iounmap(hp->lba_regs); } static void hp_zx1_tlbflush (struct agp_memory *mem) { struct _hp_private *hp = &hp_private; writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM); readq(hp->ioc_regs+HP_ZX1_PCOM); } static int hp_zx1_create_gatt_table (struct agp_bridge_data *bridge) { struct _hp_private *hp = &hp_private; int i; if (hp->io_pdir_owner) { hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, 
get_order(hp->io_pdir_size)); if (!hp->io_pdir) { printk(KERN_ERR PFX "Couldn't allocate contiguous " "memory for I/O PDIR\n"); hp->gatt = NULL; hp->gatt_entries = 0; return -ENOMEM; } memset(hp->io_pdir, 0, hp->io_pdir_size); hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; } for (i = 0; i < hp->gatt_entries; i++) { hp->gatt[i] = (unsigned long) agp_bridge->scratch_page; } return 0; } static int hp_zx1_free_gatt_table (struct agp_bridge_data *bridge) { struct _hp_private *hp = &hp_private; if (hp->io_pdir_owner) free_pages((unsigned long) hp->io_pdir, get_order(hp->io_pdir_size)); else hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; return 0; } static int hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type) { struct _hp_private *hp = &hp_private; int i, k; off_t j, io_pg_start; int io_pg_count; if (type != mem->type || agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { return -EINVAL; } io_pg_start = hp->io_pages_per_kpage * pg_start; io_pg_count = hp->io_pages_per_kpage * mem->page_count; if ((io_pg_start + io_pg_count) > hp->gatt_entries) { return -EINVAL; } j = io_pg_start; while (j < (io_pg_start + io_pg_count)) { if (hp->gatt[j]) { return -EBUSY; } j++; } if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = io_pg_start; i < mem->page_count; i++) { unsigned long paddr; paddr = page_to_phys(mem->pages[i]); for (k = 0; k < hp->io_pages_per_kpage; k++, j++, paddr += hp->io_page_size) { hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr; } } agp_bridge->driver->tlb_flush(mem); return 0; } static int hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type) { struct _hp_private *hp = &hp_private; int i, io_pg_start, io_pg_count; if (type != mem->type || agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { return -EINVAL; } io_pg_start = hp->io_pages_per_kpage * pg_start; io_pg_count = hp->io_pages_per_kpage * mem->page_count; for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { hp->gatt[i] = agp_bridge->scratch_page; } agp_bridge->driver->tlb_flush(mem); return 0; } static unsigned long hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type) { return HP_ZX1_PDIR_VALID_BIT | addr; } static void hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode) { struct _hp_private *hp = &hp_private; u32 command; command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); command = agp_collect_device_status(bridge, mode, command); command |= 0x00000100; writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND); agp_device_command(command, (mode & AGP8X_MODE) != 0); } const struct agp_bridge_driver hp_zx1_driver = { .owner = THIS_MODULE, .size_type = FIXED_APER_SIZE, .configure = hp_zx1_configure, .fetch_size = hp_zx1_fetch_size, .cleanup = hp_zx1_cleanup, .tlb_flush = hp_zx1_tlbflush, .mask_memory = hp_zx1_mask_memory, .masks = hp_zx1_masks, .agp_enable = hp_zx1_enable, .cache_flush = global_cache_flush, .create_gatt_table = hp_zx1_create_gatt_table, .free_gatt_table = hp_zx1_free_gatt_table, .insert_memory = hp_zx1_insert_memory, .remove_memory = hp_zx1_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, .cant_use_aperture = true, }; static int __init hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa) { 
struct agp_bridge_data *bridge; int error = 0; error = hp_zx1_ioc_init(ioc_hpa); if (error) goto fail; error = hp_zx1_lba_init(lba_hpa); if (error) goto fail; bridge = agp_alloc_bridge(); if (!bridge) { error = -ENOMEM; goto fail; } bridge->driver = &hp_zx1_driver; fake_bridge_dev.vendor = PCI_VENDOR_ID_HP; fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA; bridge->dev = &fake_bridge_dev; error = agp_add_bridge(bridge); fail: if (error) hp_zx1_cleanup(); return error; } static acpi_status __init zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) { acpi_handle handle, parent; acpi_status status; struct acpi_device_info *info; u64 lba_hpa, sba_hpa, length; int match; status = hp_acpi_csr_space(obj, &lba_hpa, &length); if (ACPI_FAILURE(status)) return AE_OK; /* keep looking for another bridge */ /* Look for an enclosing IOC scope and find its CSR space */ handle = obj; do { status = acpi_get_object_info(handle, &info); if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) { /* TBD check _CID also */ match = (strcmp(info->hardware_id.string, "HWP0001") == 0); kfree(info); if (match) { status = hp_acpi_csr_space(handle, &sba_hpa, &length); if (ACPI_SUCCESS(status)) break; else { printk(KERN_ERR PFX "Detected HP ZX1 " "AGP LBA but no IOC.\n"); return AE_OK; } } } status = acpi_get_parent(handle, &parent); handle = parent; } while (ACPI_SUCCESS(status)); if (ACPI_FAILURE(status)) return AE_OK; /* found no enclosing IOC */ if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) return AE_OK; printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset " "(ioc=%llx, lba=%llx)\n", (char *)context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa); hp_zx1_gart_found = 1; return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */ } static int __init agp_hp_init (void) { if (agp_off) return -EINVAL; acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL); if (hp_zx1_gart_found) return 0; acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL); if (hp_zx1_gart_found) return 0; return -ENODEV; } static void __exit agp_hp_cleanup (void) { } module_init(agp_hp_init); module_exit(agp_hp_cleanup); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/char/agp/hp-agp.c
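/*
 * Illustrative sketch (not part of the driver above): hp_zx1_insert_memory()
 * maps each kernel page onto hp->io_pages_per_kpage consecutive PDIR/GATT
 * entries, each tagged with HP_ZX1_PDIR_VALID_BIT.  The standalone model
 * below reproduces only that bookkeeping with plain arrays; every FAKE_*
 * name and the gatt[] array here are hypothetical and exist only for this
 * example, under assumed page sizes.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PDIR_VALID_BIT	0x8000000000000000ULL
#define FAKE_KPAGE_SIZE		16384UL	/* assumed kernel page size */
#define FAKE_IO_PAGE_SIZE	4096UL	/* assumed IOMMU page size  */
#define FAKE_GATT_ENTRIES	64

static uint64_t gatt[FAKE_GATT_ENTRIES];

/* Map 'page_count' kernel pages, starting at aperture page 'pg_start'. */
static int fake_insert_memory(const uint64_t *paddrs, int page_count, int pg_start)
{
	unsigned long io_pages_per_kpage = FAKE_KPAGE_SIZE / FAKE_IO_PAGE_SIZE;
	unsigned long io_pg_start = io_pages_per_kpage * pg_start;
	unsigned long io_pg_count = io_pages_per_kpage * page_count;
	unsigned long i, j, k;

	if (io_pg_start + io_pg_count > FAKE_GATT_ENTRIES)
		return -1;
	for (j = io_pg_start; j < io_pg_start + io_pg_count; j++) {
		if (gatt[j])
			return -1;	/* slot already in use */
	}

	for (i = 0, j = io_pg_start; i < (unsigned long)page_count; i++) {
		uint64_t paddr = paddrs[i];

		for (k = 0; k < io_pages_per_kpage; k++, j++, paddr += FAKE_IO_PAGE_SIZE)
			gatt[j] = FAKE_PDIR_VALID_BIT | paddr;
	}
	return 0;
}

int main(void)
{
	uint64_t pages[2] = { 0x100000, 0x200000 };

	if (fake_insert_memory(pages, 2, 1) == 0)
		printf("gatt[4] = %#llx\n", (unsigned long long)gatt[4]);
	return 0;
}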
/* * * 3780i.c -- helper routines for the 3780i DSP * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/kernel.h> #include <linux/unistd.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/bitops.h> #include <linux/sched.h> /* cond_resched() */ #include <asm/io.h> #include <linux/uaccess.h> #include <asm/irq.h> #include "smapi.h" #include "mwavedd.h" #include "3780i.h" static DEFINE_SPINLOCK(dsp_lock); static void PaceMsaAccess(unsigned short usDspBaseIO) { cond_resched(); udelay(100); cond_resched(); } unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO, unsigned long ulMsaAddr) { unsigned long flags; unsigned short val; PRINTK_3(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n", usDspBaseIO, ulMsaAddr); spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16)); val = InWordDsp(DSP_MsaDataDSISHigh); spin_unlock_irqrestore(&dsp_lock, flags); PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val); return val; } void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO, unsigned long ulMsaAddr, unsigned short usValue) { unsigned long flags; PRINTK_4(TRACE_3780I, "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n", usDspBaseIO, ulMsaAddr, usValue); spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16)); OutWordDsp(DSP_MsaDataDSISHigh, 
usValue); spin_unlock_irqrestore(&dsp_lock, flags); } static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex, unsigned char ucValue) { DSP_ISA_SLAVE_CONTROL rSlaveControl; DSP_ISA_SLAVE_CONTROL rSlaveControl_Save; PRINTK_4(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n", usDspBaseIO, uIndex, ucValue); MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl); PRINTK_2(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n", MKBYTE(rSlaveControl)); rSlaveControl_Save = rSlaveControl; rSlaveControl.ConfigMode = true; PRINTK_2(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n", MKBYTE(rSlaveControl)); OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl)); OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex); OutByteDsp(DSP_ConfigData, ucValue); OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save)); PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n"); } #if 0 unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO, unsigned uIndex) { DSP_ISA_SLAVE_CONTROL rSlaveControl; DSP_ISA_SLAVE_CONTROL rSlaveControl_Save; unsigned char ucValue; PRINTK_3(TRACE_3780I, "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n", usDspBaseIO, uIndex); MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl); rSlaveControl_Save = rSlaveControl; rSlaveControl.ConfigMode = true; OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl)); OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex); ucValue = InByteDsp(DSP_ConfigData); OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save)); PRINTK_2(TRACE_3780I, "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue); return ucValue; } #endif /* 0 */ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings, unsigned short *pIrqMap, unsigned short *pDmaMap) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; int i; DSP_UART_CFG_1 rUartCfg1; DSP_UART_CFG_2 rUartCfg2; DSP_HBRIDGE_CFG_1 rHBridgeCfg1; DSP_HBRIDGE_CFG_2 rHBridgeCfg2; DSP_BUSMASTER_CFG_1 rBusmasterCfg1; DSP_BUSMASTER_CFG_2 rBusmasterCfg2; DSP_ISA_PROT_CFG rIsaProtCfg; DSP_POWER_MGMT_CFG rPowerMgmtCfg; DSP_HBUS_TIMER_CFG rHBusTimerCfg; DSP_LBUS_TIMEOUT_DISABLE rLBusTimeoutDisable; DSP_CHIP_RESET rChipReset; DSP_CLOCK_CONTROL_1 rClockControl1; DSP_CLOCK_CONTROL_2 rClockControl2; DSP_ISA_SLAVE_CONTROL rSlaveControl; DSP_HBRIDGE_CONTROL rHBridgeControl; unsigned short ChipID = 0; unsigned short tval; PRINTK_2(TRACE_3780I, "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n", pSettings->bDSPEnabled); if (!pSettings->bDSPEnabled) { PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. 
Aborting.\n" ); return -EIO; } PRINTK_2(TRACE_3780I, "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n", pSettings->bModemEnabled); if (pSettings->bModemEnabled) { rUartCfg1.Reserved = rUartCfg2.Reserved = 0; rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow; rUartCfg1.IrqPulse = pSettings->bUartIrqPulse; rUartCfg1.Irq = (unsigned char) pIrqMap[pSettings->usUartIrq]; switch (pSettings->usUartBaseIO) { case 0x03F8: rUartCfg1.BaseIO = 0; break; case 0x02F8: rUartCfg1.BaseIO = 1; break; case 0x03E8: rUartCfg1.BaseIO = 2; break; case 0x02E8: rUartCfg1.BaseIO = 3; break; } rUartCfg2.Enable = true; } rHBridgeCfg1.Reserved = rHBridgeCfg2.Reserved = 0; rHBridgeCfg1.IrqActiveLow = pSettings->bDspIrqActiveLow; rHBridgeCfg1.IrqPulse = pSettings->bDspIrqPulse; rHBridgeCfg1.Irq = (unsigned char) pIrqMap[pSettings->usDspIrq]; rHBridgeCfg1.AccessMode = 1; rHBridgeCfg2.Enable = true; rBusmasterCfg2.Reserved = 0; rBusmasterCfg1.Dma = (unsigned char) pDmaMap[pSettings->usDspDma]; rBusmasterCfg1.NumTransfers = (unsigned char) pSettings->usNumTransfers; rBusmasterCfg1.ReRequest = (unsigned char) pSettings->usReRequest; rBusmasterCfg1.MEMCS16 = pSettings->bEnableMEMCS16; rBusmasterCfg2.IsaMemCmdWidth = (unsigned char) pSettings->usIsaMemCmdWidth; rIsaProtCfg.Reserved = 0; rIsaProtCfg.GateIOCHRDY = pSettings->bGateIOCHRDY; rPowerMgmtCfg.Reserved = 0; rPowerMgmtCfg.Enable = pSettings->bEnablePwrMgmt; rHBusTimerCfg.LoadValue = (unsigned char) pSettings->usHBusTimerLoadValue; rLBusTimeoutDisable.Reserved = 0; rLBusTimeoutDisable.DisableTimeout = pSettings->bDisableLBusTimeout; MKWORD(rChipReset) = ~pSettings->usChipletEnable; rClockControl1.Reserved1 = rClockControl1.Reserved2 = 0; rClockControl1.N_Divisor = pSettings->usN_Divisor; rClockControl1.M_Multiplier = pSettings->usM_Multiplier; rClockControl2.Reserved = 0; rClockControl2.PllBypass = pSettings->bPllBypass; /* Issue a soft reset to the chip */ /* Note: Since we may be coming in with 3780i clocks suspended, we must keep * soft-reset active for 10ms. 
*/ rSlaveControl.ClockControl = 0; rSlaveControl.SoftReset = true; rSlaveControl.ConfigMode = false; rSlaveControl.Reserved = 0; PRINTK_4(TRACE_3780I, "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n", usDspBaseIO, DSP_IsaSlaveControl, usDspBaseIO + DSP_IsaSlaveControl); PRINTK_2(TRACE_3780I, "3780i::dsp3780i_EnableDSP rSlaveContrl %x\n", MKWORD(rSlaveControl)); spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl); PRINTK_2(TRACE_3780I, "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval); for (i = 0; i < 11; i++) udelay(2000); rSlaveControl.SoftReset = false; OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl); PRINTK_2(TRACE_3780I, "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval); /* Program our general configuration registers */ WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1)); WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2)); WriteGenCfg(DSP_BusMasterCfg1Index, MKBYTE(rBusmasterCfg1)); WriteGenCfg(DSP_BusMasterCfg2Index, MKBYTE(rBusmasterCfg2)); WriteGenCfg(DSP_IsaProtCfgIndex, MKBYTE(rIsaProtCfg)); WriteGenCfg(DSP_PowerMgCfgIndex, MKBYTE(rPowerMgmtCfg)); WriteGenCfg(DSP_HBusTimerCfgIndex, MKBYTE(rHBusTimerCfg)); if (pSettings->bModemEnabled) { WriteGenCfg(DSP_UartCfg1Index, MKBYTE(rUartCfg1)); WriteGenCfg(DSP_UartCfg2Index, MKBYTE(rUartCfg2)); } rHBridgeControl.EnableDspInt = false; rHBridgeControl.MemAutoInc = true; rHBridgeControl.IoAutoInc = false; rHBridgeControl.DiagnosticMode = false; PRINTK_3(TRACE_3780I, "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n", DSP_HBridgeControl, MKWORD(rHBridgeControl)); OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable)); WriteMsaCfg(DSP_ClockControl_1, MKWORD(rClockControl1)); WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2)); WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset)); ChipID = ReadMsaCfg(DSP_ChipID); PRINTK_2(TRACE_3780I, "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n", ChipID); return 0; } int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_ISA_SLAVE_CONTROL rSlaveControl; PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n"); rSlaveControl.ClockControl = 0; rSlaveControl.SoftReset = true; rSlaveControl.ConfigMode = false; rSlaveControl.Reserved = 0; spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); udelay(5); rSlaveControl.ClockControl = 1; OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl)); spin_unlock_irqrestore(&dsp_lock, flags); udelay(5); PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n"); return 0; } int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_BOOT_DOMAIN rBootDomain; DSP_HBRIDGE_CONTROL rHBridgeControl; PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n"); spin_lock_irqsave(&dsp_lock, flags); /* Mask DSP to PC interrupt */ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n", MKWORD(rHBridgeControl)); rHBridgeControl.EnableDspInt = false; OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); /* Reset the core via the boot domain register */ 
rBootDomain.ResetCore = true; rBootDomain.Halt = true; rBootDomain.NMI = true; rBootDomain.Reserved = 0; PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n", MKWORD(rBootDomain)); WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); /* Reset all the chiplets and then reactivate them */ WriteMsaCfg(DSP_ChipReset, 0xFFFF); udelay(5); WriteMsaCfg(DSP_ChipReset, (unsigned short) (~pSettings->usChipletEnable)); PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n"); return 0; } int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings) { unsigned long flags; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_BOOT_DOMAIN rBootDomain; DSP_HBRIDGE_CONTROL rHBridgeControl; PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n"); /* Transition the core to a running state */ rBootDomain.ResetCore = true; rBootDomain.Halt = false; rBootDomain.NMI = true; rBootDomain.Reserved = 0; WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); udelay(5); rBootDomain.ResetCore = false; WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); udelay(5); rBootDomain.NMI = false; WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain)); udelay(5); /* Enable DSP to PC interrupt */ spin_lock_irqsave(&dsp_lock, flags); MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); rHBridgeControl.EnableDspInt = true; PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n", MKWORD(rHBridgeControl)); OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n"); return 0; } int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned uCount, unsigned long ulDSPAddr) { unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; unsigned short val; PRINTK_5(TRACE_3780I, "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", usDspBaseIO, pusBuffer, uCount, ulDSPAddr); /* Set the initial MSA address. No adjustments need to be made to data store addresses */ spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); spin_unlock_irqrestore(&dsp_lock, flags); /* Transfer the memory block */ while (uCount-- != 0) { spin_lock_irqsave(&dsp_lock, flags); val = InWordDsp(DSP_MsaDataDSISHigh); spin_unlock_irqrestore(&dsp_lock, flags); if(put_user(val, pusBuffer++)) return -EFAULT; PRINTK_3(TRACE_3780I, "3780I::dsp3780I_ReadDStore uCount %x val %x\n", uCount, val); PaceMsaAccess(usDspBaseIO); } PRINTK_1(TRACE_3780I, "3780I::dsp3780I_ReadDStore exit bRC=true\n"); return 0; } int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned uCount, unsigned long ulDSPAddr) { unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; unsigned short val; PRINTK_5(TRACE_3780I, "3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", usDspBaseIO, pusBuffer, uCount, ulDSPAddr); /* Set the initial MSA address. 
No adjustments need to be made to data store addresses */ spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); spin_unlock_irqrestore(&dsp_lock, flags); /* Transfer the memory block */ while (uCount-- != 0) { spin_lock_irqsave(&dsp_lock, flags); val = InWordDsp(DSP_ReadAndClear); spin_unlock_irqrestore(&dsp_lock, flags); if(put_user(val, pusBuffer++)) return -EFAULT; PRINTK_3(TRACE_3780I, "3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n", uCount, val); PaceMsaAccess(usDspBaseIO); } PRINTK_1(TRACE_3780I, "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n"); return 0; } int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned uCount, unsigned long ulDSPAddr) { unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; PRINTK_5(TRACE_3780I, "3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", usDspBaseIO, pusBuffer, uCount, ulDSPAddr); /* Set the initial MSA address. No adjustments need to be made to data store addresses */ spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); spin_unlock_irqrestore(&dsp_lock, flags); /* Transfer the memory block */ while (uCount-- != 0) { unsigned short val; if(get_user(val, pusBuffer++)) return -EFAULT; spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaDataDSISHigh, val); spin_unlock_irqrestore(&dsp_lock, flags); PRINTK_3(TRACE_3780I, "3780I::dsp3780I_WriteDStore uCount %x val %x\n", uCount, val); PaceMsaAccess(usDspBaseIO); } PRINTK_1(TRACE_3780I, "3780I::dsp3780D_WriteDStore exit bRC=true\n"); return 0; } int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned uCount, unsigned long ulDSPAddr) { unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; PRINTK_5(TRACE_3780I, "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", usDspBaseIO, pusBuffer, uCount, ulDSPAddr); /* * Set the initial MSA address. To convert from an instruction store * address to an MSA address * shift the address two bits to the left and set bit 22 */ ulDSPAddr = (ulDSPAddr << 2) | (1 << 22); spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); spin_unlock_irqrestore(&dsp_lock, flags); /* Transfer the memory block */ while (uCount-- != 0) { unsigned short val_lo, val_hi; spin_lock_irqsave(&dsp_lock, flags); val_lo = InWordDsp(DSP_MsaDataISLow); val_hi = InWordDsp(DSP_MsaDataDSISHigh); spin_unlock_irqrestore(&dsp_lock, flags); if(put_user(val_lo, pusBuffer++)) return -EFAULT; if(put_user(val_hi, pusBuffer++)) return -EFAULT; PRINTK_4(TRACE_3780I, "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n", uCount, val_lo, val_hi); PaceMsaAccess(usDspBaseIO); } PRINTK_1(TRACE_3780I, "3780I::dsp3780I_ReadIStore exit bRC=true\n"); return 0; } int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer, unsigned uCount, unsigned long ulDSPAddr) { unsigned long flags; unsigned short __user *pusBuffer = pvBuffer; PRINTK_5(TRACE_3780I, "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n", usDspBaseIO, pusBuffer, uCount, ulDSPAddr); /* * Set the initial MSA address. 
To convert from an instruction store * address to an MSA address * shift the address two bits to the left and set bit 22 */ ulDSPAddr = (ulDSPAddr << 2) | (1 << 22); spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr); OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16)); spin_unlock_irqrestore(&dsp_lock, flags); /* Transfer the memory block */ while (uCount-- != 0) { unsigned short val_lo, val_hi; if(get_user(val_lo, pusBuffer++)) return -EFAULT; if(get_user(val_hi, pusBuffer++)) return -EFAULT; spin_lock_irqsave(&dsp_lock, flags); OutWordDsp(DSP_MsaDataISLow, val_lo); OutWordDsp(DSP_MsaDataDSISHigh, val_hi); spin_unlock_irqrestore(&dsp_lock, flags); PRINTK_4(TRACE_3780I, "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n", uCount, val_lo, val_hi); PaceMsaAccess(usDspBaseIO); } PRINTK_1(TRACE_3780I, "3780I::dsp3780I_WriteIStore exit bRC=true\n"); return 0; } int dsp3780I_GetIPCSource(unsigned short usDspBaseIO, unsigned short *pusIPCSource) { unsigned long flags; DSP_HBRIDGE_CONTROL rHBridgeControl; unsigned short temp; PRINTK_3(TRACE_3780I, "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n", usDspBaseIO, pusIPCSource); /* * Disable DSP to PC interrupts, read the interrupt register, * clear the pending IPC bits, and reenable DSP to PC interrupts */ spin_lock_irqsave(&dsp_lock, flags); MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl); rHBridgeControl.EnableDspInt = false; OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); *pusIPCSource = InWordDsp(DSP_Interrupt); temp = (unsigned short) ~(*pusIPCSource); PRINTK_3(TRACE_3780I, "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n", *pusIPCSource, temp); OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource)); rHBridgeControl.EnableDspInt = true; OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl)); spin_unlock_irqrestore(&dsp_lock, flags); PRINTK_2(TRACE_3780I, "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n", *pusIPCSource); return 0; }
linux-master
drivers/char/mwave/3780i.c
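/*
 * Illustrative sketch (not driver code): dsp3780I_Read/WriteMsaCfg() above
 * split a 32-bit MSA address into two 16-bit index writes, and the I-store
 * helpers convert an instruction-store address by shifting it left two bits
 * and setting bit 22.  This standalone program only reproduces that address
 * arithmetic; the port-level OutWordDsp()/InWordDsp() accesses and the
 * dsp_lock spinlock are omitted.
 */
#include <stdio.h>

static void split_msa_addr(unsigned long msa, unsigned short *lo, unsigned short *hi)
{
	*lo = (unsigned short)msa;		/* written to DSP_MsaAddrLow  */
	*hi = (unsigned short)(msa >> 16);	/* written to DSP_MsaAddrHigh */
}

static unsigned long istore_to_msa(unsigned long istore_addr)
{
	/* shift left two bits and set bit 22, as in dsp3780I_ReadIStore() */
	return (istore_addr << 2) | (1UL << 22);
}

int main(void)
{
	unsigned short lo, hi;
	unsigned long msa = istore_to_msa(0x1234);

	split_msa_addr(msa, &lo, &hi);
	printf("istore 0x1234 -> msa %#lx (low %#x, high %#x)\n", msa, lo, hi);
	return 0;
}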
/* * * mwavedd.c -- mwave device driver * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/major.h> #include <linux/miscdevice.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/serial_8250.h> #include <linux/nospec.h> #include "smapi.h" #include "mwavedd.h" #include "3780i.h" #include "tp3780i.h" MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver"); MODULE_AUTHOR("Mike Sullivan and Paul Schroeder"); MODULE_LICENSE("GPL"); /* * These parameters support the setting of MWave resources. Note that no * checks are made against other devices (ie. superio) for conflicts. 
* We'll depend on users using the tpctl utility to do that for now */ static DEFINE_MUTEX(mwave_mutex); int mwave_debug = 0; int mwave_3780i_irq = 0; int mwave_3780i_io = 0; int mwave_uart_irq = 0; int mwave_uart_io = 0; module_param(mwave_debug, int, 0); module_param_hw(mwave_3780i_irq, int, irq, 0); module_param_hw(mwave_3780i_io, int, ioport, 0); module_param_hw(mwave_uart_irq, int, irq, 0); module_param_hw(mwave_uart_io, int, ioport, 0); static int mwave_open(struct inode *inode, struct file *file); static int mwave_close(struct inode *inode, struct file *file); static long mwave_ioctl(struct file *filp, unsigned int iocmd, unsigned long ioarg); MWAVE_DEVICE_DATA mwave_s_mdd; static int mwave_open(struct inode *inode, struct file *file) { unsigned int retval = 0; PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_open, entry inode %p file %p\n", inode, file); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_open, exit return retval %x\n", retval); return retval; } static int mwave_close(struct inode *inode, struct file *file) { unsigned int retval = 0; PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_close, entry inode %p file %p\n", inode, file); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n", retval); return retval; } static long mwave_ioctl(struct file *file, unsigned int iocmd, unsigned long ioarg) { unsigned int retval = 0; pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; void __user *arg = (void __user *)ioarg; PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n", file, iocmd, (int) ioarg); switch (iocmd) { case IOCTL_MW_RESET: PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RESET" " calling tp3780I_ResetDSP\n"); mutex_lock(&mwave_mutex); retval = tp3780I_ResetDSP(&pDrvData->rBDData); mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RESET" " retval %x from tp3780I_ResetDSP\n", retval); break; case IOCTL_MW_RUN: PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RUN" " calling tp3780I_StartDSP\n"); mutex_lock(&mwave_mutex); retval = tp3780I_StartDSP(&pDrvData->rBDData); mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RUN" " retval %x from tp3780I_StartDSP\n", retval); break; case IOCTL_MW_DSP_ABILITIES: { MW_ABILITIES rAbilities; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl," " IOCTL_MW_DSP_ABILITIES calling" " tp3780I_QueryAbilities\n"); mutex_lock(&mwave_mutex); retval = tp3780I_QueryAbilities(&pDrvData->rBDData, &rAbilities); mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" " retval %x from tp3780I_QueryAbilities\n", retval); if (retval == 0) { if( copy_to_user(arg, &rAbilities, sizeof(MW_ABILITIES)) ) return -EFAULT; } PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" " exit retval %x\n", retval); } break; case IOCTL_MW_READ_DATA: case IOCTL_MW_READCLEAR_DATA: { MW_READWRITE rReadData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rReadData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *) (rReadData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA," " size %lx, ioarg %lx pusBuffer %p\n", rReadData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rReadData.ulDataLength, rReadData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_READ_INST: { MW_READWRITE rReadData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rReadData, arg, sizeof(MW_READWRITE)) ) return 
-EFAULT; pusBuffer = (unsigned short __user *) (rReadData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_READ_INST," " size %lx, ioarg %lx pusBuffer %p\n", rReadData.ulDataLength / 2, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rReadData.ulDataLength / 2, rReadData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_WRITE_DATA: { MW_READWRITE rWriteData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *) (rWriteData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA," " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_WRITE_INST: { MW_READWRITE rWriteData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *)(rWriteData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST," " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_REGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_REGISTER_IPC:" " Error: Invalid ipcnum %x\n", ipcnum); return -EINVAL; } ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", ipcnum, pDrvData->IPCs[ipcnum].usIntCount); mutex_lock(&mwave_mutex); pDrvData->IPCs[ipcnum].bIsHere = false; pDrvData->IPCs[ipcnum].bIsEnabled = true; mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x exit\n", ipcnum); } break; case IOCTL_MW_GET_IPC: { unsigned int ipcnum = (unsigned int) ioarg; if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_GET_IPC: Error:" " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", ipcnum, pDrvData->IPCs[ipcnum].usIntCount); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { DECLARE_WAITQUEUE(wait, current); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, thread for" " ipc %x going to sleep\n", ipcnum); add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); pDrvData->IPCs[ipcnum].bIsHere = true; set_current_state(TASK_INTERRUPTIBLE); /* check whether an event was signalled by */ /* the interrupt handler while we were gone */ if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */ pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl" " IOCTL_MW_GET_IPC ipcnum %x" " handling first int\n", ipcnum); } else { /* either 1st int has not yet occurred, or we have already handled the first int */ schedule(); if (pDrvData->IPCs[ipcnum].usIntCount == 1) { 
pDrvData->IPCs[ipcnum].usIntCount = 2; } PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl" " IOCTL_MW_GET_IPC ipcnum %x" " woke up and returning to" " application\n", ipcnum); } pDrvData->IPCs[ipcnum].bIsHere = false; remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); set_current_state(TASK_RUNNING); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC," " returning thread for ipc %x" " processing\n", ipcnum); } mutex_unlock(&mwave_mutex); } break; case IOCTL_MW_UNREGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC" " ipcnum %x\n", ipcnum); if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_UNREGISTER_IPC:" " Error: Invalid ipcnum %x\n", ipcnum); return -EINVAL; } ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(pDrvData->IPCs)); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { pDrvData->IPCs[ipcnum].bIsEnabled = false; if (pDrvData->IPCs[ipcnum].bIsHere == true) { wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue); } } mutex_unlock(&mwave_mutex); } break; default: return -ENOTTY; } /* switch */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval); return retval; } static ssize_t mwave_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) { PRINTK_5(TRACE_MWAVE, "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n", file, buf, count, ppos); return -EINVAL; } static ssize_t mwave_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos) { PRINTK_5(TRACE_MWAVE, "mwavedd::mwave_write entry file %p, buf %p," " count %zx ppos %p\n", file, buf, count, ppos); return -EINVAL; } static int register_serial_portandirq(unsigned int port, int irq) { struct uart_8250_port uart; switch ( port ) { case 0x3f8: case 0x2f8: case 0x3e8: case 0x2e8: /* OK */ break; default: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::register_serial_portandirq:" " Error: Illegal port %x\n", port ); return -1; } /* switch */ /* port is okay */ switch ( irq ) { case 3: case 4: case 5: case 7: /* OK */ break; default: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::register_serial_portandirq:" " Error: Illegal irq %x\n", irq ); return -1; } /* switch */ /* irq is okay */ memset(&uart, 0, sizeof(uart)); uart.port.uartclk = 1843200; uart.port.iobase = port; uart.port.irq = irq; uart.port.iotype = UPIO_PORT; uart.port.flags = UPF_SHARE_IRQ; return serial8250_register_8250_port(&uart); } static const struct file_operations mwave_fops = { .owner = THIS_MODULE, .read = mwave_read, .write = mwave_write, .unlocked_ioctl = mwave_ioctl, .open = mwave_open, .release = mwave_close, .llseek = default_llseek, }; static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops }; #if 0 /* totally b0rked */ /* * sysfs support <[email protected]> */ struct device mwave_device; /* Prevent code redundancy, create a macro for mwave_show_* functions. */ #define mwave_show_function(attr_name, format_string, field) \ static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \ { \ DSP_3780I_CONFIG_SETTINGS *pSettings = \ &mwave_s_mdd.rBDData.rDspSettings; \ return sprintf(buf, format_string, pSettings->field); \ } /* All of our attributes are read attributes. 
*/ #define mwave_dev_rd_attr(attr_name, format_string, field) \ mwave_show_function(attr_name, format_string, field) \ static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL) mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma); mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq); mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO); mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq); mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO); static struct device_attribute * const mwave_dev_attrs[] = { &dev_attr_3780i_dma, &dev_attr_3780i_irq, &dev_attr_3780i_io, &dev_attr_uart_irq, &dev_attr_uart_io, }; #endif /* * mwave_init is called on module load * * mwave_exit is called on module unload * mwave_exit is also used to clean up after an aborted mwave_init */ static void mwave_exit(void) { pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n"); #if 0 for (i = 0; i < pDrvData->nr_registered_attrs; i++) device_remove_file(&mwave_device, mwave_dev_attrs[i]); pDrvData->nr_registered_attrs = 0; if (pDrvData->device_registered) { device_unregister(&mwave_device); pDrvData->device_registered = false; } #endif if ( pDrvData->sLine >= 0 ) { serial8250_unregister_port(pDrvData->sLine); } if (pDrvData->bMwaveDevRegistered) { misc_deregister(&mwave_misc_dev); } if (pDrvData->bDSPEnabled) { tp3780I_DisableDSP(&pDrvData->rBDData); } if (pDrvData->bResourcesClaimed) { tp3780I_ReleaseResources(&pDrvData->rBDData); } if (pDrvData->bBDInitialized) { tp3780I_Cleanup(&pDrvData->rBDData); } PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n"); } module_exit(mwave_exit); static int __init mwave_init(void) { int i; int retval = 0; pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n"); memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA)); pDrvData->bBDInitialized = false; pDrvData->bResourcesClaimed = false; pDrvData->bDSPEnabled = false; pDrvData->bDSPReset = false; pDrvData->bMwaveDevRegistered = false; pDrvData->sLine = -1; for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) { pDrvData->IPCs[i].bIsEnabled = false; pDrvData->IPCs[i].bIsHere = false; pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */ init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue); } retval = tp3780I_InitializeBoardData(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_InitializeBoardData" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_init: Error:" " Failed to initialize board data\n"); goto cleanup_error; } pDrvData->bBDInitialized = true; retval = tp3780I_CalcResources(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_CalcResources" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to calculate resources\n"); goto cleanup_error; } retval = tp3780I_ClaimResources(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_ClaimResources" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to claim resources\n"); goto cleanup_error; } pDrvData->bResourcesClaimed = true; retval = tp3780I_EnableDSP(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_EnableDSP" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to enable DSP\n"); goto cleanup_error; } pDrvData->bDSPEnabled = true; if (misc_register(&mwave_misc_dev) < 0) { 
PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to register misc device\n"); goto cleanup_error; } pDrvData->bMwaveDevRegistered = true; pDrvData->sLine = register_serial_portandirq( pDrvData->rBDData.rDspSettings.usUartBaseIO, pDrvData->rBDData.rDspSettings.usUartIrq ); if (pDrvData->sLine < 0) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to register serial driver\n"); goto cleanup_error; } /* uart is registered */ #if 0 /* sysfs */ memset(&mwave_device, 0, sizeof (struct device)); dev_set_name(&mwave_device, "mwave"); if (device_register(&mwave_device)) goto cleanup_error; pDrvData->device_registered = true; for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) { if(device_create_file(&mwave_device, mwave_dev_attrs[i])) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to create sysfs file %s\n", mwave_dev_attrs[i]->attr.name); goto cleanup_error; } pDrvData->nr_registered_attrs++; } #endif /* SUCCESS! */ return 0; cleanup_error: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_init: Error:" " Failed to initialize\n"); mwave_exit(); /* clean up */ return -EIO; } module_init(mwave_init);
linux-master
drivers/char/mwave/mwavedd.c
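/*
 * Illustrative sketch (not part of the driver): how a user-space tool might
 * drive the /dev/mwave character device handled by mwave_ioctl() above.
 * It assumes the IOCTL_MW_* request codes are visible to user space (for
 * example through a copy of the driver's ioctl definitions); the header
 * name below is a hypothetical placeholder for that copy.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "mwave_ioctl.h"	/* hypothetical user-space copy of IOCTL_MW_* */

int main(void)
{
	int fd = open("/dev/mwave", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mwave");
		return 1;
	}

	/* Reset the DSP, then start it, mirroring IOCTL_MW_RESET / IOCTL_MW_RUN. */
	if (ioctl(fd, IOCTL_MW_RESET, 0) != 0)
		perror("IOCTL_MW_RESET");
	else if (ioctl(fd, IOCTL_MW_RUN, 0) != 0)
		perror("IOCTL_MW_RUN");

	close(fd);
	return 0;
}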
/* * * tp3780i.c -- board driver for 3780i on ThinkPads * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <asm/io.h> #include "smapi.h" #include "mwavedd.h" #include "tp3780i.h" #include "3780i.h" #include "mwavepub.h" static unsigned short s_ausThinkpadIrqToField[16] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004, 0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 }; static unsigned short s_ausThinkpadDmaToField[8] = { 0x0001, 0x0002, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0003, 0x0004 }; static unsigned short s_numIrqs = 16, s_numDmas = 8; static void EnableSRAM(THINKPAD_BD_DATA * pBDData) { DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData; DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable; DSP_GPIO_MODE_15_8 rGpioMode; PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n"); MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8); rGpioMode.GpioMode10 = 0; WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode)); MKWORD(rGpioDriverEnable) = 0; rGpioDriverEnable.Enable10 = true; rGpioDriverEnable.Mask10 = true; WriteMsaCfg(DSP_GpioDriverEnable_15_8, MKWORD(rGpioDriverEnable)); MKWORD(rGpioOutputData) = 0; rGpioOutputData.Latch10 = 0; rGpioOutputData.Mask10 = true; WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData)); PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n"); } static 
irqreturn_t UartInterrupt(int irq, void *dev_id) { PRINTK_3(TRACE_TP3780I, "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id); return IRQ_HANDLED; } static irqreturn_t DspInterrupt(int irq, void *dev_id) { pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; unsigned short usIPCSource = 0, usIsolationMask, usPCNum; PRINTK_3(TRACE_TP3780I, "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id); if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) { PRINTK_2(TRACE_TP3780I, "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n", usIPCSource); usIsolationMask = 1; for (usPCNum = 1; usPCNum <= 16; usPCNum++) { if (usIPCSource & usIsolationMask) { usIPCSource &= ~usIsolationMask; PRINTK_3(TRACE_TP3780I, "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n", usPCNum, usIPCSource); if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) { pDrvData->IPCs[usPCNum - 1].usIntCount = 1; } PRINTK_2(TRACE_TP3780I, "tp3780i::DspInterrupt usIntCount %x\n", pDrvData->IPCs[usPCNum - 1].usIntCount); if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) { PRINTK_2(TRACE_TP3780I, "tp3780i::DspInterrupt, waking up usPCNum %x\n", usPCNum - 1); wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue); } else { PRINTK_2(TRACE_TP3780I, "tp3780i::DspInterrupt, no one waiting for IPC %x\n", usPCNum - 1); } } if (usIPCSource == 0) break; /* try next IPC */ usIsolationMask = usIsolationMask << 1; } } else { PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n"); } PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n"); return IRQ_HANDLED; } int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData); pBDData->bDSPEnabled = false; pSettings->bInterruptClaimed = false; retval = smapi_init(); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n"); } else { if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) { retval = smapi_set_DSP_cfg(); } } PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval); return retval; } void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData) { PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData); } int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData) { SMAPI_DSP_SETTINGS rSmapiInfo; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData); if (smapi_query_DSP_cfg(&rSmapiInfo)) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n"); return -EIO; } /* Sanity check */ if ( ( rSmapiInfo.usDspIRQ == 0 ) || ( rSmapiInfo.usDspBaseIO == 0 ) || ( rSmapiInfo.usUartIRQ == 0 ) || ( rSmapiInfo.usUartBaseIO == 0 ) ) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. 
Aborting.\n"); return -EIO; } pSettings->bDSPEnabled = (rSmapiInfo.bDSPEnabled && rSmapiInfo.bDSPPresent); pSettings->bModemEnabled = rSmapiInfo.bModemEnabled; pSettings->usDspIrq = rSmapiInfo.usDspIRQ; pSettings->usDspDma = rSmapiInfo.usDspDMA; pSettings->usDspBaseIO = rSmapiInfo.usDspBaseIO; pSettings->usUartIrq = rSmapiInfo.usUartIRQ; pSettings->usUartBaseIO = rSmapiInfo.usUartBaseIO; pSettings->uDStoreSize = TP_ABILITIES_DATA_SIZE; pSettings->uIStoreSize = TP_ABILITIES_INST_SIZE; pSettings->uIps = TP_ABILITIES_INTS_PER_SEC; if (pSettings->bDSPEnabled && pSettings->bModemEnabled && pSettings->usDspIrq == pSettings->usUartIrq) { pBDData->bShareDspIrq = pBDData->bShareUartIrq = 1; } else { pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0; } PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n"); return 0; } int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; struct resource *pres; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData); pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i"); if ( pres == NULL ) retval = -EIO; if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO); retval = -EIO; } PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval); return retval; } int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData); release_region(pSettings->usDspBaseIO & (~3), 16); if (pSettings->bInterruptClaimed) { free_irq(pSettings->usDspIrq, NULL); pSettings->bInterruptClaimed = false; } PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval); return retval; } int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData) { DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; bool bDSPPoweredUp = false, bInterruptAllocated = false; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData); if (pBDData->bDSPEnabled) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n"); goto exit_cleanup; } if (!pSettings->bDSPEnabled) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n"); goto exit_cleanup; } if ( (pSettings->usDspIrq >= s_numIrqs) || (pSettings->usDspDma >= s_numDmas) || (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF) || (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF) ) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq); goto exit_cleanup; } if ( ((pSettings->usDspBaseIO & 0xF00F) != 0) || (pSettings->usDspBaseIO & 0x0FF0) == 0 ) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO); goto exit_cleanup; } if (pSettings->bModemEnabled) { if ( pSettings->usUartIrq >= s_numIrqs || s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF ) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq); goto exit_cleanup; } switch (pSettings->usUartBaseIO) { case 0x03F8: case 0x02F8: case 0x03E8: case 0x02E8: break; default: PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO); goto 
exit_cleanup; } } pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = true; pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = true; if (pBDData->bShareDspIrq) { pSettings->bDspIrqActiveLow = false; } if (pBDData->bShareUartIrq) { pSettings->bUartIrqActiveLow = false; } pSettings->usNumTransfers = TP_CFG_NumTransfers; pSettings->usReRequest = TP_CFG_RerequestTimer; pSettings->bEnableMEMCS16 = TP_CFG_MEMCS16; pSettings->usIsaMemCmdWidth = TP_CFG_IsaMemCmdWidth; pSettings->bGateIOCHRDY = TP_CFG_GateIOCHRDY; pSettings->bEnablePwrMgmt = TP_CFG_EnablePwrMgmt; pSettings->usHBusTimerLoadValue = TP_CFG_HBusTimerValue; pSettings->bDisableLBusTimeout = TP_CFG_DisableLBusTimeout; pSettings->usN_Divisor = TP_CFG_N_Divisor; pSettings->usM_Multiplier = TP_CFG_M_Multiplier; pSettings->bPllBypass = TP_CFG_PllBypass; pSettings->usChipletEnable = TP_CFG_ChipletEnable; if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq); goto exit_cleanup; } else { /* no conflict just release */ free_irq(pSettings->usUartIrq, NULL); } if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) { PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq); goto exit_cleanup; } else { PRINTK_3(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n", pSettings->usDspIrq, pBDData->bShareDspIrq); bInterruptAllocated = true; pSettings->bInterruptClaimed = true; } smapi_set_DSP_power_state(false); if (smapi_set_DSP_power_state(true)) { PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n"); goto exit_cleanup; } else { bDSPPoweredUp = true; } if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) { PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n"); goto exit_cleanup; } EnableSRAM(pBDData); pBDData->bDSPEnabled = true; PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n"); return 0; exit_cleanup: PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n"); if (bDSPPoweredUp) smapi_set_DSP_power_state(false); if (bInterruptAllocated) { free_irq(pSettings->usDspIrq, NULL); pSettings->bInterruptClaimed = false; } return -EIO; } int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData); if (pBDData->bDSPEnabled) { dsp3780I_DisableDSP(&pBDData->rDspSettings); if (pSettings->bInterruptClaimed) { free_irq(pSettings->usDspIrq, NULL); pSettings->bInterruptClaimed = false; } smapi_set_DSP_power_state(false); pBDData->bDSPEnabled = false; } PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval); return retval; } int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n", pBDData); if (dsp3780I_Reset(pSettings) == 0) { EnableSRAM(pBDData); } else { retval = -EIO; } PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval); return retval; } int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData); if 
(dsp3780I_Run(pSettings) == 0) { // @BUG @TBD EnableSRAM(pBDData); } else { retval = -EIO; } PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval); return retval; } int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities) { PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData); memset(pAbilities, 0, sizeof(*pAbilities)); /* fill out standard constant fields */ pAbilities->instr_per_sec = pBDData->rDspSettings.uIps; pAbilities->data_size = pBDData->rDspSettings.uDStoreSize; pAbilities->inst_size = pBDData->rDspSettings.uIStoreSize; pAbilities->bus_dma_bw = pBDData->rDspSettings.uDmaBandwidth; /* fill out dynamically determined fields */ pAbilities->component_list[0] = 0x00010000 | MW_ADC_MASK; pAbilities->component_list[1] = 0x00010000 | MW_ACI_MASK; pAbilities->component_list[2] = 0x00010000 | MW_AIC1_MASK; pAbilities->component_list[3] = 0x00010000 | MW_AIC2_MASK; pAbilities->component_list[4] = 0x00010000 | MW_CDDAC_MASK; pAbilities->component_list[5] = 0x00010000 | MW_MIDI_MASK; pAbilities->component_list[6] = 0x00010000 | MW_UART_MASK; pAbilities->component_count = 7; /* Fill out Mwave OS and BIOS task names */ memcpy(pAbilities->mwave_os_name, TP_ABILITIES_MWAVEOS_NAME, sizeof(TP_ABILITIES_MWAVEOS_NAME)); memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME, sizeof(TP_ABILITIES_BIOSTASK_NAME)); PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n"); return 0; } int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, void __user *pvBuffer, unsigned int uCount, unsigned long ulDSPAddr) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; bool bRC = 0; PRINTK_6(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n", pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr); if (pBDData->bDSPEnabled) { switch (uOpcode) { case IOCTL_MW_READ_DATA: bRC = dsp3780I_ReadDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); break; case IOCTL_MW_READCLEAR_DATA: bRC = dsp3780I_ReadAndClearDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); break; case IOCTL_MW_WRITE_DATA: bRC = dsp3780I_WriteDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); break; } } retval = (bRC) ? -EIO : 0; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval); return retval; } int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode, void __user *pvBuffer, unsigned int uCount, unsigned long ulDSPAddr) { int retval = 0; DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings; unsigned short usDspBaseIO = pSettings->usDspBaseIO; bool bRC = 0; PRINTK_6(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n", pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr); if (pBDData->bDSPEnabled) { switch (uOpcode) { case IOCTL_MW_READ_INST: bRC = dsp3780I_ReadIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); break; case IOCTL_MW_WRITE_INST: bRC = dsp3780I_WriteIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr); break; } } retval = (bRC) ? -EIO : 0; PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval); return retval; }
linux-master
drivers/char/mwave/tp3780i.c
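/*
 * Illustrative sketch (not driver code): DspInterrupt() above walks the
 * pending-IPC word one bit at a time with an isolation mask, clearing each
 * bit as it is dispatched and stopping once nothing is left pending.  The
 * standalone helper below models only that loop; the callback and its
 * argument are hypothetical stand-ins for the per-IPC wake-up the driver
 * performs on IPCs[usPCNum - 1].
 */
#include <stdio.h>

typedef void (*ipc_handler_t)(unsigned int ipcnum);

/* Dispatch every set bit in 'pending' (bit 0 == IPC 0), as the ISR does. */
static void dispatch_ipc_sources(unsigned short pending, ipc_handler_t handler)
{
	unsigned short isolation_mask = 1;
	unsigned int pcnum;

	for (pcnum = 1; pcnum <= 16; pcnum++) {
		if (pending & isolation_mask) {
			pending &= ~isolation_mask;
			handler(pcnum - 1);	/* driver wakes IPCs[pcnum - 1] here */
		}
		if (pending == 0)
			break;
		isolation_mask <<= 1;
	}
}

static void print_ipc(unsigned int ipcnum)
{
	printf("IPC %u pending\n", ipcnum);
}

int main(void)
{
	dispatch_ipc_sources(0x0005, print_ipc);	/* reports IPCs 0 and 2 */
	return 0;
}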
/* * * smapi.c -- SMAPI interface routines * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/kernel.h> #include <linux/mc146818rtc.h> /* CMOS defines */ #include "smapi.h" #include "mwavedd.h" static unsigned short g_usSmapiPort = 0; static int smapi_request(unsigned short inBX, unsigned short inCX, unsigned short inDI, unsigned short inSI, unsigned short *outAX, unsigned short *outBX, unsigned short *outCX, unsigned short *outDX, unsigned short *outDI, unsigned short *outSI) { unsigned short myoutAX = 2, *pmyoutAX = &myoutAX; unsigned short myoutBX = 3, *pmyoutBX = &myoutBX; unsigned short myoutCX = 4, *pmyoutCX = &myoutCX; unsigned short myoutDX = 5, *pmyoutDX = &myoutDX; unsigned short myoutDI = 6, *pmyoutDI = &myoutDI; unsigned short myoutSI = 7, *pmyoutSI = &myoutSI; unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK; unsigned int inBXCX = (inBX << 16) | inCX; unsigned int inDISI = (inDI << 16) | inSI; int retval = 0; PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n", inBX, inCX, inDI, inSI); __asm__ __volatile__("movw $0x5380,%%ax\n\t" "movl %7,%%ebx\n\t" "shrl $16, %%ebx\n\t" "movw %7,%%cx\n\t" "movl %8,%%edi\n\t" "shrl $16,%%edi\n\t" "movw %8,%%si\n\t" "movw %9,%%dx\n\t" "out %%al,%%dx\n\t" "out %%al,$0x4F\n\t" "cmpb $0x53,%%ah\n\t" "je 2f\n\t" "1:\n\t" "orb %%ah,%%ah\n\t" "jnz 2f\n\t" "movw %%ax,%0\n\t" "movw %%bx,%1\n\t" "movw %%cx,%2\n\t" "movw %%dx,%3\n\t" "movw %%di,%4\n\t" "movw %%si,%5\n\t" "movw $1,%6\n\t" "2:\n\t":"=m"(*(unsigned short *) pmyoutAX), "=m"(*(unsigned short *) pmyoutBX), 
"=m"(*(unsigned short *) pmyoutCX), "=m"(*(unsigned short *) pmyoutDX), "=m"(*(unsigned short *) pmyoutDI), "=m"(*(unsigned short *) pmyoutSI), "=m"(*(unsigned short *) pusSmapiOK) :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort) :"%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi"); PRINTK_8(TRACE_SMAPI, "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n", myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI, usSmapiOK); *outAX = myoutAX; *outBX = myoutBX; *outCX = myoutCX; *outDX = myoutDX; *outDI = myoutDI; *outSI = myoutSI; retval = (usSmapiOK == 1) ? 0 : -EIO; PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval); return retval; } int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings) { int bRC; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; static const unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; static const unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n"); bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n"); return bRC; } PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); pSettings->bDSPPresent = ((usBX & 0x0100) != 0); pSettings->bDSPEnabled = ((usCX & 0x0001) != 0); pSettings->usDspIRQ = usSI & 0x00FF; pSettings->usDspDMA = (usSI & 0xFF00) >> 8; if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) { pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF]; } else { pSettings->usDspBaseIO = 0; } PRINTK_6(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n", pSettings->bDSPPresent, pSettings->bDSPEnabled, pSettings->usDspIRQ, pSettings->usDspDMA, pSettings->usDspBaseIO); /* check for illegal values */ if ( pSettings->usDspBaseIO == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n"); if ( pSettings->usDspIRQ == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n"); bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) { PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. 
Aborting.\n"); return bRC; } PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n"); pSettings->bModemEnabled = ((usCX & 0x0001) != 0); pSettings->usUartIRQ = usSI & 0x000F; if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) { pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8]; } else { pSettings->usUartBaseIO = 0; } PRINTK_4(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n", pSettings->bModemEnabled, pSettings->usUartIRQ, pSettings->usUartBaseIO); /* check for illegal values */ if ( pSettings->usUartBaseIO == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n"); if ( pSettings->usUartIRQ == 0 ) PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n"); PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC); return bRC; } int smapi_set_DSP_cfg(void) { int bRC = -EIO; int i; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; static const unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 }; static const unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 }; static const unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 }; static const unsigned short ausUartIrqs[] = { 3, 4 }; unsigned short dspio_index = 0, uartio_index = 0; PRINTK_5(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n", mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io); if (mwave_3780i_io) { for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) { if (mwave_3780i_io == ausDspBases[i]) break; } if (i == ARRAY_SIZE(ausDspBases)) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io); return bRC; } dspio_index = i; } if (mwave_3780i_irq) { for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) { if (mwave_3780i_irq == ausDspIrqs[i]) break; } if (i == ARRAY_SIZE(ausDspIrqs)) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq); return bRC; } } if (mwave_uart_io) { for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) { if (mwave_uart_io == ausUartBases[i]) break; } if (i == ARRAY_SIZE(ausUartBases)) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io); return bRC; } uartio_index = i; } if (mwave_uart_irq) { for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) { if (mwave_uart_irq == ausUartIrqs[i]) break; } if (i == ARRAY_SIZE(ausUartIrqs)) { PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. 
Aborting.\n", mwave_uart_irq); return bRC; } } if (mwave_uart_irq || mwave_uart_io) { /* Check serial port A */ bRC = smapi_request(0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if (usBX & 0x0100) { /* serial port A is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n"); bRC = smapi_request(0x1403, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI >> 8) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n"); bRC = smapi_request (0x1403, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request (0x1402, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } /* Check serial port B */ bRC = smapi_request(0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if (usBX & 0x0100) { /* serial port B is present */ if (usCX & 1) { /* serial port is enabled */ if ((usSI & 0xFF) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); bRC = smapi_request(0x1405, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI >> 8) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1 (TRACE_SMAPI, 
"smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n"); bRC = smapi_request (0x1405, 0x0100, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request (0x1404, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } /* Check IR port */ bRC = smapi_request(0x1700, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* bRC == 0 */ if ((usCX & 0xff) != 0xff) { /* IR port not disabled */ if ((usCX & 0xff) == mwave_uart_irq) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); bRC = smapi_request(0x1701, 0x0100, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1700, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1705, 0x01ff, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } else { if ((usSI & 0xff) == uartio_index) { #ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); #else PRINTK_3(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]); #endif #ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n"); bRC = smapi_request(0x1701, 0x0100, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1700, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1705, 0x01ff, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1704, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; #else goto exit_conflict; #endif } } } } bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; if (mwave_3780i_io) { usDI = dspio_index; } if (mwave_3780i_irq) { usSI = (usSI & 0xff00) | mwave_3780i_irq; } bRC = smapi_request(0x1803, 0x0101, usDI, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; if (mwave_uart_io) { usSI = (usSI & 0x00ff) | (uartio_index << 8); } if (mwave_uart_irq) { usSI = (usSI & 0xff00) | mwave_uart_irq; } bRC = smapi_request(0x1805, 0x0101, 0, usSI, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto 
exit_smapi_request_error; bRC = smapi_request(0x1802, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; bRC = smapi_request(0x1804, 0x0000, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC) goto exit_smapi_request_error; /* normal exit: */ PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n"); return 0; exit_conflict: /* Message has already been printed */ return -EIO; exit_smapi_request_error: PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC); return bRC; } int smapi_set_DSP_power_state(bool bOn) { int bRC; unsigned short usAX, usBX, usCX, usDX, usDI, usSI; unsigned short usPowerFunction; PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn); usPowerFunction = (bOn) ? 1 : 0; bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC); return bRC; } #if 0 static int SmapiQuerySystemID(void) { int bRC = -EIO; unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff, usDX = 0xffff, usDI = 0xffff, usSI = 0xffff; printk("smapi::SmapiQUerySystemID entry\n"); bRC = smapi_request(0x0000, 0, 0, 0, &usAX, &usBX, &usCX, &usDX, &usDI, &usSI); if (bRC == 0) { printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n", usAX, usBX, usCX, usDX, usDI, usSI); } else { printk("smapi::SmapiQuerySystemID smapi_request error\n"); } return bRC; } #endif /* 0 */ int smapi_init(void) { int retval = -EIO; unsigned short usSmapiID = 0; unsigned long flags; PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n"); spin_lock_irqsave(&rtc_lock, flags); usSmapiID = CMOS_READ(0x7C); usSmapiID |= (CMOS_READ(0x7D) << 8); spin_unlock_irqrestore(&rtc_lock, flags); PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID); if (usSmapiID == 0x5349) { spin_lock_irqsave(&rtc_lock, flags); g_usSmapiPort = CMOS_READ(0x7E); g_usSmapiPort |= (CMOS_READ(0x7F) << 8); spin_unlock_irqrestore(&rtc_lock, flags); if (g_usSmapiPort == 0) { PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n"); } else { PRINTK_2(TRACE_SMAPI, "smapi::smapi_init, exit true g_usSmapiPort %x\n", g_usSmapiPort); retval = 0; //SmapiQuerySystemID(); } } else { PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n"); retval = -ENXIO; } return retval; }
linux-master
drivers/char/mwave/smapi.c
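The register decoding in smapi_query_DSP_cfg() above can be exercised outside the kernel. The sketch below reuses only the bit masks and the DSP base-address table from the driver; the sample register values are hypothetical stand-ins for what the SMAPI BIOS call would return.

#include <stdint.h>
#include <stdio.h>

/* I/O base table copied from the driver above. */
static const uint16_t aus_dsp_bases[] = {
	0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0
};
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* Hypothetical register values, as the BIOS call might return them. */
	uint16_t usBX = 0x0100;	/* bit 8: DSP present */
	uint16_t usCX = 0x0001;	/* bit 0: DSP enabled */
	uint16_t usSI = 0x070A;	/* low byte: IRQ, high byte: DMA channel */
	uint16_t usDI = 0x0003;	/* low byte: index into aus_dsp_bases[] */

	int dsp_present = (usBX & 0x0100) != 0;
	int dsp_enabled = (usCX & 0x0001) != 0;
	unsigned irq = usSI & 0x00FF;
	unsigned dma = (usSI & 0xFF00) >> 8;
	unsigned base_io = 0;

	/* Out-of-range indices map to 0, just as in the driver. */
	if ((usDI & 0x00FF) < ARRAY_SIZE(aus_dsp_bases))
		base_io = aus_dsp_bases[usDI & 0x00FF];

	printf("present=%d enabled=%d irq=%u dma=%u base=0x%04X\n",
	       dsp_present, dsp_enabled, irq, dma, base_io);
	return 0;
}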
/***************************************************************************** * * Author: Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * (c) Copyright 2003-2008 Xilinx Inc. * All rights reserved. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ #include "buffer_icap.h" /* Indicates how many bytes will fit in a buffer. (1 BRAM) */ #define XHI_MAX_BUFFER_BYTES 2048 #define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2) /* File access and error constants */ #define XHI_DEVICE_READ_ERROR -1 #define XHI_DEVICE_WRITE_ERROR -2 #define XHI_BUFFER_OVERFLOW_ERROR -3 #define XHI_DEVICE_READ 0x1 #define XHI_DEVICE_WRITE 0x0 /* Constants for checking transfer status */ #define XHI_CYCLE_DONE 0 #define XHI_CYCLE_EXECUTING 1 /* buffer_icap register offsets */ /* Size of transfer, read & write */ #define XHI_SIZE_REG_OFFSET 0x800L /* offset into bram, read & write */ #define XHI_BRAM_OFFSET_REG_OFFSET 0x804L /* Read not Configure, direction of transfer. Write only */ #define XHI_RNC_REG_OFFSET 0x808L /* Indicates transfer complete. Read only */ #define XHI_STATUS_REG_OFFSET 0x80CL /* Constants for setting the RNC register */ #define XHI_CONFIGURE 0x0UL #define XHI_READBACK 0x1UL /* Constants for the Done register */ #define XHI_NOT_FINISHED 0x0UL #define XHI_FINISHED 0x1UL #define XHI_BUFFER_START 0 /** * buffer_icap_get_status - Get the contents of the status register. * @drvdata: a pointer to the drvdata. * * The status register contains the ICAP status and the done bit. * * D8 - cfgerr * D7 - dalign * D6 - rip * D5 - in_abort_l * D4 - Always 1 * D3 - Always 1 * D2 - Always 1 * D1 - Always 1 * D0 - Done bit **/ u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata) { return in_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET); } /** * buffer_icap_get_bram - Reads data from the storage buffer bram. * @base_address: contains the base address of the component. * @offset: The word offset from which the data should be read. * * A bram is used as a configuration memory cache. One frame of data can * be stored in this "storage buffer". 
**/ static inline u32 buffer_icap_get_bram(void __iomem *base_address, u32 offset) { return in_be32(base_address + (offset << 2)); } /** * buffer_icap_busy - Return true if the icap device is busy * @base_address: is the base address of the device * * The queries the low order bit of the status register, which * indicates whether the current configuration or readback operation * has completed. **/ static inline bool buffer_icap_busy(void __iomem *base_address) { u32 status = in_be32(base_address + XHI_STATUS_REG_OFFSET); return (status & 1) == XHI_NOT_FINISHED; } /** * buffer_icap_set_size - Set the size register. * @base_address: is the base address of the device * @data: The size in bytes. * * The size register holds the number of 8 bit bytes to transfer between * bram and the icap (or icap to bram). **/ static inline void buffer_icap_set_size(void __iomem *base_address, u32 data) { out_be32(base_address + XHI_SIZE_REG_OFFSET, data); } /** * buffer_icap_set_offset - Set the bram offset register. * @base_address: contains the base address of the device. * @data: is the value to be written to the data register. * * The bram offset register holds the starting bram address to transfer * data from during configuration or write data to during readback. **/ static inline void buffer_icap_set_offset(void __iomem *base_address, u32 data) { out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data); } /** * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register. * @base_address: contains the base address of the device. * @data: is the value to be written to the data register. * * The RNC register determines the direction of the data transfer. It * controls whether a configuration or readback take place. Writing to * this register initiates the transfer. A value of 1 initiates a * readback while writing a value of 0 initiates a configuration. **/ static inline void buffer_icap_set_rnc(void __iomem *base_address, u32 data) { out_be32(base_address + XHI_RNC_REG_OFFSET, data); } /** * buffer_icap_set_bram - Write data to the storage buffer bram. * @base_address: contains the base address of the component. * @offset: The word offset at which the data should be written. * @data: The value to be written to the bram offset. * * A bram is used as a configuration memory cache. One frame of data can * be stored in this "storage buffer". **/ static inline void buffer_icap_set_bram(void __iomem *base_address, u32 offset, u32 data) { out_be32(base_address + (offset << 2), data); } /** * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer. * @drvdata: a pointer to the drvdata. * @offset: The storage buffer start address. * @count: The number of words (32 bit) to read from the * device (ICAP). **/ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, u32 offset, u32 count) { s32 retries = 0; void __iomem *base_address = drvdata->base_address; if (buffer_icap_busy(base_address)) return -EBUSY; if ((offset + count) > XHI_MAX_BUFFER_INTS) return -EINVAL; /* setSize count*4 to get bytes. */ buffer_icap_set_size(base_address, (count << 2)); buffer_icap_set_offset(base_address, offset); buffer_icap_set_rnc(base_address, XHI_READBACK); while (buffer_icap_busy(base_address)) { retries++; if (retries > XHI_MAX_RETRIES) return -EBUSY; } return 0; }; /** * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer. * @drvdata: a pointer to the drvdata. * @offset: The storage buffer start address. 
* @count: The number of words (32 bit) to read from the * device (ICAP). **/ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, u32 offset, u32 count) { s32 retries = 0; void __iomem *base_address = drvdata->base_address; if (buffer_icap_busy(base_address)) return -EBUSY; if ((offset + count) > XHI_MAX_BUFFER_INTS) return -EINVAL; /* setSize count*4 to get bytes. */ buffer_icap_set_size(base_address, count << 2); buffer_icap_set_offset(base_address, offset); buffer_icap_set_rnc(base_address, XHI_CONFIGURE); while (buffer_icap_busy(base_address)) { retries++; if (retries > XHI_MAX_RETRIES) return -EBUSY; } return 0; }; /** * buffer_icap_reset - Reset the logic of the icap device. * @drvdata: a pointer to the drvdata. * * Writing to the status register resets the ICAP logic in an internal * version of the core. For the version of the core published in EDK, * this is a noop. **/ void buffer_icap_reset(struct hwicap_drvdata *drvdata) { out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE); } /** * buffer_icap_set_configuration - Load a partial bitstream from system memory. * @drvdata: a pointer to the drvdata. * @data: Kernel address of the partial bitstream. * @size: the size of the partial bitstream in 32 bit words. **/ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, u32 size) { int status; s32 buffer_count = 0; bool dirty = false; u32 i; void __iomem *base_address = drvdata->base_address; /* Loop through all the data */ for (i = 0, buffer_count = 0; i < size; i++) { /* Copy data to bram */ buffer_icap_set_bram(base_address, buffer_count, data[i]); dirty = true; if (buffer_count < XHI_MAX_BUFFER_INTS - 1) { buffer_count++; continue; } /* Write data to ICAP */ status = buffer_icap_device_write( drvdata, XHI_BUFFER_START, XHI_MAX_BUFFER_INTS); if (status != 0) { /* abort. */ buffer_icap_reset(drvdata); return status; } buffer_count = 0; dirty = false; } /* Write unwritten data to ICAP */ if (dirty) { /* Write data to ICAP */ status = buffer_icap_device_write(drvdata, XHI_BUFFER_START, buffer_count); if (status != 0) { /* abort. */ buffer_icap_reset(drvdata); } return status; } return 0; }; /** * buffer_icap_get_configuration - Read configuration data from the device. * @drvdata: a pointer to the drvdata. * @data: Address of the data representing the partial bitstream * @size: the size of the partial bitstream in 32 bit words. **/ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, u32 size) { int status; s32 buffer_count = 0; u32 i; void __iomem *base_address = drvdata->base_address; /* Loop through all the data */ for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) { if (buffer_count == XHI_MAX_BUFFER_INTS) { u32 words_remaining = size - i; u32 words_to_read = words_remaining < XHI_MAX_BUFFER_INTS ? words_remaining : XHI_MAX_BUFFER_INTS; /* Read data from ICAP */ status = buffer_icap_device_read( drvdata, XHI_BUFFER_START, words_to_read); if (status != 0) { /* abort. */ buffer_icap_reset(drvdata); return status; } buffer_count = 0; } /* Copy data from bram */ data[i] = buffer_icap_get_bram(base_address, buffer_count); buffer_count++; } return 0; };
linux-master
drivers/char/xilinx_hwicap/buffer_icap.c
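buffer_icap_set_configuration() above streams an arbitrarily long bitstream through a 2048-byte BRAM staging buffer, one full chunk at a time. The user-space sketch below mirrors that chunking loop; flush_to_icap() is a hypothetical stand-in for buffer_icap_device_write(), and the buffer size is reproduced from XHI_MAX_BUFFER_INTS in the driver.

#include <stdint.h>
#include <stdio.h>

#define MAX_BUFFER_INTS (2048 >> 2)	/* 2048 bytes of BRAM = 512 words */

/* Hypothetical stand-in for buffer_icap_device_write(). */
static int flush_to_icap(const uint32_t *buf, uint32_t words)
{
	(void)buf;
	printf("flush %u words\n", words);
	return 0;
}

static int set_configuration(const uint32_t *data, uint32_t size)
{
	uint32_t bram[MAX_BUFFER_INTS];
	uint32_t count = 0;
	int dirty = 0;

	for (uint32_t i = 0; i < size; i++) {
		bram[count] = data[i];	/* stage one word in the buffer */
		dirty = 1;
		if (count < MAX_BUFFER_INTS - 1) {
			count++;
			continue;
		}
		/* Buffer full: push a complete 512-word chunk. */
		if (flush_to_icap(bram, MAX_BUFFER_INTS))
			return -1;
		count = 0;
		dirty = 0;
	}
	/* Push whatever is left over (a partial chunk). */
	if (dirty)
		return flush_to_icap(bram, count);
	return 0;
}

int main(void)
{
	uint32_t bitstream[1200] = { 0 };	/* dummy data */
	return set_configuration(bitstream, 1200);
}

The chunk size matters because the BRAM cache can hold only one frame of data at a time, so larger transfers have to be broken up before each device write.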
/***************************************************************************** * * Author: Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * (c) Copyright 2007-2008 Xilinx Inc. * All rights reserved. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ #include "fifo_icap.h" /* Register offsets for the XHwIcap device. */ #define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */ #define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */ #define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */ #define XHI_WF_OFFSET 0x100 /* Write FIFO */ #define XHI_RF_OFFSET 0x104 /* Read FIFO */ #define XHI_SZ_OFFSET 0x108 /* Size Register */ #define XHI_CR_OFFSET 0x10C /* Control Register */ #define XHI_SR_OFFSET 0x110 /* Status Register */ #define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */ #define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */ /* Device Global Interrupt Enable Register (GIER) bit definitions */ #define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */ /** * HwIcap Device Interrupt Status/Enable Registers * * Interrupt Status Register (IPISR) : This register holds the * interrupt status flags for the device. These bits are toggle on * write. * * Interrupt Enable Register (IPIER) : This register is used to enable * interrupt sources for the device. * Writing a '1' to a bit enables the corresponding interrupt. * Writing a '0' to a bit disables the corresponding interrupt. * * IPISR/IPIER registers have the same bit definitions and are only defined * once. 
*/ #define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */ #define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */ #define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */ #define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */ #define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */ /* Control Register (CR) */ #define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */ #define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */ #define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */ #define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */ #define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */ #define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */ /* The maximum amount we can request from fifo_icap_get_configuration at once, in bytes. */ #define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF /** * fifo_icap_fifo_write - Write data to the write FIFO. * @drvdata: a pointer to the drvdata. * @data: the 32-bit value to be written to the FIFO. * * This function will silently fail if the fifo is full. **/ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, u32 data) { dev_dbg(drvdata->dev, "fifo_write: %x\n", data); out_be32(drvdata->base_address + XHI_WF_OFFSET, data); } /** * fifo_icap_fifo_read - Read data from the Read FIFO. * @drvdata: a pointer to the drvdata. * * This function will silently fail if the fifo is empty. **/ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) { u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET); dev_dbg(drvdata->dev, "fifo_read: %x\n", data); return data; } /** * fifo_icap_set_read_size - Set the size register. * @drvdata: a pointer to the drvdata. * @data: the size of the following read transaction, in words. **/ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, u32 data) { out_be32(drvdata->base_address + XHI_SZ_OFFSET, data); } /** * fifo_icap_start_config - Initiate a configuration (write) to the device. * @drvdata: a pointer to the drvdata. **/ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) { out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK); dev_dbg(drvdata->dev, "configuration started\n"); } /** * fifo_icap_start_readback - Initiate a readback from the device. * @drvdata: a pointer to the drvdata. **/ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) { out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK); dev_dbg(drvdata->dev, "readback started\n"); } /** * fifo_icap_get_status - Get the contents of the status register. * @drvdata: a pointer to the drvdata. * * The status register contains the ICAP status and the done bit. * * D8 - cfgerr * D7 - dalign * D6 - rip * D5 - in_abort_l * D4 - Always 1 * D3 - Always 1 * D2 - Always 1 * D1 - Always 1 * D0 - Done bit **/ u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata) { u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET); dev_dbg(drvdata->dev, "Getting status = %x\n", status); return status; } /** * fifo_icap_busy - Return true if the ICAP is still processing a transaction. * @drvdata: a pointer to the drvdata. **/ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) { u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET); return (status & XHI_SR_DONE_MASK) ? 0 : 1; } /** * fifo_icap_write_fifo_vacancy - Query the write fifo available space. * @drvdata: a pointer to the drvdata. 
* * Return the number of words that can be safely pushed into the write fifo. **/ static inline u32 fifo_icap_write_fifo_vacancy( struct hwicap_drvdata *drvdata) { return in_be32(drvdata->base_address + XHI_WFV_OFFSET); } /** * fifo_icap_read_fifo_occupancy - Query the read fifo available data. * @drvdata: a pointer to the drvdata. * * Return the number of words that can be safely read from the read fifo. **/ static inline u32 fifo_icap_read_fifo_occupancy( struct hwicap_drvdata *drvdata) { return in_be32(drvdata->base_address + XHI_RFO_OFFSET); } /** * fifo_icap_set_configuration - Send configuration data to the ICAP. * @drvdata: a pointer to the drvdata. * @frame_buffer: a pointer to the data to be written to the * ICAP device. * @num_words: the number of words (32 bit) to write to the ICAP * device. * This function writes the given user data to the Write FIFO in * polled mode and starts the transfer of the data to * the ICAP device. **/ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *frame_buffer, u32 num_words) { u32 write_fifo_vacancy = 0; u32 retries = 0; u32 remaining_words; dev_dbg(drvdata->dev, "fifo_set_configuration\n"); /* * Check if the ICAP device is Busy with the last Read/Write */ if (fifo_icap_busy(drvdata)) return -EBUSY; /* * Set up the buffer pointer and the words to be transferred. */ remaining_words = num_words; while (remaining_words > 0) { /* * Wait until we have some data in the fifo. */ while (write_fifo_vacancy == 0) { write_fifo_vacancy = fifo_icap_write_fifo_vacancy(drvdata); retries++; if (retries > XHI_MAX_RETRIES) return -EIO; } /* * Write data into the Write FIFO. */ while ((write_fifo_vacancy != 0) && (remaining_words > 0)) { fifo_icap_fifo_write(drvdata, *frame_buffer); remaining_words--; write_fifo_vacancy--; frame_buffer++; } /* Start pushing whatever is in the FIFO into the ICAP. */ fifo_icap_start_config(drvdata); } /* Wait until the write has finished. */ while (fifo_icap_busy(drvdata)) { retries++; if (retries > XHI_MAX_RETRIES) break; } dev_dbg(drvdata->dev, "done fifo_set_configuration\n"); /* * If the requested number of words have not been read from * the device then indicate failure. */ if (remaining_words != 0) return -EIO; return 0; } /** * fifo_icap_get_configuration - Read configuration data from the device. * @drvdata: a pointer to the drvdata. * @data: Address of the data representing the partial bitstream * @size: the size of the partial bitstream in 32 bit words. * * This function reads the specified number of words from the ICAP device in * the polled mode. */ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *frame_buffer, u32 num_words) { u32 read_fifo_occupancy = 0; u32 retries = 0; u32 *data = frame_buffer; u32 remaining_words; u32 words_to_read; dev_dbg(drvdata->dev, "fifo_get_configuration\n"); /* * Check if the ICAP device is Busy with the last Write/Read */ if (fifo_icap_busy(drvdata)) return -EBUSY; remaining_words = num_words; while (remaining_words > 0) { words_to_read = remaining_words; /* The hardware has a limit on the number of words that can be read at one time. */ if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS) words_to_read = XHI_MAX_READ_TRANSACTION_WORDS; remaining_words -= words_to_read; fifo_icap_set_read_size(drvdata, words_to_read); fifo_icap_start_readback(drvdata); while (words_to_read > 0) { /* Wait until we have some data in the fifo. 
*/ while (read_fifo_occupancy == 0) { read_fifo_occupancy = fifo_icap_read_fifo_occupancy(drvdata); retries++; if (retries > XHI_MAX_RETRIES) return -EIO; } if (read_fifo_occupancy > words_to_read) read_fifo_occupancy = words_to_read; words_to_read -= read_fifo_occupancy; /* Read the data from the Read FIFO. */ while (read_fifo_occupancy != 0) { *data++ = fifo_icap_fifo_read(drvdata); read_fifo_occupancy--; } } } dev_dbg(drvdata->dev, "done fifo_get_configuration\n"); return 0; } /** * buffer_icap_reset - Reset the logic of the icap device. * @drvdata: a pointer to the drvdata. * * This function forces the software reset of the complete HWICAP device. * All the registers will return to the default value and the FIFO is also * flushed as a part of this software reset. */ void fifo_icap_reset(struct hwicap_drvdata *drvdata) { u32 reg_data; /* * Reset the device by setting/clearing the RESET bit in the * Control Register. */ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data | XHI_CR_SW_RESET_MASK); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data & (~XHI_CR_SW_RESET_MASK)); } /** * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. * @drvdata: a pointer to the drvdata. */ void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) { u32 reg_data; /* * Flush the FIFO by setting/clearing the FIFO Clear bit in the * Control Register. */ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data | XHI_CR_FIFO_CLR_MASK); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data & (~XHI_CR_FIFO_CLR_MASK)); }
linux-master
drivers/char/xilinx_hwicap/fifo_icap.c
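fifo_icap_reset() and fifo_icap_flush_fifo() above both pulse a self-clearing action bit in the Control Register with a read-modify-write pair. A minimal sketch of that pattern follows, with reg_read()/reg_write() as hypothetical stand-ins for in_be32()/out_be32() acting on a fake register.

#include <stdint.h>
#include <stdio.h>

#define CR_SW_RESET_MASK 0x00000008	/* same value as XHI_CR_SW_RESET_MASK */
#define CR_FIFO_CLR_MASK 0x00000004	/* same value as XHI_CR_FIFO_CLR_MASK */

static uint32_t fake_cr;

static uint32_t reg_read(void)
{
	return fake_cr;
}

static void reg_write(uint32_t v)
{
	fake_cr = v;
	printf("CR <- 0x%08X\n", (unsigned)v);
}

/* Pulse a control bit: set it, then clear it, preserving the other bits,
 * as fifo_icap_reset()/fifo_icap_flush_fifo() do above. */
static void pulse_cr_bit(uint32_t mask)
{
	uint32_t cr = reg_read();

	reg_write(cr | mask);	/* assert the action bit */
	reg_write(cr & ~mask);	/* deassert it, restoring the old contents */
}

int main(void)
{
	fake_cr = 0x00000001;		/* hypothetical prior CR contents */
	pulse_cr_bit(CR_SW_RESET_MASK);	/* software reset */
	pulse_cr_bit(CR_FIFO_CLR_MASK);	/* flush both FIFOs */
	return 0;
}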
/***************************************************************************** * * Author: Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group * (c) Copyright 2007-2008 Xilinx Inc. * All rights reserved. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ /* * This is the code behind /dev/icap* -- it allows a user-space * application to use the Xilinx ICAP subsystem. * * The following operations are possible: * * open open the port and initialize for access. * release release port * write Write a bitstream to the configuration processor. * read Read a data stream from the configuration processor. * * After being opened, the port is initialized and accessed to avoid a * corrupted first read which may occur with some hardware. The port * is left in a desynched state, requiring that a synch sequence be * transmitted before any valid configuration data. A user will have * exclusive access to the device while it remains open, and the state * of the ICAP cannot be guaranteed after the device is closed. Note * that a complete reset of the core and the state of the ICAP cannot * be performed on many versions of the cores, hence users of this * device should avoid making inconsistent accesses to the device. In * particular, accessing the read interface, without first generating * a write containing a readback packet can leave the ICAP in an * inaccessible state. * * Note that in order to use the read interface, it is first necessary * to write a request packet to the write interface. i.e., it is not * possible to simply readback the bitstream (or any configuration * bits) from a device without specifically requesting them first. * The code to craft such packets is intended to be part of the * user-space application code that uses this device. The simplest * way to use this interface is simply: * * cp foo.bit /dev/icap0 * * Note that unless foo.bit is an appropriately constructed partial * bitstream, this has a high likelihood of overwriting the design * currently programmed in the FPGA. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/sysctl.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/uaccess.h> #ifdef CONFIG_OF /* For open firmware. */ #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #endif #include "xilinx_hwicap.h" #include "buffer_icap.h" #include "fifo_icap.h" #define DRIVER_NAME "icap" #define HWICAP_REGS (0x10000) #define XHWICAP_MAJOR 259 #define XHWICAP_MINOR 0 #define HWICAP_DEVICES 1 /* An array, which is set to true when the device is registered. */ static DEFINE_MUTEX(hwicap_mutex); static bool probed_devices[HWICAP_DEVICES]; static struct mutex icap_sem; static const struct class icap_class = { .name = "xilinx_config", }; #define UNIMPLEMENTED 0xFFFF static const struct config_registers v2_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = 11, .KEY = 12, .CBC = 13, .IDCODE = 14, .AXSS = UNIMPLEMENTED, .C0R_1 = UNIMPLEMENTED, .CSOB = UNIMPLEMENTED, .WBSTAR = UNIMPLEMENTED, .TIMER = UNIMPLEMENTED, .BOOTSTS = UNIMPLEMENTED, .CTL_1 = UNIMPLEMENTED, }; static const struct config_registers v4_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = UNIMPLEMENTED, .KEY = UNIMPLEMENTED, .CBC = 11, .IDCODE = 12, .AXSS = 13, .C0R_1 = UNIMPLEMENTED, .CSOB = UNIMPLEMENTED, .WBSTAR = UNIMPLEMENTED, .TIMER = UNIMPLEMENTED, .BOOTSTS = UNIMPLEMENTED, .CTL_1 = UNIMPLEMENTED, }; static const struct config_registers v5_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = UNIMPLEMENTED, .KEY = UNIMPLEMENTED, .CBC = 11, .IDCODE = 12, .AXSS = 13, .C0R_1 = 14, .CSOB = 15, .WBSTAR = 16, .TIMER = 17, .BOOTSTS = 18, .CTL_1 = 19, }; static const struct config_registers v6_config_registers = { .CRC = 0, .FAR = 1, .FDRI = 2, .FDRO = 3, .CMD = 4, .CTL = 5, .MASK = 6, .STAT = 7, .LOUT = 8, .COR = 9, .MFWR = 10, .FLR = UNIMPLEMENTED, .KEY = UNIMPLEMENTED, .CBC = 11, .IDCODE = 12, .AXSS = 13, .C0R_1 = 14, .CSOB = 15, .WBSTAR = 16, .TIMER = 17, .BOOTSTS = 22, .CTL_1 = 24, }; /** * hwicap_command_desync - Send a DESYNC command to the ICAP port. * @drvdata: a pointer to the drvdata. * * Returns: '0' on success and failure value on error * * This command desynchronizes the ICAP After this command, a * bitstream containing a NULL packet, followed by a SYNCH packet is * required before the ICAP will recognize commands. */ static int hwicap_command_desync(struct hwicap_drvdata *drvdata) { u32 buffer[4]; u32 index = 0; /* * Create the data to be written to the ICAP. */ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; buffer[index++] = XHI_CMD_DESYNCH; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_NOOP_PACKET; /* * Write the data to the FIFO and initiate the transfer of data present * in the FIFO to the ICAP device. */ return drvdata->config->set_configuration(drvdata, &buffer[0], index); } /** * hwicap_get_configuration_register - Query a configuration register. * @drvdata: a pointer to the drvdata. 
* @reg: a constant which represents the configuration * register value to be returned. * Examples: XHI_IDCODE, XHI_FLR. * @reg_data: returns the value of the register. * * Returns: '0' on success and failure value on error * * Sends a query packet to the ICAP and then receives the response. * The icap is left in Synched state. */ static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, u32 reg, u32 *reg_data) { int status; u32 buffer[6]; u32 index = 0; /* * Create the data to be written to the ICAP. */ buffer[index++] = XHI_DUMMY_PACKET; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_SYNC_PACKET; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_NOOP_PACKET; /* * Write the data to the FIFO and initiate the transfer of data present * in the FIFO to the ICAP device. */ status = drvdata->config->set_configuration(drvdata, &buffer[0], index); if (status) return status; /* If the syncword was not found, then we need to start over. */ status = drvdata->config->get_status(drvdata); if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK) return -EIO; index = 0; buffer[index++] = hwicap_type_1_read(reg) | 1; buffer[index++] = XHI_NOOP_PACKET; buffer[index++] = XHI_NOOP_PACKET; /* * Write the data to the FIFO and initiate the transfer of data present * in the FIFO to the ICAP device. */ status = drvdata->config->set_configuration(drvdata, &buffer[0], index); if (status) return status; /* * Read the configuration register */ status = drvdata->config->get_configuration(drvdata, reg_data, 1); if (status) return status; return 0; } static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) { int status; u32 idcode; dev_dbg(drvdata->dev, "initializing\n"); /* Abort any current transaction, to make sure we have the * ICAP in a good state. */ dev_dbg(drvdata->dev, "Reset...\n"); drvdata->config->reset(drvdata); dev_dbg(drvdata->dev, "Desync...\n"); status = hwicap_command_desync(drvdata); if (status) return status; /* Attempt to read the IDCODE from ICAP. This * may not be returned correctly, due to the design of the * hardware. */ dev_dbg(drvdata->dev, "Reading IDCODE...\n"); status = hwicap_get_configuration_register( drvdata, drvdata->config_regs->IDCODE, &idcode); dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode); if (status) return status; dev_dbg(drvdata->dev, "Desync...\n"); status = hwicap_command_desync(drvdata); if (status) return status; return 0; } static ssize_t hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hwicap_drvdata *drvdata = file->private_data; ssize_t bytes_to_read = 0; u32 *kbuf; u32 words; u32 bytes_remaining; int status; status = mutex_lock_interruptible(&drvdata->sem); if (status) return status; if (drvdata->read_buffer_in_use) { /* If there are leftover bytes in the buffer, just */ /* return them and don't try to read more from the */ /* ICAP device. */ bytes_to_read = (count < drvdata->read_buffer_in_use) ? count : drvdata->read_buffer_in_use; /* Return the data currently in the read buffer. */ if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) { status = -EFAULT; goto error; } drvdata->read_buffer_in_use -= bytes_to_read; memmove(drvdata->read_buffer, drvdata->read_buffer + bytes_to_read, 4 - bytes_to_read); } else { /* Get new data from the ICAP, and return what was requested. */ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); if (!kbuf) { status = -ENOMEM; goto error; } /* The ICAP device is only able to read complete */ /* words. 
If a number of bytes that do not correspond */ /* to complete words is requested, then we read enough */ /* words to get the required number of bytes, and then */ /* save the remaining bytes for the next read. */ /* Determine the number of words to read, rounding up */ /* if necessary. */ words = ((count + 3) >> 2); bytes_to_read = words << 2; if (bytes_to_read > PAGE_SIZE) bytes_to_read = PAGE_SIZE; /* Ensure we only read a complete number of words. */ bytes_remaining = bytes_to_read & 3; bytes_to_read &= ~3; words = bytes_to_read >> 2; status = drvdata->config->get_configuration(drvdata, kbuf, words); /* If we didn't read correctly, then bail out. */ if (status) { free_page((unsigned long)kbuf); goto error; } /* If we fail to return the data to the user, then bail out. */ if (copy_to_user(buf, kbuf, bytes_to_read)) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } memcpy(drvdata->read_buffer, kbuf, bytes_remaining); drvdata->read_buffer_in_use = bytes_remaining; free_page((unsigned long)kbuf); } status = bytes_to_read; error: mutex_unlock(&drvdata->sem); return status; } static ssize_t hwicap_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct hwicap_drvdata *drvdata = file->private_data; ssize_t written = 0; ssize_t left = count; u32 *kbuf; ssize_t len; ssize_t status; status = mutex_lock_interruptible(&drvdata->sem); if (status) return status; left += drvdata->write_buffer_in_use; /* Only write multiples of 4 bytes. */ if (left < 4) { status = 0; goto error; } kbuf = (u32 *) __get_free_page(GFP_KERNEL); if (!kbuf) { status = -ENOMEM; goto error; } while (left > 3) { /* only write multiples of 4 bytes, so there might */ /* be as many as 3 bytes left (at the end). */ len = left; if (len > PAGE_SIZE) len = PAGE_SIZE; len &= ~3; if (drvdata->write_buffer_in_use) { memcpy(kbuf, drvdata->write_buffer, drvdata->write_buffer_in_use); if (copy_from_user( (((char *)kbuf) + drvdata->write_buffer_in_use), buf + written, len - (drvdata->write_buffer_in_use))) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } } else { if (copy_from_user(kbuf, buf + written, len)) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } } status = drvdata->config->set_configuration(drvdata, kbuf, len >> 2); if (status) { free_page((unsigned long)kbuf); status = -EFAULT; goto error; } if (drvdata->write_buffer_in_use) { len -= drvdata->write_buffer_in_use; left -= drvdata->write_buffer_in_use; drvdata->write_buffer_in_use = 0; } written += len; left -= len; } if ((left > 0) && (left < 4)) { if (!copy_from_user(drvdata->write_buffer, buf + written, left)) { drvdata->write_buffer_in_use = left; written += left; left = 0; } } free_page((unsigned long)kbuf); status = written; error: mutex_unlock(&drvdata->sem); return status; } static int hwicap_open(struct inode *inode, struct file *file) { struct hwicap_drvdata *drvdata; int status; mutex_lock(&hwicap_mutex); drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); status = mutex_lock_interruptible(&drvdata->sem); if (status) goto out; if (drvdata->is_open) { status = -EBUSY; goto error; } status = hwicap_initialize_hwicap(drvdata); if (status) { dev_err(drvdata->dev, "Failed to open file"); goto error; } file->private_data = drvdata; drvdata->write_buffer_in_use = 0; drvdata->read_buffer_in_use = 0; drvdata->is_open = 1; error: mutex_unlock(&drvdata->sem); out: mutex_unlock(&hwicap_mutex); return status; } static int hwicap_release(struct inode *inode, struct file *file) { struct 
hwicap_drvdata *drvdata = file->private_data; int i; int status = 0; mutex_lock(&drvdata->sem); if (drvdata->write_buffer_in_use) { /* Flush write buffer. */ for (i = drvdata->write_buffer_in_use; i < 4; i++) drvdata->write_buffer[i] = 0; status = drvdata->config->set_configuration(drvdata, (u32 *) drvdata->write_buffer, 1); if (status) goto error; } status = hwicap_command_desync(drvdata); if (status) goto error; error: drvdata->is_open = 0; mutex_unlock(&drvdata->sem); return status; } static const struct file_operations hwicap_fops = { .owner = THIS_MODULE, .write = hwicap_write, .read = hwicap_read, .open = hwicap_open, .release = hwicap_release, .llseek = noop_llseek, }; static int hwicap_setup(struct device *dev, int id, const struct resource *regs_res, const struct hwicap_driver_config *config, const struct config_registers *config_regs) { dev_t devt; struct hwicap_drvdata *drvdata = NULL; int retval = 0; dev_info(dev, "Xilinx icap port driver\n"); mutex_lock(&icap_sem); if (id < 0) { for (id = 0; id < HWICAP_DEVICES; id++) if (!probed_devices[id]) break; } if (id < 0 || id >= HWICAP_DEVICES) { mutex_unlock(&icap_sem); dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); return -EINVAL; } if (probed_devices[id]) { mutex_unlock(&icap_sem); dev_err(dev, "cannot assign to %s%i; it is already in use\n", DRIVER_NAME, id); return -EBUSY; } probed_devices[id] = 1; mutex_unlock(&icap_sem); devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id); drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); if (!drvdata) { retval = -ENOMEM; goto failed0; } dev_set_drvdata(dev, (void *)drvdata); if (!regs_res) { dev_err(dev, "Couldn't get registers resource\n"); retval = -EFAULT; goto failed1; } drvdata->mem_start = regs_res->start; drvdata->mem_end = regs_res->end; drvdata->mem_size = resource_size(regs_res); if (!request_mem_region(drvdata->mem_start, drvdata->mem_size, DRIVER_NAME)) { dev_err(dev, "Couldn't lock memory region at %Lx\n", (unsigned long long) regs_res->start); retval = -EBUSY; goto failed1; } drvdata->devt = devt; drvdata->dev = dev; drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size); if (!drvdata->base_address) { dev_err(dev, "ioremap() failed\n"); retval = -ENOMEM; goto failed2; } drvdata->config = config; drvdata->config_regs = config_regs; mutex_init(&drvdata->sem); drvdata->is_open = 0; dev_info(dev, "ioremap %llx to %p with size %llx\n", (unsigned long long) drvdata->mem_start, drvdata->base_address, (unsigned long long) drvdata->mem_size); cdev_init(&drvdata->cdev, &hwicap_fops); drvdata->cdev.owner = THIS_MODULE; retval = cdev_add(&drvdata->cdev, devt, 1); if (retval) { dev_err(dev, "cdev_add() failed\n"); goto failed3; } device_create(&icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id); return 0; /* success */ failed3: iounmap(drvdata->base_address); failed2: release_mem_region(regs_res->start, drvdata->mem_size); failed1: kfree(drvdata); failed0: mutex_lock(&icap_sem); probed_devices[id] = 0; mutex_unlock(&icap_sem); return retval; } static struct hwicap_driver_config buffer_icap_config = { .get_configuration = buffer_icap_get_configuration, .set_configuration = buffer_icap_set_configuration, .get_status = buffer_icap_get_status, .reset = buffer_icap_reset, }; static struct hwicap_driver_config fifo_icap_config = { .get_configuration = fifo_icap_get_configuration, .set_configuration = fifo_icap_set_configuration, .get_status = fifo_icap_get_status, .reset = fifo_icap_reset, }; #ifdef CONFIG_OF static int hwicap_of_probe(struct platform_device *op, 
const struct hwicap_driver_config *config) { struct resource res; const unsigned int *id; const char *family; int rc; const struct config_registers *regs; rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid address\n"); return rc; } id = of_get_property(op->dev.of_node, "port-number", NULL); /* It's most likely that we're using V4, if the family is not * specified */ regs = &v4_config_registers; family = of_get_property(op->dev.of_node, "xlnx,family", NULL); if (family) { if (!strcmp(family, "virtex2p")) regs = &v2_config_registers; else if (!strcmp(family, "virtex4")) regs = &v4_config_registers; else if (!strcmp(family, "virtex5")) regs = &v5_config_registers; else if (!strcmp(family, "virtex6")) regs = &v6_config_registers; } return hwicap_setup(&op->dev, id ? *id : -1, &res, config, regs); } #else static inline int hwicap_of_probe(struct platform_device *op, const struct hwicap_driver_config *config) { return -EINVAL; } #endif /* CONFIG_OF */ static const struct of_device_id hwicap_of_match[]; static int hwicap_drv_probe(struct platform_device *pdev) { const struct of_device_id *match; struct resource *res; const struct config_registers *regs; const char *family; match = of_match_device(hwicap_of_match, &pdev->dev); if (match) return hwicap_of_probe(pdev, match->data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; /* It's most likely that we're using V4, if the family is not * specified */ regs = &v4_config_registers; family = pdev->dev.platform_data; if (family) { if (!strcmp(family, "virtex2p")) regs = &v2_config_registers; else if (!strcmp(family, "virtex4")) regs = &v4_config_registers; else if (!strcmp(family, "virtex5")) regs = &v5_config_registers; else if (!strcmp(family, "virtex6")) regs = &v6_config_registers; } return hwicap_setup(&pdev->dev, pdev->id, res, &buffer_icap_config, regs); } static void hwicap_drv_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct hwicap_drvdata *drvdata; drvdata = dev_get_drvdata(dev); device_destroy(&icap_class, drvdata->devt); cdev_del(&drvdata->cdev); iounmap(drvdata->base_address); release_mem_region(drvdata->mem_start, drvdata->mem_size); kfree(drvdata); mutex_lock(&icap_sem); probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0; mutex_unlock(&icap_sem); } #ifdef CONFIG_OF /* Match table for device tree binding */ static const struct of_device_id hwicap_of_match[] = { { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config}, {}, }; MODULE_DEVICE_TABLE(of, hwicap_of_match); #else #define hwicap_of_match NULL #endif static struct platform_driver hwicap_platform_driver = { .probe = hwicap_drv_probe, .remove_new = hwicap_drv_remove, .driver = { .name = DRIVER_NAME, .of_match_table = hwicap_of_match, }, }; static int __init hwicap_module_init(void) { dev_t devt; int retval; retval = class_register(&icap_class); if (retval) return retval; mutex_init(&icap_sem); devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); retval = register_chrdev_region(devt, HWICAP_DEVICES, DRIVER_NAME); if (retval < 0) return retval; retval = platform_driver_register(&hwicap_platform_driver); if (retval) goto failed; return retval; failed: unregister_chrdev_region(devt, HWICAP_DEVICES); return retval; } static void __exit hwicap_module_cleanup(void) { dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); class_unregister(&icap_class); platform_driver_unregister(&hwicap_platform_driver); 
unregister_chrdev_region(devt, HWICAP_DEVICES); } module_init(hwicap_module_init); module_exit(hwicap_module_cleanup); MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group"); MODULE_DESCRIPTION("Xilinx ICAP Port Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/char/xilinx_hwicap/xilinx_hwicap.c
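For context only, a hedged userspace sketch of how the character device created by hwicap_setup() above is typically driven. The node name "/dev/icap0" (DRIVER_NAME plus the probed id) and the idea of streaming a pre-generated partial bitstream are assumptions for illustration; they are not defined by the sources above.

/* Hypothetical userspace helper: stream a partial bitstream into the ICAP port. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, icap;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <partial-bitstream.bin>\n", argv[0]);
		return 1;
	}

	in = open(argv[1], O_RDONLY);
	icap = open("/dev/icap0", O_WRONLY);	/* assumed node name */
	if (in < 0 || icap < 0) {
		perror("open");
		return 1;
	}

	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(icap, buf, n) != n) {	/* driver buffers words internally */
			perror("write");
			return 1;
		}

	close(icap);	/* release flushes the partial-word buffer and desyncs ICAP */
	close(in);
	return 0;
}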
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_of.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework using Open Firmware.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/err.h>
#include "xillybus.h"

MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillybus_of");
MODULE_LICENSE("GPL v2");

static const char xillyname[] = "xillybus_of";

/* Match table for of_platform binding */
static const struct of_device_id xillybus_of_match[] = {
	{ .compatible = "xillybus,xillybus-1.00.a", },
	{ .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
	{}
};

MODULE_DEVICE_TABLE(of, xillybus_of_match);

static int xilly_drv_probe(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct xilly_endpoint *endpoint;
	int rc;
	int irq;

	endpoint = xillybus_init_endpoint(dev);

	if (!endpoint)
		return -ENOMEM;

	dev_set_drvdata(dev, endpoint);

	endpoint->owner = THIS_MODULE;

	endpoint->registers = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(endpoint->registers))
		return PTR_ERR(endpoint->registers);

	irq = platform_get_irq(op, 0);

	rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);

	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register IRQ handler. Aborting.\n");
		return -ENODEV;
	}

	return xillybus_endpoint_discovery(endpoint);
}

static int xilly_drv_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct xilly_endpoint *endpoint = dev_get_drvdata(dev);

	xillybus_endpoint_remove(endpoint);

	return 0;
}

static struct platform_driver xillybus_platform_driver = {
	.probe = xilly_drv_probe,
	.remove = xilly_drv_remove,
	.driver = {
		.name = xillyname,
		.of_match_table = xillybus_of_match,
	},
};

module_platform_driver(xillybus_platform_driver);
linux-master
drivers/char/xillybus/xillybus_of.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/misc/xillybus_pcie.c * * Copyright 2011 Xillybus Ltd, http://xillybus.com * * Driver for the Xillybus FPGA/host framework using PCI Express. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include "xillybus.h" MODULE_DESCRIPTION("Xillybus driver for PCIe"); MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); MODULE_ALIAS("xillybus_pcie"); MODULE_LICENSE("GPL v2"); #define PCI_DEVICE_ID_XILLYBUS 0xebeb #define PCI_VENDOR_ID_ACTEL 0x11aa #define PCI_VENDOR_ID_LATTICE 0x1204 static const char xillyname[] = "xillybus_pcie"; static const struct pci_device_id xillyids[] = { {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)}, {PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)}, {PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)}, { /* End: all zeroes */ } }; static int xilly_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct xilly_endpoint *endpoint; int rc; endpoint = xillybus_init_endpoint(&pdev->dev); if (!endpoint) return -ENOMEM; pci_set_drvdata(pdev, endpoint); endpoint->owner = THIS_MODULE; rc = pcim_enable_device(pdev); if (rc) { dev_err(endpoint->dev, "pcim_enable_device() failed. Aborting.\n"); return rc; } /* L0s has caused packet drops. No power saving, thank you. */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(endpoint->dev, "Incorrect BAR configuration. Aborting.\n"); return -ENODEV; } rc = pcim_iomap_regions(pdev, 0x01, xillyname); if (rc) { dev_err(endpoint->dev, "pcim_iomap_regions() failed. Aborting.\n"); return rc; } endpoint->registers = pcim_iomap_table(pdev)[0]; pci_set_master(pdev); /* Set up a single MSI interrupt */ if (pci_enable_msi(pdev)) { dev_err(endpoint->dev, "Failed to enable MSI interrupts. Aborting.\n"); return -ENODEV; } rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0, xillyname, endpoint); if (rc) { dev_err(endpoint->dev, "Failed to register MSI handler. Aborting.\n"); return -ENODEV; } /* * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets, * even when the PCIe driver claims that a 64-bit mask is OK. On the * other hand, on some architectures, 64-bit addressing is mandatory. * So go for the 64-bit mask only when failing is the other option. */ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { endpoint->dma_using_dac = 0; } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { endpoint->dma_using_dac = 1; } else { dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n"); return -ENODEV; } return xillybus_endpoint_discovery(endpoint); } static void xilly_remove(struct pci_dev *pdev) { struct xilly_endpoint *endpoint = pci_get_drvdata(pdev); xillybus_endpoint_remove(endpoint); } MODULE_DEVICE_TABLE(pci, xillyids); static struct pci_driver xillybus_driver = { .name = xillyname, .id_table = xillyids, .probe = xilly_probe, .remove = xilly_remove, }; module_pci_driver(xillybus_driver);
linux-master
drivers/char/xillybus/xillybus_pcie.c
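For context only, a hedged userspace sketch of using the stream device files that the Xillybus core creates once a transport (the OF or PCIe shim above) has probed. The device names come from the IDT declared by the FPGA IP; "/dev/xillybus_write_32" and "/dev/xillybus_read_32" are merely assumed here as typical demo-bundle names, and the loopback behavior depends on the FPGA application logic.

/* Hypothetical loopback through a pair of Xillybus streams. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char out[] = "hello through the FPGA\n";
	char in[sizeof(out)];
	int wfd, rfd;
	ssize_t n;

	wfd = open("/dev/xillybus_write_32", O_WRONLY);	/* assumed name */
	rfd = open("/dev/xillybus_read_32", O_RDONLY);	/* assumed name */
	if (wfd < 0 || rfd < 0) {
		perror("open");
		return 1;
	}

	if (write(wfd, out, sizeof(out)) != (ssize_t)sizeof(out)) {
		perror("write");
		return 1;
	}
	close(wfd);	/* closing flushes the host-to-FPGA stream */

	n = read(rfd, in, sizeof(in));	/* may return fewer bytes than requested */
	if (n > 0)
		fwrite(in, 1, n, stdout);

	close(rfd);
	return 0;
}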
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2020 Xillybus Ltd, http://xillybus.com * * Driver for the XillyUSB FPGA/host framework. * * This driver interfaces with a special IP core in an FPGA, setting up * a pipe between a hardware FIFO in the programmable logic and a device * file in the host. The number of such pipes and their attributes are * set up on the logic. This driver detects these automatically and * creates the device files accordingly. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/device.h> #include <linux/module.h> #include <asm/byteorder.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/usb.h> #include "xillybus_class.h" MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core"); MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); MODULE_ALIAS("xillyusb"); MODULE_LICENSE("GPL v2"); #define XILLY_RX_TIMEOUT (10 * HZ / 1000) #define XILLY_RESPONSE_TIMEOUT (500 * HZ / 1000) #define BUF_SIZE_ORDER 4 #define BUFNUM 8 #define LOG2_IDT_FIFO_SIZE 16 #define LOG2_INITIAL_FIFO_BUF_SIZE 16 #define MSG_EP_NUM 1 #define IN_EP_NUM 1 static const char xillyname[] = "xillyusb"; static unsigned int fifo_buf_order; #define USB_VENDOR_ID_XILINX 0x03fd #define USB_VENDOR_ID_ALTERA 0x09fb #define USB_PRODUCT_ID_XILLYUSB 0xebbe static const struct usb_device_id xillyusb_table[] = { { USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) }, { USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) }, { } }; MODULE_DEVICE_TABLE(usb, xillyusb_table); struct xillyusb_dev; struct xillyfifo { unsigned int bufsize; /* In bytes, always a power of 2 */ unsigned int bufnum; unsigned int size; /* Lazy: Equals bufsize * bufnum */ unsigned int buf_order; int fill; /* Number of bytes in the FIFO */ spinlock_t lock; wait_queue_head_t waitq; unsigned int readpos; unsigned int readbuf; unsigned int writepos; unsigned int writebuf; char **mem; }; struct xillyusb_channel; struct xillyusb_endpoint { struct xillyusb_dev *xdev; struct mutex ep_mutex; /* serialize operations on endpoint */ struct list_head buffers; struct list_head filled_buffers; spinlock_t buffers_lock; /* protect these two lists */ unsigned int order; unsigned int buffer_size; unsigned int fill_mask; int outstanding_urbs; struct usb_anchor anchor; struct xillyfifo fifo; struct work_struct workitem; bool shutting_down; bool drained; bool wake_on_drain; u8 ep_num; }; struct xillyusb_channel { struct xillyusb_dev *xdev; struct xillyfifo *in_fifo; struct xillyusb_endpoint *out_ep; struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */ struct mutex in_mutex; /* serialize fops on FPGA to host stream */ struct mutex out_mutex; /* serialize fops on host to FPGA stream */ wait_queue_head_t flushq; int chan_idx; u32 in_consumed_bytes; u32 in_current_checkpoint; u32 out_bytes; unsigned int in_log2_element_size; unsigned int out_log2_element_size; unsigned int in_log2_fifo_size; unsigned int out_log2_fifo_size; unsigned int read_data_ok; /* EOF not arrived (yet) */ unsigned int poll_used; unsigned int flushing; unsigned int flushed; unsigned int canceled; /* Bit fields protected by @lock except for initialization */ unsigned readable:1; unsigned writable:1; unsigned open_for_read:1; unsigned open_for_write:1; unsigned in_synchronous:1; unsigned out_synchronous:1; unsigned 
in_seekable:1; unsigned out_seekable:1; }; struct xillybuffer { struct list_head entry; struct xillyusb_endpoint *ep; void *buf; unsigned int len; }; struct xillyusb_dev { struct xillyusb_channel *channels; struct usb_device *udev; struct device *dev; /* For dev_err() and such */ struct kref kref; struct workqueue_struct *workq; int error; spinlock_t error_lock; /* protect @error */ struct work_struct wakeup_workitem; int num_channels; struct xillyusb_endpoint *msg_ep; struct xillyusb_endpoint *in_ep; struct mutex msg_mutex; /* serialize opcode transmission */ int in_bytes_left; int leftover_chan_num; unsigned int in_counter; struct mutex process_in_mutex; /* synchronize wakeup_all() */ }; /* * kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev * struct from being freed during the gap between being found by * xillybus_find_inode() and having its reference count incremented. */ static DEFINE_MUTEX(kref_mutex); /* FPGA to host opcodes */ enum { OPCODE_DATA = 0, OPCODE_QUIESCE_ACK = 1, OPCODE_EOF = 2, OPCODE_REACHED_CHECKPOINT = 3, OPCODE_CANCELED_CHECKPOINT = 4, }; /* Host to FPGA opcodes */ enum { OPCODE_QUIESCE = 0, OPCODE_REQ_IDT = 1, OPCODE_SET_CHECKPOINT = 2, OPCODE_CLOSE = 3, OPCODE_SET_PUSH = 4, OPCODE_UPDATE_PUSH = 5, OPCODE_CANCEL_CHECKPOINT = 6, OPCODE_SET_ADDR = 7, }; /* * fifo_write() and fifo_read() are NOT reentrant (i.e. concurrent multiple * calls to each on the same FIFO is not allowed) however it's OK to have * threads calling each of the two functions once on the same FIFO, and * at the same time. */ static int fifo_write(struct xillyfifo *fifo, const void *data, unsigned int len, int (*copier)(void *, const void *, int)) { unsigned int done = 0; unsigned int todo = len; unsigned int nmax; unsigned int writepos = fifo->writepos; unsigned int writebuf = fifo->writebuf; unsigned long flags; int rc; nmax = fifo->size - READ_ONCE(fifo->fill); while (1) { unsigned int nrail = fifo->bufsize - writepos; unsigned int n = min(todo, nmax); if (n == 0) { spin_lock_irqsave(&fifo->lock, flags); fifo->fill += done; spin_unlock_irqrestore(&fifo->lock, flags); fifo->writepos = writepos; fifo->writebuf = writebuf; return done; } if (n > nrail) n = nrail; rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n); if (rc) return rc; done += n; todo -= n; writepos += n; nmax -= n; if (writepos == fifo->bufsize) { writepos = 0; writebuf++; if (writebuf == fifo->bufnum) writebuf = 0; } } } static int fifo_read(struct xillyfifo *fifo, void *data, unsigned int len, int (*copier)(void *, const void *, int)) { unsigned int done = 0; unsigned int todo = len; unsigned int fill; unsigned int readpos = fifo->readpos; unsigned int readbuf = fifo->readbuf; unsigned long flags; int rc; /* * The spinlock here is necessary, because otherwise fifo->fill * could have been increased by fifo_write() after writing data * to the buffer, but this data would potentially not have been * visible on this thread at the time the updated fifo->fill was. * That could lead to reading invalid data. 
*/ spin_lock_irqsave(&fifo->lock, flags); fill = fifo->fill; spin_unlock_irqrestore(&fifo->lock, flags); while (1) { unsigned int nrail = fifo->bufsize - readpos; unsigned int n = min(todo, fill); if (n == 0) { spin_lock_irqsave(&fifo->lock, flags); fifo->fill -= done; spin_unlock_irqrestore(&fifo->lock, flags); fifo->readpos = readpos; fifo->readbuf = readbuf; return done; } if (n > nrail) n = nrail; rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n); if (rc) return rc; done += n; todo -= n; readpos += n; fill -= n; if (readpos == fifo->bufsize) { readpos = 0; readbuf++; if (readbuf == fifo->bufnum) readbuf = 0; } } } /* * These three wrapper functions are used as the @copier argument to * fifo_write() and fifo_read(), so that they can work directly with * user memory as well. */ static int xilly_copy_from_user(void *dst, const void *src, int n) { if (copy_from_user(dst, (const void __user *)src, n)) return -EFAULT; return 0; } static int xilly_copy_to_user(void *dst, const void *src, int n) { if (copy_to_user((void __user *)dst, src, n)) return -EFAULT; return 0; } static int xilly_memcpy(void *dst, const void *src, int n) { memcpy(dst, src, n); return 0; } static int fifo_init(struct xillyfifo *fifo, unsigned int log2_size) { unsigned int log2_bufnum; unsigned int buf_order; int i; unsigned int log2_fifo_buf_size; retry: log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT; if (log2_size > log2_fifo_buf_size) { log2_bufnum = log2_size - log2_fifo_buf_size; buf_order = fifo_buf_order; fifo->bufsize = 1 << log2_fifo_buf_size; } else { log2_bufnum = 0; buf_order = (log2_size > PAGE_SHIFT) ? log2_size - PAGE_SHIFT : 0; fifo->bufsize = 1 << log2_size; } fifo->bufnum = 1 << log2_bufnum; fifo->size = fifo->bufnum * fifo->bufsize; fifo->buf_order = buf_order; fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL); if (!fifo->mem) return -ENOMEM; for (i = 0; i < fifo->bufnum; i++) { fifo->mem[i] = (void *) __get_free_pages(GFP_KERNEL, buf_order); if (!fifo->mem[i]) goto memfail; } fifo->fill = 0; fifo->readpos = 0; fifo->readbuf = 0; fifo->writepos = 0; fifo->writebuf = 0; spin_lock_init(&fifo->lock); init_waitqueue_head(&fifo->waitq); return 0; memfail: for (i--; i >= 0; i--) free_pages((unsigned long)fifo->mem[i], buf_order); kfree(fifo->mem); fifo->mem = NULL; if (fifo_buf_order) { fifo_buf_order--; goto retry; } else { return -ENOMEM; } } static void fifo_mem_release(struct xillyfifo *fifo) { int i; if (!fifo->mem) return; for (i = 0; i < fifo->bufnum; i++) free_pages((unsigned long)fifo->mem[i], fifo->buf_order); kfree(fifo->mem); } /* * When endpoint_quiesce() returns, the endpoint has no URBs submitted, * won't accept any new URB submissions, and its related work item doesn't * and won't run anymore. */ static void endpoint_quiesce(struct xillyusb_endpoint *ep) { mutex_lock(&ep->ep_mutex); ep->shutting_down = true; mutex_unlock(&ep->ep_mutex); usb_kill_anchored_urbs(&ep->anchor); cancel_work_sync(&ep->workitem); } /* * Note that endpoint_dealloc() also frees fifo memory (if allocated), even * though endpoint_alloc doesn't allocate that memory. 
*/ static void endpoint_dealloc(struct xillyusb_endpoint *ep) { struct list_head *this, *next; fifo_mem_release(&ep->fifo); /* Join @filled_buffers with @buffers to free these entries too */ list_splice(&ep->filled_buffers, &ep->buffers); list_for_each_safe(this, next, &ep->buffers) { struct xillybuffer *xb = list_entry(this, struct xillybuffer, entry); free_pages((unsigned long)xb->buf, ep->order); kfree(xb); } kfree(ep); } static struct xillyusb_endpoint *endpoint_alloc(struct xillyusb_dev *xdev, u8 ep_num, void (*work)(struct work_struct *), unsigned int order, int bufnum) { int i; struct xillyusb_endpoint *ep; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (!ep) return NULL; INIT_LIST_HEAD(&ep->buffers); INIT_LIST_HEAD(&ep->filled_buffers); spin_lock_init(&ep->buffers_lock); mutex_init(&ep->ep_mutex); init_usb_anchor(&ep->anchor); INIT_WORK(&ep->workitem, work); ep->order = order; ep->buffer_size = 1 << (PAGE_SHIFT + order); ep->outstanding_urbs = 0; ep->drained = true; ep->wake_on_drain = false; ep->xdev = xdev; ep->ep_num = ep_num; ep->shutting_down = false; for (i = 0; i < bufnum; i++) { struct xillybuffer *xb; unsigned long addr; xb = kzalloc(sizeof(*xb), GFP_KERNEL); if (!xb) { endpoint_dealloc(ep); return NULL; } addr = __get_free_pages(GFP_KERNEL, order); if (!addr) { kfree(xb); endpoint_dealloc(ep); return NULL; } xb->buf = (void *)addr; xb->ep = ep; list_add_tail(&xb->entry, &ep->buffers); } return ep; } static void cleanup_dev(struct kref *kref) { struct xillyusb_dev *xdev = container_of(kref, struct xillyusb_dev, kref); if (xdev->in_ep) endpoint_dealloc(xdev->in_ep); if (xdev->msg_ep) endpoint_dealloc(xdev->msg_ep); if (xdev->workq) destroy_workqueue(xdev->workq); usb_put_dev(xdev->udev); kfree(xdev->channels); /* Argument may be NULL, and that's fine */ kfree(xdev); } /* * @process_in_mutex is taken to ensure that bulk_in_work() won't call * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all * @read_data_ok entries, which will make process_bulk_in() report false * errors if executed. The mechanism relies on that xdev->error is assigned * a non-zero value by report_io_error() prior to queueing wakeup_all(), * which prevents bulk_in_work() from calling process_bulk_in(). * * The fact that wakeup_all() and bulk_in_work() are queued on the same * workqueue makes their concurrent execution very unlikely, however the * kernel's API doesn't seem to ensure this strictly. */ static void wakeup_all(struct work_struct *work) { int i; struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev, wakeup_workitem); mutex_lock(&xdev->process_in_mutex); for (i = 0; i < xdev->num_channels; i++) { struct xillyusb_channel *chan = &xdev->channels[i]; mutex_lock(&chan->lock); if (chan->in_fifo) { /* * Fake an EOF: Even if such arrives, it won't be * processed. 
*/ chan->read_data_ok = 0; wake_up_interruptible(&chan->in_fifo->waitq); } if (chan->out_ep) wake_up_interruptible(&chan->out_ep->fifo.waitq); mutex_unlock(&chan->lock); wake_up_interruptible(&chan->flushq); } mutex_unlock(&xdev->process_in_mutex); wake_up_interruptible(&xdev->msg_ep->fifo.waitq); kref_put(&xdev->kref, cleanup_dev); } static void report_io_error(struct xillyusb_dev *xdev, int errcode) { unsigned long flags; bool do_once = false; spin_lock_irqsave(&xdev->error_lock, flags); if (!xdev->error) { xdev->error = errcode; do_once = true; } spin_unlock_irqrestore(&xdev->error_lock, flags); if (do_once) { kref_get(&xdev->kref); /* xdev is used by work item */ queue_work(xdev->workq, &xdev->wakeup_workitem); } } /* * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures * the previous pointer is never used after its return. */ static void safely_assign_in_fifo(struct xillyusb_channel *chan, struct xillyfifo *fifo) { mutex_lock(&chan->lock); chan->in_fifo = fifo; mutex_unlock(&chan->lock); flush_work(&chan->xdev->in_ep->workitem); } static void bulk_in_completer(struct urb *urb) { struct xillybuffer *xb = urb->context; struct xillyusb_endpoint *ep = xb->ep; unsigned long flags; if (urb->status) { if (!(urb->status == -ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) report_io_error(ep->xdev, -EIO); spin_lock_irqsave(&ep->buffers_lock, flags); list_add_tail(&xb->entry, &ep->buffers); ep->outstanding_urbs--; spin_unlock_irqrestore(&ep->buffers_lock, flags); return; } xb->len = urb->actual_length; spin_lock_irqsave(&ep->buffers_lock, flags); list_add_tail(&xb->entry, &ep->filled_buffers); spin_unlock_irqrestore(&ep->buffers_lock, flags); if (!ep->shutting_down) queue_work(ep->xdev->workq, &ep->workitem); } static void bulk_out_completer(struct urb *urb) { struct xillybuffer *xb = urb->context; struct xillyusb_endpoint *ep = xb->ep; unsigned long flags; if (urb->status && (!(urb->status == -ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN))) report_io_error(ep->xdev, -EIO); spin_lock_irqsave(&ep->buffers_lock, flags); list_add_tail(&xb->entry, &ep->buffers); ep->outstanding_urbs--; spin_unlock_irqrestore(&ep->buffers_lock, flags); if (!ep->shutting_down) queue_work(ep->xdev->workq, &ep->workitem); } static void try_queue_bulk_in(struct xillyusb_endpoint *ep) { struct xillyusb_dev *xdev = ep->xdev; struct xillybuffer *xb; struct urb *urb; int rc; unsigned long flags; unsigned int bufsize = ep->buffer_size; mutex_lock(&ep->ep_mutex); if (ep->shutting_down || xdev->error) goto done; while (1) { spin_lock_irqsave(&ep->buffers_lock, flags); if (list_empty(&ep->buffers)) { spin_unlock_irqrestore(&ep->buffers_lock, flags); goto done; } xb = list_first_entry(&ep->buffers, struct xillybuffer, entry); list_del(&xb->entry); ep->outstanding_urbs++; spin_unlock_irqrestore(&ep->buffers_lock, flags); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { report_io_error(xdev, -ENOMEM); goto relist; } usb_fill_bulk_urb(urb, xdev->udev, usb_rcvbulkpipe(xdev->udev, ep->ep_num), xb->buf, bufsize, bulk_in_completer, xb); usb_anchor_urb(urb, &ep->anchor); rc = usb_submit_urb(urb, GFP_KERNEL); if (rc) { report_io_error(xdev, (rc == -ENOMEM) ? 
-ENOMEM : -EIO); goto unanchor; } usb_free_urb(urb); /* This just decrements reference count */ } unanchor: usb_unanchor_urb(urb); usb_free_urb(urb); relist: spin_lock_irqsave(&ep->buffers_lock, flags); list_add_tail(&xb->entry, &ep->buffers); ep->outstanding_urbs--; spin_unlock_irqrestore(&ep->buffers_lock, flags); done: mutex_unlock(&ep->ep_mutex); } static void try_queue_bulk_out(struct xillyusb_endpoint *ep) { struct xillyfifo *fifo = &ep->fifo; struct xillyusb_dev *xdev = ep->xdev; struct xillybuffer *xb; struct urb *urb; int rc; unsigned int fill; unsigned long flags; bool do_wake = false; mutex_lock(&ep->ep_mutex); if (ep->shutting_down || xdev->error) goto done; fill = READ_ONCE(fifo->fill) & ep->fill_mask; while (1) { int count; unsigned int max_read; spin_lock_irqsave(&ep->buffers_lock, flags); /* * Race conditions might have the FIFO filled while the * endpoint is marked as drained here. That doesn't matter, * because the sole purpose of @drained is to ensure that * certain data has been sent on the USB channel before * shutting it down. Hence knowing that the FIFO appears * to be empty with no outstanding URBs at some moment * is good enough. */ if (!fill) { ep->drained = !ep->outstanding_urbs; if (ep->drained && ep->wake_on_drain) do_wake = true; spin_unlock_irqrestore(&ep->buffers_lock, flags); goto done; } ep->drained = false; if ((fill < ep->buffer_size && ep->outstanding_urbs) || list_empty(&ep->buffers)) { spin_unlock_irqrestore(&ep->buffers_lock, flags); goto done; } xb = list_first_entry(&ep->buffers, struct xillybuffer, entry); list_del(&xb->entry); ep->outstanding_urbs++; spin_unlock_irqrestore(&ep->buffers_lock, flags); max_read = min(fill, ep->buffer_size); count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy); /* * xilly_memcpy always returns 0 => fifo_read can't fail => * count > 0 */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { report_io_error(xdev, -ENOMEM); goto relist; } usb_fill_bulk_urb(urb, xdev->udev, usb_sndbulkpipe(xdev->udev, ep->ep_num), xb->buf, count, bulk_out_completer, xb); usb_anchor_urb(urb, &ep->anchor); rc = usb_submit_urb(urb, GFP_KERNEL); if (rc) { report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM : -EIO); goto unanchor; } usb_free_urb(urb); /* This just decrements reference count */ fill -= count; do_wake = true; } unanchor: usb_unanchor_urb(urb); usb_free_urb(urb); relist: spin_lock_irqsave(&ep->buffers_lock, flags); list_add_tail(&xb->entry, &ep->buffers); ep->outstanding_urbs--; spin_unlock_irqrestore(&ep->buffers_lock, flags); done: mutex_unlock(&ep->ep_mutex); if (do_wake) wake_up_interruptible(&fifo->waitq); } static void bulk_out_work(struct work_struct *work) { struct xillyusb_endpoint *ep = container_of(work, struct xillyusb_endpoint, workitem); try_queue_bulk_out(ep); } static int process_in_opcode(struct xillyusb_dev *xdev, int opcode, int chan_num) { struct xillyusb_channel *chan; struct device *dev = xdev->dev; int chan_idx = chan_num >> 1; if (chan_idx >= xdev->num_channels) { dev_err(dev, "Received illegal channel ID %d from FPGA\n", chan_num); return -EIO; } chan = &xdev->channels[chan_idx]; switch (opcode) { case OPCODE_EOF: if (!chan->read_data_ok) { dev_err(dev, "Received unexpected EOF for channel %d\n", chan_num); return -EIO; } /* * A write memory barrier ensures that the FIFO's fill level * is visible before read_data_ok turns zero, so the data in * the FIFO isn't missed by the consumer. 
*/ smp_wmb(); WRITE_ONCE(chan->read_data_ok, 0); wake_up_interruptible(&chan->in_fifo->waitq); break; case OPCODE_REACHED_CHECKPOINT: chan->flushing = 0; wake_up_interruptible(&chan->flushq); break; case OPCODE_CANCELED_CHECKPOINT: chan->canceled = 1; wake_up_interruptible(&chan->flushq); break; default: dev_err(dev, "Received illegal opcode %d from FPGA\n", opcode); return -EIO; } return 0; } static int process_bulk_in(struct xillybuffer *xb) { struct xillyusb_endpoint *ep = xb->ep; struct xillyusb_dev *xdev = ep->xdev; struct device *dev = xdev->dev; int dws = xb->len >> 2; __le32 *p = xb->buf; u32 ctrlword; struct xillyusb_channel *chan; struct xillyfifo *fifo; int chan_num = 0, opcode; int chan_idx; int bytes, count, dwconsume; int in_bytes_left = 0; int rc; if ((dws << 2) != xb->len) { dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n", xb->len); return -EIO; } if (xdev->in_bytes_left) { bytes = min(xdev->in_bytes_left, dws << 2); in_bytes_left = xdev->in_bytes_left - bytes; chan_num = xdev->leftover_chan_num; goto resume_leftovers; } while (dws) { ctrlword = le32_to_cpu(*p++); dws--; chan_num = ctrlword & 0xfff; count = (ctrlword >> 12) & 0x3ff; opcode = (ctrlword >> 24) & 0xf; if (opcode != OPCODE_DATA) { unsigned int in_counter = xdev->in_counter++ & 0x3ff; if (count != in_counter) { dev_err(dev, "Expected opcode counter %d, got %d\n", in_counter, count); return -EIO; } rc = process_in_opcode(xdev, opcode, chan_num); if (rc) return rc; continue; } bytes = min(count + 1, dws << 2); in_bytes_left = count + 1 - bytes; resume_leftovers: chan_idx = chan_num >> 1; if (!(chan_num & 1) || chan_idx >= xdev->num_channels || !xdev->channels[chan_idx].read_data_ok) { dev_err(dev, "Received illegal channel ID %d from FPGA\n", chan_num); return -EIO; } chan = &xdev->channels[chan_idx]; fifo = chan->in_fifo; if (unlikely(!fifo)) return -EIO; /* We got really unexpected data */ if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) { dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n"); return -EIO; } wake_up_interruptible(&fifo->waitq); dwconsume = (bytes + 3) >> 2; dws -= dwconsume; p += dwconsume; } xdev->in_bytes_left = in_bytes_left; xdev->leftover_chan_num = chan_num; return 0; } static void bulk_in_work(struct work_struct *work) { struct xillyusb_endpoint *ep = container_of(work, struct xillyusb_endpoint, workitem); struct xillyusb_dev *xdev = ep->xdev; unsigned long flags; struct xillybuffer *xb; bool consumed = false; int rc = 0; mutex_lock(&xdev->process_in_mutex); spin_lock_irqsave(&ep->buffers_lock, flags); while (1) { if (rc || list_empty(&ep->filled_buffers)) { spin_unlock_irqrestore(&ep->buffers_lock, flags); mutex_unlock(&xdev->process_in_mutex); if (rc) report_io_error(xdev, rc); else if (consumed) try_queue_bulk_in(ep); return; } xb = list_first_entry(&ep->filled_buffers, struct xillybuffer, entry); list_del(&xb->entry); spin_unlock_irqrestore(&ep->buffers_lock, flags); consumed = true; if (!xdev->error) rc = process_bulk_in(xb); spin_lock_irqsave(&ep->buffers_lock, flags); list_add_tail(&xb->entry, &ep->buffers); ep->outstanding_urbs--; } } static int xillyusb_send_opcode(struct xillyusb_dev *xdev, int chan_num, char opcode, u32 data) { struct xillyusb_endpoint *ep = xdev->msg_ep; struct xillyfifo *fifo = &ep->fifo; __le32 msg[2]; int rc = 0; msg[0] = cpu_to_le32((chan_num & 0xfff) | ((opcode & 0xf) << 24)); msg[1] = cpu_to_le32(data); mutex_lock(&xdev->msg_mutex); /* * The wait queue is woken with the interruptible variant, so the * wait 
function matches, however returning because of an interrupt * will mess things up considerably, in particular when the caller is * the release method. And the xdev->error part prevents being stuck * forever in the event of a bizarre hardware bug: Pull the USB plug. */ while (wait_event_interruptible(fifo->waitq, fifo->fill <= (fifo->size - 8) || xdev->error)) ; /* Empty loop */ if (xdev->error) { rc = xdev->error; goto unlock_done; } fifo_write(fifo, (void *)msg, 8, xilly_memcpy); try_queue_bulk_out(ep); unlock_done: mutex_unlock(&xdev->msg_mutex); return rc; } /* * Note that flush_downstream() merely waits for the data to arrive to * the application logic at the FPGA -- unlike PCIe Xillybus' counterpart, * it does nothing to make it happen (and neither is it necessary). * * This function is not reentrant for the same @chan, but this is covered * by the fact that for any given @chan, it's called either by the open, * write, llseek and flush fops methods, which can't run in parallel (and the * write + flush and llseek method handlers are protected with out_mutex). * * chan->flushed is there to avoid multiple flushes at the same position, * in particular as a result of programs that close the file descriptor * e.g. after a dup2() for redirection. */ static int flush_downstream(struct xillyusb_channel *chan, long timeout, bool interruptible) { struct xillyusb_dev *xdev = chan->xdev; int chan_num = chan->chan_idx << 1; long deadline, left_to_sleep; int rc; if (chan->flushed) return 0; deadline = jiffies + 1 + timeout; if (chan->flushing) { long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT; chan->canceled = 0; rc = xillyusb_send_opcode(xdev, chan_num, OPCODE_CANCEL_CHECKPOINT, 0); if (rc) return rc; /* Only real error, never -EINTR */ /* Ignoring interrupts. Cancellation must be handled */ while (!chan->canceled) { left_to_sleep = cancel_deadline - ((long)jiffies); if (left_to_sleep <= 0) { report_io_error(xdev, -EIO); return -EIO; } rc = wait_event_interruptible_timeout(chan->flushq, chan->canceled || xdev->error, left_to_sleep); if (xdev->error) return xdev->error; } } chan->flushing = 1; /* * The checkpoint is given in terms of data elements, not bytes. As * a result, if less than an element's worth of data is stored in the * FIFO, it's not flushed, including the flush before closing, which * means that such data is lost. This is consistent with PCIe Xillybus. 
*/ rc = xillyusb_send_opcode(xdev, chan_num, OPCODE_SET_CHECKPOINT, chan->out_bytes >> chan->out_log2_element_size); if (rc) return rc; /* Only real error, never -EINTR */ if (!timeout) { while (chan->flushing) { rc = wait_event_interruptible(chan->flushq, !chan->flushing || xdev->error); if (xdev->error) return xdev->error; if (interruptible && rc) return -EINTR; } goto done; } while (chan->flushing) { left_to_sleep = deadline - ((long)jiffies); if (left_to_sleep <= 0) return -ETIMEDOUT; rc = wait_event_interruptible_timeout(chan->flushq, !chan->flushing || xdev->error, left_to_sleep); if (xdev->error) return xdev->error; if (interruptible && rc < 0) return -EINTR; } done: chan->flushed = 1; return 0; } /* request_read_anything(): Ask the FPGA for any little amount of data */ static int request_read_anything(struct xillyusb_channel *chan, char opcode) { struct xillyusb_dev *xdev = chan->xdev; unsigned int sh = chan->in_log2_element_size; int chan_num = (chan->chan_idx << 1) | 1; u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1; return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh); } static int xillyusb_open(struct inode *inode, struct file *filp) { struct xillyusb_dev *xdev; struct xillyusb_channel *chan; struct xillyfifo *in_fifo = NULL; struct xillyusb_endpoint *out_ep = NULL; int rc; int index; mutex_lock(&kref_mutex); rc = xillybus_find_inode(inode, (void **)&xdev, &index); if (rc) { mutex_unlock(&kref_mutex); return rc; } kref_get(&xdev->kref); mutex_unlock(&kref_mutex); chan = &xdev->channels[index]; filp->private_data = chan; mutex_lock(&chan->lock); rc = -ENODEV; if (xdev->error) goto unmutex_fail; if (((filp->f_mode & FMODE_READ) && !chan->readable) || ((filp->f_mode & FMODE_WRITE) && !chan->writable)) goto unmutex_fail; if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) && chan->in_synchronous) { dev_err(xdev->dev, "open() failed: O_NONBLOCK not allowed for read on this device\n"); goto unmutex_fail; } if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) && chan->out_synchronous) { dev_err(xdev->dev, "open() failed: O_NONBLOCK not allowed for write on this device\n"); goto unmutex_fail; } rc = -EBUSY; if (((filp->f_mode & FMODE_READ) && chan->open_for_read) || ((filp->f_mode & FMODE_WRITE) && chan->open_for_write)) goto unmutex_fail; if (filp->f_mode & FMODE_READ) chan->open_for_read = 1; if (filp->f_mode & FMODE_WRITE) chan->open_for_write = 1; mutex_unlock(&chan->lock); if (filp->f_mode & FMODE_WRITE) { out_ep = endpoint_alloc(xdev, (chan->chan_idx + 2) | USB_DIR_OUT, bulk_out_work, BUF_SIZE_ORDER, BUFNUM); if (!out_ep) { rc = -ENOMEM; goto unopen; } rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size); if (rc) goto late_unopen; out_ep->fill_mask = -(1 << chan->out_log2_element_size); chan->out_bytes = 0; chan->flushed = 0; /* * Sending a flush request to a previously closed stream * effectively opens it, and also waits until the command is * confirmed by the FPGA. The latter is necessary because the * data is sent through a separate BULK OUT endpoint, and the * xHCI controller is free to reorder transmissions. * * This can't go wrong unless there's a serious hardware error * (or the computer is stuck for 500 ms?) 
*/ rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false); if (rc == -ETIMEDOUT) { rc = -EIO; report_io_error(xdev, rc); } if (rc) goto late_unopen; } if (filp->f_mode & FMODE_READ) { in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL); if (!in_fifo) { rc = -ENOMEM; goto late_unopen; } rc = fifo_init(in_fifo, chan->in_log2_fifo_size); if (rc) { kfree(in_fifo); goto late_unopen; } } mutex_lock(&chan->lock); if (in_fifo) { chan->in_fifo = in_fifo; chan->read_data_ok = 1; } if (out_ep) chan->out_ep = out_ep; mutex_unlock(&chan->lock); if (in_fifo) { u32 in_checkpoint = 0; if (!chan->in_synchronous) in_checkpoint = in_fifo->size >> chan->in_log2_element_size; chan->in_consumed_bytes = 0; chan->poll_used = 0; chan->in_current_checkpoint = in_checkpoint; rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1, OPCODE_SET_CHECKPOINT, in_checkpoint); if (rc) /* Failure guarantees that opcode wasn't sent */ goto unfifo; /* * In non-blocking mode, request the FPGA to send any data it * has right away. Otherwise, the first read() will always * return -EAGAIN, which is OK strictly speaking, but ugly. * Checking and unrolling if this fails isn't worth the * effort -- the error is propagated to the first read() * anyhow. */ if (filp->f_flags & O_NONBLOCK) request_read_anything(chan, OPCODE_SET_PUSH); } return 0; unfifo: chan->read_data_ok = 0; safely_assign_in_fifo(chan, NULL); fifo_mem_release(in_fifo); kfree(in_fifo); if (out_ep) { mutex_lock(&chan->lock); chan->out_ep = NULL; mutex_unlock(&chan->lock); } late_unopen: if (out_ep) endpoint_dealloc(out_ep); unopen: mutex_lock(&chan->lock); if (filp->f_mode & FMODE_READ) chan->open_for_read = 0; if (filp->f_mode & FMODE_WRITE) chan->open_for_write = 0; mutex_unlock(&chan->lock); kref_put(&xdev->kref, cleanup_dev); return rc; unmutex_fail: kref_put(&xdev->kref, cleanup_dev); mutex_unlock(&chan->lock); return rc; } static ssize_t xillyusb_read(struct file *filp, char __user *userbuf, size_t count, loff_t *f_pos) { struct xillyusb_channel *chan = filp->private_data; struct xillyusb_dev *xdev = chan->xdev; struct xillyfifo *fifo = chan->in_fifo; int chan_num = (chan->chan_idx << 1) | 1; long deadline, left_to_sleep; int bytes_done = 0; bool sent_set_push = false; int rc; deadline = jiffies + 1 + XILLY_RX_TIMEOUT; rc = mutex_lock_interruptible(&chan->in_mutex); if (rc) return rc; while (1) { u32 fifo_checkpoint_bytes, complete_checkpoint_bytes; u32 complete_checkpoint, fifo_checkpoint; u32 checkpoint; s32 diff, leap; unsigned int sh = chan->in_log2_element_size; bool checkpoint_for_complete; rc = fifo_read(fifo, (__force void *)userbuf + bytes_done, count - bytes_done, xilly_copy_to_user); if (rc < 0) break; bytes_done += rc; chan->in_consumed_bytes += rc; left_to_sleep = deadline - ((long)jiffies); /* * Some 32-bit arithmetic that may wrap. Note that * complete_checkpoint is rounded up to the closest element * boundary, because the read() can't be completed otherwise. * fifo_checkpoint_bytes is rounded down, because it protects * in_fifo from overflowing. 
*/ fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size; complete_checkpoint_bytes = chan->in_consumed_bytes + count - bytes_done; fifo_checkpoint = fifo_checkpoint_bytes >> sh; complete_checkpoint = (complete_checkpoint_bytes + (1 << sh) - 1) >> sh; diff = (fifo_checkpoint - complete_checkpoint) << sh; if (chan->in_synchronous && diff >= 0) { checkpoint = complete_checkpoint; checkpoint_for_complete = true; } else { checkpoint = fifo_checkpoint; checkpoint_for_complete = false; } leap = (checkpoint - chan->in_current_checkpoint) << sh; /* * To prevent flooding of OPCODE_SET_CHECKPOINT commands as * data is consumed, it's issued only if it moves the * checkpoint by at least an 8th of the FIFO's size, or if * it's necessary to complete the number of bytes requested by * the read() call. * * chan->read_data_ok is checked to spare an unnecessary * submission after receiving EOF, however it's harmless if * such slips away. */ if (chan->read_data_ok && (leap > (fifo->size >> 3) || (checkpoint_for_complete && leap > 0))) { chan->in_current_checkpoint = checkpoint; rc = xillyusb_send_opcode(xdev, chan_num, OPCODE_SET_CHECKPOINT, checkpoint); if (rc) break; } if (bytes_done == count || (left_to_sleep <= 0 && bytes_done)) break; /* * Reaching here means that the FIFO was empty when * fifo_read() returned, but not necessarily right now. Error * and EOF are checked and reported only now, so that no data * that managed its way to the FIFO is lost. */ if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */ /* Has data slipped into the FIFO since fifo_read()? */ smp_rmb(); if (READ_ONCE(fifo->fill)) continue; rc = 0; break; } if (xdev->error) { rc = xdev->error; break; } if (filp->f_flags & O_NONBLOCK) { rc = -EAGAIN; break; } if (!sent_set_push) { rc = xillyusb_send_opcode(xdev, chan_num, OPCODE_SET_PUSH, complete_checkpoint); if (rc) break; sent_set_push = true; } if (left_to_sleep > 0) { /* * Note that when xdev->error is set (e.g. when the * device is unplugged), read_data_ok turns zero and * fifo->waitq is awaken. * Therefore no special attention to xdev->error. */ rc = wait_event_interruptible_timeout (fifo->waitq, fifo->fill || !chan->read_data_ok, left_to_sleep); } else { /* bytes_done == 0 */ /* Tell FPGA to send anything it has */ rc = request_read_anything(chan, OPCODE_UPDATE_PUSH); if (rc) break; rc = wait_event_interruptible (fifo->waitq, fifo->fill || !chan->read_data_ok); } if (rc < 0) { rc = -EINTR; break; } } if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) && !READ_ONCE(fifo->fill)) request_read_anything(chan, OPCODE_SET_PUSH); mutex_unlock(&chan->in_mutex); if (bytes_done) return bytes_done; return rc; } static int xillyusb_flush(struct file *filp, fl_owner_t id) { struct xillyusb_channel *chan = filp->private_data; int rc; if (!(filp->f_mode & FMODE_WRITE)) return 0; rc = mutex_lock_interruptible(&chan->out_mutex); if (rc) return rc; /* * One second's timeout on flushing. Interrupts are ignored, because if * the user pressed CTRL-C, that interrupt will still be in flight by * the time we reach here, and the opportunity to flush is lost. */ rc = flush_downstream(chan, HZ, false); mutex_unlock(&chan->out_mutex); if (rc == -ETIMEDOUT) { /* The things you do to use dev_warn() and not pr_warn() */ struct xillyusb_dev *xdev = chan->xdev; mutex_lock(&chan->lock); if (!xdev->error) dev_warn(xdev->dev, "Timed out while flushing. 
Output data may be lost.\n"); mutex_unlock(&chan->lock); } return rc; } static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf, size_t count, loff_t *f_pos) { struct xillyusb_channel *chan = filp->private_data; struct xillyusb_dev *xdev = chan->xdev; struct xillyfifo *fifo = &chan->out_ep->fifo; int rc; rc = mutex_lock_interruptible(&chan->out_mutex); if (rc) return rc; while (1) { if (xdev->error) { rc = xdev->error; break; } if (count == 0) break; rc = fifo_write(fifo, (__force void *)userbuf, count, xilly_copy_from_user); if (rc != 0) break; if (filp->f_flags & O_NONBLOCK) { rc = -EAGAIN; break; } if (wait_event_interruptible (fifo->waitq, fifo->fill != fifo->size || xdev->error)) { rc = -EINTR; break; } } if (rc < 0) goto done; chan->out_bytes += rc; if (rc) { try_queue_bulk_out(chan->out_ep); chan->flushed = 0; } if (chan->out_synchronous) { int flush_rc = flush_downstream(chan, 0, true); if (flush_rc && !rc) rc = flush_rc; } done: mutex_unlock(&chan->out_mutex); return rc; } static int xillyusb_release(struct inode *inode, struct file *filp) { struct xillyusb_channel *chan = filp->private_data; struct xillyusb_dev *xdev = chan->xdev; int rc_read = 0, rc_write = 0; if (filp->f_mode & FMODE_READ) { struct xillyfifo *in_fifo = chan->in_fifo; rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1, OPCODE_CLOSE, 0); /* * If rc_read is nonzero, xdev->error indicates a global * device error. The error is reported later, so that * resources are freed. * * Looping on wait_event_interruptible() kinda breaks the idea * of being interruptible, and this should have been * wait_event(). Only it's being waken with * wake_up_interruptible() for the sake of other uses. If * there's a global device error, chan->read_data_ok is * deasserted and the wait queue is awaken, so this is covered. */ while (wait_event_interruptible(in_fifo->waitq, !chan->read_data_ok)) ; /* Empty loop */ safely_assign_in_fifo(chan, NULL); fifo_mem_release(in_fifo); kfree(in_fifo); mutex_lock(&chan->lock); chan->open_for_read = 0; mutex_unlock(&chan->lock); } if (filp->f_mode & FMODE_WRITE) { struct xillyusb_endpoint *ep = chan->out_ep; /* * chan->flushing isn't zeroed. If the pre-release flush timed * out, a cancel request will be sent before the next * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again). * This is despite that the FPGA forgets about the checkpoint * request as the file closes. Still, in an exceptional race * condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT * just before closing that would reach the host after the * file has re-opened. */ mutex_lock(&chan->lock); chan->out_ep = NULL; mutex_unlock(&chan->lock); endpoint_quiesce(ep); endpoint_dealloc(ep); /* See comments on rc_read above */ rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1, OPCODE_CLOSE, 0); mutex_lock(&chan->lock); chan->open_for_write = 0; mutex_unlock(&chan->lock); } kref_put(&xdev->kref, cleanup_dev); return rc_read ? rc_read : rc_write; } /* * Xillybus' API allows device nodes to be seekable, giving the user * application access to a RAM array on the FPGA (or logic emulating it). */ static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence) { struct xillyusb_channel *chan = filp->private_data; struct xillyusb_dev *xdev = chan->xdev; loff_t pos = filp->f_pos; int rc = 0; unsigned int log2_element_size = chan->readable ? 
chan->in_log2_element_size : chan->out_log2_element_size; /* * Take both mutexes not allowing interrupts, since it seems like * common applications don't expect an -EINTR here. Besides, multiple * access to a single file descriptor on seekable devices is a mess * anyhow. */ mutex_lock(&chan->out_mutex); mutex_lock(&chan->in_mutex); switch (whence) { case SEEK_SET: pos = offset; break; case SEEK_CUR: pos += offset; break; case SEEK_END: pos = offset; /* Going to the end => to the beginning */ break; default: rc = -EINVAL; goto end; } /* In any case, we must finish on an element boundary */ if (pos & ((1 << log2_element_size) - 1)) { rc = -EINVAL; goto end; } rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1, OPCODE_SET_ADDR, pos >> log2_element_size); if (rc) goto end; if (chan->writable) { chan->flushed = 0; rc = flush_downstream(chan, HZ, false); } end: mutex_unlock(&chan->out_mutex); mutex_unlock(&chan->in_mutex); if (rc) /* Return error after releasing mutexes */ return rc; filp->f_pos = pos; return pos; } static __poll_t xillyusb_poll(struct file *filp, poll_table *wait) { struct xillyusb_channel *chan = filp->private_data; __poll_t mask = 0; if (chan->in_fifo) poll_wait(filp, &chan->in_fifo->waitq, wait); if (chan->out_ep) poll_wait(filp, &chan->out_ep->fifo.waitq, wait); /* * If this is the first time poll() is called, and the file is * readable, set the relevant flag. Also tell the FPGA to send all it * has, to kickstart the mechanism that ensures there's always some * data in in_fifo unless the stream is dry end-to-end. Note that the * first poll() may not return a EPOLLIN, even if there's data on the * FPGA. Rather, the data will arrive soon, and trigger the relevant * wait queue. */ if (!chan->poll_used && chan->in_fifo) { chan->poll_used = 1; request_read_anything(chan, OPCODE_SET_PUSH); } /* * poll() won't play ball regarding read() channels which * are synchronous. Allowing that will create situations where data has * been delivered at the FPGA, and users expecting select() to wake up, * which it may not. So make it never work. 
*/ if (chan->in_fifo && !chan->in_synchronous && (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok)) mask |= EPOLLIN | EPOLLRDNORM; if (chan->out_ep && (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size)) mask |= EPOLLOUT | EPOLLWRNORM; if (chan->xdev->error) mask |= EPOLLERR; return mask; } static const struct file_operations xillyusb_fops = { .owner = THIS_MODULE, .read = xillyusb_read, .write = xillyusb_write, .open = xillyusb_open, .flush = xillyusb_flush, .release = xillyusb_release, .llseek = xillyusb_llseek, .poll = xillyusb_poll, }; static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev) { xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT, bulk_out_work, 1, 2); if (!xdev->msg_ep) return -ENOMEM; if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */ goto dealloc; xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */ xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN, bulk_in_work, BUF_SIZE_ORDER, BUFNUM); if (!xdev->in_ep) goto dealloc; try_queue_bulk_in(xdev->in_ep); return 0; dealloc: endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */ xdev->msg_ep = NULL; return -ENOMEM; } static int setup_channels(struct xillyusb_dev *xdev, __le16 *chandesc, int num_channels) { struct xillyusb_channel *chan; int i; chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; xdev->channels = chan; for (i = 0; i < num_channels; i++, chan++) { unsigned int in_desc = le16_to_cpu(*chandesc++); unsigned int out_desc = le16_to_cpu(*chandesc++); chan->xdev = xdev; mutex_init(&chan->in_mutex); mutex_init(&chan->out_mutex); mutex_init(&chan->lock); init_waitqueue_head(&chan->flushq); chan->chan_idx = i; if (in_desc & 0x80) { /* Entry is valid */ chan->readable = 1; chan->in_synchronous = !!(in_desc & 0x40); chan->in_seekable = !!(in_desc & 0x20); chan->in_log2_element_size = in_desc & 0x0f; chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16; } /* * A downstream channel should never exist above index 13, * as it would request a nonexistent BULK endpoint > 15. * In the peculiar case that it does, it's ignored silently. */ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */ chan->writable = 1; chan->out_synchronous = !!(out_desc & 0x40); chan->out_seekable = !!(out_desc & 0x20); chan->out_log2_element_size = out_desc & 0x0f; chan->out_log2_fifo_size = ((out_desc >> 8) & 0x1f) + 16; } } return 0; } static int xillyusb_discovery(struct usb_interface *interface) { int rc; struct xillyusb_dev *xdev = usb_get_intfdata(interface); __le16 bogus_chandesc[2]; struct xillyfifo idt_fifo; struct xillyusb_channel *chan; unsigned int idt_len, names_offset; unsigned char *idt; int num_channels; rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0); if (rc) { dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n"); return rc; } /* Phase I: Set up one fake upstream channel and obtain IDT */ /* Set up a fake IDT with one async IN stream */ bogus_chandesc[0] = cpu_to_le16(0x80); bogus_chandesc[1] = cpu_to_le16(0); rc = setup_channels(xdev, bogus_chandesc, 1); if (rc) return rc; rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE); if (rc) return rc; chan = xdev->channels; chan->in_fifo = &idt_fifo; chan->read_data_ok = 1; xdev->num_channels = 1; rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0); if (rc) { dev_err(&interface->dev, "Failed to send IDT request. 
Aborting.\n"); goto unfifo; } rc = wait_event_interruptible_timeout(idt_fifo.waitq, !chan->read_data_ok, XILLY_RESPONSE_TIMEOUT); if (xdev->error) { rc = xdev->error; goto unfifo; } if (rc < 0) { rc = -EINTR; /* Interrupt on probe method? Interesting. */ goto unfifo; } if (chan->read_data_ok) { rc = -ETIMEDOUT; dev_err(&interface->dev, "No response from FPGA. Aborting.\n"); goto unfifo; } idt_len = READ_ONCE(idt_fifo.fill); idt = kmalloc(idt_len, GFP_KERNEL); if (!idt) { rc = -ENOMEM; goto unfifo; } fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy); if (crc32_le(~0, idt, idt_len) != 0) { dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n"); rc = -ENODEV; goto unidt; } if (*idt > 0x90) { dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n", (int)*idt); rc = -ENODEV; goto unidt; } /* Phase II: Set up the streams as defined in IDT */ num_channels = le16_to_cpu(*((__le16 *)(idt + 1))); names_offset = 3 + num_channels * 4; idt_len -= 4; /* Exclude CRC */ if (idt_len < names_offset) { dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n"); rc = -ENODEV; goto unidt; } rc = setup_channels(xdev, (void *)idt + 3, num_channels); if (rc) goto unidt; /* * Except for wildly misbehaving hardware, or if it was disconnected * just after responding with the IDT, there is no reason for any * work item to be running now. To be sure that xdev->channels * is updated on anything that might run in parallel, flush the * workqueue, which rarely does anything. */ flush_workqueue(xdev->workq); xdev->num_channels = num_channels; fifo_mem_release(&idt_fifo); kfree(chan); rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops, THIS_MODULE, xdev, idt + names_offset, idt_len - names_offset, num_channels, xillyname, true); kfree(idt); return rc; unidt: kfree(idt); unfifo: safely_assign_in_fifo(chan, NULL); fifo_mem_release(&idt_fifo); return rc; } static int xillyusb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct xillyusb_dev *xdev; int rc; xdev = kzalloc(sizeof(*xdev), GFP_KERNEL); if (!xdev) return -ENOMEM; kref_init(&xdev->kref); mutex_init(&xdev->process_in_mutex); mutex_init(&xdev->msg_mutex); xdev->udev = usb_get_dev(interface_to_usbdev(interface)); xdev->dev = &interface->dev; xdev->error = 0; spin_lock_init(&xdev->error_lock); xdev->in_counter = 0; xdev->in_bytes_left = 0; xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0); if (!xdev->workq) { dev_err(&interface->dev, "Failed to allocate work queue\n"); rc = -ENOMEM; goto fail; } INIT_WORK(&xdev->wakeup_workitem, wakeup_all); usb_set_intfdata(interface, xdev); rc = xillyusb_setup_base_eps(xdev); if (rc) goto fail; rc = xillyusb_discovery(interface); if (rc) goto latefail; return 0; latefail: endpoint_quiesce(xdev->in_ep); endpoint_quiesce(xdev->msg_ep); fail: usb_set_intfdata(interface, NULL); kref_put(&xdev->kref, cleanup_dev); return rc; } static void xillyusb_disconnect(struct usb_interface *interface) { struct xillyusb_dev *xdev = usb_get_intfdata(interface); struct xillyusb_endpoint *msg_ep = xdev->msg_ep; struct xillyfifo *fifo = &msg_ep->fifo; int rc; int i; xillybus_cleanup_chrdev(xdev, &interface->dev); /* * Try to send OPCODE_QUIESCE, which will fail silently if the device * was disconnected, but makes sense on module unload. 
*/ msg_ep->wake_on_drain = true; xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0); /* * If the device has been disconnected, sending the opcode causes * a global device error with xdev->error, if such error didn't * occur earlier. Hence timing out means that the USB link is fine, * but somehow the message wasn't sent. Should never happen. */ rc = wait_event_interruptible_timeout(fifo->waitq, msg_ep->drained || xdev->error, XILLY_RESPONSE_TIMEOUT); if (!rc) dev_err(&interface->dev, "Weird timeout condition on sending quiesce request.\n"); report_io_error(xdev, -ENODEV); /* Discourage further activity */ /* * This device driver is declared with soft_unbind set, or else * sending OPCODE_QUIESCE above would always fail. The price is * that the USB framework didn't kill outstanding URBs, so it has * to be done explicitly before returning from this call. */ for (i = 0; i < xdev->num_channels; i++) { struct xillyusb_channel *chan = &xdev->channels[i]; /* * Lock taken to prevent chan->out_ep from changing. It also * ensures xillyusb_open() and xillyusb_flush() don't access * xdev->dev after being nullified below. */ mutex_lock(&chan->lock); if (chan->out_ep) endpoint_quiesce(chan->out_ep); mutex_unlock(&chan->lock); } endpoint_quiesce(xdev->in_ep); endpoint_quiesce(xdev->msg_ep); usb_set_intfdata(interface, NULL); xdev->dev = NULL; mutex_lock(&kref_mutex); kref_put(&xdev->kref, cleanup_dev); mutex_unlock(&kref_mutex); } static struct usb_driver xillyusb_driver = { .name = xillyname, .id_table = xillyusb_table, .probe = xillyusb_probe, .disconnect = xillyusb_disconnect, .soft_unbind = 1, }; static int __init xillyusb_init(void) { int rc = 0; if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT) fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT; else fifo_buf_order = 0; rc = usb_register(&xillyusb_driver); return rc; } static void __exit xillyusb_exit(void) { usb_deregister(&xillyusb_driver); } module_init(xillyusb_init); module_exit(xillyusb_exit);
linux-master
drivers/char/xillybus/xillyusb.c
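A minimal host-side sketch, for illustration only, of the 32-bit control word that process_bulk_in() in xillyusb.c above parses: bits 0-11 carry the channel number, bits 12-21 the count (payload length minus one for OPCODE_DATA, a rolling counter for other opcodes), and bits 24-27 the opcode. The helper name and the demo values below are assumptions, not part of the driver.

/* Hypothetical pack/unpack demo for the XillyUSB bulk-IN header word. */
#include <stdint.h>
#include <stdio.h>

static uint32_t xillyusb_pack_ctrlword(unsigned int chan_num,
					unsigned int count,
					unsigned int opcode)
{
	return (chan_num & 0xfff) | ((count & 0x3ff) << 12) |
	       ((opcode & 0xf) << 24);
}

int main(void)
{
	/* A data header for channel index 1, FPGA-to-host direction
	 * (odd chan_num), carrying 16 bytes of payload.
	 */
	uint32_t w = xillyusb_pack_ctrlword(3, 16 - 1, 0 /* OPCODE_DATA */);

	unsigned int chan_num = w & 0xfff;
	unsigned int count = (w >> 12) & 0x3ff;
	unsigned int opcode = (w >> 24) & 0xf;

	printf("chan=%u bytes=%u opcode=%u\n", chan_num, count + 1, opcode);
	return 0;
}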
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/misc/xillybus_core.c * * Copyright 2011 Xillybus Ltd, http://xillybus.com * * Driver for the Xillybus FPGA/host framework. * * This driver interfaces with a special IP core in an FPGA, setting up * a pipe between a hardware FIFO in the programmable logic and a device * file in the host. The number of such pipes and their attributes are * set up on the logic. This driver detects these automatically and * creates the device files accordingly. */ #include <linux/list.h> #include <linux/device.h> #include <linux/module.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/crc32.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "xillybus.h" #include "xillybus_class.h" MODULE_DESCRIPTION("Xillybus core functions"); MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); MODULE_ALIAS("xillybus_core"); MODULE_LICENSE("GPL v2"); /* General timeout is 100 ms, rx timeout is 10 ms */ #define XILLY_RX_TIMEOUT (10*HZ/1000) #define XILLY_TIMEOUT (100*HZ/1000) #define fpga_msg_ctrl_reg 0x0008 #define fpga_dma_control_reg 0x0020 #define fpga_dma_bufno_reg 0x0024 #define fpga_dma_bufaddr_lowaddr_reg 0x0028 #define fpga_dma_bufaddr_highaddr_reg 0x002c #define fpga_buf_ctrl_reg 0x0030 #define fpga_buf_offset_reg 0x0034 #define fpga_endian_reg 0x0040 #define XILLYMSG_OPCODE_RELEASEBUF 1 #define XILLYMSG_OPCODE_QUIESCEACK 2 #define XILLYMSG_OPCODE_FIFOEOF 3 #define XILLYMSG_OPCODE_FATAL_ERROR 4 #define XILLYMSG_OPCODE_NONEMPTY 5 static const char xillyname[] = "xillybus"; static struct workqueue_struct *xillybus_wq; /* * Locking scheme: Mutexes protect invocations of character device methods. * If both locks are taken, wr_mutex is taken first, rd_mutex second. * * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the * buffers' end_offset fields against changes made by IRQ handler (and in * theory, other file request handlers, but the mutex handles that). Nothing * else. * They are held for short direct memory manipulations. Needless to say, * no mutex locking is allowed when a spinlock is held. * * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset. * * register_mutex is endpoint-specific, and is held when non-atomic * register operations are performed. wr_mutex and rd_mutex may be * held when register_mutex is taken, but none of the spinlocks. Note that * register_mutex doesn't protect against sporadic buf_ctrl_reg writes * which are unrelated to buf_offset_reg, since they are harmless. * * Blocking on the wait queues is allowed with mutexes held, but not with * spinlocks. * * Only interruptible blocking is allowed on mutexes and wait queues. 
* * All in all, the locking order goes (with skips allowed, of course): * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock */ static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf) { int opcode; int msg_channel, msg_bufno, msg_data, msg_dir; opcode = (buf[0] >> 24) & 0xff; msg_dir = buf[0] & 1; msg_channel = (buf[0] >> 1) & 0x7ff; msg_bufno = (buf[0] >> 12) & 0x3ff; msg_data = buf[1] & 0xfffffff; dev_warn(endpoint->dev, "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n", opcode, msg_channel, msg_dir, msg_bufno, msg_data); } /* * xillybus_isr assumes the interrupt is allocated exclusively to it, * which is the natural case MSI and several other hardware-oriented * interrupts. Sharing is not allowed. */ irqreturn_t xillybus_isr(int irq, void *data) { struct xilly_endpoint *ep = data; u32 *buf; unsigned int buf_size; int i; int opcode; unsigned int msg_channel, msg_bufno, msg_data, msg_dir; struct xilly_channel *channel; buf = ep->msgbuf_addr; buf_size = ep->msg_buf_size/sizeof(u32); dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr, ep->msg_buf_size, DMA_FROM_DEVICE); for (i = 0; i < buf_size; i += 2) { if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { malformed_message(ep, &buf[i]); dev_warn(ep->dev, "Sending a NACK on counter %x (instead of %x) on entry %d\n", ((buf[i+1] >> 28) & 0xf), ep->msg_counter, i/2); if (++ep->failed_messages > 10) { dev_err(ep->dev, "Lost sync with interrupt messages. Stopping.\n"); } else { dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr, ep->msg_buf_size, DMA_FROM_DEVICE); iowrite32(0x01, /* Message NACK */ ep->registers + fpga_msg_ctrl_reg); } return IRQ_HANDLED; } else if (buf[i] & (1 << 22)) /* Last message */ break; } if (i >= buf_size) { dev_err(ep->dev, "Bad interrupt message. 
Stopping.\n"); return IRQ_HANDLED; } buf_size = i + 2; for (i = 0; i < buf_size; i += 2) { /* Scan through messages */ opcode = (buf[i] >> 24) & 0xff; msg_dir = buf[i] & 1; msg_channel = (buf[i] >> 1) & 0x7ff; msg_bufno = (buf[i] >> 12) & 0x3ff; msg_data = buf[i+1] & 0xfffffff; switch (opcode) { case XILLYMSG_OPCODE_RELEASEBUF: if ((msg_channel > ep->num_channels) || (msg_channel == 0)) { malformed_message(ep, &buf[i]); break; } channel = ep->channels[msg_channel]; if (msg_dir) { /* Write channel */ if (msg_bufno >= channel->num_wr_buffers) { malformed_message(ep, &buf[i]); break; } spin_lock(&channel->wr_spinlock); channel->wr_buffers[msg_bufno]->end_offset = msg_data; channel->wr_fpga_buf_idx = msg_bufno; channel->wr_empty = 0; channel->wr_sleepy = 0; spin_unlock(&channel->wr_spinlock); wake_up_interruptible(&channel->wr_wait); } else { /* Read channel */ if (msg_bufno >= channel->num_rd_buffers) { malformed_message(ep, &buf[i]); break; } spin_lock(&channel->rd_spinlock); channel->rd_fpga_buf_idx = msg_bufno; channel->rd_full = 0; spin_unlock(&channel->rd_spinlock); wake_up_interruptible(&channel->rd_wait); if (!channel->rd_synchronous) queue_delayed_work( xillybus_wq, &channel->rd_workitem, XILLY_RX_TIMEOUT); } break; case XILLYMSG_OPCODE_NONEMPTY: if ((msg_channel > ep->num_channels) || (msg_channel == 0) || (!msg_dir) || !ep->channels[msg_channel]->wr_supports_nonempty) { malformed_message(ep, &buf[i]); break; } channel = ep->channels[msg_channel]; if (msg_bufno >= channel->num_wr_buffers) { malformed_message(ep, &buf[i]); break; } spin_lock(&channel->wr_spinlock); if (msg_bufno == channel->wr_host_buf_idx) channel->wr_ready = 1; spin_unlock(&channel->wr_spinlock); wake_up_interruptible(&channel->wr_ready_wait); break; case XILLYMSG_OPCODE_QUIESCEACK: ep->idtlen = msg_data; wake_up_interruptible(&ep->ep_wait); break; case XILLYMSG_OPCODE_FIFOEOF: if ((msg_channel > ep->num_channels) || (msg_channel == 0) || (!msg_dir) || !ep->channels[msg_channel]->num_wr_buffers) { malformed_message(ep, &buf[i]); break; } channel = ep->channels[msg_channel]; spin_lock(&channel->wr_spinlock); channel->wr_eof = msg_bufno; channel->wr_sleepy = 0; channel->wr_hangup = channel->wr_empty && (channel->wr_host_buf_idx == msg_bufno); spin_unlock(&channel->wr_spinlock); wake_up_interruptible(&channel->wr_wait); break; case XILLYMSG_OPCODE_FATAL_ERROR: ep->fatal_error = 1; wake_up_interruptible(&ep->ep_wait); /* For select() */ dev_err(ep->dev, "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n"); break; default: malformed_message(ep, &buf[i]); break; } } dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr, ep->msg_buf_size, DMA_FROM_DEVICE); ep->msg_counter = (ep->msg_counter + 1) & 0xf; ep->failed_messages = 0; iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */ return IRQ_HANDLED; } EXPORT_SYMBOL(xillybus_isr); /* * A few trivial memory management functions. * NOTE: These functions are used only on probe and remove, and therefore * no locks are applied! 
*/ static void xillybus_autoflush(struct work_struct *work); struct xilly_alloc_state { void *salami; int left_of_salami; int nbuffer; enum dma_data_direction direction; u32 regdirection; }; static void xilly_unmap(void *ptr) { struct xilly_mapping *data = ptr; dma_unmap_single(data->device, data->dma_addr, data->size, data->direction); kfree(ptr); } static int xilly_map_single(struct xilly_endpoint *ep, void *ptr, size_t size, int direction, dma_addr_t *ret_dma_handle ) { dma_addr_t addr; struct xilly_mapping *this; this = kzalloc(sizeof(*this), GFP_KERNEL); if (!this) return -ENOMEM; addr = dma_map_single(ep->dev, ptr, size, direction); if (dma_mapping_error(ep->dev, addr)) { kfree(this); return -ENODEV; } this->device = ep->dev; this->dma_addr = addr; this->size = size; this->direction = direction; *ret_dma_handle = addr; return devm_add_action_or_reset(ep->dev, xilly_unmap, this); } static int xilly_get_dma_buffers(struct xilly_endpoint *ep, struct xilly_alloc_state *s, struct xilly_buffer **buffers, int bufnum, int bytebufsize) { int i, rc; dma_addr_t dma_addr; struct device *dev = ep->dev; struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */ if (buffers) { /* Not the message buffer */ this_buffer = devm_kcalloc(dev, bufnum, sizeof(struct xilly_buffer), GFP_KERNEL); if (!this_buffer) return -ENOMEM; } for (i = 0; i < bufnum; i++) { /* * Buffers are expected in descending size order, so there * is either enough space for this buffer or none at all. */ if ((s->left_of_salami < bytebufsize) && (s->left_of_salami > 0)) { dev_err(ep->dev, "Corrupt buffer allocation in IDT. Aborting.\n"); return -ENODEV; } if (s->left_of_salami == 0) { int allocorder, allocsize; allocsize = PAGE_SIZE; allocorder = 0; while (bytebufsize > allocsize) { allocsize *= 2; allocorder++; } s->salami = (void *) devm_get_free_pages( dev, GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, allocorder); if (!s->salami) return -ENOMEM; s->left_of_salami = allocsize; } rc = xilly_map_single(ep, s->salami, bytebufsize, s->direction, &dma_addr); if (rc) return rc; iowrite32((u32) (dma_addr & 0xffffffff), ep->registers + fpga_dma_bufaddr_lowaddr_reg); iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)), ep->registers + fpga_dma_bufaddr_highaddr_reg); if (buffers) { /* Not the message buffer */ this_buffer->addr = s->salami; this_buffer->dma_addr = dma_addr; buffers[i] = this_buffer++; iowrite32(s->regdirection | s->nbuffer++, ep->registers + fpga_dma_bufno_reg); } else { ep->msgbuf_addr = s->salami; ep->msgbuf_dma_addr = dma_addr; ep->msg_buf_size = bytebufsize; iowrite32(s->regdirection, ep->registers + fpga_dma_bufno_reg); } s->left_of_salami -= bytebufsize; s->salami += bytebufsize; } return 0; } static int xilly_setupchannels(struct xilly_endpoint *ep, unsigned char *chandesc, int entries) { struct device *dev = ep->dev; int i, entry, rc; struct xilly_channel *channel; int channelnum, bufnum, bufsize, format, is_writebuf; int bytebufsize; int synchronous, allowpartial, exclusive_open, seekable; int supports_nonempty; int msg_buf_done = 0; struct xilly_alloc_state rd_alloc = { .salami = NULL, .left_of_salami = 0, .nbuffer = 1, .direction = DMA_TO_DEVICE, .regdirection = 0, }; struct xilly_alloc_state wr_alloc = { .salami = NULL, .left_of_salami = 0, .nbuffer = 1, .direction = DMA_FROM_DEVICE, .regdirection = 0x80000000, }; channel = devm_kcalloc(dev, ep->num_channels, sizeof(struct xilly_channel), GFP_KERNEL); if (!channel) return -ENOMEM; ep->channels = devm_kcalloc(dev, ep->num_channels + 1, sizeof(struct 
xilly_channel *), GFP_KERNEL); if (!ep->channels) return -ENOMEM; ep->channels[0] = NULL; /* Channel 0 is message buf. */ /* Initialize all channels with defaults */ for (i = 1; i <= ep->num_channels; i++) { channel->wr_buffers = NULL; channel->rd_buffers = NULL; channel->num_wr_buffers = 0; channel->num_rd_buffers = 0; channel->wr_fpga_buf_idx = -1; channel->wr_host_buf_idx = 0; channel->wr_host_buf_pos = 0; channel->wr_empty = 1; channel->wr_ready = 0; channel->wr_sleepy = 1; channel->rd_fpga_buf_idx = 0; channel->rd_host_buf_idx = 0; channel->rd_host_buf_pos = 0; channel->rd_full = 0; channel->wr_ref_count = 0; channel->rd_ref_count = 0; spin_lock_init(&channel->wr_spinlock); spin_lock_init(&channel->rd_spinlock); mutex_init(&channel->wr_mutex); mutex_init(&channel->rd_mutex); init_waitqueue_head(&channel->rd_wait); init_waitqueue_head(&channel->wr_wait); init_waitqueue_head(&channel->wr_ready_wait); INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush); channel->endpoint = ep; channel->chan_num = i; channel->log2_element_size = 0; ep->channels[i] = channel++; } for (entry = 0; entry < entries; entry++, chandesc += 4) { struct xilly_buffer **buffers = NULL; is_writebuf = chandesc[0] & 0x01; channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7); format = (chandesc[1] >> 4) & 0x03; allowpartial = (chandesc[1] >> 6) & 0x01; synchronous = (chandesc[1] >> 7) & 0x01; bufsize = 1 << (chandesc[2] & 0x1f); bufnum = 1 << (chandesc[3] & 0x0f); exclusive_open = (chandesc[2] >> 7) & 0x01; seekable = (chandesc[2] >> 6) & 0x01; supports_nonempty = (chandesc[2] >> 5) & 0x01; if ((channelnum > ep->num_channels) || ((channelnum == 0) && !is_writebuf)) { dev_err(ep->dev, "IDT requests channel out of range. Aborting.\n"); return -ENODEV; } channel = ep->channels[channelnum]; /* NULL for msg channel */ if (!is_writebuf || channelnum > 0) { channel->log2_element_size = ((format > 2) ? 2 : format); bytebufsize = bufsize * (1 << channel->log2_element_size); buffers = devm_kcalloc(dev, bufnum, sizeof(struct xilly_buffer *), GFP_KERNEL); if (!buffers) return -ENOMEM; } else { bytebufsize = bufsize << 2; } if (!is_writebuf) { channel->num_rd_buffers = bufnum; channel->rd_buf_size = bytebufsize; channel->rd_allow_partial = allowpartial; channel->rd_synchronous = synchronous; channel->rd_exclusive_open = exclusive_open; channel->seekable = seekable; channel->rd_buffers = buffers; rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers, bufnum, bytebufsize); } else if (channelnum > 0) { channel->num_wr_buffers = bufnum; channel->wr_buf_size = bytebufsize; channel->seekable = seekable; channel->wr_supports_nonempty = supports_nonempty; channel->wr_allow_partial = allowpartial; channel->wr_synchronous = synchronous; channel->wr_exclusive_open = exclusive_open; channel->wr_buffers = buffers; rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers, bufnum, bytebufsize); } else { rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL, bufnum, bytebufsize); msg_buf_done++; } if (rc) return -ENOMEM; } if (!msg_buf_done) { dev_err(ep->dev, "Corrupt IDT: No message buffer. 
Aborting.\n"); return -ENODEV; } return 0; } static int xilly_scan_idt(struct xilly_endpoint *endpoint, struct xilly_idt_handle *idt_handle) { int count = 0; unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr; unsigned char *end_of_idt = idt + endpoint->idtlen - 4; unsigned char *scan; int len; scan = idt + 1; idt_handle->names = scan; while ((scan <= end_of_idt) && *scan) { while ((scan <= end_of_idt) && *scan++) /* Do nothing, just scan thru string */; count++; } idt_handle->names_len = scan - idt_handle->names; scan++; if (scan > end_of_idt) { dev_err(endpoint->dev, "IDT device name list overflow. Aborting.\n"); return -ENODEV; } idt_handle->chandesc = scan; len = endpoint->idtlen - (3 + ((int) (scan - idt))); if (len & 0x03) { dev_err(endpoint->dev, "Corrupt IDT device name list. Aborting.\n"); return -ENODEV; } idt_handle->entries = len >> 2; endpoint->num_channels = count; return 0; } static int xilly_obtain_idt(struct xilly_endpoint *endpoint) { struct xilly_channel *channel; unsigned char *version; long t; channel = endpoint->channels[1]; /* This should be generated ad-hoc */ channel->wr_sleepy = 1; iowrite32(1 | (3 << 24), /* Opcode 3 for channel 0 = Send IDT */ endpoint->registers + fpga_buf_ctrl_reg); t = wait_event_interruptible_timeout(channel->wr_wait, (!channel->wr_sleepy), XILLY_TIMEOUT); if (t <= 0) { dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n"); if (endpoint->fatal_error) return -EIO; return -ENODEV; } dma_sync_single_for_cpu(channel->endpoint->dev, channel->wr_buffers[0]->dma_addr, channel->wr_buf_size, DMA_FROM_DEVICE); if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) { dev_err(endpoint->dev, "IDT length mismatch (%d != %d). Aborting.\n", channel->wr_buffers[0]->end_offset, endpoint->idtlen); return -ENODEV; } if (crc32_le(~0, channel->wr_buffers[0]->addr, endpoint->idtlen+1) != 0) { dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n"); return -ENODEV; } version = channel->wr_buffers[0]->addr; /* Check version number. Reject anything above 0x82. */ if (*version > 0x82) { dev_err(endpoint->dev, "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. 
Aborting.\n", *version); return -ENODEV; } return 0; } static ssize_t xillybus_read(struct file *filp, char __user *userbuf, size_t count, loff_t *f_pos) { ssize_t rc; unsigned long flags; int bytes_done = 0; int no_time_left = 0; long deadline, left_to_sleep; struct xilly_channel *channel = filp->private_data; int empty, reached_eof, exhausted, ready; /* Initializations are there only to silence warnings */ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; int waiting_bufidx; if (channel->endpoint->fatal_error) return -EIO; deadline = jiffies + 1 + XILLY_RX_TIMEOUT; rc = mutex_lock_interruptible(&channel->wr_mutex); if (rc) return rc; while (1) { /* Note that we may drop mutex within this loop */ int bytes_to_do = count - bytes_done; spin_lock_irqsave(&channel->wr_spinlock, flags); empty = channel->wr_empty; ready = !empty || channel->wr_ready; if (!empty) { bufidx = channel->wr_host_buf_idx; bufpos = channel->wr_host_buf_pos; howmany = ((channel->wr_buffers[bufidx]->end_offset + 1) << channel->log2_element_size) - bufpos; /* Update wr_host_* to its post-operation state */ if (howmany > bytes_to_do) { bufferdone = 0; howmany = bytes_to_do; channel->wr_host_buf_pos += howmany; } else { bufferdone = 1; channel->wr_host_buf_pos = 0; if (bufidx == channel->wr_fpga_buf_idx) { channel->wr_empty = 1; channel->wr_sleepy = 1; channel->wr_ready = 0; } if (bufidx >= (channel->num_wr_buffers - 1)) channel->wr_host_buf_idx = 0; else channel->wr_host_buf_idx++; } } /* * Marking our situation after the possible changes above, * for use after releasing the spinlock. * * empty = empty before change * exhasted = empty after possible change */ reached_eof = channel->wr_empty && (channel->wr_host_buf_idx == channel->wr_eof); channel->wr_hangup = reached_eof; exhausted = channel->wr_empty; waiting_bufidx = channel->wr_host_buf_idx; spin_unlock_irqrestore(&channel->wr_spinlock, flags); if (!empty) { /* Go on, now without the spinlock */ if (bufpos == 0) /* Position zero means it's virgin */ dma_sync_single_for_cpu(channel->endpoint->dev, channel->wr_buffers[bufidx]->dma_addr, channel->wr_buf_size, DMA_FROM_DEVICE); if (copy_to_user( userbuf, channel->wr_buffers[bufidx]->addr + bufpos, howmany)) rc = -EFAULT; userbuf += howmany; bytes_done += howmany; if (bufferdone) { dma_sync_single_for_device(channel->endpoint->dev, channel->wr_buffers[bufidx]->dma_addr, channel->wr_buf_size, DMA_FROM_DEVICE); /* * Tell FPGA the buffer is done with. It's an * atomic operation to the FPGA, so what * happens with other channels doesn't matter, * and the certain channel is protected with * the channel-specific mutex. */ iowrite32(1 | (channel->chan_num << 1) | (bufidx << 12), channel->endpoint->registers + fpga_buf_ctrl_reg); } if (rc) { mutex_unlock(&channel->wr_mutex); return rc; } } /* This includes a zero-count return = EOF */ if ((bytes_done >= count) || reached_eof) break; if (!exhausted) continue; /* More in RAM buffer(s)? Just go on. */ if ((bytes_done > 0) && (no_time_left || (channel->wr_synchronous && channel->wr_allow_partial))) break; /* * Nonblocking read: The "ready" flag tells us that the FPGA * has data to send. In non-blocking mode, if it isn't on, * just return. But if there is, we jump directly to the point * where we ask for the FPGA to send all it has, and wait * until that data arrives. So in a sense, we *do* block in * nonblocking mode, but only for a very short time. 
*/ if (!no_time_left && (filp->f_flags & O_NONBLOCK)) { if (bytes_done > 0) break; if (ready) goto desperate; rc = -EAGAIN; break; } if (!no_time_left || (bytes_done > 0)) { /* * Note that in case of an element-misaligned read * request, offsetlimit will include the last element, * which will be partially read from. */ int offsetlimit = ((count - bytes_done) - 1) >> channel->log2_element_size; int buf_elements = channel->wr_buf_size >> channel->log2_element_size; /* * In synchronous mode, always send an offset limit. * Just don't send a value too big. */ if (channel->wr_synchronous) { /* Don't request more than one buffer */ if (channel->wr_allow_partial && (offsetlimit >= buf_elements)) offsetlimit = buf_elements - 1; /* Don't request more than all buffers */ if (!channel->wr_allow_partial && (offsetlimit >= (buf_elements * channel->num_wr_buffers))) offsetlimit = buf_elements * channel->num_wr_buffers - 1; } /* * In asynchronous mode, force early flush of a buffer * only if that will allow returning a full count. The * "offsetlimit < ( ... )" rather than "<=" excludes * requesting a full buffer, which would obviously * cause a buffer transmission anyhow */ if (channel->wr_synchronous || (offsetlimit < (buf_elements - 1))) { mutex_lock(&channel->endpoint->register_mutex); iowrite32(offsetlimit, channel->endpoint->registers + fpga_buf_offset_reg); iowrite32(1 | (channel->chan_num << 1) | (2 << 24) | /* 2 = offset limit */ (waiting_bufidx << 12), channel->endpoint->registers + fpga_buf_ctrl_reg); mutex_unlock(&channel->endpoint-> register_mutex); } } /* * If partial completion is disallowed, there is no point in * timeout sleeping. Neither if no_time_left is set and * there's no data. */ if (!channel->wr_allow_partial || (no_time_left && (bytes_done == 0))) { /* * This do-loop will run more than once if another * thread reasserted wr_sleepy before we got the mutex * back, so we try again. */ do { mutex_unlock(&channel->wr_mutex); if (wait_event_interruptible( channel->wr_wait, (!channel->wr_sleepy))) goto interrupted; if (mutex_lock_interruptible( &channel->wr_mutex)) goto interrupted; } while (channel->wr_sleepy); continue; interrupted: /* Mutex is not held if got here */ if (channel->endpoint->fatal_error) return -EIO; if (bytes_done) return bytes_done; if (filp->f_flags & O_NONBLOCK) return -EAGAIN; /* Don't admit snoozing */ return -EINTR; } left_to_sleep = deadline - ((long) jiffies); /* * If our time is out, skip the waiting. We may miss wr_sleepy * being deasserted but hey, almost missing the train is like * missing it. */ if (left_to_sleep > 0) { left_to_sleep = wait_event_interruptible_timeout( channel->wr_wait, (!channel->wr_sleepy), left_to_sleep); if (left_to_sleep > 0) /* wr_sleepy deasserted */ continue; if (left_to_sleep < 0) { /* Interrupt */ mutex_unlock(&channel->wr_mutex); if (channel->endpoint->fatal_error) return -EIO; if (bytes_done) return bytes_done; return -EINTR; } } desperate: no_time_left = 1; /* We're out of sleeping time. Desperate! */ if (bytes_done == 0) { /* * Reaching here means that we allow partial return, * that we've run out of time, and that we have * nothing to return. * So tell the FPGA to send anything it has or gets. */ iowrite32(1 | (channel->chan_num << 1) | (3 << 24) | /* Opcode 3, flush it all! */ (waiting_bufidx << 12), channel->endpoint->registers + fpga_buf_ctrl_reg); } /* * Reaching here means that we *do* have data in the buffer, * but the "partial" flag disallows returning less than * required. And we don't have as much. 
So loop again, * which is likely to end up blocking indefinitely until * enough data has arrived. */ } mutex_unlock(&channel->wr_mutex); if (channel->endpoint->fatal_error) return -EIO; if (rc) return rc; return bytes_done; } /* * The timeout argument takes values as follows: * >0 : Flush with timeout * ==0 : Flush, and wait idefinitely for the flush to complete * <0 : Autoflush: Flush only if there's a single buffer occupied */ static int xillybus_myflush(struct xilly_channel *channel, long timeout) { int rc; unsigned long flags; int end_offset_plus1; int bufidx, bufidx_minus1; int i; int empty; int new_rd_host_buf_pos; if (channel->endpoint->fatal_error) return -EIO; rc = mutex_lock_interruptible(&channel->rd_mutex); if (rc) return rc; /* * Don't flush a closed channel. This can happen when the work queued * autoflush thread fires off after the file has closed. This is not * an error, just something to dismiss. */ if (!channel->rd_ref_count) goto done; bufidx = channel->rd_host_buf_idx; bufidx_minus1 = (bufidx == 0) ? channel->num_rd_buffers - 1 : bufidx - 1; end_offset_plus1 = channel->rd_host_buf_pos >> channel->log2_element_size; new_rd_host_buf_pos = channel->rd_host_buf_pos - (end_offset_plus1 << channel->log2_element_size); /* Submit the current buffer if it's nonempty */ if (end_offset_plus1) { unsigned char *tail = channel->rd_buffers[bufidx]->addr + (end_offset_plus1 << channel->log2_element_size); /* Copy unflushed data, so we can put it in next buffer */ for (i = 0; i < new_rd_host_buf_pos; i++) channel->rd_leftovers[i] = *tail++; spin_lock_irqsave(&channel->rd_spinlock, flags); /* Autoflush only if a single buffer is occupied */ if ((timeout < 0) && (channel->rd_full || (bufidx_minus1 != channel->rd_fpga_buf_idx))) { spin_unlock_irqrestore(&channel->rd_spinlock, flags); /* * A new work item may be queued by the ISR exactly * now, since the execution of a work item allows the * queuing of a new one while it's running. */ goto done; } /* The 4th element is never needed for data, so it's a flag */ channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0); /* Set up rd_full to reflect a certain moment's state */ if (bufidx == channel->rd_fpga_buf_idx) channel->rd_full = 1; spin_unlock_irqrestore(&channel->rd_spinlock, flags); if (bufidx >= (channel->num_rd_buffers - 1)) channel->rd_host_buf_idx = 0; else channel->rd_host_buf_idx++; dma_sync_single_for_device(channel->endpoint->dev, channel->rd_buffers[bufidx]->dma_addr, channel->rd_buf_size, DMA_TO_DEVICE); mutex_lock(&channel->endpoint->register_mutex); iowrite32(end_offset_plus1 - 1, channel->endpoint->registers + fpga_buf_offset_reg); iowrite32((channel->chan_num << 1) | /* Channel ID */ (2 << 24) | /* Opcode 2, submit buffer */ (bufidx << 12), channel->endpoint->registers + fpga_buf_ctrl_reg); mutex_unlock(&channel->endpoint->register_mutex); } else if (bufidx == 0) { bufidx = channel->num_rd_buffers - 1; } else { bufidx--; } channel->rd_host_buf_pos = new_rd_host_buf_pos; if (timeout < 0) goto done; /* Autoflush */ /* * bufidx is now the last buffer written to (or equal to * rd_fpga_buf_idx if buffer was never written to), and * channel->rd_host_buf_idx the one after it. * * If bufidx == channel->rd_fpga_buf_idx we're either empty or full. */ while (1) { /* Loop waiting for draining of buffers */ spin_lock_irqsave(&channel->rd_spinlock, flags); if (bufidx != channel->rd_fpga_buf_idx) channel->rd_full = 1; /* * Not really full, * but needs waiting. 
*/ empty = !channel->rd_full; spin_unlock_irqrestore(&channel->rd_spinlock, flags); if (empty) break; /* * Indefinite sleep with mutex taken. With data waiting for * flushing user should not be surprised if open() for write * sleeps. */ if (timeout == 0) wait_event_interruptible(channel->rd_wait, (!channel->rd_full)); else if (wait_event_interruptible_timeout( channel->rd_wait, (!channel->rd_full), timeout) == 0) { dev_warn(channel->endpoint->dev, "Timed out while flushing. Output data may be lost.\n"); rc = -ETIMEDOUT; break; } if (channel->rd_full) { rc = -EINTR; break; } } done: mutex_unlock(&channel->rd_mutex); if (channel->endpoint->fatal_error) return -EIO; return rc; } static int xillybus_flush(struct file *filp, fl_owner_t id) { if (!(filp->f_mode & FMODE_WRITE)) return 0; return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */ } static void xillybus_autoflush(struct work_struct *work) { struct delayed_work *workitem = container_of( work, struct delayed_work, work); struct xilly_channel *channel = container_of( workitem, struct xilly_channel, rd_workitem); int rc; rc = xillybus_myflush(channel, -1); if (rc == -EINTR) dev_warn(channel->endpoint->dev, "Autoflush failed because work queue thread got a signal.\n"); else if (rc) dev_err(channel->endpoint->dev, "Autoflush failed under weird circumstances.\n"); } static ssize_t xillybus_write(struct file *filp, const char __user *userbuf, size_t count, loff_t *f_pos) { ssize_t rc; unsigned long flags; int bytes_done = 0; struct xilly_channel *channel = filp->private_data; int full, exhausted; /* Initializations are there only to silence warnings */ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; int end_offset_plus1 = 0; if (channel->endpoint->fatal_error) return -EIO; rc = mutex_lock_interruptible(&channel->rd_mutex); if (rc) return rc; while (1) { int bytes_to_do = count - bytes_done; spin_lock_irqsave(&channel->rd_spinlock, flags); full = channel->rd_full; if (!full) { bufidx = channel->rd_host_buf_idx; bufpos = channel->rd_host_buf_pos; howmany = channel->rd_buf_size - bufpos; /* * Update rd_host_* to its state after this operation. * count=0 means committing the buffer immediately, * which is like flushing, but not necessarily block. */ if ((howmany > bytes_to_do) && (count || ((bufpos >> channel->log2_element_size) == 0))) { bufferdone = 0; howmany = bytes_to_do; channel->rd_host_buf_pos += howmany; } else { bufferdone = 1; if (count) { end_offset_plus1 = channel->rd_buf_size >> channel->log2_element_size; channel->rd_host_buf_pos = 0; } else { unsigned char *tail; int i; howmany = 0; end_offset_plus1 = bufpos >> channel->log2_element_size; channel->rd_host_buf_pos -= end_offset_plus1 << channel->log2_element_size; tail = channel-> rd_buffers[bufidx]->addr + (end_offset_plus1 << channel->log2_element_size); for (i = 0; i < channel->rd_host_buf_pos; i++) channel->rd_leftovers[i] = *tail++; } if (bufidx == channel->rd_fpga_buf_idx) channel->rd_full = 1; if (bufidx >= (channel->num_rd_buffers - 1)) channel->rd_host_buf_idx = 0; else channel->rd_host_buf_idx++; } } /* * Marking our situation after the possible changes above, * for use after releasing the spinlock. 
* * full = full before change * exhasted = full after possible change */ exhausted = channel->rd_full; spin_unlock_irqrestore(&channel->rd_spinlock, flags); if (!full) { /* Go on, now without the spinlock */ unsigned char *head = channel->rd_buffers[bufidx]->addr; int i; if ((bufpos == 0) || /* Zero means it's virgin */ (channel->rd_leftovers[3] != 0)) { dma_sync_single_for_cpu(channel->endpoint->dev, channel->rd_buffers[bufidx]->dma_addr, channel->rd_buf_size, DMA_TO_DEVICE); /* Virgin, but leftovers are due */ for (i = 0; i < bufpos; i++) *head++ = channel->rd_leftovers[i]; channel->rd_leftovers[3] = 0; /* Clear flag */ } if (copy_from_user( channel->rd_buffers[bufidx]->addr + bufpos, userbuf, howmany)) rc = -EFAULT; userbuf += howmany; bytes_done += howmany; if (bufferdone) { dma_sync_single_for_device(channel->endpoint->dev, channel->rd_buffers[bufidx]->dma_addr, channel->rd_buf_size, DMA_TO_DEVICE); mutex_lock(&channel->endpoint->register_mutex); iowrite32(end_offset_plus1 - 1, channel->endpoint->registers + fpga_buf_offset_reg); iowrite32((channel->chan_num << 1) | (2 << 24) | /* 2 = submit buffer */ (bufidx << 12), channel->endpoint->registers + fpga_buf_ctrl_reg); mutex_unlock(&channel->endpoint-> register_mutex); channel->rd_leftovers[3] = (channel->rd_host_buf_pos != 0); } if (rc) { mutex_unlock(&channel->rd_mutex); if (channel->endpoint->fatal_error) return -EIO; if (!channel->rd_synchronous) queue_delayed_work( xillybus_wq, &channel->rd_workitem, XILLY_RX_TIMEOUT); return rc; } } if (bytes_done >= count) break; if (!exhausted) continue; /* If there's more space, just go on */ if ((bytes_done > 0) && channel->rd_allow_partial) break; /* * Indefinite sleep with mutex taken. With data waiting for * flushing, user should not be surprised if open() for write * sleeps. */ if (filp->f_flags & O_NONBLOCK) { rc = -EAGAIN; break; } if (wait_event_interruptible(channel->rd_wait, (!channel->rd_full))) { mutex_unlock(&channel->rd_mutex); if (channel->endpoint->fatal_error) return -EIO; if (bytes_done) return bytes_done; return -EINTR; } } mutex_unlock(&channel->rd_mutex); if (!channel->rd_synchronous) queue_delayed_work(xillybus_wq, &channel->rd_workitem, XILLY_RX_TIMEOUT); if (channel->endpoint->fatal_error) return -EIO; if (rc) return rc; if ((channel->rd_synchronous) && (bytes_done > 0)) { rc = xillybus_myflush(filp->private_data, 0); /* No timeout */ if (rc && (rc != -EINTR)) return rc; } return bytes_done; } static int xillybus_open(struct inode *inode, struct file *filp) { int rc; unsigned long flags; struct xilly_endpoint *endpoint; struct xilly_channel *channel; int index; rc = xillybus_find_inode(inode, (void **)&endpoint, &index); if (rc) return rc; if (endpoint->fatal_error) return -EIO; channel = endpoint->channels[1 + index]; filp->private_data = channel; /* * It gets complicated because: * 1. We don't want to take a mutex we don't have to * 2. We don't want to open one direction if the other will fail. 
*/ if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers)) return -ENODEV; if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers)) return -ENODEV; if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) && (channel->wr_synchronous || !channel->wr_allow_partial || !channel->wr_supports_nonempty)) { dev_err(endpoint->dev, "open() failed: O_NONBLOCK not allowed for read on this device\n"); return -ENODEV; } if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) && (channel->rd_synchronous || !channel->rd_allow_partial)) { dev_err(endpoint->dev, "open() failed: O_NONBLOCK not allowed for write on this device\n"); return -ENODEV; } /* * Note: open() may block on getting mutexes despite O_NONBLOCK. * This shouldn't occur normally, since multiple open of the same * file descriptor is almost always prohibited anyhow * (*_exclusive_open is normally set in real-life systems). */ if (filp->f_mode & FMODE_READ) { rc = mutex_lock_interruptible(&channel->wr_mutex); if (rc) return rc; } if (filp->f_mode & FMODE_WRITE) { rc = mutex_lock_interruptible(&channel->rd_mutex); if (rc) goto unlock_wr; } if ((filp->f_mode & FMODE_READ) && (channel->wr_ref_count != 0) && (channel->wr_exclusive_open)) { rc = -EBUSY; goto unlock; } if ((filp->f_mode & FMODE_WRITE) && (channel->rd_ref_count != 0) && (channel->rd_exclusive_open)) { rc = -EBUSY; goto unlock; } if (filp->f_mode & FMODE_READ) { if (channel->wr_ref_count == 0) { /* First open of file */ /* Move the host to first buffer */ spin_lock_irqsave(&channel->wr_spinlock, flags); channel->wr_host_buf_idx = 0; channel->wr_host_buf_pos = 0; channel->wr_fpga_buf_idx = -1; channel->wr_empty = 1; channel->wr_ready = 0; channel->wr_sleepy = 1; channel->wr_eof = -1; channel->wr_hangup = 0; spin_unlock_irqrestore(&channel->wr_spinlock, flags); iowrite32(1 | (channel->chan_num << 1) | (4 << 24) | /* Opcode 4, open channel */ ((channel->wr_synchronous & 1) << 23), channel->endpoint->registers + fpga_buf_ctrl_reg); } channel->wr_ref_count++; } if (filp->f_mode & FMODE_WRITE) { if (channel->rd_ref_count == 0) { /* First open of file */ /* Move the host to first buffer */ spin_lock_irqsave(&channel->rd_spinlock, flags); channel->rd_host_buf_idx = 0; channel->rd_host_buf_pos = 0; channel->rd_leftovers[3] = 0; /* No leftovers. */ channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1; channel->rd_full = 0; spin_unlock_irqrestore(&channel->rd_spinlock, flags); iowrite32((channel->chan_num << 1) | (4 << 24), /* Opcode 4, open channel */ channel->endpoint->registers + fpga_buf_ctrl_reg); } channel->rd_ref_count++; } unlock: if (filp->f_mode & FMODE_WRITE) mutex_unlock(&channel->rd_mutex); unlock_wr: if (filp->f_mode & FMODE_READ) mutex_unlock(&channel->wr_mutex); if (!rc && (!channel->seekable)) return nonseekable_open(inode, filp); return rc; } static int xillybus_release(struct inode *inode, struct file *filp) { unsigned long flags; struct xilly_channel *channel = filp->private_data; int buf_idx; int eof; if (channel->endpoint->fatal_error) return -EIO; if (filp->f_mode & FMODE_WRITE) { mutex_lock(&channel->rd_mutex); channel->rd_ref_count--; if (channel->rd_ref_count == 0) { /* * We rely on the kernel calling flush() * before we get here. 
*/ iowrite32((channel->chan_num << 1) | /* Channel ID */ (5 << 24), /* Opcode 5, close channel */ channel->endpoint->registers + fpga_buf_ctrl_reg); } mutex_unlock(&channel->rd_mutex); } if (filp->f_mode & FMODE_READ) { mutex_lock(&channel->wr_mutex); channel->wr_ref_count--; if (channel->wr_ref_count == 0) { iowrite32(1 | (channel->chan_num << 1) | (5 << 24), /* Opcode 5, close channel */ channel->endpoint->registers + fpga_buf_ctrl_reg); /* * This is crazily cautious: We make sure that not * only that we got an EOF (be it because we closed * the channel or because of a user's EOF), but verify * that it's one beyond the last buffer arrived, so * we have no leftover buffers pending before wrapping * up (which can only happen in asynchronous channels, * BTW) */ while (1) { spin_lock_irqsave(&channel->wr_spinlock, flags); buf_idx = channel->wr_fpga_buf_idx; eof = channel->wr_eof; channel->wr_sleepy = 1; spin_unlock_irqrestore(&channel->wr_spinlock, flags); /* * Check if eof points at the buffer after * the last one the FPGA submitted. Note that * no EOF is marked by negative eof. */ buf_idx++; if (buf_idx == channel->num_wr_buffers) buf_idx = 0; if (buf_idx == eof) break; /* * Steal extra 100 ms if awaken by interrupt. * This is a simple workaround for an * interrupt pending when entering, which would * otherwise result in declaring the hardware * non-responsive. */ if (wait_event_interruptible( channel->wr_wait, (!channel->wr_sleepy))) msleep(100); if (channel->wr_sleepy) { mutex_unlock(&channel->wr_mutex); dev_warn(channel->endpoint->dev, "Hardware failed to respond to close command, therefore left in messy state.\n"); return -EINTR; } } } mutex_unlock(&channel->wr_mutex); } return 0; } static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence) { struct xilly_channel *channel = filp->private_data; loff_t pos = filp->f_pos; int rc = 0; /* * Take both mutexes not allowing interrupts, since it seems like * common applications don't expect an -EINTR here. Besides, multiple * access to a single file descriptor on seekable devices is a mess * anyhow. */ if (channel->endpoint->fatal_error) return -EIO; mutex_lock(&channel->wr_mutex); mutex_lock(&channel->rd_mutex); switch (whence) { case SEEK_SET: pos = offset; break; case SEEK_CUR: pos += offset; break; case SEEK_END: pos = offset; /* Going to the end => to the beginning */ break; default: rc = -EINVAL; goto end; } /* In any case, we must finish on an element boundary */ if (pos & ((1 << channel->log2_element_size) - 1)) { rc = -EINVAL; goto end; } mutex_lock(&channel->endpoint->register_mutex); iowrite32(pos >> channel->log2_element_size, channel->endpoint->registers + fpga_buf_offset_reg); iowrite32((channel->chan_num << 1) | (6 << 24), /* Opcode 6, set address */ channel->endpoint->registers + fpga_buf_ctrl_reg); mutex_unlock(&channel->endpoint->register_mutex); end: mutex_unlock(&channel->rd_mutex); mutex_unlock(&channel->wr_mutex); if (rc) /* Return error after releasing mutexes */ return rc; filp->f_pos = pos; /* * Since seekable devices are allowed only when the channel is * synchronous, we assume that there is no data pending in either * direction (which holds true as long as no concurrent access on the * file descriptor takes place). * The only thing we may need to throw away is leftovers from partial * write() flush. 
*/ channel->rd_leftovers[3] = 0; return pos; } static __poll_t xillybus_poll(struct file *filp, poll_table *wait) { struct xilly_channel *channel = filp->private_data; __poll_t mask = 0; unsigned long flags; poll_wait(filp, &channel->endpoint->ep_wait, wait); /* * poll() won't play ball regarding read() channels which * aren't asynchronous and support the nonempty message. Allowing * that will create situations where data has been delivered at * the FPGA, and users expecting select() to wake up, which it may * not. */ if (!channel->wr_synchronous && channel->wr_supports_nonempty) { poll_wait(filp, &channel->wr_wait, wait); poll_wait(filp, &channel->wr_ready_wait, wait); spin_lock_irqsave(&channel->wr_spinlock, flags); if (!channel->wr_empty || channel->wr_ready) mask |= EPOLLIN | EPOLLRDNORM; if (channel->wr_hangup) /* * Not EPOLLHUP, because its behavior is in the * mist, and EPOLLIN does what we want: Wake up * the read file descriptor so it sees EOF. */ mask |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&channel->wr_spinlock, flags); } /* * If partial data write is disallowed on a write() channel, * it's pointless to ever signal OK to write, because is could * block despite some space being available. */ if (channel->rd_allow_partial) { poll_wait(filp, &channel->rd_wait, wait); spin_lock_irqsave(&channel->rd_spinlock, flags); if (!channel->rd_full) mask |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&channel->rd_spinlock, flags); } if (channel->endpoint->fatal_error) mask |= EPOLLERR; return mask; } static const struct file_operations xillybus_fops = { .owner = THIS_MODULE, .read = xillybus_read, .write = xillybus_write, .open = xillybus_open, .flush = xillybus_flush, .release = xillybus_release, .llseek = xillybus_llseek, .poll = xillybus_poll, }; struct xilly_endpoint *xillybus_init_endpoint(struct device *dev) { struct xilly_endpoint *endpoint; endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL); if (!endpoint) return NULL; endpoint->dev = dev; endpoint->msg_counter = 0x0b; endpoint->failed_messages = 0; endpoint->fatal_error = 0; init_waitqueue_head(&endpoint->ep_wait); mutex_init(&endpoint->register_mutex); return endpoint; } EXPORT_SYMBOL(xillybus_init_endpoint); static int xilly_quiesce(struct xilly_endpoint *endpoint) { long t; endpoint->idtlen = -1; iowrite32((u32) (endpoint->dma_using_dac & 0x0001), endpoint->registers + fpga_dma_control_reg); t = wait_event_interruptible_timeout(endpoint->ep_wait, (endpoint->idtlen >= 0), XILLY_TIMEOUT); if (t <= 0) { dev_err(endpoint->dev, "Failed to quiesce the device on exit.\n"); return -ENODEV; } return 0; } int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint) { int rc; long t; void *bootstrap_resources; int idtbuffersize = (1 << PAGE_SHIFT); struct device *dev = endpoint->dev; /* * The bogus IDT is used during bootstrap for allocating the initial * message buffer, and then the message buffer and space for the IDT * itself. The initial message buffer is of a single page's size, but * it's soon replaced with a more modest one (and memory is freed). */ unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0, 3, 192, PAGE_SHIFT, 0 }; struct xilly_idt_handle idt_handle; /* * Writing the value 0x00000001 to Endianness register signals which * endianness this processor is using, so the FPGA can swap words as * necessary. 
*/ iowrite32(1, endpoint->registers + fpga_endian_reg); /* Bootstrap phase I: Allocate temporary message buffer */ bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL); if (!bootstrap_resources) return -ENOMEM; endpoint->num_channels = 0; rc = xilly_setupchannels(endpoint, bogus_idt, 1); if (rc) return rc; /* Clear the message subsystem (and counter in particular) */ iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg); endpoint->idtlen = -1; /* * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT * buffer size. */ iowrite32((u32) (endpoint->dma_using_dac & 0x0001), endpoint->registers + fpga_dma_control_reg); t = wait_event_interruptible_timeout(endpoint->ep_wait, (endpoint->idtlen >= 0), XILLY_TIMEOUT); if (t <= 0) { dev_err(endpoint->dev, "No response from FPGA. Aborting.\n"); return -ENODEV; } /* Enable DMA */ iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)), endpoint->registers + fpga_dma_control_reg); /* Bootstrap phase II: Allocate buffer for IDT and obtain it */ while (endpoint->idtlen >= idtbuffersize) { idtbuffersize *= 2; bogus_idt[6]++; } endpoint->num_channels = 1; rc = xilly_setupchannels(endpoint, bogus_idt, 2); if (rc) goto failed_idt; rc = xilly_obtain_idt(endpoint); if (rc) goto failed_idt; rc = xilly_scan_idt(endpoint, &idt_handle); if (rc) goto failed_idt; devres_close_group(dev, bootstrap_resources); /* Bootstrap phase III: Allocate buffers according to IDT */ rc = xilly_setupchannels(endpoint, idt_handle.chandesc, idt_handle.entries); if (rc) goto failed_idt; rc = xillybus_init_chrdev(dev, &xillybus_fops, endpoint->owner, endpoint, idt_handle.names, idt_handle.names_len, endpoint->num_channels, xillyname, false); if (rc) goto failed_idt; devres_release_group(dev, bootstrap_resources); return 0; failed_idt: xilly_quiesce(endpoint); flush_workqueue(xillybus_wq); return rc; } EXPORT_SYMBOL(xillybus_endpoint_discovery); void xillybus_endpoint_remove(struct xilly_endpoint *endpoint) { xillybus_cleanup_chrdev(endpoint, endpoint->dev); xilly_quiesce(endpoint); /* * Flushing is done upon endpoint release to prevent access to memory * just about to be released. This makes the quiesce complete. */ flush_workqueue(xillybus_wq); } EXPORT_SYMBOL(xillybus_endpoint_remove); static int __init xillybus_init(void) { xillybus_wq = alloc_workqueue(xillyname, 0, 0); if (!xillybus_wq) return -ENOMEM; return 0; } static void __exit xillybus_exit(void) { /* flush_workqueue() was called for each endpoint released */ destroy_workqueue(xillybus_wq); } module_init(xillybus_init); module_exit(xillybus_exit);
linux-master
drivers/char/xillybus/xillybus_core.c
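The file_operations implemented in xillybus_core.c are consumed from user space as ordinary device files. Below is a minimal, hedged sketch of such a consumer; the node name /dev/xillybus_read_32 is a made-up example, since real names are taken from the IDT supplied by the FPGA.

/* Hedged user-space sketch, not part of the driver sources. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

int main(void)
{
	/* Hypothetical node name; actual names come from the FPGA's IDT */
	int fd = open("/dev/xillybus_read_32", O_RDONLY);
	char buf[4096];
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	for (;;) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0) {
			/* Plain file semantics: just consume the data */
			if (fwrite(buf, 1, n, stdout) != (size_t)n)
				break;
		} else if (n == 0) {
			break;			/* EOF sent by the FPGA */
		} else if (errno != EINTR) {
			perror("read");		/* e.g. -EIO on fatal_error */
			break;
		}
	}

	close(fd);
	return EXIT_SUCCESS;
}

As the driver comments note, read() may return fewer bytes than requested on channels that allow partial completion, and O_NONBLOCK is only honored on channels whose IDT flags permit it, so a robust consumer loops as above rather than assuming full-sized transfers.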
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2021 Xillybus Ltd, http://xillybus.com * * Driver for the Xillybus class */ #include <linux/types.h> #include <linux/module.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mutex.h> #include "xillybus_class.h" MODULE_DESCRIPTION("Driver for Xillybus class"); MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); MODULE_ALIAS("xillybus_class"); MODULE_LICENSE("GPL v2"); static DEFINE_MUTEX(unit_mutex); static LIST_HEAD(unit_list); static const struct class xillybus_class = { .name = "xillybus", }; #define UNITNAMELEN 16 struct xilly_unit { struct list_head list_entry; void *private_data; struct cdev *cdev; char name[UNITNAMELEN]; int major; int lowest_minor; int num_nodes; }; int xillybus_init_chrdev(struct device *dev, const struct file_operations *fops, struct module *owner, void *private_data, unsigned char *idt, unsigned int len, int num_nodes, const char *prefix, bool enumerate) { int rc; dev_t mdev; int i; char devname[48]; struct device *device; size_t namelen; struct xilly_unit *unit, *u; unit = kzalloc(sizeof(*unit), GFP_KERNEL); if (!unit) return -ENOMEM; mutex_lock(&unit_mutex); if (!enumerate) snprintf(unit->name, UNITNAMELEN, "%s", prefix); for (i = 0; enumerate; i++) { snprintf(unit->name, UNITNAMELEN, "%s_%02d", prefix, i); enumerate = false; list_for_each_entry(u, &unit_list, list_entry) if (!strcmp(unit->name, u->name)) { enumerate = true; break; } } rc = alloc_chrdev_region(&mdev, 0, num_nodes, unit->name); if (rc) { dev_warn(dev, "Failed to obtain major/minors"); goto fail_obtain; } unit->major = MAJOR(mdev); unit->lowest_minor = MINOR(mdev); unit->num_nodes = num_nodes; unit->private_data = private_data; unit->cdev = cdev_alloc(); if (!unit->cdev) { rc = -ENOMEM; goto unregister_chrdev; } unit->cdev->ops = fops; unit->cdev->owner = owner; rc = cdev_add(unit->cdev, MKDEV(unit->major, unit->lowest_minor), unit->num_nodes); if (rc) { dev_err(dev, "Failed to add cdev.\n"); /* kobject_put() is normally done by cdev_del() */ kobject_put(&unit->cdev->kobj); goto unregister_chrdev; } for (i = 0; i < num_nodes; i++) { namelen = strnlen(idt, len); if (namelen == len) { dev_err(dev, "IDT's list of names is too short. This is exceptionally weird, because its CRC is OK\n"); rc = -ENODEV; goto unroll_device_create; } snprintf(devname, sizeof(devname), "%s_%s", unit->name, idt); len -= namelen + 1; idt += namelen + 1; device = device_create(&xillybus_class, NULL, MKDEV(unit->major, i + unit->lowest_minor), NULL, "%s", devname); if (IS_ERR(device)) { dev_err(dev, "Failed to create %s device. Aborting.\n", devname); rc = -ENODEV; goto unroll_device_create; } } if (len) { dev_err(dev, "IDT's list of names is too long. 
This is exceptionally weird, because its CRC is OK\n"); rc = -ENODEV; goto unroll_device_create; } list_add_tail(&unit->list_entry, &unit_list); dev_info(dev, "Created %d device files.\n", num_nodes); mutex_unlock(&unit_mutex); return 0; unroll_device_create: for (i--; i >= 0; i--) device_destroy(&xillybus_class, MKDEV(unit->major, i + unit->lowest_minor)); cdev_del(unit->cdev); unregister_chrdev: unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor), unit->num_nodes); fail_obtain: mutex_unlock(&unit_mutex); kfree(unit); return rc; } EXPORT_SYMBOL(xillybus_init_chrdev); void xillybus_cleanup_chrdev(void *private_data, struct device *dev) { int minor; struct xilly_unit *unit = NULL, *iter; mutex_lock(&unit_mutex); list_for_each_entry(iter, &unit_list, list_entry) if (iter->private_data == private_data) { unit = iter; break; } if (!unit) { dev_err(dev, "Weird bug: Failed to find unit\n"); mutex_unlock(&unit_mutex); return; } for (minor = unit->lowest_minor; minor < (unit->lowest_minor + unit->num_nodes); minor++) device_destroy(&xillybus_class, MKDEV(unit->major, minor)); cdev_del(unit->cdev); unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor), unit->num_nodes); dev_info(dev, "Removed %d device files.\n", unit->num_nodes); list_del(&unit->list_entry); kfree(unit); mutex_unlock(&unit_mutex); } EXPORT_SYMBOL(xillybus_cleanup_chrdev); int xillybus_find_inode(struct inode *inode, void **private_data, int *index) { int minor = iminor(inode); int major = imajor(inode); struct xilly_unit *unit = NULL, *iter; mutex_lock(&unit_mutex); list_for_each_entry(iter, &unit_list, list_entry) if (iter->major == major && minor >= iter->lowest_minor && minor < (iter->lowest_minor + iter->num_nodes)) { unit = iter; break; } if (!unit) { mutex_unlock(&unit_mutex); return -ENODEV; } *private_data = unit->private_data; *index = minor - unit->lowest_minor; mutex_unlock(&unit_mutex); return 0; } EXPORT_SYMBOL(xillybus_find_inode); static int __init xillybus_class_init(void) { return class_register(&xillybus_class); } static void __exit xillybus_class_exit(void) { class_unregister(&xillybus_class); } module_init(xillybus_class_init); module_exit(xillybus_class_exit);
linux-master
drivers/char/xillybus/xillybus_class.c
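For context, a hedged sketch of how a transport driver might call xillybus_init_chrdev() above. The names buffer is a concatenation of NUL-terminated strings, one per node, exactly as the function walks it with strnlen(); the prefix, fops and private_data below are placeholders, not real driver symbols.

/* Hedged kernel-side sketch, not part of the sources above. */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/fs.h>
#include "xillybus_class.h"

/* Two nodes: "<prefix>_read_32" and "<prefix>_write_32" */
static unsigned char demo_names[] = "read_32\0write_32";

static int demo_register_nodes(struct device *dev, void *private_data,
			       const struct file_operations *fops)
{
	/*
	 * sizeof(demo_names) counts the literal's terminating NUL, which
	 * acts as the separator after "write_32", so the whole array is
	 * passed as the names/len pair and len reaches exactly zero after
	 * both names have been consumed.
	 */
	return xillybus_init_chrdev(dev, fops, THIS_MODULE, private_data,
				    demo_names, sizeof(demo_names),
				    2, "demobus", false);
}

With enumerate set to false, the nodes come out as /dev/demobus_read_32 and /dev/demobus_write_32; passing true instead would append a _%02d suffix to the prefix to keep multiple units apart, as the enumeration loop above shows.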
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_poweroff.c * * MontaVista IPMI Poweroff extension to sys_reboot * * Author: MontaVista Software, Inc. * Steven Dake <[email protected]> * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002,2004 MontaVista Software Inc. */ #define pr_fmt(fmt) "IPMI poweroff: " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/proc_fs.h> #include <linux/string.h> #include <linux/completion.h> #include <linux/pm.h> #include <linux/kdev_t.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> static void ipmi_po_smi_gone(int if_num); static void ipmi_po_new_smi(int if_num, struct device *device); /* Definitions for controlling power off (if the system supports it). It * conveniently matches the IPMI chassis control values. */ #define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */ #define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */ /* the IPMI data command */ static int poweroff_powercycle; /* Which interface to use, -1 means the first we see. */ static int ifnum_to_use = -1; /* Our local state. */ static int ready; static struct ipmi_user *ipmi_user; static int ipmi_ifnum; static void (*specific_poweroff_func)(struct ipmi_user *user); /* Holds the old poweroff function so we can restore it on removal. */ static void (*old_poweroff_func)(void); static int set_param_ifnum(const char *val, const struct kernel_param *kp) { int rv = param_set_int(val, kp); if (rv) return rv; if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum)) return 0; ipmi_po_smi_gone(ipmi_ifnum); ipmi_po_new_smi(ifnum_to_use, NULL); return 0; } module_param_call(ifnum_to_use, set_param_ifnum, param_get_int, &ifnum_to_use, 0644); MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " "timer. Setting to -1 defaults to the first registered " "interface"); /* parameter definition to allow user to flag power cycle */ module_param(poweroff_powercycle, int, 0644); MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power" " down. Power cycle is contingent on hardware support," " otherwise it defaults back to power down."); /* Stuff from the get device id command. */ static unsigned int mfg_id; static unsigned int prod_id; static unsigned char capabilities; static unsigned char ipmi_version; /* * We use our own messages for this operation, we don't let the system * allocate them, since we may be in a panic situation. The whole * thing is single-threaded, anyway, so multiple messages are not * required. */ static atomic_t dummy_count = ATOMIC_INIT(0); static void dummy_smi_free(struct ipmi_smi_msg *msg) { atomic_dec(&dummy_count); } static void dummy_recv_free(struct ipmi_recv_msg *msg) { atomic_dec(&dummy_count); } static struct ipmi_smi_msg halt_smi_msg = INIT_IPMI_SMI_MSG(dummy_smi_free); static struct ipmi_recv_msg halt_recv_msg = INIT_IPMI_RECV_MSG(dummy_recv_free); /* * Code to send a message and wait for the response. 
*/ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data) { struct completion *comp = recv_msg->user_msg_data; if (comp) complete(comp); } static const struct ipmi_user_hndl ipmi_poweroff_handler = { .ipmi_recv_hndl = receive_handler }; static int ipmi_request_wait_for_response(struct ipmi_user *user, struct ipmi_addr *addr, struct kernel_ipmi_msg *send_msg) { int rv; struct completion comp; init_completion(&comp); rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp, &halt_smi_msg, &halt_recv_msg, 0); if (rv) return rv; wait_for_completion(&comp); return halt_recv_msg.msg.data[0]; } /* Wait for message to complete, spinning. */ static int ipmi_request_in_rc_mode(struct ipmi_user *user, struct ipmi_addr *addr, struct kernel_ipmi_msg *send_msg) { int rv; atomic_set(&dummy_count, 2); rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, &halt_smi_msg, &halt_recv_msg, 0); if (rv) { atomic_set(&dummy_count, 0); return rv; } /* * Spin until our message is done. */ while (atomic_read(&dummy_count) > 0) { ipmi_poll_interface(user); cpu_relax(); } return halt_recv_msg.msg.data[0]; } /* * ATCA Support */ #define IPMI_NETFN_ATCA 0x2c #define IPMI_ATCA_SET_POWER_CMD 0x11 #define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01 #define IPMI_PICMG_ID 0 #define IPMI_NETFN_OEM 0x2e #define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11 #define IPMI_ATCA_PPS_IANA "\x00\x40\x0A" #define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 #define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 static void (*atca_oem_poweroff_hook)(struct ipmi_user *user); static void pps_poweroff_atca(struct ipmi_user *user) { struct ipmi_system_interface_addr smi_addr; struct kernel_ipmi_msg send_msg; int rv; /* * Configure IPMI address for local access */ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr.channel = IPMI_BMC_CHANNEL; smi_addr.lun = 0; pr_info("PPS powerdown hook used\n"); send_msg.netfn = IPMI_NETFN_OEM; send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; send_msg.data = IPMI_ATCA_PPS_IANA; send_msg.data_len = 3; rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) pr_err("Unable to send ATCA, IPMI error 0x%x\n", rv); return; } static int ipmi_atca_detect(struct ipmi_user *user) { struct ipmi_system_interface_addr smi_addr; struct kernel_ipmi_msg send_msg; int rv; unsigned char data[1]; /* * Configure IPMI address for local access */ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr.channel = IPMI_BMC_CHANNEL; smi_addr.lun = 0; /* * Use get address info to check and see if we are ATCA */ send_msg.netfn = IPMI_NETFN_ATCA; send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD; data[0] = IPMI_PICMG_ID; send_msg.data = data; send_msg.data_len = sizeof(data); rv = ipmi_request_wait_for_response(user, (struct ipmi_addr *) &smi_addr, &send_msg); pr_info("ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id); if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { pr_info("Installing Pigeon Point Systems Poweroff Hook\n"); atca_oem_poweroff_hook = pps_poweroff_atca; } return !rv; } static void ipmi_poweroff_atca(struct ipmi_user *user) { struct ipmi_system_interface_addr smi_addr; struct kernel_ipmi_msg send_msg; int rv; unsigned char data[4]; /* * Configure IPMI address for local access */ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr.channel = IPMI_BMC_CHANNEL; smi_addr.lun = 0; pr_info("Powering down via ATCA power command\n"); /* * Power down */ send_msg.netfn = 
IPMI_NETFN_ATCA; send_msg.cmd = IPMI_ATCA_SET_POWER_CMD; data[0] = IPMI_PICMG_ID; data[1] = 0; /* FRU id */ data[2] = 0; /* Power Level */ data[3] = 0; /* Don't change saved presets */ send_msg.data = data; send_msg.data_len = sizeof(data); rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); /* * At this point, the system may be shutting down, and most * serial drivers (if used) will have interrupts turned off * it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE * return code */ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { pr_err("Unable to send ATCA powerdown message, IPMI error 0x%x\n", rv); goto out; } if (atca_oem_poweroff_hook) atca_oem_poweroff_hook(user); out: return; } /* * CPI1 Support */ #define IPMI_NETFN_OEM_1 0xf8 #define OEM_GRP_CMD_SET_RESET_STATE 0x84 #define OEM_GRP_CMD_SET_POWER_STATE 0x82 #define IPMI_NETFN_OEM_8 0xf8 #define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80 #define OEM_GRP_CMD_GET_SLOT_GA 0xa3 #define IPMI_NETFN_SENSOR_EVT 0x10 #define IPMI_CMD_GET_EVENT_RECEIVER 0x01 #define IPMI_CPI1_PRODUCT_ID 0x000157 #define IPMI_CPI1_MANUFACTURER_ID 0x0108 static int ipmi_cpi1_detect(struct ipmi_user *user) { return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) && (prod_id == IPMI_CPI1_PRODUCT_ID)); } static void ipmi_poweroff_cpi1(struct ipmi_user *user) { struct ipmi_system_interface_addr smi_addr; struct ipmi_ipmb_addr ipmb_addr; struct kernel_ipmi_msg send_msg; int rv; unsigned char data[1]; int slot; unsigned char hotswap_ipmb; unsigned char aer_addr; unsigned char aer_lun; /* * Configure IPMI address for local access */ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr.channel = IPMI_BMC_CHANNEL; smi_addr.lun = 0; pr_info("Powering down via CPI1 power command\n"); /* * Get IPMI ipmb address */ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2; send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA; send_msg.data = NULL; send_msg.data_len = 0; rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv) goto out; slot = halt_recv_msg.msg.data[1]; hotswap_ipmb = (slot > 9) ? 
(0xb0 + 2 * slot) : (0xae + 2 * slot); /* * Get active event receiver */ send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2; send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER; send_msg.data = NULL; send_msg.data_len = 0; rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv) goto out; aer_addr = halt_recv_msg.msg.data[1]; aer_lun = halt_recv_msg.msg.data[2]; /* * Setup IPMB address target instead of local target */ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; ipmb_addr.channel = 0; ipmb_addr.slave_addr = aer_addr; ipmb_addr.lun = aer_lun; /* * Send request hotswap control to remove blade from dpv */ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2; send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL; send_msg.data = &hotswap_ipmb; send_msg.data_len = 1; ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &ipmb_addr, &send_msg); /* * Set reset asserted */ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2; send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE; send_msg.data = data; data[0] = 1; /* Reset asserted state */ send_msg.data_len = 1; rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv) goto out; /* * Power down */ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2; send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE; send_msg.data = data; data[0] = 1; /* Power down state */ send_msg.data_len = 1; rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv) goto out; out: return; } /* * ipmi_dell_chassis_detect() * Dell systems with IPMI < 1.5 don't set the chassis capability bit * but they can handle a chassis poweroff or powercycle command. */ #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} static int ipmi_dell_chassis_detect(struct ipmi_user *user) { const char ipmi_version_major = ipmi_version & 0xF; const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; const char mfr[3] = DELL_IANA_MFR_ID; if (!memcmp(mfr, &mfg_id, sizeof(mfr)) && ipmi_version_major <= 1 && ipmi_version_minor < 5) return 1; return 0; } /* * ipmi_hp_chassis_detect() * HP PA-RISC servers rp3410/rp3440, the C8000 workstation and the rx2600 and * zx6000 machines support IPMI vers 1 and don't set the chassis capability bit * but they can handle a chassis poweroff or powercycle command. */ #define HP_IANA_MFR_ID 0x0b #define HP_BMC_PROD_ID 0x8201 static int ipmi_hp_chassis_detect(struct ipmi_user *user) { if (mfg_id == HP_IANA_MFR_ID && prod_id == HP_BMC_PROD_ID && ipmi_version == 1) return 1; return 0; } /* * Standard chassis support */ #define IPMI_NETFN_CHASSIS_REQUEST 0 #define IPMI_CHASSIS_CONTROL_CMD 0x02 static int ipmi_chassis_detect(struct ipmi_user *user) { /* Chassis support, use it. */ return (capabilities & 0x80); } static void ipmi_poweroff_chassis(struct ipmi_user *user) { struct ipmi_system_interface_addr smi_addr; struct kernel_ipmi_msg send_msg; int rv; unsigned char data[1]; /* * Configure IPMI address for local access */ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr.channel = IPMI_BMC_CHANNEL; smi_addr.lun = 0; powercyclefailed: pr_info("Powering %s via IPMI chassis control command\n", (poweroff_powercycle ? 
"cycle" : "down")); /* * Power down */ send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST; send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD; if (poweroff_powercycle) data[0] = IPMI_CHASSIS_POWER_CYCLE; else data[0] = IPMI_CHASSIS_POWER_DOWN; send_msg.data = data; send_msg.data_len = sizeof(data); rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv) { if (poweroff_powercycle) { /* power cycle failed, default to power down */ pr_err("Unable to send chassis power cycle message, IPMI error 0x%x\n", rv); poweroff_powercycle = 0; goto powercyclefailed; } pr_err("Unable to send chassis power down message, IPMI error 0x%x\n", rv); } } /* Table of possible power off functions. */ struct poweroff_function { char *platform_type; int (*detect)(struct ipmi_user *user); void (*poweroff_func)(struct ipmi_user *user); }; static struct poweroff_function poweroff_functions[] = { { .platform_type = "ATCA", .detect = ipmi_atca_detect, .poweroff_func = ipmi_poweroff_atca }, { .platform_type = "CPI1", .detect = ipmi_cpi1_detect, .poweroff_func = ipmi_poweroff_cpi1 }, { .platform_type = "chassis", .detect = ipmi_dell_chassis_detect, .poweroff_func = ipmi_poweroff_chassis }, { .platform_type = "chassis", .detect = ipmi_hp_chassis_detect, .poweroff_func = ipmi_poweroff_chassis }, /* Chassis should generally be last, other things should override it. */ { .platform_type = "chassis", .detect = ipmi_chassis_detect, .poweroff_func = ipmi_poweroff_chassis }, }; #define NUM_PO_FUNCS ARRAY_SIZE(poweroff_functions) /* Called on a powerdown request. */ static void ipmi_poweroff_function(void) { if (!ready) return; /* Use run-to-completion mode, since interrupts may be off. */ specific_poweroff_func(ipmi_user); } /* Wait for an IPMI interface to be installed, the first one installed will be grabbed by this code and used to perform the powerdown. */ static void ipmi_po_new_smi(int if_num, struct device *device) { struct ipmi_system_interface_addr smi_addr; struct kernel_ipmi_msg send_msg; int rv; int i; if (ready) return; if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num)) return; rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, &ipmi_user); if (rv) { pr_err("could not create IPMI user, error %d\n", rv); return; } ipmi_ifnum = if_num; /* * Do a get device ide and store some results, since this is * used by several functions. 
*/ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr.channel = IPMI_BMC_CHANNEL; smi_addr.lun = 0; send_msg.netfn = IPMI_NETFN_APP_REQUEST; send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; send_msg.data = NULL; send_msg.data_len = 0; rv = ipmi_request_wait_for_response(ipmi_user, (struct ipmi_addr *) &smi_addr, &send_msg); if (rv) { pr_err("Unable to send IPMI get device id info, IPMI error 0x%x\n", rv); goto out_err; } if (halt_recv_msg.msg.data_len < 12) { pr_err("(chassis) IPMI get device id info too short, was %d bytes, needed %d bytes\n", halt_recv_msg.msg.data_len, 12); goto out_err; } mfg_id = (halt_recv_msg.msg.data[7] | (halt_recv_msg.msg.data[8] << 8) | (halt_recv_msg.msg.data[9] << 16)); prod_id = (halt_recv_msg.msg.data[10] | (halt_recv_msg.msg.data[11] << 8)); capabilities = halt_recv_msg.msg.data[6]; ipmi_version = halt_recv_msg.msg.data[5]; /* Scan for a poweroff method */ for (i = 0; i < NUM_PO_FUNCS; i++) { if (poweroff_functions[i].detect(ipmi_user)) goto found; } out_err: pr_err("Unable to find a poweroff function that will work, giving up\n"); ipmi_destroy_user(ipmi_user); return; found: pr_info("Found a %s style poweroff function\n", poweroff_functions[i].platform_type); specific_poweroff_func = poweroff_functions[i].poweroff_func; old_poweroff_func = pm_power_off; pm_power_off = ipmi_poweroff_function; ready = 1; } static void ipmi_po_smi_gone(int if_num) { if (!ready) return; if (ipmi_ifnum != if_num) return; ready = 0; ipmi_destroy_user(ipmi_user); pm_power_off = old_poweroff_func; } static struct ipmi_smi_watcher smi_watcher = { .owner = THIS_MODULE, .new_smi = ipmi_po_new_smi, .smi_gone = ipmi_po_smi_gone }; #ifdef CONFIG_PROC_FS #include <linux/sysctl.h> static struct ctl_table ipmi_table[] = { { .procname = "poweroff_powercycle", .data = &poweroff_powercycle, .maxlen = sizeof(poweroff_powercycle), .mode = 0644, .proc_handler = proc_dointvec }, { } }; static struct ctl_table_header *ipmi_table_header; #endif /* CONFIG_PROC_FS */ /* * Startup and shutdown functions. */ static int __init ipmi_poweroff_init(void) { int rv; pr_info("Copyright (C) 2004 MontaVista Software - IPMI Powerdown via sys_reboot\n"); if (poweroff_powercycle) pr_info("Power cycle is enabled\n"); #ifdef CONFIG_PROC_FS ipmi_table_header = register_sysctl("dev/ipmi", ipmi_table); if (!ipmi_table_header) { pr_err("Unable to register powercycle sysctl\n"); rv = -ENOMEM; goto out_err; } #endif rv = ipmi_smi_watcher_register(&smi_watcher); #ifdef CONFIG_PROC_FS if (rv) { unregister_sysctl_table(ipmi_table_header); pr_err("Unable to register SMI watcher: %d\n", rv); goto out_err; } out_err: #endif return rv; } #ifdef MODULE static void __exit ipmi_poweroff_cleanup(void) { int rv; #ifdef CONFIG_PROC_FS unregister_sysctl_table(ipmi_table_header); #endif ipmi_smi_watcher_unregister(&smi_watcher); if (ready) { rv = ipmi_destroy_user(ipmi_user); if (rv) pr_err("could not cleanup the IPMI user: 0x%x\n", rv); pm_power_off = old_poweroff_func; } } module_exit(ipmi_poweroff_cleanup); #endif module_init(ipmi_poweroff_init); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <[email protected]>"); MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
linux-master
drivers/char/ipmi/ipmi_poweroff.c
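The chassis fallback in this file sends a one-byte Chassis Control request (NetFn 0x00, command 0x02) to the BMC over the system interface address. The same message can be issued from userspace through the IPMI message interface; the sketch below is a minimal illustration only, assuming a /dev/ipmi0 device node and ignoring the asynchronous completion-code response, and is not part of the driver.

/* Minimal sketch: send IPMI Chassis Control (power down) via /dev/ipmi0. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ipmi.h>

int main(void)
{
    struct ipmi_system_interface_addr si = {
        .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
        .channel   = IPMI_BMC_CHANNEL,
        .lun       = 0,
    };
    unsigned char chassis_ctl[1] = { 0x00 };  /* 0x00 = power down, 0x02 = power cycle */
    struct ipmi_req req;
    int fd;

    fd = open("/dev/ipmi0", O_RDWR);          /* device node name assumed */
    if (fd < 0) {
        perror("open /dev/ipmi0");
        return 1;
    }

    memset(&req, 0, sizeof(req));
    req.addr = (unsigned char *)&si;
    req.addr_len = sizeof(si);
    req.msgid = 1;
    req.msg.netfn = 0x00;                     /* Chassis request NetFn */
    req.msg.cmd = 0x02;                       /* Chassis Control command */
    req.msg.data = chassis_ctl;
    req.msg.data_len = sizeof(chassis_ctl);

    /* Warning: if run as root on real hardware this powers the machine off. */
    if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
        perror("IPMICTL_SEND_COMMAND");
    /* The completion code arrives asynchronously; reading it is omitted here. */

    close(fd);
    return 0;
}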
// SPDX-License-Identifier: GPL-2.0+ #define pr_fmt(fmt) "ipmi_hardcode: " fmt #include <linux/moduleparam.h> #include <linux/platform_device.h> #include "ipmi_si.h" #include "ipmi_plat_data.h" /* * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. */ #define SI_MAX_PARMS 4 #define MAX_SI_TYPE_STR 30 static char si_type_str[MAX_SI_TYPE_STR] __initdata; static unsigned long addrs[SI_MAX_PARMS]; static unsigned int num_addrs; static unsigned int ports[SI_MAX_PARMS]; static unsigned int num_ports; static int irqs[SI_MAX_PARMS] __initdata; static unsigned int num_irqs __initdata; static int regspacings[SI_MAX_PARMS] __initdata; static unsigned int num_regspacings __initdata; static int regsizes[SI_MAX_PARMS] __initdata; static unsigned int num_regsizes __initdata; static int regshifts[SI_MAX_PARMS] __initdata; static unsigned int num_regshifts __initdata; static int slave_addrs[SI_MAX_PARMS] __initdata; static unsigned int num_slave_addrs __initdata; module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); MODULE_PARM_DESC(type, "Defines the type of each interface, each interface separated by commas. The types are 'kcs', 'smic', and 'bt'. For example si_type=kcs,bt will set the first interface to kcs and the second to bt"); module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0); MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the addresses separated by commas. Only use if an interface is in memory. Otherwise, set it to zero or leave it blank."); module_param_hw_array(ports, uint, ioport, &num_ports, 0); MODULE_PARM_DESC(ports, "Sets the port address of each interface, the addresses separated by commas. Only use if an interface is a port. Otherwise, set it to zero or leave it blank."); module_param_hw_array(irqs, int, irq, &num_irqs, 0); MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the addresses separated by commas. Only use if an interface has an interrupt. Otherwise, set it to zero or leave it blank."); module_param_hw_array(regspacings, int, other, &num_regspacings, 0); MODULE_PARM_DESC(regspacings, "The number of bytes between the start address and each successive register used by the interface. For instance, if the start address is 0xca2 and the spacing is 2, then the second address is at 0xca4. Defaults to 1."); module_param_hw_array(regsizes, int, other, &num_regsizes, 0); MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes. This should generally be 1, 2, 4, or 8 for an 8-bit, 16-bit, 32-bit, or 64-bit register. Use this if you the 8-bit IPMI register has to be read from a larger register."); module_param_hw_array(regshifts, int, other, &num_regshifts, 0); MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the. IPMI register, in bits. For instance, if the data is read from a 32-bit word and the IPMI data is in bit 8-15, then the shift would be 8"); module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0); MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for the controller. Normally this is 0x20, but can be overridden by this parm. 
This is an array indexed by interface number."); static void __init ipmi_hardcode_init_one(const char *si_type_str, unsigned int i, unsigned long addr, enum ipmi_addr_space addr_space) { struct ipmi_plat_data p; int t; memset(&p, 0, sizeof(p)); p.iftype = IPMI_PLAT_IF_SI; if (!si_type_str || !*si_type_str) { p.type = SI_KCS; } else { t = match_string(si_to_str, -1, si_type_str); if (t < 0) { pr_warn("Interface type specified for interface %d, was invalid: %s\n", i, si_type_str); return; } p.type = t; } p.regsize = regsizes[i]; p.slave_addr = slave_addrs[i]; p.addr_source = SI_HARDCODED; p.regshift = regshifts[i]; p.regsize = regsizes[i]; p.addr = addr; p.space = addr_space; ipmi_platform_add("hardcode-ipmi-si", i, &p); } void __init ipmi_hardcode_init(void) { unsigned int i; char *str; char *si_type[SI_MAX_PARMS]; memset(si_type, 0, sizeof(si_type)); /* Parse out the si_type string into its components. */ str = si_type_str; if (*str != '\0') { for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) { si_type[i] = str; str = strchr(str, ','); if (str) { *str = '\0'; str++; } else { break; } } } for (i = 0; i < SI_MAX_PARMS; i++) { if (i < num_ports && ports[i]) ipmi_hardcode_init_one(si_type[i], i, ports[i], IPMI_IO_ADDR_SPACE); if (i < num_addrs && addrs[i]) ipmi_hardcode_init_one(si_type[i], i, addrs[i], IPMI_MEM_ADDR_SPACE); } } void ipmi_si_hardcode_exit(void) { ipmi_remove_platform_device_by_name("hardcode-ipmi-si"); } /* * Returns true of the given address exists as a hardcoded address, * false if not. */ int ipmi_si_hardcode_match(int addr_space, unsigned long addr) { unsigned int i; if (addr_space == IPMI_IO_ADDR_SPACE) { for (i = 0; i < num_ports; i++) { if (ports[i] == addr) return 1; } } else { for (i = 0; i < num_addrs; i++) { if (addrs[i] == addr) return 1; } } return 0; }
linux-master
drivers/char/ipmi/ipmi_si_hardcode.c
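ipmi_hardcode_init() above splits the comma-separated si_type module parameter in place and pairs each token with the corresponding ports/addrs slot. The standalone sketch below reproduces just that tokenising step on a local buffer; SI_MAX_PARMS and the sample "kcs,bt" string are taken from the file above.

/* Standalone sketch of the si_type=... tokenising done in ipmi_hardcode_init(). */
#include <stdio.h>
#include <string.h>

#define SI_MAX_PARMS 4

int main(void)
{
    char si_type_str[] = "kcs,bt";            /* e.g. boot with ipmi_si.type=kcs,bt */
    char *si_type[SI_MAX_PARMS] = { NULL };
    char *str = si_type_str;
    unsigned int i;

    /* Split on commas, NUL-terminating each token, at most SI_MAX_PARMS entries. */
    for (i = 0; i < SI_MAX_PARMS && *str != '\0'; i++) {
        si_type[i] = str;
        str = strchr(str, ',');
        if (!str)
            break;
        *str++ = '\0';
    }

    for (i = 0; i < SI_MAX_PARMS; i++)
        printf("interface %u: %s\n", i, si_type[i] ? si_type[i] : "(default kcs)");

    return 0;
}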
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2015-2016, IBM Corporation. */ #include <linux/atomic.h> #include <linux/bt-bmc.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/timer.h> /* * This is a BMC device used to communicate to the host */ #define DEVICE_NAME "ipmi-bt-host" #define BT_IO_BASE 0xe4 #define BT_IRQ 10 #define BT_CR0 0x0 #define BT_CR0_IO_BASE 16 #define BT_CR0_IRQ 12 #define BT_CR0_EN_CLR_SLV_RDP 0x8 #define BT_CR0_EN_CLR_SLV_WRP 0x4 #define BT_CR0_ENABLE_IBT 0x1 #define BT_CR1 0x4 #define BT_CR1_IRQ_H2B 0x01 #define BT_CR1_IRQ_HBUSY 0x40 #define BT_CR2 0x8 #define BT_CR2_IRQ_H2B 0x01 #define BT_CR2_IRQ_HBUSY 0x40 #define BT_CR3 0xc #define BT_CTRL 0x10 #define BT_CTRL_B_BUSY 0x80 #define BT_CTRL_H_BUSY 0x40 #define BT_CTRL_OEM0 0x20 #define BT_CTRL_SMS_ATN 0x10 #define BT_CTRL_B2H_ATN 0x08 #define BT_CTRL_H2B_ATN 0x04 #define BT_CTRL_CLR_RD_PTR 0x02 #define BT_CTRL_CLR_WR_PTR 0x01 #define BT_BMC2HOST 0x14 #define BT_INTMASK 0x18 #define BT_INTMASK_B2H_IRQEN 0x01 #define BT_INTMASK_B2H_IRQ 0x02 #define BT_INTMASK_BMC_HWRST 0x80 #define BT_BMC_BUFFER_SIZE 256 struct bt_bmc { struct device dev; struct miscdevice miscdev; void __iomem *base; int irq; wait_queue_head_t queue; struct timer_list poll_timer; struct mutex mutex; }; static atomic_t open_count = ATOMIC_INIT(0); static u8 bt_inb(struct bt_bmc *bt_bmc, int reg) { return readb(bt_bmc->base + reg); } static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg) { writeb(data, bt_bmc->base + reg); } static void clr_rd_ptr(struct bt_bmc *bt_bmc) { bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL); } static void clr_wr_ptr(struct bt_bmc *bt_bmc) { bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL); } static void clr_h2b_atn(struct bt_bmc *bt_bmc) { bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL); } static void set_b_busy(struct bt_bmc *bt_bmc) { if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)) bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL); } static void clr_b_busy(struct bt_bmc *bt_bmc) { if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY) bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL); } static void set_b2h_atn(struct bt_bmc *bt_bmc) { bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL); } static u8 bt_read(struct bt_bmc *bt_bmc) { return bt_inb(bt_bmc, BT_BMC2HOST); } static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n) { int i; for (i = 0; i < n; i++) buf[i] = bt_read(bt_bmc); return n; } static void bt_write(struct bt_bmc *bt_bmc, u8 c) { bt_outb(bt_bmc, c, BT_BMC2HOST); } static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n) { int i; for (i = 0; i < n; i++) bt_write(bt_bmc, buf[i]); return n; } static void set_sms_atn(struct bt_bmc *bt_bmc) { bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL); } static struct bt_bmc *file_bt_bmc(struct file *file) { return container_of(file->private_data, struct bt_bmc, miscdev); } static int bt_bmc_open(struct inode *inode, struct file *file) { struct bt_bmc *bt_bmc = file_bt_bmc(file); if (atomic_inc_return(&open_count) == 1) { clr_b_busy(bt_bmc); return 0; } atomic_dec(&open_count); return -EBUSY; } /* * The BT (Block Transfer) interface means that entire messages are * buffered by the host before a notification is sent to the BMC that * there is data to be read. The first byte is the length and the * message data follows. 
The read operation just tries to capture the * whole before returning it to userspace. * * BT Message format : * * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N * Length NetFn/LUN Seq Cmd Data * */ static ssize_t bt_bmc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct bt_bmc *bt_bmc = file_bt_bmc(file); u8 len; int len_byte = 1; u8 kbuffer[BT_BMC_BUFFER_SIZE]; ssize_t ret = 0; ssize_t nread; WARN_ON(*ppos); if (wait_event_interruptible(bt_bmc->queue, bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN)) return -ERESTARTSYS; mutex_lock(&bt_bmc->mutex); if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) { ret = -EIO; goto out_unlock; } set_b_busy(bt_bmc); clr_h2b_atn(bt_bmc); clr_rd_ptr(bt_bmc); /* * The BT frames start with the message length, which does not * include the length byte. */ kbuffer[0] = bt_read(bt_bmc); len = kbuffer[0]; /* We pass the length back to userspace as well */ if (len + 1 > count) len = count - 1; while (len) { nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte); bt_readn(bt_bmc, kbuffer + len_byte, nread); if (copy_to_user(buf, kbuffer, nread + len_byte)) { ret = -EFAULT; break; } len -= nread; buf += nread + len_byte; ret += nread + len_byte; len_byte = 0; } clr_b_busy(bt_bmc); out_unlock: mutex_unlock(&bt_bmc->mutex); return ret; } /* * BT Message response format : * * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N * Length NetFn/LUN Seq Cmd Code Data */ static ssize_t bt_bmc_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct bt_bmc *bt_bmc = file_bt_bmc(file); u8 kbuffer[BT_BMC_BUFFER_SIZE]; ssize_t ret = 0; ssize_t nwritten; /* * send a minimum response size */ if (count < 5) return -EINVAL; WARN_ON(*ppos); /* * There's no interrupt for clearing bmc busy so we have to * poll */ if (wait_event_interruptible(bt_bmc->queue, !(bt_inb(bt_bmc, BT_CTRL) & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))) return -ERESTARTSYS; mutex_lock(&bt_bmc->mutex); if (unlikely(bt_inb(bt_bmc, BT_CTRL) & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) { ret = -EIO; goto out_unlock; } clr_wr_ptr(bt_bmc); while (count) { nwritten = min_t(ssize_t, count, sizeof(kbuffer)); if (copy_from_user(&kbuffer, buf, nwritten)) { ret = -EFAULT; break; } bt_writen(bt_bmc, kbuffer, nwritten); count -= nwritten; buf += nwritten; ret += nwritten; } set_b2h_atn(bt_bmc); out_unlock: mutex_unlock(&bt_bmc->mutex); return ret; } static long bt_bmc_ioctl(struct file *file, unsigned int cmd, unsigned long param) { struct bt_bmc *bt_bmc = file_bt_bmc(file); switch (cmd) { case BT_BMC_IOCTL_SMS_ATN: set_sms_atn(bt_bmc); return 0; } return -EINVAL; } static int bt_bmc_release(struct inode *inode, struct file *file) { struct bt_bmc *bt_bmc = file_bt_bmc(file); atomic_dec(&open_count); set_b_busy(bt_bmc); return 0; } static __poll_t bt_bmc_poll(struct file *file, poll_table *wait) { struct bt_bmc *bt_bmc = file_bt_bmc(file); __poll_t mask = 0; u8 ctrl; poll_wait(file, &bt_bmc->queue, wait); ctrl = bt_inb(bt_bmc, BT_CTRL); if (ctrl & BT_CTRL_H2B_ATN) mask |= EPOLLIN; if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) mask |= EPOLLOUT; return mask; } static const struct file_operations bt_bmc_fops = { .owner = THIS_MODULE, .open = bt_bmc_open, .read = bt_bmc_read, .write = bt_bmc_write, .release = bt_bmc_release, .poll = bt_bmc_poll, .unlocked_ioctl = bt_bmc_ioctl, }; static void poll_timer(struct timer_list *t) { struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer); bt_bmc->poll_timer.expires += msecs_to_jiffies(500); wake_up(&bt_bmc->queue); add_timer(&bt_bmc->poll_timer); } 
static irqreturn_t bt_bmc_irq(int irq, void *arg) { struct bt_bmc *bt_bmc = arg; u32 reg; reg = readl(bt_bmc->base + BT_CR2); reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY; if (!reg) return IRQ_NONE; /* ack pending IRQs */ writel(reg, bt_bmc->base + BT_CR2); wake_up(&bt_bmc->queue); return IRQ_HANDLED; } static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, struct platform_device *pdev) { struct device *dev = &pdev->dev; int rc; u32 reg; bt_bmc->irq = platform_get_irq_optional(pdev, 0); if (bt_bmc->irq < 0) return bt_bmc->irq; rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED, DEVICE_NAME, bt_bmc); if (rc < 0) { dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq); bt_bmc->irq = rc; return rc; } /* * Configure IRQs on the bmc clearing the H2B and HBUSY bits; * H2B will be asserted when the bmc has data for us; HBUSY * will be cleared (along with B2H) when we can write the next * message to the BT buffer */ reg = readl(bt_bmc->base + BT_CR1); reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY; writel(reg, bt_bmc->base + BT_CR1); return 0; } static int bt_bmc_probe(struct platform_device *pdev) { struct bt_bmc *bt_bmc; struct device *dev; int rc; dev = &pdev->dev; dev_info(dev, "Found bt bmc device\n"); bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL); if (!bt_bmc) return -ENOMEM; dev_set_drvdata(&pdev->dev, bt_bmc); bt_bmc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(bt_bmc->base)) return PTR_ERR(bt_bmc->base); mutex_init(&bt_bmc->mutex); init_waitqueue_head(&bt_bmc->queue); bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; bt_bmc->miscdev.name = DEVICE_NAME; bt_bmc->miscdev.fops = &bt_bmc_fops; bt_bmc->miscdev.parent = dev; rc = misc_register(&bt_bmc->miscdev); if (rc) { dev_err(dev, "Unable to register misc device\n"); return rc; } bt_bmc_config_irq(bt_bmc, pdev); if (bt_bmc->irq >= 0) { dev_info(dev, "Using IRQ %d\n", bt_bmc->irq); } else { dev_info(dev, "No IRQ; using timer\n"); timer_setup(&bt_bmc->poll_timer, poll_timer, 0); bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10); add_timer(&bt_bmc->poll_timer); } writel((BT_IO_BASE << BT_CR0_IO_BASE) | (BT_IRQ << BT_CR0_IRQ) | BT_CR0_EN_CLR_SLV_RDP | BT_CR0_EN_CLR_SLV_WRP | BT_CR0_ENABLE_IBT, bt_bmc->base + BT_CR0); clr_b_busy(bt_bmc); return 0; } static int bt_bmc_remove(struct platform_device *pdev) { struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); misc_deregister(&bt_bmc->miscdev); if (bt_bmc->irq < 0) del_timer_sync(&bt_bmc->poll_timer); return 0; } static const struct of_device_id bt_bmc_match[] = { { .compatible = "aspeed,ast2400-ibt-bmc" }, { .compatible = "aspeed,ast2500-ibt-bmc" }, { .compatible = "aspeed,ast2600-ibt-bmc" }, { }, }; static struct platform_driver bt_bmc_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = bt_bmc_match, }, .probe = bt_bmc_probe, .remove = bt_bmc_remove, }; module_platform_driver(bt_bmc_driver); MODULE_DEVICE_TABLE(of, bt_bmc_match); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alistair Popple <[email protected]>"); MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
linux-master
drivers/char/ipmi/bt-bmc.c
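bt_bmc_read()/bt_bmc_write() expose raw BT frames to a BMC-side userspace daemon through the misc device. The sketch below shows one plausible daemon loop: read a request ([length][NetFn/LUN][seq][cmd][data...]) and answer it with a fixed completion code. The /dev/ipmi-bt-host path follows DEVICE_NAME above; the completion code and error handling are illustrative only.

/* Hypothetical BMC-side loop over the bt-bmc misc device (path assumed). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    unsigned char req[256], rsp[5];
    ssize_t n;
    int fd;

    fd = open("/dev/ipmi-bt-host", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    for (;;) {                                /* a real daemon would handle signals/exit */
        n = read(fd, req, sizeof(req));       /* [len][netfn/lun][seq][cmd][data...] */
        if (n < 4)
            continue;

        rsp[0] = 4;                           /* length of the bytes that follow */
        rsp[1] = req[1] + (1 << 2);           /* response NetFn = request NetFn + 1 */
        rsp[2] = req[2];                      /* echo sequence number */
        rsp[3] = req[3];                      /* echo command */
        rsp[4] = 0xc1;                        /* completion code: invalid command */
        if (write(fd, rsp, sizeof(rsp)) != sizeof(rsp))
            perror("write");
    }
}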
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_watchdog.c * * A watchdog timer based upon the IPMI interface. * * Author: MontaVista Software, Inc. * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002 MontaVista Software Inc. */ #define pr_fmt(fmt) "IPMI Watchdog: " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include <linux/mutex.h> #include <linux/watchdog.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/kdebug.h> #include <linux/kstrtox.h> #include <linux/rwsem.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <linux/notifier.h> #include <linux/nmi.h> #include <linux/reboot.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/atomic.h> #include <linux/sched/signal.h> #ifdef CONFIG_X86 /* * This is ugly, but I've determined that x86 is the only architecture * that can reasonably support the IPMI NMI watchdog timeout at this * time. If another architecture adds this capability somehow, it * will have to be a somewhat different mechanism and I have no idea * how it will work. So in the unlikely event that another * architecture supports this, we can figure out a good generic * mechanism for it at that time. */ #include <asm/kdebug.h> #include <asm/nmi.h> #define HAVE_DIE_NMI #endif /* * The IPMI command/response information for the watchdog timer. */ /* values for byte 1 of the set command, byte 2 of the get response. */ #define WDOG_DONT_LOG (1 << 7) #define WDOG_DONT_STOP_ON_SET (1 << 6) #define WDOG_SET_TIMER_USE(byte, use) \ byte = ((byte) & 0xf8) | ((use) & 0x7) #define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7) #define WDOG_TIMER_USE_BIOS_FRB2 1 #define WDOG_TIMER_USE_BIOS_POST 2 #define WDOG_TIMER_USE_OS_LOAD 3 #define WDOG_TIMER_USE_SMS_OS 4 #define WDOG_TIMER_USE_OEM 5 /* values for byte 2 of the set command, byte 3 of the get response. */ #define WDOG_SET_PRETIMEOUT_ACT(byte, use) \ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4) #define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7) #define WDOG_PRETIMEOUT_NONE 0 #define WDOG_PRETIMEOUT_SMI 1 #define WDOG_PRETIMEOUT_NMI 2 #define WDOG_PRETIMEOUT_MSG_INT 3 /* Operations that can be performed on a pretimout. */ #define WDOG_PREOP_NONE 0 #define WDOG_PREOP_PANIC 1 /* Cause data to be available to read. Doesn't work in NMI mode. */ #define WDOG_PREOP_GIVE_DATA 2 /* Actions to perform on a full timeout. */ #define WDOG_SET_TIMEOUT_ACT(byte, use) \ byte = ((byte) & 0xf8) | ((use) & 0x7) #define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7) #define WDOG_TIMEOUT_NONE 0 #define WDOG_TIMEOUT_RESET 1 #define WDOG_TIMEOUT_POWER_DOWN 2 #define WDOG_TIMEOUT_POWER_CYCLE 3 /* * Byte 3 of the get command, byte 4 of the get response is the * pre-timeout in seconds. */ /* Bits for setting byte 4 of the set command, byte 5 of the get response. */ #define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) #define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2) #define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3) #define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) #define WDOG_EXPIRE_CLEAR_OEM (1 << 5) /* * Setting/getting the watchdog timer value. This is for bytes 5 and * 6 (the timeout time) of the set command, and bytes 6 and 7 (the * timeout time) and 8 and 9 (the current countdown value) of the * response. The timeout value is given in seconds (in the command it * is 100ms intervals). 
*/ #define WDOG_SET_TIMEOUT(byte1, byte2, val) \ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) #define WDOG_GET_TIMEOUT(byte1, byte2) \ (((byte1) | ((byte2) << 8)) / 10) #define IPMI_WDOG_RESET_TIMER 0x22 #define IPMI_WDOG_SET_TIMER 0x24 #define IPMI_WDOG_GET_TIMER 0x25 #define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80 static DEFINE_MUTEX(ipmi_watchdog_mutex); static bool nowayout = WATCHDOG_NOWAYOUT; static struct ipmi_user *watchdog_user; static int watchdog_ifnum; /* Default the timeout to 10 seconds. */ static int timeout = 10; /* The pre-timeout is disabled by default. */ static int pretimeout; /* Default timeout to set on panic */ static int panic_wdt_timeout = 255; /* Default action is to reset the board on a timeout. */ static unsigned char action_val = WDOG_TIMEOUT_RESET; static char action[16] = "reset"; static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE; static char preaction[16] = "pre_none"; static unsigned char preop_val = WDOG_PREOP_NONE; static char preop[16] = "preop_none"; static DEFINE_SPINLOCK(ipmi_read_lock); static char data_to_read; static DECLARE_WAIT_QUEUE_HEAD(read_q); static struct fasync_struct *fasync_q; static atomic_t pretimeout_since_last_heartbeat; static char expect_close; static int ifnum_to_use = -1; /* Parameters to ipmi_set_timeout */ #define IPMI_SET_TIMEOUT_NO_HB 0 #define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1 #define IPMI_SET_TIMEOUT_FORCE_HB 2 static int ipmi_set_timeout(int do_heartbeat); static void ipmi_register_watchdog(int ipmi_intf); static void ipmi_unregister_watchdog(int ipmi_intf); /* * If true, the driver will start running as soon as it is configured * and ready. */ static int start_now; static int set_param_timeout(const char *val, const struct kernel_param *kp) { char *endp; int l; int rv = 0; if (!val) return -EINVAL; l = simple_strtoul(val, &endp, 0); if (endp == val) return -EINVAL; *((int *)kp->arg) = l; if (watchdog_user) rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); return rv; } static const struct kernel_param_ops param_ops_timeout = { .set = set_param_timeout, .get = param_get_int, }; #define param_check_timeout param_check_int typedef int (*action_fn)(const char *intval, char *outval); static int action_op(const char *inval, char *outval); static int preaction_op(const char *inval, char *outval); static int preop_op(const char *inval, char *outval); static void check_parms(void); static int set_param_str(const char *val, const struct kernel_param *kp) { action_fn fn = (action_fn) kp->arg; int rv = 0; char valcp[16]; char *s; strscpy(valcp, val, 16); s = strstrip(valcp); rv = fn(s, NULL); if (rv) goto out; check_parms(); if (watchdog_user) rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); out: return rv; } static int get_param_str(char *buffer, const struct kernel_param *kp) { action_fn fn = (action_fn) kp->arg; int rv, len; rv = fn(NULL, buffer); if (rv) return rv; len = strlen(buffer); buffer[len++] = '\n'; buffer[len] = 0; return len; } static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp) { int rv = param_set_int(val, kp); if (rv) return rv; if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum)) return 0; ipmi_unregister_watchdog(watchdog_ifnum); ipmi_register_watchdog(ifnum_to_use); return 0; } static const struct kernel_param_ops param_ops_wdog_ifnum = { .set = set_param_wdog_ifnum, .get = param_get_int, }; #define param_check_wdog_ifnum param_check_int static const struct kernel_param_ops param_ops_str = { .set = set_param_str, .get = get_param_str, }; 
module_param(ifnum_to_use, wdog_ifnum, 0644); MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " "timer. Setting to -1 defaults to the first registered " "interface"); module_param(timeout, timeout, 0644); MODULE_PARM_DESC(timeout, "Timeout value in seconds."); module_param(pretimeout, timeout, 0644); MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); module_param(panic_wdt_timeout, timeout, 0644); MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds."); module_param_cb(action, &param_ops_str, action_op, 0644); MODULE_PARM_DESC(action, "Timeout action. One of: " "reset, none, power_cycle, power_off."); module_param_cb(preaction, &param_ops_str, preaction_op, 0644); MODULE_PARM_DESC(preaction, "Pretimeout action. One of: " "pre_none, pre_smi, pre_nmi, pre_int."); module_param_cb(preop, &param_ops_str, preop_op, 0644); MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " "preop_none, preop_panic, preop_give_data."); module_param(start_now, int, 0444); MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" "soon as the driver is loaded."); module_param(nowayout, bool, 0644); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default=CONFIG_WATCHDOG_NOWAYOUT)"); /* Default state of the timer. */ static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; /* Is someone using the watchdog? Only one user is allowed. */ static unsigned long ipmi_wdog_open; /* * If set to 1, the heartbeat command will set the state to reset and * start the timer. The timer doesn't normally run when the driver is * first opened until the heartbeat is set the first time, this * variable is used to accomplish this. */ static int ipmi_start_timer_on_heartbeat; /* IPMI version of the BMC. */ static unsigned char ipmi_version_major; static unsigned char ipmi_version_minor; /* If a pretimeout occurs, this is used to allow only one panic to happen. */ static atomic_t preop_panic_excl = ATOMIC_INIT(-1); #ifdef HAVE_DIE_NMI static int testing_nmi; static int nmi_handler_registered; #endif static int __ipmi_heartbeat(void); /* * We use a mutex to make sure that only one thing can send a set a * message at one time. The mutex is claimed when a message is sent * and freed when both the send and receive messages are free. */ static atomic_t msg_tofree = ATOMIC_INIT(0); static DECLARE_COMPLETION(msg_wait); static void msg_free_smi(struct ipmi_smi_msg *msg) { if (atomic_dec_and_test(&msg_tofree)) { if (!oops_in_progress) complete(&msg_wait); } } static void msg_free_recv(struct ipmi_recv_msg *msg) { if (atomic_dec_and_test(&msg_tofree)) { if (!oops_in_progress) complete(&msg_wait); } } static struct ipmi_smi_msg smi_msg = INIT_IPMI_SMI_MSG(msg_free_smi); static struct ipmi_recv_msg recv_msg = INIT_IPMI_RECV_MSG(msg_free_recv); static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, int *send_heartbeat_now) { struct kernel_ipmi_msg msg; unsigned char data[6]; int rv; struct ipmi_system_interface_addr addr; int hbnow = 0; data[0] = 0; WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { if ((ipmi_version_major > 1) || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { /* This is an IPMI 1.5-only feature. */ data[0] |= WDOG_DONT_STOP_ON_SET; } else { /* * In ipmi 1.0, setting the timer stops the watchdog, we * need to start it back up again. 
*/ hbnow = 1; } } data[1] = 0; WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state); if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) { WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val); data[2] = pretimeout; } else { WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE); data[2] = 0; /* No pretimeout. */ } data[3] = 0; WDOG_SET_TIMEOUT(data[4], data[5], timeout); addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; addr.channel = IPMI_BMC_CHANNEL; addr.lun = 0; msg.netfn = 0x06; msg.cmd = IPMI_WDOG_SET_TIMER; msg.data = data; msg.data_len = sizeof(data); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, &msg, NULL, smi_msg, recv_msg, 1); if (rv) pr_warn("set timeout error: %d\n", rv); else if (send_heartbeat_now) *send_heartbeat_now = hbnow; return rv; } static int _ipmi_set_timeout(int do_heartbeat) { int send_heartbeat_now; int rv; if (!watchdog_user) return -ENODEV; atomic_set(&msg_tofree, 2); rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now); if (rv) { atomic_set(&msg_tofree, 0); return rv; } wait_for_completion(&msg_wait); if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB) || ((send_heartbeat_now) && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY))) rv = __ipmi_heartbeat(); return rv; } static int ipmi_set_timeout(int do_heartbeat) { int rv; mutex_lock(&ipmi_watchdog_mutex); rv = _ipmi_set_timeout(do_heartbeat); mutex_unlock(&ipmi_watchdog_mutex); return rv; } static atomic_t panic_done_count = ATOMIC_INIT(0); static void panic_smi_free(struct ipmi_smi_msg *msg) { atomic_dec(&panic_done_count); } static void panic_recv_free(struct ipmi_recv_msg *msg) { atomic_dec(&panic_done_count); } static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = INIT_IPMI_SMI_MSG(panic_smi_free); static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = INIT_IPMI_RECV_MSG(panic_recv_free); static void panic_halt_ipmi_heartbeat(void) { struct kernel_ipmi_msg msg; struct ipmi_system_interface_addr addr; int rv; /* * Don't reset the timer if we have the timer turned off, that * re-enables the watchdog. */ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) return; addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; addr.channel = IPMI_BMC_CHANNEL; addr.lun = 0; msg.netfn = 0x06; msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; atomic_add(2, &panic_done_count); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, &msg, NULL, &panic_halt_heartbeat_smi_msg, &panic_halt_heartbeat_recv_msg, 1); if (rv) atomic_sub(2, &panic_done_count); } static struct ipmi_smi_msg panic_halt_smi_msg = INIT_IPMI_SMI_MSG(panic_smi_free); static struct ipmi_recv_msg panic_halt_recv_msg = INIT_IPMI_RECV_MSG(panic_recv_free); /* * Special call, doesn't claim any locks. This is only to be called * at panic or halt time, in run-to-completion mode, when the caller * is the only CPU and the only thing that will be going is these IPMI * calls. */ static void panic_halt_ipmi_set_timeout(void) { int send_heartbeat_now; int rv; /* Wait for the messages to be free. 
*/ while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); atomic_add(2, &panic_done_count); rv = __ipmi_set_timeout(&panic_halt_smi_msg, &panic_halt_recv_msg, &send_heartbeat_now); if (rv) { atomic_sub(2, &panic_done_count); pr_warn("Unable to extend the watchdog timeout\n"); } else { if (send_heartbeat_now) panic_halt_ipmi_heartbeat(); } while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); } static int __ipmi_heartbeat(void) { struct kernel_ipmi_msg msg; int rv; struct ipmi_system_interface_addr addr; int timeout_retries = 0; restart: /* * Don't reset the timer if we have the timer turned off, that * re-enables the watchdog. */ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) return 0; atomic_set(&msg_tofree, 2); addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; addr.channel = IPMI_BMC_CHANNEL; addr.lun = 0; msg.netfn = 0x06; msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, &msg, NULL, &smi_msg, &recv_msg, 1); if (rv) { atomic_set(&msg_tofree, 0); pr_warn("heartbeat send failure: %d\n", rv); return rv; } /* Wait for the heartbeat to be sent. */ wait_for_completion(&msg_wait); if (recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) { timeout_retries++; if (timeout_retries > 3) { pr_err("Unable to restore the IPMI watchdog's settings, giving up\n"); rv = -EIO; goto out; } /* * The timer was not initialized, that means the BMC was * probably reset and lost the watchdog information. Attempt * to restore the timer's info. Note that we still hold * the heartbeat lock, to keep a heartbeat from happening * in this process, so must say no heartbeat to avoid a * deadlock on this mutex */ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); if (rv) { pr_err("Unable to send the command to set the watchdog's settings, giving up\n"); goto out; } /* Might need a heartbeat send, go ahead and do it. */ goto restart; } else if (recv_msg.msg.data[0] != 0) { /* * Got an error in the heartbeat response. It was already * reported in ipmi_wdog_msg_handler, but we should return * an error here. */ rv = -EINVAL; } out: return rv; } static int _ipmi_heartbeat(void) { int rv; if (!watchdog_user) return -ENODEV; if (ipmi_start_timer_on_heartbeat) { ipmi_start_timer_on_heartbeat = 0; ipmi_watchdog_state = action_val; rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); } else if (atomic_cmpxchg(&pretimeout_since_last_heartbeat, 1, 0)) { /* * A pretimeout occurred, make sure we set the timeout. * We don't want to set the action, though, we want to * leave that alone (thus it can't be combined with the * above operation. */ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); } else { rv = __ipmi_heartbeat(); } return rv; } static int ipmi_heartbeat(void) { int rv; mutex_lock(&ipmi_watchdog_mutex); rv = _ipmi_heartbeat(); mutex_unlock(&ipmi_watchdog_mutex); return rv; } static const struct watchdog_info ident = { .options = 0, /* WDIOF_SETTIMEOUT, */ .firmware_version = 1, .identity = "IPMI" }; static int ipmi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int i; int val; switch (cmd) { case WDIOC_GETSUPPORT: i = copy_to_user(argp, &ident, sizeof(ident)); return i ? 
-EFAULT : 0; case WDIOC_SETTIMEOUT: i = copy_from_user(&val, argp, sizeof(int)); if (i) return -EFAULT; timeout = val; return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); case WDIOC_GETTIMEOUT: i = copy_to_user(argp, &timeout, sizeof(timeout)); if (i) return -EFAULT; return 0; case WDIOC_SETPRETIMEOUT: i = copy_from_user(&val, argp, sizeof(int)); if (i) return -EFAULT; pretimeout = val; return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); case WDIOC_GETPRETIMEOUT: i = copy_to_user(argp, &pretimeout, sizeof(pretimeout)); if (i) return -EFAULT; return 0; case WDIOC_KEEPALIVE: return _ipmi_heartbeat(); case WDIOC_SETOPTIONS: i = copy_from_user(&val, argp, sizeof(int)); if (i) return -EFAULT; if (val & WDIOS_DISABLECARD) { ipmi_watchdog_state = WDOG_TIMEOUT_NONE; _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); ipmi_start_timer_on_heartbeat = 0; } if (val & WDIOS_ENABLECARD) { ipmi_watchdog_state = action_val; _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); } return 0; case WDIOC_GETSTATUS: val = 0; i = copy_to_user(argp, &val, sizeof(val)); if (i) return -EFAULT; return 0; default: return -ENOIOCTLCMD; } } static long ipmi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ipmi_watchdog_mutex); ret = ipmi_ioctl(file, cmd, arg); mutex_unlock(&ipmi_watchdog_mutex); return ret; } static ssize_t ipmi_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { int rv; if (len) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } rv = ipmi_heartbeat(); if (rv) return rv; } return len; } static ssize_t ipmi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int rv = 0; wait_queue_entry_t wait; if (count <= 0) return 0; /* * Reading returns if the pretimeout has gone off, and it only does * it once per pretimeout. */ spin_lock_irq(&ipmi_read_lock); if (!data_to_read) { if (file->f_flags & O_NONBLOCK) { rv = -EAGAIN; goto out; } init_waitqueue_entry(&wait, current); add_wait_queue(&read_q, &wait); while (!data_to_read && !signal_pending(current)) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&ipmi_read_lock); schedule(); spin_lock_irq(&ipmi_read_lock); } remove_wait_queue(&read_q, &wait); if (signal_pending(current)) { rv = -ERESTARTSYS; goto out; } } data_to_read = 0; out: spin_unlock_irq(&ipmi_read_lock); if (rv == 0) { if (copy_to_user(buf, &data_to_read, 1)) rv = -EFAULT; else rv = 1; } return rv; } static int ipmi_open(struct inode *ino, struct file *filep) { switch (iminor(ino)) { case WATCHDOG_MINOR: if (test_and_set_bit(0, &ipmi_wdog_open)) return -EBUSY; /* * Don't start the timer now, let it start on the * first heartbeat. 
*/ ipmi_start_timer_on_heartbeat = 1; return stream_open(ino, filep); default: return (-ENODEV); } } static __poll_t ipmi_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; poll_wait(file, &read_q, wait); spin_lock_irq(&ipmi_read_lock); if (data_to_read) mask |= (EPOLLIN | EPOLLRDNORM); spin_unlock_irq(&ipmi_read_lock); return mask; } static int ipmi_fasync(int fd, struct file *file, int on) { int result; result = fasync_helper(fd, file, on, &fasync_q); return (result); } static int ipmi_close(struct inode *ino, struct file *filep) { if (iminor(ino) == WATCHDOG_MINOR) { if (expect_close == 42) { mutex_lock(&ipmi_watchdog_mutex); ipmi_watchdog_state = WDOG_TIMEOUT_NONE; _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); mutex_unlock(&ipmi_watchdog_mutex); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); ipmi_heartbeat(); } clear_bit(0, &ipmi_wdog_open); } expect_close = 0; return 0; } static const struct file_operations ipmi_wdog_fops = { .owner = THIS_MODULE, .read = ipmi_read, .poll = ipmi_poll, .write = ipmi_write, .unlocked_ioctl = ipmi_unlocked_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, .llseek = no_llseek, }; static struct miscdevice ipmi_wdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &ipmi_wdog_fops }; static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, void *handler_data) { if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER && msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) pr_info("response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n"); else if (msg->msg.data[0] != 0) pr_err("response: Error %x on cmd %x\n", msg->msg.data[0], msg->msg.cmd); ipmi_free_recv_msg(msg); } static void ipmi_wdog_pretimeout_handler(void *handler_data) { if (preaction_val != WDOG_PRETIMEOUT_NONE) { if (preop_val == WDOG_PREOP_PANIC) { if (atomic_inc_and_test(&preop_panic_excl)) panic("Watchdog pre-timeout"); } else if (preop_val == WDOG_PREOP_GIVE_DATA) { unsigned long flags; spin_lock_irqsave(&ipmi_read_lock, flags); data_to_read = 1; wake_up_interruptible(&read_q); kill_fasync(&fasync_q, SIGIO, POLL_IN); spin_unlock_irqrestore(&ipmi_read_lock, flags); } } /* * On some machines, the heartbeat will give an error and not * work unless we re-enable the timer. So do so. */ atomic_set(&pretimeout_since_last_heartbeat, 1); } static void ipmi_wdog_panic_handler(void *user_data) { static int panic_event_handled; /* * On a panic, if we have a panic timeout, make sure to extend * the watchdog timer to a reasonable value to complete the * panic, if the watchdog timer is running. Plus the * pretimeout is meaningless at panic time. */ if (watchdog_user && !panic_event_handled && ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { /* Make sure we do this only once. 
*/ panic_event_handled = 1; timeout = panic_wdt_timeout; pretimeout = 0; panic_halt_ipmi_set_timeout(); } } static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = ipmi_wdog_msg_handler, .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler, .ipmi_panic_handler = ipmi_wdog_panic_handler }; static void ipmi_register_watchdog(int ipmi_intf) { int rv = -EBUSY; if (watchdog_user) goto out; if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf)) goto out; watchdog_ifnum = ipmi_intf; rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); if (rv < 0) { pr_crit("Unable to register with ipmi\n"); goto out; } rv = ipmi_get_version(watchdog_user, &ipmi_version_major, &ipmi_version_minor); if (rv) { pr_warn("Unable to get IPMI version, assuming 1.0\n"); ipmi_version_major = 1; ipmi_version_minor = 0; } rv = misc_register(&ipmi_wdog_miscdev); if (rv < 0) { ipmi_destroy_user(watchdog_user); watchdog_user = NULL; pr_crit("Unable to register misc device\n"); } #ifdef HAVE_DIE_NMI if (nmi_handler_registered) { int old_pretimeout = pretimeout; int old_timeout = timeout; int old_preop_val = preop_val; /* * Set the pretimeout to go off in a second and give * ourselves plenty of time to stop the timer. */ ipmi_watchdog_state = WDOG_TIMEOUT_RESET; preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ pretimeout = 99; timeout = 100; testing_nmi = 1; rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); if (rv) { pr_warn("Error starting timer to test NMI: 0x%x. The NMI pretimeout will likely not work\n", rv); rv = 0; goto out_restore; } msleep(1500); if (testing_nmi != 2) { pr_warn("IPMI NMI didn't seem to occur. The NMI pretimeout will likely not work\n"); } out_restore: testing_nmi = 0; preop_val = old_preop_val; pretimeout = old_pretimeout; timeout = old_timeout; } #endif out: if ((start_now) && (rv == 0)) { /* Run from startup, so start the timer now. */ start_now = 0; /* Disable this function after first startup. */ ipmi_watchdog_state = action_val; ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); pr_info("Starting now!\n"); } else { /* Stop the timer now. */ ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } } static void ipmi_unregister_watchdog(int ipmi_intf) { int rv; struct ipmi_user *loc_user = watchdog_user; if (!loc_user) return; if (watchdog_ifnum != ipmi_intf) return; /* Make sure no one can call us any more. */ misc_deregister(&ipmi_wdog_miscdev); watchdog_user = NULL; /* * Wait to make sure the message makes it out. The lower layer has * pointers to our buffers, we want to make sure they are done before * we release our memory. */ while (atomic_read(&msg_tofree)) msg_free_smi(NULL); mutex_lock(&ipmi_watchdog_mutex); /* Disconnect from IPMI. */ rv = ipmi_destroy_user(loc_user); if (rv) pr_warn("error unlinking from IPMI: %d\n", rv); /* If it comes back, restart it properly. */ ipmi_start_timer_on_heartbeat = 1; mutex_unlock(&ipmi_watchdog_mutex); } #ifdef HAVE_DIE_NMI static int ipmi_nmi(unsigned int val, struct pt_regs *regs) { /* * If we get here, it's an NMI that's not a memory or I/O * error. We can't truly tell if it's from IPMI or not * without sending a message, and sending a message is almost * impossible because of locking. */ if (testing_nmi) { testing_nmi = 2; return NMI_HANDLED; } /* If we are not expecting a timeout, ignore it. 
*/ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) return NMI_DONE; if (preaction_val != WDOG_PRETIMEOUT_NMI) return NMI_DONE; /* * If no one else handled the NMI, we assume it was the IPMI * watchdog. */ if (preop_val == WDOG_PREOP_PANIC) { /* On some machines, the heartbeat will give an error and not work unless we re-enable the timer. So do so. */ atomic_set(&pretimeout_since_last_heartbeat, 1); if (atomic_inc_and_test(&preop_panic_excl)) nmi_panic(regs, "pre-timeout"); } return NMI_HANDLED; } #endif static int wdog_reboot_handler(struct notifier_block *this, unsigned long code, void *unused) { static int reboot_event_handled; if ((watchdog_user) && (!reboot_event_handled)) { /* Make sure we only do this once. */ reboot_event_handled = 1; if (code == SYS_POWER_OFF || code == SYS_HALT) { /* Disable the WDT if we are shutting down. */ ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { /* Set a long timer to let the reboot happen or reset if it hangs, but only if the watchdog timer was already running. */ if (timeout < 120) timeout = 120; pretimeout = 0; ipmi_watchdog_state = WDOG_TIMEOUT_RESET; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } } return NOTIFY_OK; } static struct notifier_block wdog_reboot_notifier = { .notifier_call = wdog_reboot_handler, .next = NULL, .priority = 0 }; static void ipmi_new_smi(int if_num, struct device *device) { ipmi_register_watchdog(if_num); } static void ipmi_smi_gone(int if_num) { ipmi_unregister_watchdog(if_num); } static struct ipmi_smi_watcher smi_watcher = { .owner = THIS_MODULE, .new_smi = ipmi_new_smi, .smi_gone = ipmi_smi_gone }; static int action_op(const char *inval, char *outval) { if (outval) strcpy(outval, action); if (!inval) return 0; if (strcmp(inval, "reset") == 0) action_val = WDOG_TIMEOUT_RESET; else if (strcmp(inval, "none") == 0) action_val = WDOG_TIMEOUT_NONE; else if (strcmp(inval, "power_cycle") == 0) action_val = WDOG_TIMEOUT_POWER_CYCLE; else if (strcmp(inval, "power_off") == 0) action_val = WDOG_TIMEOUT_POWER_DOWN; else return -EINVAL; strcpy(action, inval); return 0; } static int preaction_op(const char *inval, char *outval) { if (outval) strcpy(outval, preaction); if (!inval) return 0; if (strcmp(inval, "pre_none") == 0) preaction_val = WDOG_PRETIMEOUT_NONE; else if (strcmp(inval, "pre_smi") == 0) preaction_val = WDOG_PRETIMEOUT_SMI; #ifdef HAVE_DIE_NMI else if (strcmp(inval, "pre_nmi") == 0) preaction_val = WDOG_PRETIMEOUT_NMI; #endif else if (strcmp(inval, "pre_int") == 0) preaction_val = WDOG_PRETIMEOUT_MSG_INT; else return -EINVAL; strcpy(preaction, inval); return 0; } static int preop_op(const char *inval, char *outval) { if (outval) strcpy(outval, preop); if (!inval) return 0; if (strcmp(inval, "preop_none") == 0) preop_val = WDOG_PREOP_NONE; else if (strcmp(inval, "preop_panic") == 0) preop_val = WDOG_PREOP_PANIC; else if (strcmp(inval, "preop_give_data") == 0) preop_val = WDOG_PREOP_GIVE_DATA; else return -EINVAL; strcpy(preop, inval); return 0; } static void check_parms(void) { #ifdef HAVE_DIE_NMI int do_nmi = 0; int rv; if (preaction_val == WDOG_PRETIMEOUT_NMI) { do_nmi = 1; if (preop_val == WDOG_PREOP_GIVE_DATA) { pr_warn("Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n"); preop_op("preop_none", NULL); do_nmi = 0; } } if (do_nmi && !nmi_handler_registered) { rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, "ipmi"); if (rv) { pr_warn("Can't register nmi handler\n"); return; } else 
nmi_handler_registered = 1; } else if (!do_nmi && nmi_handler_registered) { unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); nmi_handler_registered = 0; } #endif } static int __init ipmi_wdog_init(void) { int rv; if (action_op(action, NULL)) { action_op("reset", NULL); pr_info("Unknown action '%s', defaulting to reset\n", action); } if (preaction_op(preaction, NULL)) { preaction_op("pre_none", NULL); pr_info("Unknown preaction '%s', defaulting to none\n", preaction); } if (preop_op(preop, NULL)) { preop_op("preop_none", NULL); pr_info("Unknown preop '%s', defaulting to none\n", preop); } check_parms(); register_reboot_notifier(&wdog_reboot_notifier); rv = ipmi_smi_watcher_register(&smi_watcher); if (rv) { #ifdef HAVE_DIE_NMI if (nmi_handler_registered) unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); #endif unregister_reboot_notifier(&wdog_reboot_notifier); pr_warn("can't register smi watcher\n"); return rv; } pr_info("driver initialized\n"); return 0; } static void __exit ipmi_wdog_exit(void) { ipmi_smi_watcher_unregister(&smi_watcher); ipmi_unregister_watchdog(watchdog_ifnum); #ifdef HAVE_DIE_NMI if (nmi_handler_registered) unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); #endif unregister_reboot_notifier(&wdog_reboot_notifier); } module_exit(ipmi_wdog_exit); module_init(ipmi_wdog_init); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <[email protected]>"); MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
linux-master
drivers/char/ipmi/ipmi_watchdog.c
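From userspace this driver behaves like any /dev/watchdog device: WDIOC_SETTIMEOUT and WDIOC_KEEPALIVE map onto the ioctl handler above, and writing 'V' before close arms the magic-close path so the timer is stopped rather than left running. A minimal client might look like the sketch below; the timeout and number of heartbeats are arbitrary.

/* Minimal sketch of a userspace client for the IPMI watchdog device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
    int timeout = 30;    /* seconds, arbitrary */
    int fd, i;

    fd = open("/dev/watchdog", O_WRONLY);
    if (fd < 0) {
        perror("open /dev/watchdog");
        return 1;
    }

    if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) < 0)
        perror("WDIOC_SETTIMEOUT");

    /* Pet the watchdog a few times; a real daemon would loop until shutdown. */
    for (i = 0; i < 5; i++) {
        ioctl(fd, WDIOC_KEEPALIVE, 0);
        sleep(timeout / 2);
    }

    /* Magic close: tells the driver this is an orderly stop, not a crash. */
    if (write(fd, "V", 1) != 1)
        perror("write");
    close(fd);
    return 0;
}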
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015-2018, Intel Corporation. * Copyright (c) 2021, IBM Corp. */ #include <linux/device.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include "kcs_bmc.h" /* Implement both the device and client interfaces here */ #include "kcs_bmc_device.h" #include "kcs_bmc_client.h" /* Record registered devices and drivers */ static DEFINE_MUTEX(kcs_bmc_lock); static LIST_HEAD(kcs_bmc_devices); static LIST_HEAD(kcs_bmc_drivers); /* Consumer data access */ u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc) { return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr); } EXPORT_SYMBOL(kcs_bmc_read_data); void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data) { kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data); } EXPORT_SYMBOL(kcs_bmc_write_data); u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc) { return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.str); } EXPORT_SYMBOL(kcs_bmc_read_status); void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data) { kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data); } EXPORT_SYMBOL(kcs_bmc_write_status); void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val) { kcs_bmc->ops->io_updateb(kcs_bmc, kcs_bmc->ioreg.str, mask, val); } EXPORT_SYMBOL(kcs_bmc_update_status); irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_client *client; irqreturn_t rc = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&kcs_bmc->lock, flags); client = kcs_bmc->client; if (client) rc = client->ops->event(client); spin_unlock_irqrestore(&kcs_bmc->lock, flags); return rc; } EXPORT_SYMBOL(kcs_bmc_handle_event); int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client) { int rc; spin_lock_irq(&kcs_bmc->lock); if (kcs_bmc->client) { rc = -EBUSY; } else { u8 mask = KCS_BMC_EVENT_TYPE_IBF; kcs_bmc->client = client; kcs_bmc_update_event_mask(kcs_bmc, mask, mask); rc = 0; } spin_unlock_irq(&kcs_bmc->lock); return rc; } EXPORT_SYMBOL(kcs_bmc_enable_device); void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client) { spin_lock_irq(&kcs_bmc->lock); if (client == kcs_bmc->client) { u8 mask = KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE; kcs_bmc_update_event_mask(kcs_bmc, mask, 0); kcs_bmc->client = NULL; } spin_unlock_irq(&kcs_bmc->lock); } EXPORT_SYMBOL(kcs_bmc_disable_device); int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_driver *drv; int error = 0; int rc; spin_lock_init(&kcs_bmc->lock); kcs_bmc->client = NULL; mutex_lock(&kcs_bmc_lock); list_add(&kcs_bmc->entry, &kcs_bmc_devices); list_for_each_entry(drv, &kcs_bmc_drivers, entry) { rc = drv->ops->add_device(kcs_bmc); if (!rc) continue; dev_err(kcs_bmc->dev, "Failed to add chardev for KCS channel %d: %d", kcs_bmc->channel, rc); error = rc; } mutex_unlock(&kcs_bmc_lock); return error; } EXPORT_SYMBOL(kcs_bmc_add_device); void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_driver *drv; int rc; mutex_lock(&kcs_bmc_lock); list_del(&kcs_bmc->entry); list_for_each_entry(drv, &kcs_bmc_drivers, entry) { rc = drv->ops->remove_device(kcs_bmc); if (rc) dev_err(kcs_bmc->dev, "Failed to remove chardev for KCS channel %d: %d", kcs_bmc->channel, rc); } mutex_unlock(&kcs_bmc_lock); } EXPORT_SYMBOL(kcs_bmc_remove_device); void kcs_bmc_register_driver(struct kcs_bmc_driver *drv) { struct kcs_bmc_device *kcs_bmc; int rc; mutex_lock(&kcs_bmc_lock); list_add(&drv->entry, 
&kcs_bmc_drivers); list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) { rc = drv->ops->add_device(kcs_bmc); if (rc) dev_err(kcs_bmc->dev, "Failed to add driver for KCS channel %d: %d", kcs_bmc->channel, rc); } mutex_unlock(&kcs_bmc_lock); } EXPORT_SYMBOL(kcs_bmc_register_driver); void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv) { struct kcs_bmc_device *kcs_bmc; int rc; mutex_lock(&kcs_bmc_lock); list_del(&drv->entry); list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) { rc = drv->ops->remove_device(kcs_bmc); if (rc) dev_err(kcs_bmc->dev, "Failed to remove driver for KCS channel %d: %d", kcs_bmc->channel, rc); } mutex_unlock(&kcs_bmc_lock); } EXPORT_SYMBOL(kcs_bmc_unregister_driver); void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events) { kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events); } EXPORT_SYMBOL(kcs_bmc_update_event_mask); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Haiyue Wang <[email protected]>"); MODULE_AUTHOR("Andrew Jeffery <[email protected]>"); MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
linux-master
drivers/char/ipmi/kcs_bmc.c
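kcs_bmc.c only routes events: a device driver calls kcs_bmc_handle_event() from its IRQ handler, and whichever single client has claimed the device via kcs_bmc_enable_device() gets its ->event() callback under the device lock. A stripped-down, hypothetical client is sketched below; the struct and ops layout is assumed from how this file dereferences kcs_bmc_client, and the "echo" behaviour is purely illustrative.

/* Hypothetical minimal KCS client; layouts assumed from kcs_bmc_client.h usage above. */
#include <linux/interrupt.h>
#include "kcs_bmc_client.h"

static irqreturn_t echo_event(struct kcs_bmc_client *client)
{
    /* Enabled with only the IBF event, so an event here means the host wrote a byte. */
    kcs_bmc_read_data(client->dev);           /* reading IDR clears IBF */
    kcs_bmc_write_data(client->dev, 0x00);    /* dummy reply raises OBF for the host */
    return IRQ_HANDLED;
}

static const struct kcs_bmc_client_ops echo_ops = {
    .event = echo_event,
};

static int echo_attach(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
{
    client->ops = &echo_ops;
    client->dev = kcs_bmc;

    /* Fails with -EBUSY if another client already owns this device. */
    return kcs_bmc_enable_device(kcs_bmc, client);
}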
// SPDX-License-Identifier: GPL-2.0+ #include <linux/io.h> #include "ipmi_si.h" static unsigned char intf_mem_inb(const struct si_sm_io *io, unsigned int offset) { return readb((io->addr)+(offset * io->regspacing)); } static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, unsigned char b) { writeb(b, (io->addr)+(offset * io->regspacing)); } static unsigned char intf_mem_inw(const struct si_sm_io *io, unsigned int offset) { return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, unsigned char b) { writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); } static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, unsigned char b) { writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); } #ifdef readq static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } static void mem_outq(const struct si_sm_io *io, unsigned int offset, unsigned char b) { writeq((u64)b << io->regshift, (io->addr)+(offset * io->regspacing)); } #endif static void mem_region_cleanup(struct si_sm_io *io, int num) { unsigned long addr = io->addr_data; int idx; for (idx = 0; idx < num; idx++) release_mem_region(addr + idx * io->regspacing, io->regsize); } static void mem_cleanup(struct si_sm_io *io) { if (io->addr) { iounmap(io->addr); mem_region_cleanup(io, io->io_size); } } int ipmi_si_mem_setup(struct si_sm_io *io) { unsigned long addr = io->addr_data; int mapsize, idx; if (!addr) return -ENODEV; /* * Figure out the actual readb/readw/readl/etc routine to use based * upon the register size. */ switch (io->regsize) { case 1: io->inputb = intf_mem_inb; io->outputb = intf_mem_outb; break; case 2: io->inputb = intf_mem_inw; io->outputb = intf_mem_outw; break; case 4: io->inputb = intf_mem_inl; io->outputb = intf_mem_outl; break; #ifdef readq case 8: io->inputb = mem_inq; io->outputb = mem_outq; break; #endif default: dev_warn(io->dev, "Invalid register size: %d\n", io->regsize); return -EINVAL; } /* * Some BIOSes reserve disjoint memory regions in their ACPI * tables. This causes problems when trying to request the * entire region. Therefore we must request each register * separately. */ for (idx = 0; idx < io->io_size; idx++) { if (request_mem_region(addr + idx * io->regspacing, io->regsize, SI_DEVICE_NAME) == NULL) { /* Undo allocations */ mem_region_cleanup(io, idx); return -EIO; } } /* * Calculate the total amount of memory to claim. This is an * unusual looking calculation, but it avoids claiming any * more memory than it has to. It will claim everything * between the first address to the end of the last full * register. */ mapsize = ((io->io_size * io->regspacing) - (io->regspacing - io->regsize)); io->addr = ioremap(addr, mapsize); if (io->addr == NULL) { mem_region_cleanup(io, io->io_size); return -EIO; } io->io_cleanup = mem_cleanup; return 0; }
linux-master
drivers/char/ipmi/ipmi_si_mem_io.c
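The mapsize calculation in ipmi_si_mem_setup() above is easy to misread: it claims memory only from the first register through the end of the last one, not io_size * regspacing bytes, while each register region is still requested individually. The stand-alone sketch below walks through that arithmetic with made-up parameter values; it is illustrative and not taken from any real platform.

/*
 * Illustration only: the address math used by ipmi_si_mem_io.c above,
 * with hypothetical parameters.
 */
#include <stdio.h>

int main(void)
{
	unsigned long addr_data = 0xfed00000;	/* hypothetical base address */
	unsigned int io_size = 3;		/* three registers */
	unsigned int regspacing = 4;		/* a register every 4 bytes */
	unsigned int regsize = 1;		/* each register is 1 byte wide */
	unsigned int i, mapsize;

	/* Each register is requested separately, as in ipmi_si_mem_setup(). */
	for (i = 0; i < io_size; i++)
		printf("register %u at 0x%lx, length %u\n",
		       i, addr_data + i * regspacing, regsize);

	/* Claim from the first register to the end of the last one only. */
	mapsize = io_size * regspacing - (regspacing - regsize);
	printf("ioremap(0x%lx, %u)\n", addr_data, mapsize);	/* 3*4 - 3 = 9 bytes */

	return 0;
}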
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015-2018, Intel Corporation. */ #define pr_fmt(fmt) "kcs-bmc: " fmt #include <linux/errno.h> #include <linux/io.h> #include <linux/ipmi_bmc.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include "kcs_bmc_client.h" /* Different phases of the KCS BMC module. * KCS_PHASE_IDLE: * BMC should not be expecting nor sending any data. * KCS_PHASE_WRITE_START: * BMC is receiving a WRITE_START command from system software. * KCS_PHASE_WRITE_DATA: * BMC is receiving a data byte from system software. * KCS_PHASE_WRITE_END_CMD: * BMC is waiting a last data byte from system software. * KCS_PHASE_WRITE_DONE: * BMC has received the whole request from system software. * KCS_PHASE_WAIT_READ: * BMC is waiting the response from the upper IPMI service. * KCS_PHASE_READ: * BMC is transferring the response to system software. * KCS_PHASE_ABORT_ERROR1: * BMC is waiting error status request from system software. * KCS_PHASE_ABORT_ERROR2: * BMC is waiting for idle status afer error from system software. * KCS_PHASE_ERROR: * BMC has detected a protocol violation at the interface level. */ enum kcs_ipmi_phases { KCS_PHASE_IDLE, KCS_PHASE_WRITE_START, KCS_PHASE_WRITE_DATA, KCS_PHASE_WRITE_END_CMD, KCS_PHASE_WRITE_DONE, KCS_PHASE_WAIT_READ, KCS_PHASE_READ, KCS_PHASE_ABORT_ERROR1, KCS_PHASE_ABORT_ERROR2, KCS_PHASE_ERROR }; /* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */ enum kcs_ipmi_errors { KCS_NO_ERROR = 0x00, KCS_ABORTED_BY_COMMAND = 0x01, KCS_ILLEGAL_CONTROL_CODE = 0x02, KCS_LENGTH_ERROR = 0x06, KCS_UNSPECIFIED_ERROR = 0xFF }; struct kcs_bmc_ipmi { struct list_head entry; struct kcs_bmc_client client; spinlock_t lock; enum kcs_ipmi_phases phase; enum kcs_ipmi_errors error; wait_queue_head_t queue; bool data_in_avail; int data_in_idx; u8 *data_in; int data_out_idx; int data_out_len; u8 *data_out; struct mutex mutex; u8 *kbuffer; struct miscdevice miscdev; }; #define DEVICE_NAME "ipmi-kcs" #define KCS_MSG_BUFSIZ 1000 #define KCS_ZERO_DATA 0 /* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */ #define KCS_STATUS_STATE(state) (state << 6) #define KCS_STATUS_STATE_MASK GENMASK(7, 6) #define KCS_STATUS_CMD_DAT BIT(3) #define KCS_STATUS_SMS_ATN BIT(2) #define KCS_STATUS_IBF BIT(1) #define KCS_STATUS_OBF BIT(0) /* IPMI 2.0 - Table 9-2, KCS Interface State Bits */ enum kcs_states { IDLE_STATE = 0, READ_STATE = 1, WRITE_STATE = 2, ERROR_STATE = 3, }; /* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */ #define KCS_CMD_GET_STATUS_ABORT 0x60 #define KCS_CMD_WRITE_START 0x61 #define KCS_CMD_WRITE_END 0x62 #define KCS_CMD_READ_BYTE 0x68 static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state) { kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state)); } static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv) { set_state(priv, ERROR_STATE); kcs_bmc_read_data(priv->client.dev); kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA); priv->phase = KCS_PHASE_ERROR; priv->data_in_avail = false; priv->data_in_idx = 0; } static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv) { struct kcs_bmc_device *dev; u8 data; dev = priv->client.dev; switch (priv->phase) { case KCS_PHASE_WRITE_START: priv->phase = KCS_PHASE_WRITE_DATA; fallthrough; case KCS_PHASE_WRITE_DATA: if (priv->data_in_idx < KCS_MSG_BUFSIZ) { set_state(priv, WRITE_STATE); 
kcs_bmc_write_data(dev, KCS_ZERO_DATA); priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev); } else { kcs_bmc_ipmi_force_abort(priv); priv->error = KCS_LENGTH_ERROR; } break; case KCS_PHASE_WRITE_END_CMD: if (priv->data_in_idx < KCS_MSG_BUFSIZ) { set_state(priv, READ_STATE); priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev); priv->phase = KCS_PHASE_WRITE_DONE; priv->data_in_avail = true; wake_up_interruptible(&priv->queue); } else { kcs_bmc_ipmi_force_abort(priv); priv->error = KCS_LENGTH_ERROR; } break; case KCS_PHASE_READ: if (priv->data_out_idx == priv->data_out_len) set_state(priv, IDLE_STATE); data = kcs_bmc_read_data(dev); if (data != KCS_CMD_READ_BYTE) { set_state(priv, ERROR_STATE); kcs_bmc_write_data(dev, KCS_ZERO_DATA); break; } if (priv->data_out_idx == priv->data_out_len) { kcs_bmc_write_data(dev, KCS_ZERO_DATA); priv->phase = KCS_PHASE_IDLE; break; } kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]); break; case KCS_PHASE_ABORT_ERROR1: set_state(priv, READ_STATE); kcs_bmc_read_data(dev); kcs_bmc_write_data(dev, priv->error); priv->phase = KCS_PHASE_ABORT_ERROR2; break; case KCS_PHASE_ABORT_ERROR2: set_state(priv, IDLE_STATE); kcs_bmc_read_data(dev); kcs_bmc_write_data(dev, KCS_ZERO_DATA); priv->phase = KCS_PHASE_IDLE; break; default: kcs_bmc_ipmi_force_abort(priv); break; } } static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv) { u8 cmd; set_state(priv, WRITE_STATE); kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA); cmd = kcs_bmc_read_data(priv->client.dev); switch (cmd) { case KCS_CMD_WRITE_START: priv->phase = KCS_PHASE_WRITE_START; priv->error = KCS_NO_ERROR; priv->data_in_avail = false; priv->data_in_idx = 0; break; case KCS_CMD_WRITE_END: if (priv->phase != KCS_PHASE_WRITE_DATA) { kcs_bmc_ipmi_force_abort(priv); break; } priv->phase = KCS_PHASE_WRITE_END_CMD; break; case KCS_CMD_GET_STATUS_ABORT: if (priv->error == KCS_NO_ERROR) priv->error = KCS_ABORTED_BY_COMMAND; priv->phase = KCS_PHASE_ABORT_ERROR1; priv->data_in_avail = false; priv->data_in_idx = 0; break; default: kcs_bmc_ipmi_force_abort(priv); priv->error = KCS_ILLEGAL_CONTROL_CODE; break; } } static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client) { return container_of(client, struct kcs_bmc_ipmi, client); } static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client) { struct kcs_bmc_ipmi *priv; u8 status; int ret; priv = client_to_kcs_bmc_ipmi(client); if (!priv) return IRQ_NONE; spin_lock(&priv->lock); status = kcs_bmc_read_status(client->dev); if (status & KCS_STATUS_IBF) { if (status & KCS_STATUS_CMD_DAT) kcs_bmc_ipmi_handle_cmd(priv); else kcs_bmc_ipmi_handle_data(priv); ret = IRQ_HANDLED; } else { ret = IRQ_NONE; } spin_unlock(&priv->lock); return ret; } static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = { .event = kcs_bmc_ipmi_event, }; static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp) { return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev); } static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp) { struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp); return kcs_bmc_enable_device(priv->client.dev, &priv->client); } static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait) { struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp); __poll_t mask = 0; poll_wait(filp, &priv->queue, wait); spin_lock_irq(&priv->lock); if (priv->data_in_avail) mask |= EPOLLIN; spin_unlock_irq(&priv->lock); return mask; } static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf, 
size_t count, loff_t *ppos) { struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp); bool data_avail; size_t data_len; ssize_t ret; if (!(filp->f_flags & O_NONBLOCK)) wait_event_interruptible(priv->queue, priv->data_in_avail); mutex_lock(&priv->mutex); spin_lock_irq(&priv->lock); data_avail = priv->data_in_avail; if (data_avail) { data_len = priv->data_in_idx; memcpy(priv->kbuffer, priv->data_in, data_len); } spin_unlock_irq(&priv->lock); if (!data_avail) { ret = -EAGAIN; goto out_unlock; } if (count < data_len) { pr_err("channel=%u with too large data : %zu\n", priv->client.dev->channel, data_len); spin_lock_irq(&priv->lock); kcs_bmc_ipmi_force_abort(priv); spin_unlock_irq(&priv->lock); ret = -EOVERFLOW; goto out_unlock; } if (copy_to_user(buf, priv->kbuffer, data_len)) { ret = -EFAULT; goto out_unlock; } ret = data_len; spin_lock_irq(&priv->lock); if (priv->phase == KCS_PHASE_WRITE_DONE) { priv->phase = KCS_PHASE_WAIT_READ; priv->data_in_avail = false; priv->data_in_idx = 0; } else { ret = -EAGAIN; } spin_unlock_irq(&priv->lock); out_unlock: mutex_unlock(&priv->mutex); return ret; } static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp); ssize_t ret; /* a minimum response size '3' : netfn + cmd + ccode */ if (count < 3 || count > KCS_MSG_BUFSIZ) return -EINVAL; mutex_lock(&priv->mutex); if (copy_from_user(priv->kbuffer, buf, count)) { ret = -EFAULT; goto out_unlock; } spin_lock_irq(&priv->lock); if (priv->phase == KCS_PHASE_WAIT_READ) { priv->phase = KCS_PHASE_READ; priv->data_out_idx = 1; priv->data_out_len = count; memcpy(priv->data_out, priv->kbuffer, count); kcs_bmc_write_data(priv->client.dev, priv->data_out[0]); ret = count; } else { ret = -EINVAL; } spin_unlock_irq(&priv->lock); out_unlock: mutex_unlock(&priv->mutex); return ret; } static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp); long ret = 0; spin_lock_irq(&priv->lock); switch (cmd) { case IPMI_BMC_IOCTL_SET_SMS_ATN: kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN); break; case IPMI_BMC_IOCTL_CLEAR_SMS_ATN: kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0); break; case IPMI_BMC_IOCTL_FORCE_ABORT: kcs_bmc_ipmi_force_abort(priv); break; default: ret = -EINVAL; break; } spin_unlock_irq(&priv->lock); return ret; } static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp) { struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp); kcs_bmc_ipmi_force_abort(priv); kcs_bmc_disable_device(priv->client.dev, &priv->client); return 0; } static const struct file_operations kcs_bmc_ipmi_fops = { .owner = THIS_MODULE, .open = kcs_bmc_ipmi_open, .read = kcs_bmc_ipmi_read, .write = kcs_bmc_ipmi_write, .release = kcs_bmc_ipmi_release, .poll = kcs_bmc_ipmi_poll, .unlocked_ioctl = kcs_bmc_ipmi_ioctl, }; static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock); static LIST_HEAD(kcs_bmc_ipmi_instances); static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_ipmi *priv; int rc; priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); mutex_init(&priv->mutex); init_waitqueue_head(&priv->queue); priv->client.dev = kcs_bmc; priv->client.ops = &kcs_bmc_ipmi_client_ops; priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL); priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL); priv->kbuffer = devm_kmalloc(kcs_bmc->dev, 
KCS_MSG_BUFSIZ, GFP_KERNEL); priv->miscdev.minor = MISC_DYNAMIC_MINOR; priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME, kcs_bmc->channel); if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name) return -EINVAL; priv->miscdev.fops = &kcs_bmc_ipmi_fops; rc = misc_register(&priv->miscdev); if (rc) { dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc); return rc; } spin_lock_irq(&kcs_bmc_ipmi_instances_lock); list_add(&priv->entry, &kcs_bmc_ipmi_instances); spin_unlock_irq(&kcs_bmc_ipmi_instances_lock); dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %d", kcs_bmc->channel); return 0; } static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_ipmi *priv = NULL, *pos; spin_lock_irq(&kcs_bmc_ipmi_instances_lock); list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) { if (pos->client.dev == kcs_bmc) { priv = pos; list_del(&pos->entry); break; } } spin_unlock_irq(&kcs_bmc_ipmi_instances_lock); if (!priv) return -ENODEV; misc_deregister(&priv->miscdev); kcs_bmc_disable_device(priv->client.dev, &priv->client); devm_kfree(kcs_bmc->dev, priv->kbuffer); devm_kfree(kcs_bmc->dev, priv->data_out); devm_kfree(kcs_bmc->dev, priv->data_in); devm_kfree(kcs_bmc->dev, priv); return 0; } static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = { .add_device = kcs_bmc_ipmi_add_device, .remove_device = kcs_bmc_ipmi_remove_device, }; static struct kcs_bmc_driver kcs_bmc_ipmi_driver = { .ops = &kcs_bmc_ipmi_driver_ops, }; static int __init kcs_bmc_ipmi_init(void) { kcs_bmc_register_driver(&kcs_bmc_ipmi_driver); return 0; } module_init(kcs_bmc_ipmi_init); static void __exit kcs_bmc_ipmi_exit(void) { kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver); } module_exit(kcs_bmc_ipmi_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Haiyue Wang <[email protected]>"); MODULE_AUTHOR("Andrew Jeffery <[email protected]>"); MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
linux-master
drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
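The character device above exposes each channel as /dev/ipmi-kcs<channel>: userspace polls for an incoming request, reads it in one go, and writes back a response of at least netfn + cmd + completion code. Below is a hedged userspace sketch of that loop; the device node name and the completion code are illustrative, the response netfn convention (request byte | 4) follows the drivers in this file set, and error handling is deliberately minimal.

/*
 * Userspace sketch (not part of the kernel sources above) of a trivial
 * BMC-side responder for /dev/ipmi-kcs1.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	unsigned char req[1000], rsp[3];	/* matches KCS_MSG_BUFSIZ above */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/ipmi-kcs1", O_RDWR);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) <= 0)
			continue;

		n = read(pfd.fd, req, sizeof(req));	/* whole IPMI request */
		if (n < 2)
			continue;

		rsp[0] = req[0] | 0x04;	/* response netfn, as in the drivers above */
		rsp[1] = req[1];	/* echo the command */
		rsp[2] = 0xC1;		/* "invalid command" completion code */
		write(pfd.fd, rsp, sizeof(rsp));	/* minimum: netfn + cmd + ccode */
	}
}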
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_ssif.c * * The interface to the IPMI driver for SMBus access to a SMBus * compliant device. Called SSIF by the IPMI spec. * * Author: Intel Corporation * Todd Davis <[email protected]> * * Rewritten by Corey Minyard <[email protected]> to support the * non-blocking I2C interface, add support for multi-part * transactions, add PEC support, and general clenaup. * * Copyright 2003 Intel Corporation * Copyright 2005 MontaVista Software */ /* * This file holds the "policy" for the interface to the SSIF state * machine. It does the configuration, handles timers and interrupts, * and drives the real SSIF state machine. */ #define pr_fmt(fmt) "ipmi_ssif: " fmt #define dev_fmt(fmt) "ipmi_ssif: " fmt #if defined(MODVERSIONS) #include <linux/modversions.h> #endif #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/i2c.h> #include <linux/ipmi_smi.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/kthread.h> #include <linux/acpi.h> #include <linux/ctype.h> #include <linux/time64.h> #include "ipmi_dmi.h" #define DEVICE_NAME "ipmi_ssif" #define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57 #define SSIF_IPMI_REQUEST 2 #define SSIF_IPMI_MULTI_PART_REQUEST_START 6 #define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7 #define SSIF_IPMI_MULTI_PART_REQUEST_END 8 #define SSIF_IPMI_RESPONSE 3 #define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9 /* ssif_debug is a bit-field * SSIF_DEBUG_MSG - commands and their responses * SSIF_DEBUG_STATES - message states * SSIF_DEBUG_TIMING - Measure times between events in the driver */ #define SSIF_DEBUG_TIMING 4 #define SSIF_DEBUG_STATE 2 #define SSIF_DEBUG_MSG 1 #define SSIF_NODEBUG 0 #define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG) /* * Timer values */ #define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */ #define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */ #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */ /* How many times to we retry sending/receiving the message. */ #define SSIF_SEND_RETRIES 5 #define SSIF_RECV_RETRIES 250 #define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000) #define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000) #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC) #define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC) #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC) /* * Timeout for the watch, only used for get flag timer. */ #define SSIF_WATCH_MSG_TIMEOUT msecs_to_jiffies(10) #define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250) enum ssif_intf_state { SSIF_IDLE, SSIF_GETTING_FLAGS, SSIF_GETTING_EVENTS, SSIF_CLEARING_FLAGS, SSIF_GETTING_MESSAGES, /* FIXME - add watchdog stuff. */ }; #define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \ && (ssif)->curr_msg == NULL) /* * Indexes into stats[] in ssif_info below. */ enum ssif_stat_indexes { /* Number of total messages sent. */ SSIF_STAT_sent_messages = 0, /* * Number of message parts sent. Messages may be broken into * parts if they are long. */ SSIF_STAT_sent_messages_parts, /* * Number of time a message was retried. */ SSIF_STAT_send_retries, /* * Number of times the send of a message failed. */ SSIF_STAT_send_errors, /* * Number of message responses received. */ SSIF_STAT_received_messages, /* * Number of message fragments received. 
*/ SSIF_STAT_received_message_parts, /* * Number of times the receive of a message was retried. */ SSIF_STAT_receive_retries, /* * Number of errors receiving messages. */ SSIF_STAT_receive_errors, /* * Number of times a flag fetch was requested. */ SSIF_STAT_flag_fetches, /* * Number of times the hardware didn't follow the state machine. */ SSIF_STAT_hosed, /* * Number of received events. */ SSIF_STAT_events, /* Number of asyncronous messages received. */ SSIF_STAT_incoming_messages, /* Number of watchdog pretimeouts. */ SSIF_STAT_watchdog_pretimeouts, /* Number of alers received. */ SSIF_STAT_alerts, /* Always add statistics before this value, it must be last. */ SSIF_NUM_STATS }; struct ssif_addr_info { struct i2c_board_info binfo; char *adapter_name; int debug; int slave_addr; enum ipmi_addr_src addr_src; union ipmi_smi_info_union addr_info; struct device *dev; struct i2c_client *client; struct mutex clients_mutex; struct list_head clients; struct list_head link; }; struct ssif_info; typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result, unsigned char *data, unsigned int len); struct ssif_info { struct ipmi_smi *intf; spinlock_t lock; struct ipmi_smi_msg *waiting_msg; struct ipmi_smi_msg *curr_msg; enum ssif_intf_state ssif_state; unsigned long ssif_debug; struct ipmi_smi_handlers handlers; enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ union ipmi_smi_info_union addr_info; /* * Flags from the last GET_MSG_FLAGS command, used when an ATTN * is set to hold the flags until we are done handling everything * from the flags. */ #define RECEIVE_MSG_AVAIL 0x01 #define EVENT_MSG_BUFFER_FULL 0x02 #define WDT_PRE_TIMEOUT_INT 0x08 unsigned char msg_flags; u8 global_enables; bool has_event_buffer; bool supports_alert; /* * Used to tell what we should do with alerts. If we are * waiting on a response, read the data immediately. */ bool got_alert; bool waiting_alert; /* Used to inform the timeout that it should do a resend. */ bool do_resend; /* * If set to true, this will request events the next time the * state machine is idle. */ bool req_events; /* * If set to true, this will request flags the next time the * state machine is idle. */ bool req_flags; /* Used for sending/receiving data. +1 for the length. */ unsigned char data[IPMI_MAX_MSG_LENGTH + 1]; unsigned int data_len; /* Temp receive buffer, gets copied into data. */ unsigned char recv[I2C_SMBUS_BLOCK_MAX]; struct i2c_client *client; ssif_i2c_done done_handler; /* Thread interface handling */ struct task_struct *thread; struct completion wake_thread; bool stopping; int i2c_read_write; int i2c_command; unsigned char *i2c_data; unsigned int i2c_size; struct timer_list retry_timer; int retries_left; long watch_timeout; /* Timeout for flags check, 0 if off. */ struct timer_list watch_timer; /* Flag fetch timer. */ /* Info from SSIF cmd */ unsigned char max_xmit_msg_size; unsigned char max_recv_msg_size; bool cmd8_works; /* See test_multipart_messages() for details. 
*/ unsigned int multi_support; int supports_pec; #define SSIF_NO_MULTI 0 #define SSIF_MULTI_2_PART 1 #define SSIF_MULTI_n_PART 2 unsigned char *multi_data; unsigned int multi_len; unsigned int multi_pos; atomic_t stats[SSIF_NUM_STATS]; }; #define ssif_inc_stat(ssif, stat) \ atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat]) #define ssif_get_stat(ssif, stat) \ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat])) static bool initialized; static bool platform_registered; static void return_hosed_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg); static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags); static int start_send(struct ssif_info *ssif_info, unsigned char *data, unsigned int len); static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info, unsigned long *flags) __acquires(&ssif_info->lock) { spin_lock_irqsave(&ssif_info->lock, *flags); return flags; } static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info, unsigned long *flags) __releases(&ssif_info->lock) { spin_unlock_irqrestore(&ssif_info->lock, *flags); } static void deliver_recv_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg) { if (msg->rsp_size < 0) { return_hosed_msg(ssif_info, msg); dev_err(&ssif_info->client->dev, "%s: Malformed message: rsp_size = %d\n", __func__, msg->rsp_size); } else { ipmi_smi_msg_received(ssif_info->intf, msg); } } static void return_hosed_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg) { ssif_inc_stat(ssif_info, hosed); /* Make it a response */ msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = 0xFF; /* Unknown error. */ msg->rsp_size = 3; deliver_recv_msg(ssif_info, msg); } /* * Must be called with the message lock held. This will release the * message lock. Note that the caller will check IS_SSIF_IDLE and * start a new operation, so there is no need to check for new * messages to start in here. */ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) { unsigned char msg[3]; ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; ssif_info->ssif_state = SSIF_CLEARING_FLAGS; ipmi_ssif_unlock_cond(ssif_info, flags); /* Make sure the watchdog pre-timeout flag is not set at startup. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; if (start_send(ssif_info, msg, 3) != 0) { /* Error, just go to normal state. 
*/ ssif_info->ssif_state = SSIF_IDLE; } } static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) { unsigned char mb[2]; ssif_info->req_flags = false; ssif_info->ssif_state = SSIF_GETTING_FLAGS; ipmi_ssif_unlock_cond(ssif_info, flags); mb[0] = (IPMI_NETFN_APP_REQUEST << 2); mb[1] = IPMI_GET_MSG_FLAGS_CMD; if (start_send(ssif_info, mb, 2) != 0) ssif_info->ssif_state = SSIF_IDLE; } static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, struct ipmi_smi_msg *msg) { if (start_send(ssif_info, msg->data, msg->data_size) != 0) { unsigned long oflags; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->curr_msg = NULL; ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); ipmi_free_smi_msg(msg); } } static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) { struct ipmi_smi_msg *msg; ssif_info->req_events = false; msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } ssif_info->curr_msg = msg; ssif_info->ssif_state = SSIF_GETTING_EVENTS; ipmi_ssif_unlock_cond(ssif_info, flags); msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; msg->data_size = 2; check_start_send(ssif_info, flags, msg); } static void start_recv_msg_fetch(struct ssif_info *ssif_info, unsigned long *flags) { struct ipmi_smi_msg *msg; msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } ssif_info->curr_msg = msg; ssif_info->ssif_state = SSIF_GETTING_MESSAGES; ipmi_ssif_unlock_cond(ssif_info, flags); msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); msg->data[1] = IPMI_GET_MSG_CMD; msg->data_size = 2; check_start_send(ssif_info, flags, msg); } /* * Must be called with the message lock held. This will release the * message lock. Note that the caller will check IS_SSIF_IDLE and * start a new operation, so there is no need to check for new * messages to start in here. */ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) { if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) { /* Watchdog pre-timeout */ ssif_inc_stat(ssif_info, watchdog_pretimeouts); start_clear_flags(ssif_info, flags); ipmi_smi_watchdog_pretimeout(ssif_info->intf); } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL) /* Messages available. */ start_recv_msg_fetch(ssif_info, flags); else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL) /* Events available. 
*/ start_event_fetch(ssif_info, flags); else { ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); } } static int ipmi_ssif_thread(void *data) { struct ssif_info *ssif_info = data; while (!kthread_should_stop()) { int result; /* Wait for something to do */ result = wait_for_completion_interruptible( &ssif_info->wake_thread); if (ssif_info->stopping) break; if (result == -ERESTARTSYS) continue; init_completion(&ssif_info->wake_thread); if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { result = i2c_smbus_write_block_data( ssif_info->client, ssif_info->i2c_command, ssif_info->i2c_data[0], ssif_info->i2c_data + 1); ssif_info->done_handler(ssif_info, result, NULL, 0); } else { result = i2c_smbus_read_block_data( ssif_info->client, ssif_info->i2c_command, ssif_info->i2c_data); if (result < 0) ssif_info->done_handler(ssif_info, result, NULL, 0); else ssif_info->done_handler(ssif_info, 0, ssif_info->i2c_data, result); } } return 0; } static void ssif_i2c_send(struct ssif_info *ssif_info, ssif_i2c_done handler, int read_write, int command, unsigned char *data, unsigned int size) { ssif_info->done_handler = handler; ssif_info->i2c_read_write = read_write; ssif_info->i2c_command = command; ssif_info->i2c_data = data; ssif_info->i2c_size = size; complete(&ssif_info->wake_thread); } static void msg_done_handler(struct ssif_info *ssif_info, int result, unsigned char *data, unsigned int len); static void start_get(struct ssif_info *ssif_info) { ssif_info->multi_pos = 0; ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, SSIF_IPMI_RESPONSE, ssif_info->recv, I2C_SMBUS_BLOCK_DATA); } static void start_resend(struct ssif_info *ssif_info); static void retry_timeout(struct timer_list *t) { struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer); unsigned long oflags, *flags; bool waiting, resend; if (ssif_info->stopping) return; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); resend = ssif_info->do_resend; ssif_info->do_resend = false; waiting = ssif_info->waiting_alert; ssif_info->waiting_alert = false; ipmi_ssif_unlock_cond(ssif_info, flags); if (waiting) start_get(ssif_info); if (resend) { start_resend(ssif_info); ssif_inc_stat(ssif_info, send_retries); } } static void watch_timeout(struct timer_list *t) { struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer); unsigned long oflags, *flags; if (ssif_info->stopping) return; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (ssif_info->watch_timeout) { mod_timer(&ssif_info->watch_timer, jiffies + ssif_info->watch_timeout); if (IS_SSIF_IDLE(ssif_info)) { start_flag_fetch(ssif_info, flags); /* Releases lock */ return; } ssif_info->req_flags = true; } ipmi_ssif_unlock_cond(ssif_info, flags); } static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, unsigned int data) { struct ssif_info *ssif_info = i2c_get_clientdata(client); unsigned long oflags, *flags; bool do_get = false; if (type != I2C_PROTOCOL_SMBUS_ALERT) return; ssif_inc_stat(ssif_info, alerts); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (ssif_info->waiting_alert) { ssif_info->waiting_alert = false; del_timer(&ssif_info->retry_timer); do_get = true; } else if (ssif_info->curr_msg) { ssif_info->got_alert = true; } ipmi_ssif_unlock_cond(ssif_info, flags); if (do_get) start_get(ssif_info); } static void msg_done_handler(struct ssif_info *ssif_info, int result, unsigned char *data, unsigned int len) { struct ipmi_smi_msg *msg; unsigned long oflags, *flags; /* * We are single-threaded here, so no need for a lock 
until we * start messing with driver states or the queues. */ if (result < 0) { ssif_info->retries_left--; if (ssif_info->retries_left > 0) { ssif_inc_stat(ssif_info, receive_retries); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->waiting_alert = true; if (!ssif_info->stopping) mod_timer(&ssif_info->retry_timer, jiffies + SSIF_MSG_JIFFIES); ipmi_ssif_unlock_cond(ssif_info, flags); return; } ssif_inc_stat(ssif_info, receive_errors); if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) dev_dbg(&ssif_info->client->dev, "%s: Error %d\n", __func__, result); len = 0; goto continue_op; } if ((len > 1) && (ssif_info->multi_pos == 0) && (data[0] == 0x00) && (data[1] == 0x01)) { /* Start of multi-part read. Start the next transaction. */ int i; ssif_inc_stat(ssif_info, received_message_parts); /* Remove the multi-part read marker. */ len -= 2; data += 2; for (i = 0; i < len; i++) ssif_info->data[i] = data[i]; ssif_info->multi_len = len; ssif_info->multi_pos = 1; ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE, ssif_info->recv, I2C_SMBUS_BLOCK_DATA); return; } else if (ssif_info->multi_pos) { /* Middle of multi-part read. Start the next transaction. */ int i; unsigned char blocknum; if (len == 0) { result = -EIO; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) dev_dbg(&ssif_info->client->dev, "Middle message with no data\n"); goto continue_op; } blocknum = data[0]; len--; data++; if (blocknum != 0xff && len != 31) { /* All blocks but the last must have 31 data bytes. */ result = -EIO; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) dev_dbg(&ssif_info->client->dev, "Received middle message <31\n"); goto continue_op; } if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) { /* Received message too big, abort the operation. */ result = -E2BIG; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) dev_dbg(&ssif_info->client->dev, "Received message too big\n"); goto continue_op; } for (i = 0; i < len; i++) ssif_info->data[i + ssif_info->multi_len] = data[i]; ssif_info->multi_len += len; if (blocknum == 0xff) { /* End of read */ len = ssif_info->multi_len; data = ssif_info->data; } else if (blocknum + 1 != ssif_info->multi_pos) { /* * Out of sequence block, just abort. Block * numbers start at zero for the second block, * but multi_pos starts at one, so the +1. 
*/ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) dev_dbg(&ssif_info->client->dev, "Received message out of sequence, expected %u, got %u\n", ssif_info->multi_pos - 1, blocknum); result = -EIO; } else { ssif_inc_stat(ssif_info, received_message_parts); ssif_info->multi_pos++; ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE, ssif_info->recv, I2C_SMBUS_BLOCK_DATA); return; } } continue_op: if (result < 0) { ssif_inc_stat(ssif_info, receive_errors); } else { ssif_inc_stat(ssif_info, received_messages); ssif_inc_stat(ssif_info, received_message_parts); } if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) dev_dbg(&ssif_info->client->dev, "DONE 1: state = %d, result=%d\n", ssif_info->ssif_state, result); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); msg = ssif_info->curr_msg; if (msg) { if (data) { if (len > IPMI_MAX_MSG_LENGTH) len = IPMI_MAX_MSG_LENGTH; memcpy(msg->rsp, data, len); } else { len = 0; } msg->rsp_size = len; ssif_info->curr_msg = NULL; } switch (ssif_info->ssif_state) { case SSIF_IDLE: ipmi_ssif_unlock_cond(ssif_info, flags); if (!msg) break; if (result < 0) return_hosed_msg(ssif_info, msg); else deliver_recv_msg(ssif_info, msg); break; case SSIF_GETTING_FLAGS: /* We got the flags from the SSIF, now handle them. */ if ((result < 0) || (len < 4) || (data[2] != 0)) { /* * Error fetching flags, or invalid length, * just give up for now. */ ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); dev_warn(&ssif_info->client->dev, "Error getting flags: %d %d, %x\n", result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { /* * Recv error response, give up. */ ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); dev_warn(&ssif_info->client->dev, "Invalid response getting flags: %x %x\n", data[0], data[1]); } else { ssif_inc_stat(ssif_info, flag_fetches); ssif_info->msg_flags = data[3]; handle_flags(ssif_info, flags); } break; case SSIF_CLEARING_FLAGS: /* We cleared the flags. */ if ((result < 0) || (len < 3) || (data[2] != 0)) { /* Error clearing flags */ dev_warn(&ssif_info->client->dev, "Error clearing flags: %d %d, %x\n", result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { dev_warn(&ssif_info->client->dev, "Invalid response clearing flags: %x %x\n", data[0], data[1]); } ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); break; case SSIF_GETTING_EVENTS: if (!msg) { /* Should never happen, but just in case. */ dev_warn(&ssif_info->client->dev, "No message set while getting events\n"); ipmi_ssif_unlock_cond(ssif_info, flags); break; } if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { /* Error getting event, probably done. */ msg->done(msg); /* Take off the event flag. */ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; handle_flags(ssif_info, flags); } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) { dev_warn(&ssif_info->client->dev, "Invalid response getting events: %x %x\n", msg->rsp[0], msg->rsp[1]); msg->done(msg); /* Take off the event flag. */ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; handle_flags(ssif_info, flags); } else { handle_flags(ssif_info, flags); ssif_inc_stat(ssif_info, events); deliver_recv_msg(ssif_info, msg); } break; case SSIF_GETTING_MESSAGES: if (!msg) { /* Should never happen, but just in case. 
*/ dev_warn(&ssif_info->client->dev, "No message set while getting messages\n"); ipmi_ssif_unlock_cond(ssif_info, flags); break; } if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { /* Error getting event, probably done. */ msg->done(msg); /* Take off the msg flag. */ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL; handle_flags(ssif_info, flags); } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || msg->rsp[1] != IPMI_GET_MSG_CMD) { dev_warn(&ssif_info->client->dev, "Invalid response clearing flags: %x %x\n", msg->rsp[0], msg->rsp[1]); msg->done(msg); /* Take off the msg flag. */ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL; handle_flags(ssif_info, flags); } else { ssif_inc_stat(ssif_info, incoming_messages); handle_flags(ssif_info, flags); deliver_recv_msg(ssif_info, msg); } break; default: /* Should never happen, but just in case. */ dev_warn(&ssif_info->client->dev, "Invalid state in message done handling: %d\n", ssif_info->ssif_state); ipmi_ssif_unlock_cond(ssif_info, flags); } flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) { if (ssif_info->req_events) start_event_fetch(ssif_info, flags); else if (ssif_info->req_flags) start_flag_fetch(ssif_info, flags); else start_next_msg(ssif_info, flags); } else ipmi_ssif_unlock_cond(ssif_info, flags); if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) dev_dbg(&ssif_info->client->dev, "DONE 2: state = %d.\n", ssif_info->ssif_state); } static void msg_written_handler(struct ssif_info *ssif_info, int result, unsigned char *data, unsigned int len) { /* We are single-threaded here, so no need for a lock. */ if (result < 0) { ssif_info->retries_left--; if (ssif_info->retries_left > 0) { /* * Wait the retry timeout time per the spec, * then redo the send. */ ssif_info->do_resend = true; mod_timer(&ssif_info->retry_timer, jiffies + SSIF_REQ_RETRY_JIFFIES); return; } ssif_inc_stat(ssif_info, send_errors); if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) dev_dbg(&ssif_info->client->dev, "%s: Out of retries\n", __func__); msg_done_handler(ssif_info, -EIO, NULL, 0); return; } if (ssif_info->multi_data) { /* * In the middle of a multi-data write. See the comment * in the SSIF_MULTI_n_PART case in the probe function * for details on the intricacies of this. */ int left, to_write; unsigned char *data_to_send; unsigned char cmd; ssif_inc_stat(ssif_info, sent_messages_parts); left = ssif_info->multi_len - ssif_info->multi_pos; to_write = left; if (to_write > 32) to_write = 32; /* Length byte. */ ssif_info->multi_data[ssif_info->multi_pos] = to_write; data_to_send = ssif_info->multi_data + ssif_info->multi_pos; ssif_info->multi_pos += to_write; cmd = SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE; if (ssif_info->cmd8_works) { if (left == to_write) { cmd = SSIF_IPMI_MULTI_PART_REQUEST_END; ssif_info->multi_data = NULL; } } else if (to_write < 32) { ssif_info->multi_data = NULL; } ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE, cmd, data_to_send, I2C_SMBUS_BLOCK_DATA); } else { /* Ready to request the result. */ unsigned long oflags, *flags; ssif_inc_stat(ssif_info, sent_messages); ssif_inc_stat(ssif_info, sent_messages_parts); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (ssif_info->got_alert) { /* The result is already ready, just start it. 
*/ ssif_info->got_alert = false; ipmi_ssif_unlock_cond(ssif_info, flags); start_get(ssif_info); } else { /* Wait a jiffie then request the next message */ ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; if (!ssif_info->stopping) mod_timer(&ssif_info->retry_timer, jiffies + SSIF_MSG_PART_JIFFIES); ipmi_ssif_unlock_cond(ssif_info, flags); } } } static void start_resend(struct ssif_info *ssif_info) { int command; ssif_info->got_alert = false; if (ssif_info->data_len > 32) { command = SSIF_IPMI_MULTI_PART_REQUEST_START; ssif_info->multi_data = ssif_info->data; ssif_info->multi_len = ssif_info->data_len; /* * Subtle thing, this is 32, not 33, because we will * overwrite the thing at position 32 (which was just * transmitted) with the new length. */ ssif_info->multi_pos = 32; ssif_info->data[0] = 32; } else { ssif_info->multi_data = NULL; command = SSIF_IPMI_REQUEST; ssif_info->data[0] = ssif_info->data_len; } ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE, command, ssif_info->data, I2C_SMBUS_BLOCK_DATA); } static int start_send(struct ssif_info *ssif_info, unsigned char *data, unsigned int len) { if (len > IPMI_MAX_MSG_LENGTH) return -E2BIG; if (len > ssif_info->max_xmit_msg_size) return -E2BIG; ssif_info->retries_left = SSIF_SEND_RETRIES; memcpy(ssif_info->data + 1, data, len); ssif_info->data_len = len; start_resend(ssif_info); return 0; } /* Must be called with the message lock held. */ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) { struct ipmi_smi_msg *msg; unsigned long oflags; restart: if (!IS_SSIF_IDLE(ssif_info)) { ipmi_ssif_unlock_cond(ssif_info, flags); return; } if (!ssif_info->waiting_msg) { ssif_info->curr_msg = NULL; ipmi_ssif_unlock_cond(ssif_info, flags); } else { int rv; ssif_info->curr_msg = ssif_info->waiting_msg; ssif_info->waiting_msg = NULL; ipmi_ssif_unlock_cond(ssif_info, flags); rv = start_send(ssif_info, ssif_info->curr_msg->data, ssif_info->curr_msg->data_size); if (rv) { msg = ssif_info->curr_msg; ssif_info->curr_msg = NULL; return_hosed_msg(ssif_info, msg); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); goto restart; } } } static void sender(void *send_info, struct ipmi_smi_msg *msg) { struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; BUG_ON(ssif_info->waiting_msg); ssif_info->waiting_msg = msg; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); start_next_msg(ssif_info, flags); if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) { struct timespec64 t; ktime_get_real_ts64(&t); dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %lld.%6.6ld\n", msg->data[0], msg->data[1], (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC); } } static int get_smi_info(void *send_info, struct ipmi_smi_info *data) { struct ssif_info *ssif_info = send_info; data->addr_src = ssif_info->addr_source; data->dev = &ssif_info->client->dev; data->addr_info = ssif_info->addr_info; get_device(data->dev); return 0; } /* * Upper layer wants us to request events. */ static void request_events(void *send_info) { struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; if (!ssif_info->has_event_buffer) return; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->req_events = true; ipmi_ssif_unlock_cond(ssif_info, flags); } /* * Upper layer is changing the flag saying whether we need to request * flags periodically or not. 
*/ static void ssif_set_need_watch(void *send_info, unsigned int watch_mask) { struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; long timeout = 0; if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES) timeout = SSIF_WATCH_MSG_TIMEOUT; else if (watch_mask) timeout = SSIF_WATCH_WATCHDOG_TIMEOUT; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); if (timeout != ssif_info->watch_timeout) { ssif_info->watch_timeout = timeout; if (ssif_info->watch_timeout) mod_timer(&ssif_info->watch_timer, jiffies + ssif_info->watch_timeout); } ipmi_ssif_unlock_cond(ssif_info, flags); } static int ssif_start_processing(void *send_info, struct ipmi_smi *intf) { struct ssif_info *ssif_info = send_info; ssif_info->intf = intf; return 0; } #define MAX_SSIF_BMCS 4 static unsigned short addr[MAX_SSIF_BMCS]; static int num_addrs; module_param_array(addr, ushort, &num_addrs, 0); MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs."); static char *adapter_name[MAX_SSIF_BMCS]; static int num_adapter_names; module_param_array(adapter_name, charp, &num_adapter_names, 0); MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned."); static int slave_addrs[MAX_SSIF_BMCS]; static int num_slave_addrs; module_param_array(slave_addrs, int, &num_slave_addrs, 0); MODULE_PARM_DESC(slave_addrs, "The default IPMB slave address for the controller."); static bool alerts_broken; module_param(alerts_broken, bool, 0); MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller."); /* * Bit 0 enables message debugging, bit 1 enables state debugging, and * bit 2 enables timing debugging. This is an array indexed by * interface number" */ static int dbg[MAX_SSIF_BMCS]; static int num_dbg; module_param_array(dbg, int, &num_dbg, 0); MODULE_PARM_DESC(dbg, "Turn on debugging."); static bool ssif_dbg_probe; module_param_named(dbg_probe, ssif_dbg_probe, bool, 0); MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters."); static bool ssif_tryacpi = true; module_param_named(tryacpi, ssif_tryacpi, bool, 0); MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI"); static bool ssif_trydmi = true; module_param_named(trydmi, ssif_trydmi, bool, 0); MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)"); static DEFINE_MUTEX(ssif_infos_mutex); static LIST_HEAD(ssif_infos); #define IPMI_SSIF_ATTR(name) \ static ssize_t ipmi_##name##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct ssif_info *ssif_info = dev_get_drvdata(dev); \ \ return sysfs_emit(buf, "%u\n", ssif_get_stat(ssif_info, name));\ } \ static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL) static ssize_t ipmi_type_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "ssif\n"); } static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL); IPMI_SSIF_ATTR(sent_messages); IPMI_SSIF_ATTR(sent_messages_parts); IPMI_SSIF_ATTR(send_retries); IPMI_SSIF_ATTR(send_errors); IPMI_SSIF_ATTR(received_messages); IPMI_SSIF_ATTR(received_message_parts); IPMI_SSIF_ATTR(receive_retries); IPMI_SSIF_ATTR(receive_errors); IPMI_SSIF_ATTR(flag_fetches); IPMI_SSIF_ATTR(hosed); IPMI_SSIF_ATTR(events); IPMI_SSIF_ATTR(watchdog_pretimeouts); IPMI_SSIF_ATTR(alerts); static struct attribute *ipmi_ssif_dev_attrs[] = { &dev_attr_type.attr, &dev_attr_sent_messages.attr, &dev_attr_sent_messages_parts.attr, 
&dev_attr_send_retries.attr, &dev_attr_send_errors.attr, &dev_attr_received_messages.attr, &dev_attr_received_message_parts.attr, &dev_attr_receive_retries.attr, &dev_attr_receive_errors.attr, &dev_attr_flag_fetches.attr, &dev_attr_hosed.attr, &dev_attr_events.attr, &dev_attr_watchdog_pretimeouts.attr, &dev_attr_alerts.attr, NULL }; static const struct attribute_group ipmi_ssif_dev_attr_group = { .attrs = ipmi_ssif_dev_attrs, }; static void shutdown_ssif(void *send_info) { struct ssif_info *ssif_info = send_info; device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); dev_set_drvdata(&ssif_info->client->dev, NULL); /* make sure the driver is not looking for flags any more. */ while (ssif_info->ssif_state != SSIF_IDLE) schedule_timeout(1); ssif_info->stopping = true; del_timer_sync(&ssif_info->watch_timer); del_timer_sync(&ssif_info->retry_timer); if (ssif_info->thread) { complete(&ssif_info->wake_thread); kthread_stop(ssif_info->thread); } } static void ssif_remove(struct i2c_client *client) { struct ssif_info *ssif_info = i2c_get_clientdata(client); struct ssif_addr_info *addr_info; /* * After this point, we won't deliver anything asynchronously * to the message handler. We can unregister ourself. */ ipmi_unregister_smi(ssif_info->intf); list_for_each_entry(addr_info, &ssif_infos, link) { if (addr_info->client == client) { addr_info->client = NULL; break; } } kfree(ssif_info); } static int read_response(struct i2c_client *client, unsigned char *resp) { int ret = -ENODEV, retry_cnt = SSIF_RECV_RETRIES; while (retry_cnt > 0) { ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE, resp); if (ret > 0) break; msleep(SSIF_MSG_MSEC); retry_cnt--; if (retry_cnt <= 0) break; } return ret; } static int do_cmd(struct i2c_client *client, int len, unsigned char *msg, int *resp_len, unsigned char *resp) { int retry_cnt; int ret; retry_cnt = SSIF_SEND_RETRIES; retry1: ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg); if (ret) { retry_cnt--; if (retry_cnt > 0) { msleep(SSIF_REQ_RETRY_MSEC); goto retry1; } return -ENODEV; } ret = read_response(client, resp); if (ret > 0) { /* Validate that the response is correct. */ if (ret < 3 || (resp[0] != (msg[0] | (1 << 2))) || (resp[1] != msg[1])) ret = -EINVAL; else if (ret > IPMI_MAX_MSG_LENGTH) { ret = -E2BIG; } else { *resp_len = ret; ret = 0; } } return ret; } static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) { unsigned char *resp; unsigned char msg[3]; int rv; int len; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) return -ENOMEM; /* Do a Get Device ID command, since it is required. 
*/ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_DEVICE_ID_CMD; rv = do_cmd(client, 2, msg, &len, resp); if (rv) rv = -ENODEV; else strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE); kfree(resp); return rv; } static int strcmp_nospace(char *s1, char *s2) { while (*s1 && *s2) { while (isspace(*s1)) s1++; while (isspace(*s2)) s2++; if (*s1 > *s2) return 1; if (*s1 < *s2) return -1; s1++; s2++; } return 0; } static struct ssif_addr_info *ssif_info_find(unsigned short addr, char *adapter_name, bool match_null_name) { struct ssif_addr_info *info, *found = NULL; restart: list_for_each_entry(info, &ssif_infos, link) { if (info->binfo.addr == addr) { if (info->addr_src == SI_SMBIOS && !info->adapter_name) info->adapter_name = kstrdup(adapter_name, GFP_KERNEL); if (info->adapter_name || adapter_name) { if (!info->adapter_name != !adapter_name) { /* One is NULL and one is not */ continue; } if (adapter_name && strcmp_nospace(info->adapter_name, adapter_name)) /* Names do not match */ continue; } found = info; break; } } if (!found && match_null_name) { /* Try to get an exact match first, then try with a NULL name */ adapter_name = NULL; match_null_name = false; goto restart; } return found; } static bool check_acpi(struct ssif_info *ssif_info, struct device *dev) { #ifdef CONFIG_ACPI acpi_handle acpi_handle; acpi_handle = ACPI_HANDLE(dev); if (acpi_handle) { ssif_info->addr_source = SI_ACPI; ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle; request_module_nowait("acpi_ipmi"); return true; } #endif return false; } static int find_slave_address(struct i2c_client *client, int slave_addr) { #ifdef CONFIG_IPMI_DMI_DECODE if (!slave_addr) slave_addr = ipmi_dmi_get_slave_addr( SI_TYPE_INVALID, i2c_adapter_id(client->adapter), client->addr); #endif return slave_addr; } static int start_multipart_test(struct i2c_client *client, unsigned char *msg, bool do_middle) { int retry_cnt = SSIF_SEND_RETRIES, ret; retry_write: ret = i2c_smbus_write_block_data(client, SSIF_IPMI_MULTI_PART_REQUEST_START, 32, msg); if (ret) { retry_cnt--; if (retry_cnt > 0) { msleep(SSIF_REQ_RETRY_MSEC); goto retry_write; } dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n"); return ret; } if (!do_middle) return 0; ret = i2c_smbus_write_block_data(client, SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE, 32, msg + 32); if (ret) { dev_err(&client->dev, "Could not write multi-part middle, though the BMC said it could handle it. Just limit sends to one part.\n"); return ret; } return 0; } static void test_multipart_messages(struct i2c_client *client, struct ssif_info *ssif_info, unsigned char *resp) { unsigned char msg[65]; int ret; bool do_middle; if (ssif_info->max_xmit_msg_size <= 32) return; do_middle = ssif_info->max_xmit_msg_size > 63; memset(msg, 0, sizeof(msg)); msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_DEVICE_ID_CMD; /* * The specification is all messed up dealing with sending * multi-part messages. Per what the specification says, it * is impossible to send a message that is a multiple of 32 * bytes, except for 32 itself. It talks about a "start" * transaction (cmd=6) that must be 32 bytes, "middle" * transaction (cmd=7) that must be 32 bytes, and an "end" * transaction. The "end" transaction is shown as cmd=7 in * the text, but if that's the case there is no way to * differentiate between a middle and end part except the * length being less than 32. 
But there is a table at the far * end of the section (that I had never noticed until someone * pointed it out to me) that mentions it as cmd=8. * * After some thought, I think the example is wrong and the * end transaction should be cmd=8. But some systems don't * implement cmd=8, they use a zero-length end transaction, * even though that violates the SMBus specification. * * So, to work around this, this code tests if cmd=8 works. * If it does, then we use that. If not, it tests zero- * byte end transactions. If that works, good. If not, * we only allow 63-byte transactions max. */ ret = start_multipart_test(client, msg, do_middle); if (ret) goto out_no_multi_part; ret = i2c_smbus_write_block_data(client, SSIF_IPMI_MULTI_PART_REQUEST_END, 1, msg + 64); if (!ret) ret = read_response(client, resp); if (ret > 0) { /* End transactions work, we are good. */ ssif_info->cmd8_works = true; return; } ret = start_multipart_test(client, msg, do_middle); if (ret) { dev_err(&client->dev, "Second multipart test failed.\n"); goto out_no_multi_part; } ret = i2c_smbus_write_block_data(client, SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE, 0, msg + 64); if (!ret) ret = read_response(client, resp); if (ret > 0) /* Zero-size end parts work, use those. */ return; /* Limit to 63 bytes and use a short middle command to mark the end. */ if (ssif_info->max_xmit_msg_size > 63) ssif_info->max_xmit_msg_size = 63; return; out_no_multi_part: ssif_info->max_xmit_msg_size = 32; return; } /* * Global enables we care about. */ #define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ IPMI_BMC_EVT_MSG_INTR) static void ssif_remove_dup(struct i2c_client *client) { struct ssif_info *ssif_info = i2c_get_clientdata(client); ipmi_unregister_smi(ssif_info->intf); kfree(ssif_info); } static int ssif_add_infos(struct i2c_client *client) { struct ssif_addr_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->addr_src = SI_ACPI; info->client = client; info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL); if (!info->adapter_name) { kfree(info); return -ENOMEM; } info->binfo.addr = client->addr; list_add_tail(&info->link, &ssif_infos); return 0; } /* * Prefer ACPI over SMBIOS, if both are available. * So if we get an ACPI interface and have already registered a SMBIOS * interface at the same address, remove the SMBIOS and add the ACPI one. 
*/ static int ssif_check_and_remove(struct i2c_client *client, struct ssif_info *ssif_info) { struct ssif_addr_info *info; list_for_each_entry(info, &ssif_infos, link) { if (!info->client) return 0; if (!strcmp(info->adapter_name, client->adapter->name) && info->binfo.addr == client->addr) { if (info->addr_src == SI_ACPI) return -EEXIST; if (ssif_info->addr_source == SI_ACPI && info->addr_src == SI_SMBIOS) { dev_info(&client->dev, "Removing %s-specified SSIF interface in favor of ACPI\n", ipmi_addr_src_to_str(info->addr_src)); ssif_remove_dup(info->client); return 0; } } } return 0; } static int ssif_probe(struct i2c_client *client) { unsigned char msg[3]; unsigned char *resp; struct ssif_info *ssif_info; int rv = 0; int len = 0; int i; u8 slave_addr = 0; struct ssif_addr_info *addr_info = NULL; mutex_lock(&ssif_infos_mutex); resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) { mutex_unlock(&ssif_infos_mutex); return -ENOMEM; } ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL); if (!ssif_info) { kfree(resp); mutex_unlock(&ssif_infos_mutex); return -ENOMEM; } if (!check_acpi(ssif_info, &client->dev)) { addr_info = ssif_info_find(client->addr, client->adapter->name, true); if (!addr_info) { /* Must have come in through sysfs. */ ssif_info->addr_source = SI_HOTMOD; } else { ssif_info->addr_source = addr_info->addr_src; ssif_info->ssif_debug = addr_info->debug; ssif_info->addr_info = addr_info->addr_info; addr_info->client = client; slave_addr = addr_info->slave_addr; } } ssif_info->client = client; i2c_set_clientdata(client, ssif_info); rv = ssif_check_and_remove(client, ssif_info); /* If rv is 0 and addr source is not SI_ACPI, continue probing */ if (!rv && ssif_info->addr_source == SI_ACPI) { rv = ssif_add_infos(client); if (rv) { dev_err(&client->dev, "Out of memory!, exiting ..\n"); goto out; } } else if (rv) { dev_err(&client->dev, "Not probing, Interface already present\n"); goto out; } slave_addr = find_slave_address(client, slave_addr); dev_info(&client->dev, "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n", ipmi_addr_src_to_str(ssif_info->addr_source), client->addr, client->adapter->name, slave_addr); /* Now check for system interface capabilities */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; msg[2] = 0; /* SSIF */ rv = do_cmd(client, 3, msg, &len, resp); if (!rv && (len >= 3) && (resp[2] == 0)) { if (len < 7) { if (ssif_dbg_probe) dev_dbg(&ssif_info->client->dev, "SSIF info too short: %d\n", len); goto no_support; } /* Got a good SSIF response, handle it. */ ssif_info->max_xmit_msg_size = resp[5]; ssif_info->max_recv_msg_size = resp[6]; ssif_info->multi_support = (resp[4] >> 6) & 0x3; ssif_info->supports_pec = (resp[4] >> 3) & 0x1; /* Sanitize the data */ switch (ssif_info->multi_support) { case SSIF_NO_MULTI: if (ssif_info->max_xmit_msg_size > 32) ssif_info->max_xmit_msg_size = 32; if (ssif_info->max_recv_msg_size > 32) ssif_info->max_recv_msg_size = 32; break; case SSIF_MULTI_2_PART: if (ssif_info->max_xmit_msg_size > 63) ssif_info->max_xmit_msg_size = 63; if (ssif_info->max_recv_msg_size > 62) ssif_info->max_recv_msg_size = 62; break; case SSIF_MULTI_n_PART: /* We take whatever size given, but do some testing. */ break; default: /* Data is not sane, just give up. 
*/ goto no_support; } } else { no_support: /* Assume no multi-part or PEC support */ dev_info(&ssif_info->client->dev, "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", rv, len, resp[2]); ssif_info->max_xmit_msg_size = 32; ssif_info->max_recv_msg_size = 32; ssif_info->multi_support = SSIF_NO_MULTI; ssif_info->supports_pec = 0; } test_multipart_messages(client, ssif_info, resp); /* Make sure the NMI timeout is cleared. */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 3) || (resp[2] != 0)) dev_warn(&ssif_info->client->dev, "Unable to clear message flags: %d %d %2.2x\n", rv, len, resp[2]); /* Attempt to enable the event buffer. */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; rv = do_cmd(client, 2, msg, &len, resp); if (rv || (len < 4) || (resp[2] != 0)) { dev_warn(&ssif_info->client->dev, "Error getting global enables: %d %d %2.2x\n", rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } ssif_info->global_enables = resp[3]; if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { ssif_info->has_event_buffer = true; /* buffer is already enabled, nothing to do. */ goto found; } msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 2)) { dev_warn(&ssif_info->client->dev, "Error setting global enables: %d %d %2.2x\n", rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } if (resp[2] == 0) { /* A successful return means the event buffer is supported. */ ssif_info->has_event_buffer = true; ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF; } /* Some systems don't behave well if you enable alerts. */ if (alerts_broken) goto found; msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 2)) { dev_warn(&ssif_info->client->dev, "Error setting global enables: %d %d %2.2x\n", rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } if (resp[2] == 0) { /* A successful return means the alert is supported. 
*/ ssif_info->supports_alert = true; ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR; } found: if (ssif_dbg_probe) { dev_dbg(&ssif_info->client->dev, "%s: i2c_probe found device at i2c address %x\n", __func__, client->addr); } spin_lock_init(&ssif_info->lock); ssif_info->ssif_state = SSIF_IDLE; timer_setup(&ssif_info->retry_timer, retry_timeout, 0); timer_setup(&ssif_info->watch_timer, watch_timeout, 0); for (i = 0; i < SSIF_NUM_STATS; i++) atomic_set(&ssif_info->stats[i], 0); if (ssif_info->supports_pec) ssif_info->client->flags |= I2C_CLIENT_PEC; ssif_info->handlers.owner = THIS_MODULE; ssif_info->handlers.start_processing = ssif_start_processing; ssif_info->handlers.shutdown = shutdown_ssif; ssif_info->handlers.get_smi_info = get_smi_info; ssif_info->handlers.sender = sender; ssif_info->handlers.request_events = request_events; ssif_info->handlers.set_need_watch = ssif_set_need_watch; { unsigned int thread_num; thread_num = ((i2c_adapter_id(ssif_info->client->adapter) << 8) | ssif_info->client->addr); init_completion(&ssif_info->wake_thread); ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info, "kssif%4.4x", thread_num); if (IS_ERR(ssif_info->thread)) { rv = PTR_ERR(ssif_info->thread); dev_notice(&ssif_info->client->dev, "Could not start kernel thread: error %d\n", rv); goto out; } } dev_set_drvdata(&ssif_info->client->dev, ssif_info); rv = device_add_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); if (rv) { dev_err(&ssif_info->client->dev, "Unable to add device attributes: error %d\n", rv); goto out; } rv = ipmi_register_smi(&ssif_info->handlers, ssif_info, &ssif_info->client->dev, slave_addr); if (rv) { dev_err(&ssif_info->client->dev, "Unable to register device: error %d\n", rv); goto out_remove_attr; } out: if (rv) { if (addr_info) addr_info->client = NULL; dev_err(&ssif_info->client->dev, "Unable to start IPMI SSIF: %d\n", rv); i2c_set_clientdata(client, NULL); kfree(ssif_info); } kfree(resp); mutex_unlock(&ssif_infos_mutex); return rv; out_remove_attr: device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group); dev_set_drvdata(&ssif_info->client->dev, NULL); goto out; } static int new_ssif_client(int addr, char *adapter_name, int debug, int slave_addr, enum ipmi_addr_src addr_src, struct device *dev) { struct ssif_addr_info *addr_info; int rv = 0; mutex_lock(&ssif_infos_mutex); if (ssif_info_find(addr, adapter_name, false)) { rv = -EEXIST; goto out_unlock; } addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL); if (!addr_info) { rv = -ENOMEM; goto out_unlock; } if (adapter_name) { addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL); if (!addr_info->adapter_name) { kfree(addr_info); rv = -ENOMEM; goto out_unlock; } } strncpy(addr_info->binfo.type, DEVICE_NAME, sizeof(addr_info->binfo.type)); addr_info->binfo.addr = addr; addr_info->binfo.platform_data = addr_info; addr_info->debug = debug; addr_info->slave_addr = slave_addr; addr_info->addr_src = addr_src; addr_info->dev = dev; if (dev) dev_set_drvdata(dev, addr_info); list_add_tail(&addr_info->link, &ssif_infos); /* Address list will get it */ out_unlock: mutex_unlock(&ssif_infos_mutex); return rv; } static void free_ssif_clients(void) { struct ssif_addr_info *info, *tmp; mutex_lock(&ssif_infos_mutex); list_for_each_entry_safe(info, tmp, &ssif_infos, link) { list_del(&info->link); kfree(info->adapter_name); kfree(info); } mutex_unlock(&ssif_infos_mutex); } static unsigned short *ssif_address_list(void) { struct ssif_addr_info *info; unsigned int count = 0, i = 0; unsigned short 
*address_list; list_for_each_entry(info, &ssif_infos, link) count++; address_list = kcalloc(count + 1, sizeof(*address_list), GFP_KERNEL); if (!address_list) return NULL; list_for_each_entry(info, &ssif_infos, link) { unsigned short addr = info->binfo.addr; int j; for (j = 0; j < i; j++) { if (address_list[j] == addr) /* Found a dup. */ break; } if (j == i) /* Didn't find it in the list. */ address_list[i++] = addr; } address_list[i] = I2C_CLIENT_END; return address_list; } #ifdef CONFIG_ACPI static const struct acpi_device_id ssif_acpi_match[] = { { "IPI0001", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, ssif_acpi_match); #endif #ifdef CONFIG_DMI static int dmi_ipmi_probe(struct platform_device *pdev) { u8 slave_addr = 0; u16 i2c_addr; int rv; if (!ssif_trydmi) return -ENODEV; rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr); if (rv) { dev_warn(&pdev->dev, "No i2c-addr property\n"); return -ENODEV; } rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr); if (rv) slave_addr = 0x20; return new_ssif_client(i2c_addr, NULL, 0, slave_addr, SI_SMBIOS, &pdev->dev); } #else static int dmi_ipmi_probe(struct platform_device *pdev) { return -ENODEV; } #endif static const struct i2c_device_id ssif_id[] = { { DEVICE_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ssif_id); static struct i2c_driver ssif_i2c_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = DEVICE_NAME }, .probe = ssif_probe, .remove = ssif_remove, .alert = ssif_alert, .id_table = ssif_id, .detect = ssif_detect }; static int ssif_platform_probe(struct platform_device *dev) { return dmi_ipmi_probe(dev); } static int ssif_platform_remove(struct platform_device *dev) { struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev); mutex_lock(&ssif_infos_mutex); list_del(&addr_info->link); kfree(addr_info); mutex_unlock(&ssif_infos_mutex); return 0; } static const struct platform_device_id ssif_plat_ids[] = { { "dmi-ipmi-ssif", 0 }, { } }; static struct platform_driver ipmi_driver = { .driver = { .name = DEVICE_NAME, }, .probe = ssif_platform_probe, .remove = ssif_platform_remove, .id_table = ssif_plat_ids }; static int __init init_ipmi_ssif(void) { int i; int rv; if (initialized) return 0; pr_info("IPMI SSIF Interface driver\n"); /* build list for i2c from addr list */ for (i = 0; i < num_addrs; i++) { rv = new_ssif_client(addr[i], adapter_name[i], dbg[i], slave_addrs[i], SI_HARDCODED, NULL); if (rv) pr_err("Couldn't add hardcoded device at addr 0x%x\n", addr[i]); } if (ssif_tryacpi) ssif_i2c_driver.driver.acpi_match_table = ACPI_PTR(ssif_acpi_match); if (ssif_trydmi) { rv = platform_driver_register(&ipmi_driver); if (rv) pr_err("Unable to register driver: %d\n", rv); else platform_registered = true; } ssif_i2c_driver.address_list = ssif_address_list(); rv = i2c_add_driver(&ssif_i2c_driver); if (!rv) initialized = true; return rv; } module_init(init_ipmi_ssif); static void __exit cleanup_ipmi_ssif(void) { if (!initialized) return; initialized = false; i2c_del_driver(&ssif_i2c_driver); kfree(ssif_i2c_driver.address_list); if (ssif_trydmi && platform_registered) platform_driver_unregister(&ipmi_driver); free_ssif_clients(); } module_exit(cleanup_ipmi_ssif); MODULE_ALIAS("platform:dmi-ipmi-ssif"); MODULE_AUTHOR("Todd C Davis <[email protected]>, Corey Minyard <[email protected]>"); MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus"); MODULE_LICENSE("GPL");
linux-master
drivers/char/ipmi/ipmi_ssif.c
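/*
 * Illustrative sketch only, not part of drivers/char/ipmi/ipmi_ssif.c above:
 * a standalone helper showing how ssif_probe() interprets the Get System
 * Interface Capabilities response bytes (resp[4]..resp[6]) and clamps the
 * transfer sizes by multi-part support level. The struct and function names
 * here are hypothetical; the real driver does this inline in ssif_probe().
 */
#include <stdint.h>

struct ssif_caps {
	unsigned int max_xmit_msg_size;
	unsigned int max_recv_msg_size;
	unsigned int multi_support;	/* 0 = none, 1 = 2-part, 2 = n-part */
	unsigned int supports_pec;
};

static void decode_ssif_caps(const uint8_t *resp, struct ssif_caps *caps)
{
	caps->multi_support     = (resp[4] >> 6) & 0x3;
	caps->supports_pec      = (resp[4] >> 3) & 0x1;
	caps->max_xmit_msg_size = resp[5];
	caps->max_recv_msg_size = resp[6];

	switch (caps->multi_support) {
	case 0:	/* SSIF_NO_MULTI: one 32-byte SMBus block in each direction */
		if (caps->max_xmit_msg_size > 32)
			caps->max_xmit_msg_size = 32;
		if (caps->max_recv_msg_size > 32)
			caps->max_recv_msg_size = 32;
		break;
	case 1:	/* SSIF_MULTI_2_PART */
		if (caps->max_xmit_msg_size > 63)
			caps->max_xmit_msg_size = 63;
		if (caps->max_recv_msg_size > 62)
			caps->max_recv_msg_size = 62;
		break;
	case 2:	/* SSIF_MULTI_n_PART: sizes are taken as reported */
		break;
	default: /* the real driver treats this as unsupported and falls back to 32/32 */
		caps->max_xmit_msg_size = 32;
		caps->max_recv_msg_size = 32;
		break;
	}
}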
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015-2018, Intel Corporation. */ #define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt #include <linux/atomic.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/poll.h> #include <linux/regmap.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/timer.h> #include "kcs_bmc_device.h" #define DEVICE_NAME "ast-kcs-bmc" #define KCS_CHANNEL_MAX 4 /* * Field class descriptions * * LPCyE Enable LPC channel y * IBFIEy Input Buffer Full IRQ Enable for LPC channel y * IRQxEy Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy) * IDyIRQX Use the specified 4-bit SerIRQ for LPC channel y * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1) * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y */ #define LPC_TYIRQX_LOW 0b00 #define LPC_TYIRQX_HIGH 0b01 #define LPC_TYIRQX_RSVD 0b10 #define LPC_TYIRQX_RISING 0b11 #define LPC_HICR0 0x000 #define LPC_HICR0_LPC3E BIT(7) #define LPC_HICR0_LPC2E BIT(6) #define LPC_HICR0_LPC1E BIT(5) #define LPC_HICR2 0x008 #define LPC_HICR2_IBFIE3 BIT(3) #define LPC_HICR2_IBFIE2 BIT(2) #define LPC_HICR2_IBFIE1 BIT(1) #define LPC_HICR4 0x010 #define LPC_HICR4_LADR12AS BIT(7) #define LPC_HICR4_KCSENBL BIT(2) #define LPC_SIRQCR0 0x070 /* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */ #define LPC_SIRQCR0_IRQ12E1 BIT(1) #define LPC_SIRQCR0_IRQ1E1 BIT(0) #define LPC_HICR5 0x080 #define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20) #define LPC_HICR5_ID3IRQX_SHIFT 20 #define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16) #define LPC_HICR5_ID2IRQX_SHIFT 16 #define LPC_HICR5_SEL3IRQX BIT(15) #define LPC_HICR5_IRQXE3 BIT(14) #define LPC_HICR5_SEL2IRQX BIT(13) #define LPC_HICR5_IRQXE2 BIT(12) #define LPC_LADR3H 0x014 #define LPC_LADR3L 0x018 #define LPC_LADR12H 0x01C #define LPC_LADR12L 0x020 #define LPC_IDR1 0x024 #define LPC_IDR2 0x028 #define LPC_IDR3 0x02C #define LPC_ODR1 0x030 #define LPC_ODR2 0x034 #define LPC_ODR3 0x038 #define LPC_STR1 0x03C #define LPC_STR2 0x040 #define LPC_STR3 0x044 #define LPC_HICRB 0x100 #define LPC_HICRB_EN16LADR2 BIT(5) #define LPC_HICRB_EN16LADR1 BIT(4) #define LPC_HICRB_IBFIE4 BIT(1) #define LPC_HICRB_LPC4E BIT(0) #define LPC_HICRC 0x104 #define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4) #define LPC_HICRC_ID4IRQX_SHIFT 4 #define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2) #define LPC_HICRC_TY4IRQX_SHIFT 2 #define LPC_HICRC_OBF4_AUTO_CLR BIT(1) #define LPC_HICRC_IRQXE4 BIT(0) #define LPC_LADR4 0x110 #define LPC_IDR4 0x114 #define LPC_ODR4 0x118 #define LPC_STR4 0x11C #define LPC_LSADR12 0x120 #define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16) #define LPC_LSADR12_LSADR2_SHIFT 16 #define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0) #define LPC_LSADR12_LSADR1_SHIFT 0 #define OBE_POLL_PERIOD (HZ / 2) enum aspeed_kcs_irq_mode { aspeed_kcs_irq_none, aspeed_kcs_irq_serirq, }; struct aspeed_kcs_bmc { struct kcs_bmc_device kcs_bmc; struct regmap *map; struct { enum aspeed_kcs_irq_mode mode; int id; } upstream_irq; struct { spinlock_t lock; bool remove; struct timer_list timer; } obe; }; static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc) { return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc); } static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); u32 val = 
0; int rc; rc = regmap_read(priv->map, reg, &val); WARN(rc != 0, "regmap_read() failed: %d\n", rc); return rc == 0 ? (u8) val : 0; } static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); int rc; rc = regmap_write(priv->map, reg, data); WARN(rc != 0, "regmap_write() failed: %d\n", rc); /* Trigger the upstream IRQ on ODR writes, if enabled */ switch (reg) { case LPC_ODR1: case LPC_ODR2: case LPC_ODR3: case LPC_ODR4: break; default: return; } if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq) return; switch (kcs_bmc->channel) { case 1: switch (priv->upstream_irq.id) { case 12: regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1, LPC_SIRQCR0_IRQ12E1); break; case 1: regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1, LPC_SIRQCR0_IRQ1E1); break; default: break; } break; case 2: regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2); break; case 3: regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3); break; case 4: regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4); break; default: break; } } static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); int rc; rc = regmap_update_bits(priv->map, reg, mask, val); WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc); } /* * We note D for Data, and C for Cmd/Status, default rules are * * 1. Only the D address is given: * A. KCS1/KCS2 (D/C: X/X+4) * D/C: CA0h/CA4h * D/C: CA8h/CACh * B. KCS3 (D/C: XX2/XX3h) * D/C: CA2h/CA3h * C. KCS4 (D/C: X/X+1) * D/C: CA4h/CA5h * * 2. Both the D/C addresses are given: * A. KCS1/KCS2/KCS4 (D/C: X/Y) * D/C: CA0h/CA1h * D/C: CA8h/CA9h * D/C: CA4h/CA5h * B. 
KCS3 (D/C: XX2/XX3h) * D/C: CA2h/CA3h */ static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); if (WARN_ON(nr_addrs < 1 || nr_addrs > 2)) return -EINVAL; switch (priv->kcs_bmc.channel) { case 1: regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0); regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8); regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF); if (nr_addrs == 2) { regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK, addrs[1] << LPC_LSADR12_LSADR1_SHIFT); regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1, LPC_HICRB_EN16LADR1); } break; case 2: regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS); regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8); regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF); if (nr_addrs == 2) { regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK, addrs[1] << LPC_LSADR12_LSADR2_SHIFT); regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2, LPC_HICRB_EN16LADR2); } break; case 3: if (nr_addrs == 2) { dev_err(priv->kcs_bmc.dev, "Channel 3 only supports inferred status IO address\n"); return -EINVAL; } regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8); regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF); break; case 4: if (nr_addrs == 1) regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]); else regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]); break; default: return -EINVAL; } return 0; } static inline int aspeed_kcs_map_serirq_type(u32 dt_type) { switch (dt_type) { case IRQ_TYPE_EDGE_RISING: return LPC_TYIRQX_RISING; case IRQ_TYPE_LEVEL_HIGH: return LPC_TYIRQX_HIGH; case IRQ_TYPE_LEVEL_LOW: return LPC_TYIRQX_LOW; default: return -EINVAL; } } static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type) { unsigned int mask, val, hw_type; int ret; if (id > 15) return -EINVAL; ret = aspeed_kcs_map_serirq_type(dt_type); if (ret < 0) return ret; hw_type = ret; priv->upstream_irq.mode = aspeed_kcs_irq_serirq; priv->upstream_irq.id = id; switch (priv->kcs_bmc.channel) { case 1: /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */ break; case 2: if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH)) return -EINVAL; mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK; val = (id << LPC_HICR5_ID2IRQX_SHIFT); val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0; regmap_update_bits(priv->map, LPC_HICR5, mask, val); break; case 3: if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH)) return -EINVAL; mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK; val = (id << LPC_HICR5_ID3IRQX_SHIFT); val |= (hw_type == LPC_TYIRQX_HIGH) ? 
LPC_HICR5_SEL3IRQX : 0; regmap_update_bits(priv->map, LPC_HICR5, mask, val); break; case 4: mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR; val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT); regmap_update_bits(priv->map, LPC_HICRC, mask, val); break; default: dev_warn(priv->kcs_bmc.dev, "SerIRQ configuration not supported on KCS channel %d\n", priv->kcs_bmc.channel); return -EINVAL; } return 0; } static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); switch (kcs_bmc->channel) { case 1: regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E); return; case 2: regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E); return; case 3: regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E); regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL); return; case 4: regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E); return; default: pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); return; } } static void aspeed_kcs_check_obe(struct timer_list *timer) { struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer); unsigned long flags; u8 str; spin_lock_irqsave(&priv->obe.lock, flags); if (priv->obe.remove) { spin_unlock_irqrestore(&priv->obe.lock, flags); return; } str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str); if (str & KCS_BMC_STR_OBF) { mod_timer(timer, jiffies + OBE_POLL_PERIOD); spin_unlock_irqrestore(&priv->obe.lock, flags); return; } spin_unlock_irqrestore(&priv->obe.lock, flags); kcs_bmc_handle_event(&priv->kcs_bmc); } static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); int rc; u8 str; /* We don't have an OBE IRQ, emulate it */ if (mask & KCS_BMC_EVENT_TYPE_OBE) { if (KCS_BMC_EVENT_TYPE_OBE & state) { /* * Given we don't have an OBE IRQ, delay by polling briefly to see if we can * observe such an event before returning to the caller. This is not * incorrect because OBF may have already become clear before enabling the * IRQ if we had one, under which circumstance no event will be propagated * anyway. * * The onus is on the client to perform a race-free check that it hasn't * missed the event. */ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, !(str & KCS_BMC_STR_OBF), 1, 100, false, &priv->kcs_bmc, priv->kcs_bmc.ioreg.str); /* Time for the slow path? 
*/ if (rc == -ETIMEDOUT) mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); } else { del_timer(&priv->obe.timer); } } if (mask & KCS_BMC_EVENT_TYPE_IBF) { const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF); switch (kcs_bmc->channel) { case 1: regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1, enable * LPC_HICR2_IBFIE1); return; case 2: regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2, enable * LPC_HICR2_IBFIE2); return; case 3: regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3, enable * LPC_HICR2_IBFIE3); return; case 4: regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4, enable * LPC_HICRB_IBFIE4); return; default: pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); return; } } } static const struct kcs_bmc_device_ops aspeed_kcs_ops = { .irq_mask_update = aspeed_kcs_irq_mask_update, .io_inputb = aspeed_kcs_inb, .io_outputb = aspeed_kcs_outb, .io_updateb = aspeed_kcs_updateb, }; static irqreturn_t aspeed_kcs_irq(int irq, void *arg) { struct kcs_bmc_device *kcs_bmc = arg; return kcs_bmc_handle_event(kcs_bmc); } static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc, struct platform_device *pdev) { struct device *dev = &pdev->dev; int irq; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED, dev_name(dev), kcs_bmc); } static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = { { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 }, { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 }, { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 }, { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 }, }; static int aspeed_kcs_of_get_channel(struct platform_device *pdev) { struct device_node *np; struct kcs_ioreg ioreg; const __be32 *reg; int i; np = pdev->dev.of_node; /* Don't translate addresses, we want offsets for the regmaps */ reg = of_get_address(np, 0, NULL, NULL); if (!reg) return -EINVAL; ioreg.idr = be32_to_cpup(reg); reg = of_get_address(np, 1, NULL, NULL); if (!reg) return -EINVAL; ioreg.odr = be32_to_cpup(reg); reg = of_get_address(np, 2, NULL, NULL); if (!reg) return -EINVAL; ioreg.str = be32_to_cpup(reg); for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) { if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg))) return i + 1; } return -EINVAL; } static int aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2]) { int rc; rc = of_property_read_variable_u32_array(pdev->dev.of_node, "aspeed,lpc-io-reg", addrs, 1, 2); if (rc < 0) { dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n"); return rc; } if (addrs[0] > 0xffff) { dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n"); return -EINVAL; } if (rc == 2 && addrs[1] > 0xffff) { dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n"); return -EINVAL; } return rc; } static int aspeed_kcs_probe(struct platform_device *pdev) { struct kcs_bmc_device *kcs_bmc; struct aspeed_kcs_bmc *priv; struct device_node *np; bool have_upstream_irq; u32 upstream_irq[2]; int rc, channel; int nr_addrs; u32 addrs[2]; np = pdev->dev.of_node->parent; if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") && !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") && !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) { dev_err(&pdev->dev, "unsupported LPC device binding\n"); return -ENODEV; } channel = aspeed_kcs_of_get_channel(pdev); if (channel < 0) return channel; nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs); if (nr_addrs < 0) return 
nr_addrs; np = pdev->dev.of_node; rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2); if (rc && rc != -EINVAL) return -EINVAL; have_upstream_irq = !rc; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; kcs_bmc = &priv->kcs_bmc; kcs_bmc->dev = &pdev->dev; kcs_bmc->channel = channel; kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1]; kcs_bmc->ops = &aspeed_kcs_ops; priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node); if (IS_ERR(priv->map)) { dev_err(&pdev->dev, "Couldn't get regmap\n"); return -ENODEV; } spin_lock_init(&priv->obe.lock); priv->obe.remove = false; timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0); rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs); if (rc) return rc; /* Host to BMC IRQ */ rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev); if (rc) return rc; /* BMC to Host IRQ */ if (have_upstream_irq) { rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]); if (rc < 0) return rc; } else { priv->upstream_irq.mode = aspeed_kcs_irq_none; } platform_set_drvdata(pdev, priv); aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); aspeed_kcs_enable_channel(kcs_bmc, true); rc = kcs_bmc_add_device(&priv->kcs_bmc); if (rc) { dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc); return rc; } dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n", kcs_bmc->channel, addrs[0]); return 0; } static int aspeed_kcs_remove(struct platform_device *pdev) { struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; kcs_bmc_remove_device(kcs_bmc); aspeed_kcs_enable_channel(kcs_bmc, false); aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); /* Make sure it's proper dead */ spin_lock_irq(&priv->obe.lock); priv->obe.remove = true; spin_unlock_irq(&priv->obe.lock); del_timer_sync(&priv->obe.timer); return 0; } static const struct of_device_id ast_kcs_bmc_match[] = { { .compatible = "aspeed,ast2400-kcs-bmc-v2" }, { .compatible = "aspeed,ast2500-kcs-bmc-v2" }, { .compatible = "aspeed,ast2600-kcs-bmc" }, { } }; MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match); static struct platform_driver ast_kcs_bmc_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = ast_kcs_bmc_match, }, .probe = aspeed_kcs_probe, .remove = aspeed_kcs_remove, }; module_platform_driver(ast_kcs_bmc_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Haiyue Wang <[email protected]>"); MODULE_AUTHOR("Andrew Jeffery <[email protected]>"); MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");
linux-master
drivers/char/ipmi/kcs_bmc_aspeed.c
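/*
 * Illustrative sketch only, not part of drivers/char/ipmi/kcs_bmc_aspeed.c
 * above: the default data/status address pairing documented in the driver
 * comment ("Only the D address is given"), expressed as a hypothetical
 * helper. KCS1/KCS2 place the status register at data + 4, KCS3 and KCS4
 * at data + 1 (e.g. 0xCA0/0xCA4, 0xCA2/0xCA3, 0xCA4/0xCA5).
 */
static unsigned int aspeed_kcs_default_status_addr(int channel, unsigned int data_addr)
{
	switch (channel) {
	case 1:
	case 2:
		return data_addr + 4;	/* D/C: X / X+4 */
	case 3:
	case 4:
		return data_addr + 1;	/* D/C: XX2h/XX3h and X / X+1 */
	default:
		return 0;		/* only channels 1-4 exist (KCS_CHANNEL_MAX) */
	}
}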
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_devintf.c * * Linux device interface for the IPMI message handler. * * Author: MontaVista Software, Inc. * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002 MontaVista Software Inc. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/ipmi.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/device.h> #include <linux/compat.h> struct ipmi_file_private { struct ipmi_user *user; spinlock_t recv_msg_lock; struct list_head recv_msgs; struct fasync_struct *fasync_queue; wait_queue_head_t wait; struct mutex recv_mutex; int default_retries; unsigned int default_retry_time_ms; }; static void file_receive_handler(struct ipmi_recv_msg *msg, void *handler_data) { struct ipmi_file_private *priv = handler_data; int was_empty; unsigned long flags; spin_lock_irqsave(&priv->recv_msg_lock, flags); was_empty = list_empty(&priv->recv_msgs); list_add_tail(&msg->link, &priv->recv_msgs); spin_unlock_irqrestore(&priv->recv_msg_lock, flags); if (was_empty) { wake_up_interruptible(&priv->wait); kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN); } } static __poll_t ipmi_poll(struct file *file, poll_table *wait) { struct ipmi_file_private *priv = file->private_data; __poll_t mask = 0; unsigned long flags; poll_wait(file, &priv->wait, wait); spin_lock_irqsave(&priv->recv_msg_lock, flags); if (!list_empty(&priv->recv_msgs)) mask |= (EPOLLIN | EPOLLRDNORM); spin_unlock_irqrestore(&priv->recv_msg_lock, flags); return mask; } static int ipmi_fasync(int fd, struct file *file, int on) { struct ipmi_file_private *priv = file->private_data; return fasync_helper(fd, file, on, &priv->fasync_queue); } static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = file_receive_handler, }; static int ipmi_open(struct inode *inode, struct file *file) { int if_num = iminor(inode); int rv; struct ipmi_file_private *priv; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; rv = ipmi_create_user(if_num, &ipmi_hndlrs, priv, &priv->user); if (rv) { kfree(priv); goto out; } file->private_data = priv; spin_lock_init(&priv->recv_msg_lock); INIT_LIST_HEAD(&priv->recv_msgs); init_waitqueue_head(&priv->wait); priv->fasync_queue = NULL; mutex_init(&priv->recv_mutex); /* Use the low-level defaults. */ priv->default_retries = -1; priv->default_retry_time_ms = 0; out: return rv; } static int ipmi_release(struct inode *inode, struct file *file) { struct ipmi_file_private *priv = file->private_data; int rv; struct ipmi_recv_msg *msg, *next; rv = ipmi_destroy_user(priv->user); if (rv) return rv; list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) ipmi_free_recv_msg(msg); kfree(priv); return 0; } static int handle_send_req(struct ipmi_user *user, struct ipmi_req *req, int retries, unsigned int retry_time_ms) { int rv; struct ipmi_addr addr; struct kernel_ipmi_msg msg; if (req->addr_len > sizeof(struct ipmi_addr)) return -EINVAL; if (copy_from_user(&addr, req->addr, req->addr_len)) return -EFAULT; msg.netfn = req->msg.netfn; msg.cmd = req->msg.cmd; msg.data_len = req->msg.data_len; msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!msg.data) return -ENOMEM; /* From here out we cannot return, we must jump to "out" for error exits to free msgdata. 
*/ rv = ipmi_validate_addr(&addr, req->addr_len); if (rv) goto out; if (req->msg.data != NULL) { if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) { rv = -EMSGSIZE; goto out; } if (copy_from_user(msg.data, req->msg.data, req->msg.data_len)) { rv = -EFAULT; goto out; } } else { msg.data_len = 0; } rv = ipmi_request_settime(user, &addr, req->msgid, &msg, NULL, 0, retries, retry_time_ms); out: kfree(msg.data); return rv; } static int handle_recv(struct ipmi_file_private *priv, bool trunc, struct ipmi_recv *rsp, int (*copyout)(struct ipmi_recv *, void __user *), void __user *to) { int addr_len; struct list_head *entry; struct ipmi_recv_msg *msg; unsigned long flags; int rv = 0, rv2 = 0; /* We claim a mutex because we don't want two users getting something from the queue at a time. Since we have to release the spinlock before we can copy the data to the user, it's possible another user will grab something from the queue, too. Then the messages might get out of order if something fails and the message gets put back onto the queue. This mutex prevents that problem. */ mutex_lock(&priv->recv_mutex); /* Grab the message off the list. */ spin_lock_irqsave(&priv->recv_msg_lock, flags); if (list_empty(&(priv->recv_msgs))) { spin_unlock_irqrestore(&priv->recv_msg_lock, flags); rv = -EAGAIN; goto recv_err; } entry = priv->recv_msgs.next; msg = list_entry(entry, struct ipmi_recv_msg, link); list_del(entry); spin_unlock_irqrestore(&priv->recv_msg_lock, flags); addr_len = ipmi_addr_length(msg->addr.addr_type); if (rsp->addr_len < addr_len) { rv = -EINVAL; goto recv_putback_on_err; } if (copy_to_user(rsp->addr, &msg->addr, addr_len)) { rv = -EFAULT; goto recv_putback_on_err; } rsp->addr_len = addr_len; rsp->recv_type = msg->recv_type; rsp->msgid = msg->msgid; rsp->msg.netfn = msg->msg.netfn; rsp->msg.cmd = msg->msg.cmd; if (msg->msg.data_len > 0) { if (rsp->msg.data_len < msg->msg.data_len) { if (trunc) { rv2 = -EMSGSIZE; msg->msg.data_len = rsp->msg.data_len; } else { rv = -EMSGSIZE; goto recv_putback_on_err; } } if (copy_to_user(rsp->msg.data, msg->msg.data, msg->msg.data_len)) { rv = -EFAULT; goto recv_putback_on_err; } rsp->msg.data_len = msg->msg.data_len; } else { rsp->msg.data_len = 0; } rv = copyout(rsp, to); if (rv) goto recv_putback_on_err; mutex_unlock(&priv->recv_mutex); ipmi_free_recv_msg(msg); return rv2; recv_putback_on_err: /* If we got an error, put the message back onto the head of the queue. */ spin_lock_irqsave(&priv->recv_msg_lock, flags); list_add(entry, &priv->recv_msgs); spin_unlock_irqrestore(&priv->recv_msg_lock, flags); recv_err: mutex_unlock(&priv->recv_mutex); return rv; } static int copyout_recv(struct ipmi_recv *rsp, void __user *to) { return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? 
-EFAULT : 0; } static long ipmi_ioctl(struct file *file, unsigned int cmd, unsigned long data) { int rv = -EINVAL; struct ipmi_file_private *priv = file->private_data; void __user *arg = (void __user *)data; switch (cmd) { case IPMICTL_SEND_COMMAND: { struct ipmi_req req; int retries; unsigned int retry_time_ms; if (copy_from_user(&req, arg, sizeof(req))) { rv = -EFAULT; break; } mutex_lock(&priv->recv_mutex); retries = priv->default_retries; retry_time_ms = priv->default_retry_time_ms; mutex_unlock(&priv->recv_mutex); rv = handle_send_req(priv->user, &req, retries, retry_time_ms); break; } case IPMICTL_SEND_COMMAND_SETTIME: { struct ipmi_req_settime req; if (copy_from_user(&req, arg, sizeof(req))) { rv = -EFAULT; break; } rv = handle_send_req(priv->user, &req.req, req.retries, req.retry_time_ms); break; } case IPMICTL_RECEIVE_MSG: case IPMICTL_RECEIVE_MSG_TRUNC: { struct ipmi_recv rsp; if (copy_from_user(&rsp, arg, sizeof(rsp))) rv = -EFAULT; else rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC, &rsp, copyout_recv, arg); break; } case IPMICTL_REGISTER_FOR_CMD: { struct ipmi_cmdspec val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd, IPMI_CHAN_ALL); break; } case IPMICTL_UNREGISTER_FOR_CMD: { struct ipmi_cmdspec val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd, IPMI_CHAN_ALL); break; } case IPMICTL_REGISTER_FOR_CMD_CHANS: { struct ipmi_cmdspec_chans val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd, val.chans); break; } case IPMICTL_UNREGISTER_FOR_CMD_CHANS: { struct ipmi_cmdspec_chans val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd, val.chans); break; } case IPMICTL_SET_GETS_EVENTS_CMD: { int val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_set_gets_events(priv->user, val); break; } /* The next four are legacy, not per-channel. 
*/ case IPMICTL_SET_MY_ADDRESS_CMD: { unsigned int val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_set_my_address(priv->user, 0, val); break; } case IPMICTL_GET_MY_ADDRESS_CMD: { unsigned int val; unsigned char rval; rv = ipmi_get_my_address(priv->user, 0, &rval); if (rv) break; val = rval; if (copy_to_user(arg, &val, sizeof(val))) { rv = -EFAULT; break; } break; } case IPMICTL_SET_MY_LUN_CMD: { unsigned int val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_set_my_LUN(priv->user, 0, val); break; } case IPMICTL_GET_MY_LUN_CMD: { unsigned int val; unsigned char rval; rv = ipmi_get_my_LUN(priv->user, 0, &rval); if (rv) break; val = rval; if (copy_to_user(arg, &val, sizeof(val))) { rv = -EFAULT; break; } break; } case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD: { struct ipmi_channel_lun_address_set val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } return ipmi_set_my_address(priv->user, val.channel, val.value); } case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD: { struct ipmi_channel_lun_address_set val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_get_my_address(priv->user, val.channel, &val.value); if (rv) break; if (copy_to_user(arg, &val, sizeof(val))) { rv = -EFAULT; break; } break; } case IPMICTL_SET_MY_CHANNEL_LUN_CMD: { struct ipmi_channel_lun_address_set val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_set_my_LUN(priv->user, val.channel, val.value); break; } case IPMICTL_GET_MY_CHANNEL_LUN_CMD: { struct ipmi_channel_lun_address_set val; if (copy_from_user(&val, arg, sizeof(val))) { rv = -EFAULT; break; } rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value); if (rv) break; if (copy_to_user(arg, &val, sizeof(val))) { rv = -EFAULT; break; } break; } case IPMICTL_SET_TIMING_PARMS_CMD: { struct ipmi_timing_parms parms; if (copy_from_user(&parms, arg, sizeof(parms))) { rv = -EFAULT; break; } mutex_lock(&priv->recv_mutex); priv->default_retries = parms.retries; priv->default_retry_time_ms = parms.retry_time_ms; mutex_unlock(&priv->recv_mutex); rv = 0; break; } case IPMICTL_GET_TIMING_PARMS_CMD: { struct ipmi_timing_parms parms; mutex_lock(&priv->recv_mutex); parms.retries = priv->default_retries; parms.retry_time_ms = priv->default_retry_time_ms; mutex_unlock(&priv->recv_mutex); if (copy_to_user(arg, &parms, sizeof(parms))) { rv = -EFAULT; break; } rv = 0; break; } case IPMICTL_GET_MAINTENANCE_MODE_CMD: { int mode; mode = ipmi_get_maintenance_mode(priv->user); if (copy_to_user(arg, &mode, sizeof(mode))) { rv = -EFAULT; break; } rv = 0; break; } case IPMICTL_SET_MAINTENANCE_MODE_CMD: { int mode; if (copy_from_user(&mode, arg, sizeof(mode))) { rv = -EFAULT; break; } rv = ipmi_set_maintenance_mode(priv->user, mode); break; } default: rv = -ENOTTY; break; } return rv; } #ifdef CONFIG_COMPAT /* * The following code contains code for supporting 32-bit compatible * ioctls on 64-bit kernels. 
This allows running 32-bit apps on the * 64-bit kernel */ #define COMPAT_IPMICTL_SEND_COMMAND \ _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req) #define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \ _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime) #define COMPAT_IPMICTL_RECEIVE_MSG \ _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv) #define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \ _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv) struct compat_ipmi_msg { u8 netfn; u8 cmd; u16 data_len; compat_uptr_t data; }; struct compat_ipmi_req { compat_uptr_t addr; compat_uint_t addr_len; compat_long_t msgid; struct compat_ipmi_msg msg; }; struct compat_ipmi_recv { compat_int_t recv_type; compat_uptr_t addr; compat_uint_t addr_len; compat_long_t msgid; struct compat_ipmi_msg msg; }; struct compat_ipmi_req_settime { struct compat_ipmi_req req; compat_int_t retries; compat_uint_t retry_time_ms; }; /* * Define some helper functions for copying IPMI data */ static void get_compat_ipmi_msg(struct ipmi_msg *p64, struct compat_ipmi_msg *p32) { p64->netfn = p32->netfn; p64->cmd = p32->cmd; p64->data_len = p32->data_len; p64->data = compat_ptr(p32->data); } static void get_compat_ipmi_req(struct ipmi_req *p64, struct compat_ipmi_req *p32) { p64->addr = compat_ptr(p32->addr); p64->addr_len = p32->addr_len; p64->msgid = p32->msgid; get_compat_ipmi_msg(&p64->msg, &p32->msg); } static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64, struct compat_ipmi_req_settime *p32) { get_compat_ipmi_req(&p64->req, &p32->req); p64->retries = p32->retries; p64->retry_time_ms = p32->retry_time_ms; } static void get_compat_ipmi_recv(struct ipmi_recv *p64, struct compat_ipmi_recv *p32) { memset(p64, 0, sizeof(struct ipmi_recv)); p64->recv_type = p32->recv_type; p64->addr = compat_ptr(p32->addr); p64->addr_len = p32->addr_len; p64->msgid = p32->msgid; get_compat_ipmi_msg(&p64->msg, &p32->msg); } static int copyout_recv32(struct ipmi_recv *p64, void __user *to) { struct compat_ipmi_recv v32; memset(&v32, 0, sizeof(struct compat_ipmi_recv)); v32.recv_type = p64->recv_type; v32.addr = ptr_to_compat(p64->addr); v32.addr_len = p64->addr_len; v32.msgid = p64->msgid; v32.msg.netfn = p64->msg.netfn; v32.msg.cmd = p64->msg.cmd; v32.msg.data_len = p64->msg.data_len; v32.msg.data = ptr_to_compat(p64->msg.data); return copy_to_user(to, &v32, sizeof(v32)) ? 
-EFAULT : 0; } /* * Handle compatibility ioctls */ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct ipmi_file_private *priv = filep->private_data; switch(cmd) { case COMPAT_IPMICTL_SEND_COMMAND: { struct ipmi_req rp; struct compat_ipmi_req r32; int retries; unsigned int retry_time_ms; if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32))) return -EFAULT; get_compat_ipmi_req(&rp, &r32); mutex_lock(&priv->recv_mutex); retries = priv->default_retries; retry_time_ms = priv->default_retry_time_ms; mutex_unlock(&priv->recv_mutex); return handle_send_req(priv->user, &rp, retries, retry_time_ms); } case COMPAT_IPMICTL_SEND_COMMAND_SETTIME: { struct ipmi_req_settime sp; struct compat_ipmi_req_settime sp32; if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32))) return -EFAULT; get_compat_ipmi_req_settime(&sp, &sp32); return handle_send_req(priv->user, &sp.req, sp.retries, sp.retry_time_ms); } case COMPAT_IPMICTL_RECEIVE_MSG: case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC: { struct ipmi_recv recv64; struct compat_ipmi_recv recv32; if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32))) return -EFAULT; get_compat_ipmi_recv(&recv64, &recv32); return handle_recv(priv, cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC, &recv64, copyout_recv32, compat_ptr(arg)); } default: return ipmi_ioctl(filep, cmd, arg); } } #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ipmi_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_ipmi_ioctl, #endif .open = ipmi_open, .release = ipmi_release, .fasync = ipmi_fasync, .poll = ipmi_poll, .llseek = noop_llseek, }; #define DEVICE_NAME "ipmidev" static int ipmi_major; module_param(ipmi_major, int, 0); MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By" " default, or if you set it to zero, it will choose the next" " available device. Setting it to -1 will disable the" " interface. Other values will set the major device number" " to that value."); /* Keep track of the devices that are registered. 
*/ struct ipmi_reg_list { dev_t dev; struct list_head link; }; static LIST_HEAD(reg_list); static DEFINE_MUTEX(reg_list_mutex); static const struct class ipmi_class = { .name = "ipmi", }; static void ipmi_new_smi(int if_num, struct device *device) { dev_t dev = MKDEV(ipmi_major, if_num); struct ipmi_reg_list *entry; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { pr_err("ipmi_devintf: Unable to create the ipmi class device link\n"); return; } entry->dev = dev; mutex_lock(&reg_list_mutex); device_create(&ipmi_class, device, dev, NULL, "ipmi%d", if_num); list_add(&entry->link, &reg_list); mutex_unlock(&reg_list_mutex); } static void ipmi_smi_gone(int if_num) { dev_t dev = MKDEV(ipmi_major, if_num); struct ipmi_reg_list *entry; mutex_lock(&reg_list_mutex); list_for_each_entry(entry, &reg_list, link) { if (entry->dev == dev) { list_del(&entry->link); kfree(entry); break; } } device_destroy(&ipmi_class, dev); mutex_unlock(&reg_list_mutex); } static struct ipmi_smi_watcher smi_watcher = { .owner = THIS_MODULE, .new_smi = ipmi_new_smi, .smi_gone = ipmi_smi_gone, }; static int __init init_ipmi_devintf(void) { int rv; if (ipmi_major < 0) return -EINVAL; pr_info("ipmi device interface\n"); rv = class_register(&ipmi_class); if (rv) return rv; rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops); if (rv < 0) { class_unregister(&ipmi_class); pr_err("ipmi: can't get major %d\n", ipmi_major); return rv; } if (ipmi_major == 0) { ipmi_major = rv; } rv = ipmi_smi_watcher_register(&smi_watcher); if (rv) { unregister_chrdev(ipmi_major, DEVICE_NAME); class_unregister(&ipmi_class); pr_warn("ipmi: can't register smi watcher\n"); return rv; } return 0; } module_init(init_ipmi_devintf); static void __exit cleanup_ipmi(void) { struct ipmi_reg_list *entry, *entry2; mutex_lock(&reg_list_mutex); list_for_each_entry_safe(entry, entry2, &reg_list, link) { list_del(&entry->link); device_destroy(&ipmi_class, entry->dev); kfree(entry); } mutex_unlock(&reg_list_mutex); class_unregister(&ipmi_class); ipmi_smi_watcher_unregister(&smi_watcher); unregister_chrdev(ipmi_major, DEVICE_NAME); } module_exit(cleanup_ipmi); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <[email protected]>"); MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
linux-master
drivers/char/ipmi/ipmi_devintf.c
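/*
 * Illustrative user-space sketch only, not part of
 * drivers/char/ipmi/ipmi_devintf.c above: sending a Get Device ID request
 * (NetFn 0x06, cmd 0x01) to the BMC through /dev/ipmi0 with the
 * IPMICTL_SEND_COMMAND ioctl handled by ipmi_ioctl() above. Error handling
 * is abbreviated; the reply would normally be collected later with
 * IPMICTL_RECEIVE_MSG after poll()/select() reports data.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ipmi.h>

int main(void)
{
	struct ipmi_system_interface_addr si;
	struct ipmi_req req;
	int fd;

	fd = open("/dev/ipmi0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ipmi0");
		return 1;
	}

	memset(&si, 0, sizeof(si));
	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	memset(&req, 0, sizeof(req));
	req.addr = (unsigned char *)&si;
	req.addr_len = sizeof(si);
	req.msgid = 1;			/* echoed back in the matching response */
	req.msg.netfn = 0x06;		/* IPMI_NETFN_APP_REQUEST */
	req.msg.cmd = 0x01;		/* Get Device ID */
	req.msg.data = NULL;
	req.msg.data_len = 0;

	if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
		perror("IPMICTL_SEND_COMMAND");

	close(fd);
	return 0;
}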
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_smic_sm.c * * The state-machine driver for an IPMI SMIC driver * * It started as a copy of Corey Minyard's driver for the KSC interface * and the kernel patch "mmcdev-patch-245" by HP * * modified by: Hannes Schulz <[email protected]> * [email protected] * * * Corey Minyard's driver for the KSC interface has the following * copyright notice: * Copyright 2002 MontaVista Software Inc. * * the kernel patch "mmcdev-patch-245" by HP has the following * copyright notice: * (c) Copyright 2001 Grant Grundler (c) Copyright * 2001 Hewlett-Packard Company */ #define DEBUG /* So dev_dbg() is always available. */ #include <linux/kernel.h> /* For printk. */ #include <linux/string.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" /* smic_debug is a bit-field * SMIC_DEBUG_ENABLE - turned on for now * SMIC_DEBUG_MSG - commands and their responses * SMIC_DEBUG_STATES - state machine */ #define SMIC_DEBUG_STATES 4 #define SMIC_DEBUG_MSG 2 #define SMIC_DEBUG_ENABLE 1 static int smic_debug = 1; module_param(smic_debug, int, 0644); MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); enum smic_states { SMIC_IDLE, SMIC_START_OP, SMIC_OP_OK, SMIC_WRITE_START, SMIC_WRITE_NEXT, SMIC_WRITE_END, SMIC_WRITE2READ, SMIC_READ_START, SMIC_READ_NEXT, SMIC_READ_END, SMIC_HOSED }; #define MAX_SMIC_READ_SIZE 80 #define MAX_SMIC_WRITE_SIZE 80 #define SMIC_MAX_ERROR_RETRIES 3 /* Timeouts in microseconds. */ #define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC) /* SMIC Flags Register Bits */ #define SMIC_RX_DATA_READY 0x80 #define SMIC_TX_DATA_READY 0x40 /* * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by * a few systems, and then only by Systems Management * Interrupts, not by the OS. Always ignore these bits. * */ #define SMIC_SMI 0x10 #define SMIC_EVM_DATA_AVAIL 0x08 #define SMIC_SMS_DATA_AVAIL 0x04 #define SMIC_FLAG_BSY 0x01 /* SMIC Error Codes */ #define EC_NO_ERROR 0x00 #define EC_ABORTED 0x01 #define EC_ILLEGAL_CONTROL 0x02 #define EC_NO_RESPONSE 0x03 #define EC_ILLEGAL_COMMAND 0x04 #define EC_BUFFER_FULL 0x05 struct si_sm_data { enum smic_states state; struct si_sm_io *io; unsigned char write_data[MAX_SMIC_WRITE_SIZE]; int write_pos; int write_count; int orig_write_count; unsigned char read_data[MAX_SMIC_READ_SIZE]; int read_pos; int truncated; unsigned int error_retries; long smic_timeout; }; static unsigned int init_smic_data(struct si_sm_data *smic, struct si_sm_io *io) { smic->state = SMIC_IDLE; smic->io = io; smic->write_pos = 0; smic->write_count = 0; smic->orig_write_count = 0; smic->read_pos = 0; smic->error_retries = 0; smic->truncated = 0; smic->smic_timeout = SMIC_RETRY_TIMEOUT; /* We use 3 bytes of I/O. 
*/ return 3; } static int start_smic_transaction(struct si_sm_data *smic, unsigned char *data, unsigned int size) { unsigned int i; if (size < 2) return IPMI_REQ_LEN_INVALID_ERR; if (size > MAX_SMIC_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) { dev_warn(smic->io->dev, "SMIC in invalid state %d\n", smic->state); return IPMI_NOT_IN_MY_STATE_ERR; } if (smic_debug & SMIC_DEBUG_MSG) { dev_dbg(smic->io->dev, "%s -", __func__); for (i = 0; i < size; i++) pr_cont(" %02x", data[i]); pr_cont("\n"); } smic->error_retries = 0; memcpy(smic->write_data, data, size); smic->write_count = size; smic->orig_write_count = size; smic->write_pos = 0; smic->read_pos = 0; smic->state = SMIC_START_OP; smic->smic_timeout = SMIC_RETRY_TIMEOUT; return 0; } static int smic_get_result(struct si_sm_data *smic, unsigned char *data, unsigned int length) { int i; if (smic_debug & SMIC_DEBUG_MSG) { dev_dbg(smic->io->dev, "smic_get result -"); for (i = 0; i < smic->read_pos; i++) pr_cont(" %02x", smic->read_data[i]); pr_cont("\n"); } if (length < smic->read_pos) { smic->read_pos = length; smic->truncated = 1; } memcpy(data, smic->read_data, smic->read_pos); if ((length >= 3) && (smic->read_pos < 3)) { data[2] = IPMI_ERR_UNSPECIFIED; smic->read_pos = 3; } if (smic->truncated) { data[2] = IPMI_ERR_MSG_TRUNCATED; smic->truncated = 0; } return smic->read_pos; } static inline unsigned char read_smic_flags(struct si_sm_data *smic) { return smic->io->inputb(smic->io, 2); } static inline unsigned char read_smic_status(struct si_sm_data *smic) { return smic->io->inputb(smic->io, 1); } static inline unsigned char read_smic_data(struct si_sm_data *smic) { return smic->io->inputb(smic->io, 0); } static inline void write_smic_flags(struct si_sm_data *smic, unsigned char flags) { smic->io->outputb(smic->io, 2, flags); } static inline void write_smic_control(struct si_sm_data *smic, unsigned char control) { smic->io->outputb(smic->io, 1, control); } static inline void write_si_sm_data(struct si_sm_data *smic, unsigned char data) { smic->io->outputb(smic->io, 0, data); } static inline void start_error_recovery(struct si_sm_data *smic, char *reason) { (smic->error_retries)++; if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { if (smic_debug & SMIC_DEBUG_ENABLE) pr_warn("ipmi_smic_drv: smic hosed: %s\n", reason); smic->state = SMIC_HOSED; } else { smic->write_count = smic->orig_write_count; smic->write_pos = 0; smic->read_pos = 0; smic->state = SMIC_START_OP; smic->smic_timeout = SMIC_RETRY_TIMEOUT; } } static inline void write_next_byte(struct si_sm_data *smic) { write_si_sm_data(smic, smic->write_data[smic->write_pos]); (smic->write_pos)++; (smic->write_count)--; } static inline void read_next_byte(struct si_sm_data *smic) { if (smic->read_pos >= MAX_SMIC_READ_SIZE) { read_smic_data(smic); smic->truncated = 1; } else { smic->read_data[smic->read_pos] = read_smic_data(smic); smic->read_pos++; } } /* SMIC Control/Status Code Components */ #define SMIC_GET_STATUS 0x00 /* Control form's name */ #define SMIC_READY 0x00 /* Status form's name */ #define SMIC_WR_START 0x01 /* Unified Control/Status names... 
*/ #define SMIC_WR_NEXT 0x02 #define SMIC_WR_END 0x03 #define SMIC_RD_START 0x04 #define SMIC_RD_NEXT 0x05 #define SMIC_RD_END 0x06 #define SMIC_CODE_MASK 0x0f #define SMIC_CONTROL 0x00 #define SMIC_STATUS 0x80 #define SMIC_CS_MASK 0x80 #define SMIC_SMS 0x40 #define SMIC_SMM 0x60 #define SMIC_STREAM_MASK 0x60 /* SMIC Control Codes */ #define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS) #define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START) #define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT) #define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END) #define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START) #define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT) #define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END) #define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS) #define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START) #define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT) #define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END) #define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START) #define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT) #define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END) /* SMIC Status Codes */ #define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY) #define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START) #define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT) #define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END) #define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START) #define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT) #define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END) #define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY) #define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START) #define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT) #define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END) #define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START) #define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT) #define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END) /* these are the control/status codes we actually use SMIC_CC_SMS_GET_STATUS 0x40 SMIC_CC_SMS_WR_START 0x41 SMIC_CC_SMS_WR_NEXT 0x42 SMIC_CC_SMS_WR_END 0x43 SMIC_CC_SMS_RD_START 0x44 SMIC_CC_SMS_RD_NEXT 0x45 SMIC_CC_SMS_RD_END 0x46 SMIC_SC_SMS_READY 0xC0 SMIC_SC_SMS_WR_START 0xC1 SMIC_SC_SMS_WR_NEXT 0xC2 SMIC_SC_SMS_WR_END 0xC3 SMIC_SC_SMS_RD_START 0xC4 SMIC_SC_SMS_RD_NEXT 0xC5 SMIC_SC_SMS_RD_END 0xC6 */ static enum si_sm_result smic_event(struct si_sm_data *smic, long time) { unsigned char status; unsigned char flags; unsigned char data; if (smic->state == SMIC_HOSED) { init_smic_data(smic, smic->io); return SI_SM_HOSED; } if (smic->state != SMIC_IDLE) { if (smic_debug & SMIC_DEBUG_STATES) dev_dbg(smic->io->dev, "%s - smic->smic_timeout = %ld, time = %ld\n", __func__, smic->smic_timeout, time); /* * FIXME: smic_event is sometimes called with time > * SMIC_RETRY_TIMEOUT */ if (time < SMIC_RETRY_TIMEOUT) { smic->smic_timeout -= time; if (smic->smic_timeout < 0) { start_error_recovery(smic, "smic timed out."); return SI_SM_CALL_WITH_DELAY; } } } flags = read_smic_flags(smic); if (flags & SMIC_FLAG_BSY) return SI_SM_CALL_WITH_DELAY; status = read_smic_status(smic); if (smic_debug & SMIC_DEBUG_STATES) dev_dbg(smic->io->dev, "%s - state = %d, flags = 0x%02x, status = 0x%02x\n", __func__, smic->state, flags, status); switch (smic->state) { case SMIC_IDLE: /* in IDLE 
we check for available messages */ if (flags & SMIC_SMS_DATA_AVAIL) return SI_SM_ATTN; return SI_SM_IDLE; case SMIC_START_OP: /* sanity check whether smic is really idle */ write_smic_control(smic, SMIC_CC_SMS_GET_STATUS); write_smic_flags(smic, flags | SMIC_FLAG_BSY); smic->state = SMIC_OP_OK; break; case SMIC_OP_OK: if (status != SMIC_SC_SMS_READY) { /* this should not happen */ start_error_recovery(smic, "state = SMIC_OP_OK," " status != SMIC_SC_SMS_READY"); return SI_SM_CALL_WITH_DELAY; } /* OK so far; smic is idle let us start ... */ write_smic_control(smic, SMIC_CC_SMS_WR_START); write_next_byte(smic); write_smic_flags(smic, flags | SMIC_FLAG_BSY); smic->state = SMIC_WRITE_START; break; case SMIC_WRITE_START: if (status != SMIC_SC_SMS_WR_START) { start_error_recovery(smic, "state = SMIC_WRITE_START, " "status != SMIC_SC_SMS_WR_START"); return SI_SM_CALL_WITH_DELAY; } /* * we must not issue WR_(NEXT|END) unless * TX_DATA_READY is set * */ if (flags & SMIC_TX_DATA_READY) { if (smic->write_count == 1) { /* last byte */ write_smic_control(smic, SMIC_CC_SMS_WR_END); smic->state = SMIC_WRITE_END; } else { write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); smic->state = SMIC_WRITE_NEXT; } write_next_byte(smic); write_smic_flags(smic, flags | SMIC_FLAG_BSY); } else return SI_SM_CALL_WITH_DELAY; break; case SMIC_WRITE_NEXT: if (status != SMIC_SC_SMS_WR_NEXT) { start_error_recovery(smic, "state = SMIC_WRITE_NEXT, " "status != SMIC_SC_SMS_WR_NEXT"); return SI_SM_CALL_WITH_DELAY; } /* this is the same code as in SMIC_WRITE_START */ if (flags & SMIC_TX_DATA_READY) { if (smic->write_count == 1) { write_smic_control(smic, SMIC_CC_SMS_WR_END); smic->state = SMIC_WRITE_END; } else { write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); smic->state = SMIC_WRITE_NEXT; } write_next_byte(smic); write_smic_flags(smic, flags | SMIC_FLAG_BSY); } else return SI_SM_CALL_WITH_DELAY; break; case SMIC_WRITE_END: if (status != SMIC_SC_SMS_WR_END) { start_error_recovery(smic, "state = SMIC_WRITE_END, " "status != SMIC_SC_SMS_WR_END"); return SI_SM_CALL_WITH_DELAY; } /* data register holds an error code */ data = read_smic_data(smic); if (data != 0) { if (smic_debug & SMIC_DEBUG_ENABLE) dev_dbg(smic->io->dev, "SMIC_WRITE_END: data = %02x\n", data); start_error_recovery(smic, "state = SMIC_WRITE_END, " "data != SUCCESS"); return SI_SM_CALL_WITH_DELAY; } else smic->state = SMIC_WRITE2READ; break; case SMIC_WRITE2READ: /* * we must wait for RX_DATA_READY to be set before we * can continue */ if (flags & SMIC_RX_DATA_READY) { write_smic_control(smic, SMIC_CC_SMS_RD_START); write_smic_flags(smic, flags | SMIC_FLAG_BSY); smic->state = SMIC_READ_START; } else return SI_SM_CALL_WITH_DELAY; break; case SMIC_READ_START: if (status != SMIC_SC_SMS_RD_START) { start_error_recovery(smic, "state = SMIC_READ_START, " "status != SMIC_SC_SMS_RD_START"); return SI_SM_CALL_WITH_DELAY; } if (flags & SMIC_RX_DATA_READY) { read_next_byte(smic); write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); write_smic_flags(smic, flags | SMIC_FLAG_BSY); smic->state = SMIC_READ_NEXT; } else return SI_SM_CALL_WITH_DELAY; break; case SMIC_READ_NEXT: switch (status) { /* * smic tells us that this is the last byte to be read * --> clean up */ case SMIC_SC_SMS_RD_END: read_next_byte(smic); write_smic_control(smic, SMIC_CC_SMS_RD_END); write_smic_flags(smic, flags | SMIC_FLAG_BSY); smic->state = SMIC_READ_END; break; case SMIC_SC_SMS_RD_NEXT: if (flags & SMIC_RX_DATA_READY) { read_next_byte(smic); write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); write_smic_flags(smic, flags 
| SMIC_FLAG_BSY); smic->state = SMIC_READ_NEXT; } else return SI_SM_CALL_WITH_DELAY; break; default: start_error_recovery( smic, "state = SMIC_READ_NEXT, " "status != SMIC_SC_SMS_RD_(NEXT|END)"); return SI_SM_CALL_WITH_DELAY; } break; case SMIC_READ_END: if (status != SMIC_SC_SMS_READY) { start_error_recovery(smic, "state = SMIC_READ_END, " "status != SMIC_SC_SMS_READY"); return SI_SM_CALL_WITH_DELAY; } data = read_smic_data(smic); /* data register holds an error code */ if (data != 0) { if (smic_debug & SMIC_DEBUG_ENABLE) dev_dbg(smic->io->dev, "SMIC_READ_END: data = %02x\n", data); start_error_recovery(smic, "state = SMIC_READ_END, " "data != SUCCESS"); return SI_SM_CALL_WITH_DELAY; } else { smic->state = SMIC_IDLE; return SI_SM_TRANSACTION_COMPLETE; } case SMIC_HOSED: init_smic_data(smic, smic->io); return SI_SM_HOSED; default: if (smic_debug & SMIC_DEBUG_ENABLE) { dev_dbg(smic->io->dev, "smic->state = %d\n", smic->state); start_error_recovery(smic, "state = UNKNOWN"); return SI_SM_CALL_WITH_DELAY; } } smic->smic_timeout = SMIC_RETRY_TIMEOUT; return SI_SM_CALL_WITHOUT_DELAY; } static int smic_detect(struct si_sm_data *smic) { /* * It's impossible for the SMIC fnags register to be all 1's, * (assuming a properly functioning, self-initialized BMC) * but that's what you get from reading a bogus address, so we * test that first. */ if (read_smic_flags(smic) == 0xff) return 1; return 0; } static void smic_cleanup(struct si_sm_data *kcs) { } static int smic_size(void) { return sizeof(struct si_sm_data); } const struct si_sm_handlers smic_smi_handlers = { .init_data = init_smic_data, .start_transaction = start_smic_transaction, .get_result = smic_get_result, .event = smic_event, .detect = smic_detect, .cleanup = smic_cleanup, .size = smic_size, };
linux-master
drivers/char/ipmi/ipmi_smic_sm.c
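/*
 * Illustrative sketch only (not part of the file above): roughly how a
 * host-side driver such as ipmi_si_intf.c drives a si_sm_handlers state
 * machine like smic_smi_handlers. The handler signatures are assumed from
 * the si_sm.h interface; locking, timeouts and error handling are omitted,
 * and <linux/slab.h> plus the ipmi_si internal headers are assumed to be
 * included.
 */
static int example_run_si_sm_transaction(const struct si_sm_handlers *h,
					 struct si_sm_io *io,
					 unsigned char *req, unsigned int req_len,
					 unsigned char *rsp, unsigned int rsp_max)
{
	struct si_sm_data *sm = kzalloc(h->size(), GFP_KERNEL);
	enum si_sm_result res;
	int rv;

	if (!sm)
		return -ENOMEM;

	h->init_data(sm, io);			/* bind the state machine to its I/O region */
	h->start_transaction(sm, req, req_len);

	do {
		/* Poll the state machine; 10 us per step is arbitrary here. */
		res = h->event(sm, 10);
	} while (res == SI_SM_CALL_WITHOUT_DELAY ||
		 res == SI_SM_CALL_WITH_DELAY ||
		 res == SI_SM_CALL_WITH_TICK_DELAY);

	if (res != SI_SM_TRANSACTION_COMPLETE) {
		kfree(sm);
		return -EIO;
	}

	rv = h->get_result(sm, rsp, rsp_max);	/* assumed to return the response length */
	kfree(sm);
	return rv;
}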
// SPDX-License-Identifier: GPL-2.0+ /* * Add an IPMI platform device. */ #include <linux/platform_device.h> #include "ipmi_plat_data.h" #include "ipmi_si.h" struct platform_device *ipmi_platform_add(const char *name, unsigned int inst, struct ipmi_plat_data *p) { struct platform_device *pdev; unsigned int num_r = 1, size = 0, pidx = 0; struct resource r[4]; struct property_entry pr[6]; u32 flags; int rv; memset(pr, 0, sizeof(pr)); memset(r, 0, sizeof(r)); if (p->iftype == IPMI_PLAT_IF_SI) { if (p->type == SI_BT) size = 3; else if (p->type != SI_TYPE_INVALID) size = 2; if (p->regsize == 0) p->regsize = DEFAULT_REGSIZE; if (p->regspacing == 0) p->regspacing = p->regsize; pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type); } else if (p->iftype == IPMI_PLAT_IF_SSIF) { pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr); } if (p->slave_addr) pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr); pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source); if (p->regshift) pr[pidx++] = PROPERTY_ENTRY_U8("reg-shift", p->regshift); pr[pidx++] = PROPERTY_ENTRY_U8("reg-size", p->regsize); /* Last entry must be left NULL to terminate it. */ pdev = platform_device_alloc(name, inst); if (!pdev) { pr_err("Error allocating IPMI platform device %s.%d\n", name, inst); return NULL; } if (size == 0) /* An invalid or SSIF interface, no resources. */ goto add_properties; /* * Register spacing is derived from the resources in * the IPMI platform code. */ if (p->space == IPMI_IO_ADDR_SPACE) flags = IORESOURCE_IO; else flags = IORESOURCE_MEM; r[0].start = p->addr; r[0].end = r[0].start + p->regsize - 1; r[0].name = "IPMI Address 1"; r[0].flags = flags; if (size > 1) { r[1].start = r[0].start + p->regspacing; r[1].end = r[1].start + p->regsize - 1; r[1].name = "IPMI Address 2"; r[1].flags = flags; num_r++; } if (size > 2) { r[2].start = r[1].start + p->regspacing; r[2].end = r[2].start + p->regsize - 1; r[2].name = "IPMI Address 3"; r[2].flags = flags; num_r++; } if (p->irq) { r[num_r].start = p->irq; r[num_r].end = p->irq; r[num_r].name = "IPMI IRQ"; r[num_r].flags = IORESOURCE_IRQ; num_r++; } rv = platform_device_add_resources(pdev, r, num_r); if (rv) { dev_err(&pdev->dev, "Unable to add hard-code resources: %d\n", rv); goto err; } add_properties: rv = device_create_managed_software_node(&pdev->dev, pr, NULL); if (rv) { dev_err(&pdev->dev, "Unable to add hard-code properties: %d\n", rv); goto err; } rv = platform_device_add(pdev); if (rv) { dev_err(&pdev->dev, "Unable to add hard-code device: %d\n", rv); goto err; } return pdev; err: platform_device_put(pdev); return NULL; } EXPORT_SYMBOL(ipmi_platform_add);
linux-master
drivers/char/ipmi/ipmi_plat_data.c
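/*
 * Illustrative sketch only (not part of the file above): how a caller such
 * as the hotmod or hardcode code fills struct ipmi_plat_data before handing
 * it to ipmi_platform_add(). The address, device name and instance number
 * are made-up example values; only fields that ipmi_platform_add() actually
 * reads are set.
 */
static struct platform_device *example_add_kcs_interface(void)
{
	struct ipmi_plat_data p = {
		.iftype      = IPMI_PLAT_IF_SI,
		.type        = SI_KCS,			/* becomes the "ipmi-type" property */
		.space       = IPMI_IO_ADDR_SPACE,	/* I/O ports rather than MMIO */
		.addr        = 0xca2,			/* example KCS base port */
		.regsize     = 1,			/* one byte per register */
		.regspacing  = 1,			/* registers are adjacent */
		.regshift    = 0,
		.irq         = 0,			/* polled, no IRQ resource added */
		.slave_addr  = 0x20,			/* BMC IPMB address, "slave-addr" property */
		.addr_source = SI_HARDCODED,
	};

	return ipmi_platform_add("example-ipmi-si", 0, &p);
}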
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_si_hotmod.c * * Handling for dynamically adding/removing IPMI devices through * a module parameter (and thus sysfs). */ #define pr_fmt(fmt) "ipmi_hotmod: " fmt #include <linux/moduleparam.h> #include <linux/ipmi.h> #include <linux/atomic.h> #include "ipmi_si.h" #include "ipmi_plat_data.h" static int hotmod_handler(const char *val, const struct kernel_param *kp); module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See Documentation/driver-api/ipmi.rst in the kernel sources for the gory details."); /* * Parms come in as <op1>[:op2[:op3...]]. ops are: * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]] * Options are: * rsp=<regspacing> * rsi=<regsize> * rsh=<regshift> * irq=<irq> * ipmb=<ipmb addr> */ enum hotmod_op { HM_ADD, HM_REMOVE }; struct hotmod_vals { const char *name; const int val; }; static const struct hotmod_vals hotmod_ops[] = { { "add", HM_ADD }, { "remove", HM_REMOVE }, { NULL } }; static const struct hotmod_vals hotmod_si[] = { { "kcs", SI_KCS }, { "smic", SI_SMIC }, { "bt", SI_BT }, { NULL } }; static const struct hotmod_vals hotmod_as[] = { { "mem", IPMI_MEM_ADDR_SPACE }, { "i/o", IPMI_IO_ADDR_SPACE }, { NULL } }; static int parse_str(const struct hotmod_vals *v, unsigned int *val, char *name, const char **curr) { char *s; int i; s = strchr(*curr, ','); if (!s) { pr_warn("No hotmod %s given\n", name); return -EINVAL; } *s = '\0'; s++; for (i = 0; v[i].name; i++) { if (strcmp(*curr, v[i].name) == 0) { *val = v[i].val; *curr = s; return 0; } } pr_warn("Invalid hotmod %s '%s'\n", name, *curr); return -EINVAL; } static int check_hotmod_int_op(const char *curr, const char *option, const char *name, unsigned int *val) { char *n; if (strcmp(curr, name) == 0) { if (!option) { pr_warn("No option given for '%s'\n", curr); return -EINVAL; } *val = simple_strtoul(option, &n, 0); if ((*n != '\0') || (*option == '\0')) { pr_warn("Bad option given for '%s'\n", curr); return -EINVAL; } return 1; } return 0; } static int parse_hotmod_str(const char *curr, enum hotmod_op *op, struct ipmi_plat_data *h) { char *s, *o; int rv; unsigned int ival; h->iftype = IPMI_PLAT_IF_SI; rv = parse_str(hotmod_ops, &ival, "operation", &curr); if (rv) return rv; *op = ival; rv = parse_str(hotmod_si, &ival, "interface type", &curr); if (rv) return rv; h->type = ival; rv = parse_str(hotmod_as, &ival, "address space", &curr); if (rv) return rv; h->space = ival; s = strchr(curr, ','); if (s) { *s = '\0'; s++; } rv = kstrtoul(curr, 0, &h->addr); if (rv) { pr_warn("Invalid hotmod address '%s': %d\n", curr, rv); return rv; } while (s) { curr = s; s = strchr(curr, ','); if (s) { *s = '\0'; s++; } o = strchr(curr, '='); if (o) { *o = '\0'; o++; } rv = check_hotmod_int_op(curr, o, "rsp", &h->regspacing); if (rv < 0) return rv; else if (rv) continue; rv = check_hotmod_int_op(curr, o, "rsi", &h->regsize); if (rv < 0) return rv; else if (rv) continue; rv = check_hotmod_int_op(curr, o, "rsh", &h->regshift); if (rv < 0) return rv; else if (rv) continue; rv = check_hotmod_int_op(curr, o, "irq", &h->irq); if (rv < 0) return rv; else if (rv) continue; rv = check_hotmod_int_op(curr, o, "ipmb", &h->slave_addr); if (rv < 0) return rv; else if (rv) continue; pr_warn("Invalid hotmod option '%s'\n", curr); return -EINVAL; } h->addr_source = SI_HOTMOD; return 0; } static atomic_t hotmod_nr; static int hotmod_handler(const char *val, const struct kernel_param *kp) { int rv; struct ipmi_plat_data h; char *str, 
*curr, *next; str = kstrdup(val, GFP_KERNEL); if (!str) return -ENOMEM; /* Kill any trailing spaces, as we can get a "\n" from echo. */ for (curr = strstrip(str); curr; curr = next) { enum hotmod_op op; next = strchr(curr, ':'); if (next) { *next = '\0'; next++; } memset(&h, 0, sizeof(h)); rv = parse_hotmod_str(curr, &op, &h); if (rv) goto out; if (op == HM_ADD) { ipmi_platform_add("hotmod-ipmi-si", atomic_inc_return(&hotmod_nr), &h); } else { struct device *dev; dev = ipmi_si_remove_by_data(h.space, h.type, h.addr); if (dev && dev_is_platform(dev)) { struct platform_device *pdev; pdev = to_platform_device(dev); if (strcmp(pdev->name, "hotmod-ipmi-si") == 0) platform_device_unregister(pdev); } put_device(dev); } } rv = strlen(val); out: kfree(str); return rv; } void ipmi_si_hotmod_exit(void) { ipmi_remove_platform_device_by_name("hotmod-ipmi-si"); }
linux-master
drivers/char/ipmi/ipmi_si_hotmod.c
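/*
 * Illustrative examples only (not part of the file above): strings in the
 * <op>,<type>,<space>,<address>[,opt=val,...] form that parse_hotmod_str()
 * accepts. At runtime they are normally written to the module parameter
 * from userspace, e.g. (sysfs path assumed from the module name):
 *
 *   echo "add,kcs,i/o,0xca2,rsp=1,irq=0" > /sys/module/ipmi_si/parameters/hotmod
 */
static const char * const example_hotmod_strings[] = {
	"add,kcs,i/o,0xca2",			/* KCS at I/O port 0xca2, all defaults */
	"add,bt,mem,0xe0000000,rsi=4,rsh=0",	/* BT via MMIO with 32-bit registers */
	"add,smic,i/o,0xca9,ipmb=0x20",		/* SMIC with an explicit IPMB slave address */
	"remove,kcs,i/o,0xca2",			/* tear the first interface down again */
};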
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018, Nuvoton Corporation. * Copyright (c) 2018, Intel Corporation. */ #define pr_fmt(fmt) "nuvoton-kcs-bmc: " fmt #include <linux/atomic.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> #include "kcs_bmc_device.h" #define DEVICE_NAME "npcm-kcs-bmc" #define KCS_CHANNEL_MAX 3 #define KCS1ST 0x0C #define KCS2ST 0x1E #define KCS3ST 0x30 #define KCS1DO 0x0E #define KCS2DO 0x20 #define KCS3DO 0x32 #define KCS1DI 0x10 #define KCS2DI 0x22 #define KCS3DI 0x34 #define KCS1CTL 0x18 #define KCS2CTL 0x2A #define KCS3CTL 0x3C #define KCS_CTL_IBFIE BIT(0) #define KCS_CTL_OBEIE BIT(1) #define KCS1IE 0x1C #define KCS2IE 0x2E #define KCS3IE 0x40 #define KCS_IE_IRQE BIT(0) #define KCS_IE_HIRQE BIT(3) /* * 7.2.4 Core KCS Registers * Registers in this module are 8 bits. An 8-bit register must be accessed * by an 8-bit read or write. * * sts: KCS Channel n Status Register (KCSnST). * dob: KCS Channel n Data Out Buffer Register (KCSnDO). * dib: KCS Channel n Data In Buffer Register (KCSnDI). * ctl: KCS Channel n Control Register (KCSnCTL). * ie : KCS Channel n Interrupt Enable Register (KCSnIE). */ struct npcm7xx_kcs_reg { u32 sts; u32 dob; u32 dib; u32 ctl; u32 ie; }; struct npcm7xx_kcs_bmc { struct kcs_bmc_device kcs_bmc; struct regmap *map; const struct npcm7xx_kcs_reg *reg; }; static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = { { .sts = KCS1ST, .dob = KCS1DO, .dib = KCS1DI, .ctl = KCS1CTL, .ie = KCS1IE }, { .sts = KCS2ST, .dob = KCS2DO, .dib = KCS2DI, .ctl = KCS2CTL, .ie = KCS2IE }, { .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE }, }; static inline struct npcm7xx_kcs_bmc *to_npcm7xx_kcs_bmc(struct kcs_bmc_device *kcs_bmc) { return container_of(kcs_bmc, struct npcm7xx_kcs_bmc, kcs_bmc); } static u8 npcm7xx_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg) { struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); u32 val = 0; int rc; rc = regmap_read(priv->map, reg, &val); WARN(rc != 0, "regmap_read() failed: %d\n", rc); return rc == 0 ? (u8)val : 0; } static void npcm7xx_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data) { struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); int rc; rc = regmap_write(priv->map, reg, data); WARN(rc != 0, "regmap_write() failed: %d\n", rc); } static void npcm7xx_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 data) { struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); int rc; rc = regmap_update_bits(priv->map, reg, mask, data); WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc); } static void npcm7xx_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable) { struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE, enable ? 
KCS_IE_IRQE | KCS_IE_HIRQE : 0); } static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) { struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); if (mask & KCS_BMC_EVENT_TYPE_OBE) regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE, !!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE); if (mask & KCS_BMC_EVENT_TYPE_IBF) regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE, !!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE); } static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg) { struct kcs_bmc_device *kcs_bmc = arg; return kcs_bmc_handle_event(kcs_bmc); } static int npcm7xx_kcs_config_irq(struct kcs_bmc_device *kcs_bmc, struct platform_device *pdev) { struct device *dev = &pdev->dev; int irq; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; return devm_request_irq(dev, irq, npcm7xx_kcs_irq, IRQF_SHARED, dev_name(dev), kcs_bmc); } static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = { .irq_mask_update = npcm7xx_kcs_irq_mask_update, .io_inputb = npcm7xx_kcs_inb, .io_outputb = npcm7xx_kcs_outb, .io_updateb = npcm7xx_kcs_updateb, }; static int npcm7xx_kcs_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct npcm7xx_kcs_bmc *priv; struct kcs_bmc_device *kcs_bmc; u32 chan; int rc; rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan); if (rc != 0 || chan == 0 || chan > KCS_CHANNEL_MAX) { dev_err(dev, "no valid 'kcs_chan' configured\n"); return -ENODEV; } priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->map = syscon_node_to_regmap(dev->parent->of_node); if (IS_ERR(priv->map)) { dev_err(dev, "Couldn't get regmap\n"); return -ENODEV; } priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1]; kcs_bmc = &priv->kcs_bmc; kcs_bmc->dev = &pdev->dev; kcs_bmc->channel = chan; kcs_bmc->ioreg.idr = priv->reg->dib; kcs_bmc->ioreg.odr = priv->reg->dob; kcs_bmc->ioreg.str = priv->reg->sts; kcs_bmc->ops = &npcm7xx_kcs_ops; platform_set_drvdata(pdev, priv); rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev); if (rc) return rc; npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); npcm7xx_kcs_enable_channel(kcs_bmc, true); rc = kcs_bmc_add_device(kcs_bmc); if (rc) { dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc); return rc; } pr_info("channel=%u idr=0x%x odr=0x%x str=0x%x\n", chan, kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str); return 0; } static int npcm7xx_kcs_remove(struct platform_device *pdev) { struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; kcs_bmc_remove_device(kcs_bmc); npcm7xx_kcs_enable_channel(kcs_bmc, false); npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); return 0; } static const struct of_device_id npcm_kcs_bmc_match[] = { { .compatible = "nuvoton,npcm750-kcs-bmc" }, { } }; MODULE_DEVICE_TABLE(of, npcm_kcs_bmc_match); static struct platform_driver npcm_kcs_bmc_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = npcm_kcs_bmc_match, }, .probe = npcm7xx_kcs_probe, .remove = npcm7xx_kcs_remove, }; module_platform_driver(npcm_kcs_bmc_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Avi Fishman <[email protected]>"); MODULE_AUTHOR("Haiyue Wang <[email protected]>"); MODULE_DESCRIPTION("NPCM7xx device interface to the KCS BMC device");
linux-master
drivers/char/ipmi/kcs_bmc_npcm7xx.c
// SPDX-License-Identifier: GPL-2.0+ #include <linux/io.h> #include "ipmi_si.h" static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return inb(addr + (offset * io->regspacing)); } static void port_outb(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; outb(b, addr + (offset * io->regspacing)); } static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } static void port_outw(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; outw(b << io->regshift, addr + (offset * io->regspacing)); } static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } static void port_outl(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; outl(b << io->regshift, addr+(offset * io->regspacing)); } static void port_cleanup(struct si_sm_io *io) { unsigned int addr = io->addr_data; int idx; if (addr) { for (idx = 0; idx < io->io_size; idx++) release_region(addr + idx * io->regspacing, io->regsize); } } int ipmi_si_port_setup(struct si_sm_io *io) { unsigned int addr = io->addr_data; int idx; if (!addr) return -ENODEV; /* * Figure out the actual inb/inw/inl/etc routine to use based * upon the register size. */ switch (io->regsize) { case 1: io->inputb = port_inb; io->outputb = port_outb; break; case 2: io->inputb = port_inw; io->outputb = port_outw; break; case 4: io->inputb = port_inl; io->outputb = port_outl; break; default: dev_warn(io->dev, "Invalid register size: %d\n", io->regsize); return -EINVAL; } /* * Some BIOSes reserve disjoint I/O regions in their ACPI * tables. This causes problems when trying to register the * entire I/O region. Therefore we must register each I/O * port separately. */ for (idx = 0; idx < io->io_size; idx++) { if (request_region(addr + idx * io->regspacing, io->regsize, SI_DEVICE_NAME) == NULL) { /* Undo allocations */ while (idx--) release_region(addr + idx * io->regspacing, io->regsize); return -EIO; } } io->io_cleanup = port_cleanup; return 0; }
linux-master
drivers/char/ipmi/ipmi_si_port_io.c
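/*
 * Illustrative helper only (not part of the file above): the address
 * arithmetic shared by port_inb/inw/inl. With regspacing = 4, register
 * "offset" 2 lands at addr_data + 8, and with regshift = 8 the useful byte
 * is taken from bits 15:8 of the value read back, which is what the wider
 * accessors above extract.
 */
static unsigned int example_reg_port(const struct si_sm_io *io,
				     unsigned int offset)
{
	/* Same layout rule the real accessors use. */
	return io->addr_data + offset * io->regspacing;
}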
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2021 IBM Corp. */ #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <linux/serio.h> #include <linux/slab.h> #include "kcs_bmc_client.h" struct kcs_bmc_serio { struct list_head entry; struct kcs_bmc_client client; struct serio *port; spinlock_t lock; }; static inline struct kcs_bmc_serio *client_to_kcs_bmc_serio(struct kcs_bmc_client *client) { return container_of(client, struct kcs_bmc_serio, client); } static irqreturn_t kcs_bmc_serio_event(struct kcs_bmc_client *client) { struct kcs_bmc_serio *priv; u8 handled = IRQ_NONE; u8 status; priv = client_to_kcs_bmc_serio(client); spin_lock(&priv->lock); status = kcs_bmc_read_status(client->dev); if (status & KCS_BMC_STR_IBF) handled = serio_interrupt(priv->port, kcs_bmc_read_data(client->dev), 0); spin_unlock(&priv->lock); return handled; } static const struct kcs_bmc_client_ops kcs_bmc_serio_client_ops = { .event = kcs_bmc_serio_event, }; static int kcs_bmc_serio_open(struct serio *port) { struct kcs_bmc_serio *priv = port->port_data; return kcs_bmc_enable_device(priv->client.dev, &priv->client); } static void kcs_bmc_serio_close(struct serio *port) { struct kcs_bmc_serio *priv = port->port_data; kcs_bmc_disable_device(priv->client.dev, &priv->client); } static DEFINE_SPINLOCK(kcs_bmc_serio_instances_lock); static LIST_HEAD(kcs_bmc_serio_instances); static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_serio *priv; struct serio *port; priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */ port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; port->id.type = SERIO_8042; port->open = kcs_bmc_serio_open; port->close = kcs_bmc_serio_close; port->port_data = priv; port->dev.parent = kcs_bmc->dev; spin_lock_init(&priv->lock); priv->port = port; priv->client.dev = kcs_bmc; priv->client.ops = &kcs_bmc_serio_client_ops; spin_lock_irq(&kcs_bmc_serio_instances_lock); list_add(&priv->entry, &kcs_bmc_serio_instances); spin_unlock_irq(&kcs_bmc_serio_instances_lock); serio_register_port(port); dev_info(kcs_bmc->dev, "Initialised serio client for channel %d", kcs_bmc->channel); return 0; } static int kcs_bmc_serio_remove_device(struct kcs_bmc_device *kcs_bmc) { struct kcs_bmc_serio *priv = NULL, *pos; spin_lock_irq(&kcs_bmc_serio_instances_lock); list_for_each_entry(pos, &kcs_bmc_serio_instances, entry) { if (pos->client.dev == kcs_bmc) { priv = pos; list_del(&pos->entry); break; } } spin_unlock_irq(&kcs_bmc_serio_instances_lock); if (!priv) return -ENODEV; /* kfree()s priv->port via put_device() */ serio_unregister_port(priv->port); /* Ensure the IBF IRQ is disabled if we were the active client */ kcs_bmc_disable_device(kcs_bmc, &priv->client); devm_kfree(priv->client.dev->dev, priv); return 0; } static const struct kcs_bmc_driver_ops kcs_bmc_serio_driver_ops = { .add_device = kcs_bmc_serio_add_device, .remove_device = kcs_bmc_serio_remove_device, }; static struct kcs_bmc_driver kcs_bmc_serio_driver = { .ops = &kcs_bmc_serio_driver_ops, }; static int __init kcs_bmc_serio_init(void) { kcs_bmc_register_driver(&kcs_bmc_serio_driver); return 0; } module_init(kcs_bmc_serio_init); static void __exit kcs_bmc_serio_exit(void) { kcs_bmc_unregister_driver(&kcs_bmc_serio_driver); } module_exit(kcs_bmc_serio_exit); 
MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Andrew Jeffery <[email protected]>"); MODULE_DESCRIPTION("Adapter driver for serio access to BMC KCS devices");
linux-master
drivers/char/ipmi/kcs_bmc_serio.c
// SPDX-License-Identifier: GPL-2.0-only /* * The driver for BMC side of SSIF interface * * Copyright (c) 2022, Ampere Computing LLC * */ #include <linux/i2c.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/ipmi_ssif_bmc.h> #define DEVICE_NAME "ipmi-ssif-host" #define GET_8BIT_ADDR(addr_7bit) (((addr_7bit) << 1) & 0xff) /* A standard SMBus Transaction is limited to 32 data bytes */ #define MAX_PAYLOAD_PER_TRANSACTION 32 /* Transaction includes the address, the command, the length and the PEC byte */ #define MAX_TRANSACTION (MAX_PAYLOAD_PER_TRANSACTION + 4) #define MAX_IPMI_DATA_PER_START_TRANSACTION 30 #define MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION 31 #define SSIF_IPMI_SINGLEPART_WRITE 0x2 #define SSIF_IPMI_SINGLEPART_READ 0x3 #define SSIF_IPMI_MULTIPART_WRITE_START 0x6 #define SSIF_IPMI_MULTIPART_WRITE_MIDDLE 0x7 #define SSIF_IPMI_MULTIPART_WRITE_END 0x8 #define SSIF_IPMI_MULTIPART_READ_START 0x3 #define SSIF_IPMI_MULTIPART_READ_MIDDLE 0x9 /* * IPMI 2.0 Spec, section 12.7 SSIF Timing, * Request-to-Response Time is T6max(250ms) - T1max(20ms) - 3ms = 227ms * Recover ssif_bmc from busy state if it takes up to 500ms */ #define RESPONSE_TIMEOUT 500 /* ms */ struct ssif_part_buffer { u8 address; u8 smbus_cmd; u8 length; u8 payload[MAX_PAYLOAD_PER_TRANSACTION]; u8 pec; u8 index; }; /* * SSIF internal states: * SSIF_READY 0x00 : Ready state * SSIF_START 0x01 : Start smbus transaction * SSIF_SMBUS_CMD 0x02 : Received SMBus command * SSIF_REQ_RECVING 0x03 : Receiving request * SSIF_RES_SENDING 0x04 : Sending response * SSIF_ABORTING 0x05 : Aborting state */ enum ssif_state { SSIF_READY, SSIF_START, SSIF_SMBUS_CMD, SSIF_REQ_RECVING, SSIF_RES_SENDING, SSIF_ABORTING, SSIF_STATE_MAX }; struct ssif_bmc_ctx { struct i2c_client *client; struct miscdevice miscdev; int msg_idx; bool pec_support; /* ssif bmc spinlock */ spinlock_t lock; wait_queue_head_t wait_queue; u8 running; enum ssif_state state; /* Timeout waiting for response */ struct timer_list response_timer; bool response_timer_inited; /* Flag to identify a Multi-part Read Transaction */ bool is_singlepart_read; u8 nbytes_processed; u8 remain_len; u8 recv_len; /* Block Number of a Multi-part Read Transaction */ u8 block_num; bool request_available; bool response_in_progress; bool busy; bool aborting; /* Buffer for SSIF Transaction part*/ struct ssif_part_buffer part_buf; struct ipmi_ssif_msg response; struct ipmi_ssif_msg request; }; static inline struct ssif_bmc_ctx *to_ssif_bmc(struct file *file) { return container_of(file->private_data, struct ssif_bmc_ctx, miscdev); } static const char *state_to_string(enum ssif_state state) { switch (state) { case SSIF_READY: return "SSIF_READY"; case SSIF_START: return "SSIF_START"; case SSIF_SMBUS_CMD: return "SSIF_SMBUS_CMD"; case SSIF_REQ_RECVING: return "SSIF_REQ_RECVING"; case SSIF_RES_SENDING: return "SSIF_RES_SENDING"; case SSIF_ABORTING: return "SSIF_ABORTING"; default: return "SSIF_STATE_UNKNOWN"; } } /* Handle SSIF message that will be sent to user */ static ssize_t ssif_bmc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file); struct ipmi_ssif_msg msg; unsigned long flags; ssize_t ret; spin_lock_irqsave(&ssif_bmc->lock, flags); while (!ssif_bmc->request_available) { 
spin_unlock_irqrestore(&ssif_bmc->lock, flags); if (file->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(ssif_bmc->wait_queue, ssif_bmc->request_available); if (ret) return ret; spin_lock_irqsave(&ssif_bmc->lock, flags); } if (count < min_t(ssize_t, sizeof_field(struct ipmi_ssif_msg, len) + ssif_bmc->request.len, sizeof(struct ipmi_ssif_msg))) { spin_unlock_irqrestore(&ssif_bmc->lock, flags); ret = -EINVAL; } else { count = min_t(ssize_t, sizeof_field(struct ipmi_ssif_msg, len) + ssif_bmc->request.len, sizeof(struct ipmi_ssif_msg)); memcpy(&msg, &ssif_bmc->request, count); ssif_bmc->request_available = false; spin_unlock_irqrestore(&ssif_bmc->lock, flags); ret = copy_to_user(buf, &msg, count); } return (ret < 0) ? ret : count; } /* Handle SSIF message that is written by user */ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file); struct ipmi_ssif_msg msg; unsigned long flags; ssize_t ret; if (count > sizeof(struct ipmi_ssif_msg)) return -EINVAL; if (copy_from_user(&msg, buf, count)) return -EFAULT; if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) return -EINVAL; spin_lock_irqsave(&ssif_bmc->lock, flags); while (ssif_bmc->response_in_progress) { spin_unlock_irqrestore(&ssif_bmc->lock, flags); if (file->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(ssif_bmc->wait_queue, !ssif_bmc->response_in_progress); if (ret) return ret; spin_lock_irqsave(&ssif_bmc->lock, flags); } /* * The write must complete before the response timeout fired, otherwise * the response is aborted and wait for next request * Return -EINVAL if the response is aborted */ ret = (ssif_bmc->response_timer_inited) ? 0 : -EINVAL; if (ret) goto exit; del_timer(&ssif_bmc->response_timer); ssif_bmc->response_timer_inited = false; memcpy(&ssif_bmc->response, &msg, count); ssif_bmc->is_singlepart_read = (msg.len <= MAX_PAYLOAD_PER_TRANSACTION); ssif_bmc->response_in_progress = true; /* ssif_bmc not busy */ ssif_bmc->busy = false; /* Clean old request buffer */ memset(&ssif_bmc->request, 0, sizeof(struct ipmi_ssif_msg)); exit: spin_unlock_irqrestore(&ssif_bmc->lock, flags); return (ret < 0) ? ret : count; } static int ssif_bmc_open(struct inode *inode, struct file *file) { struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file); int ret = 0; spin_lock_irq(&ssif_bmc->lock); if (!ssif_bmc->running) ssif_bmc->running = 1; else ret = -EBUSY; spin_unlock_irq(&ssif_bmc->lock); return ret; } static __poll_t ssif_bmc_poll(struct file *file, poll_table *wait) { struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file); __poll_t mask = 0; poll_wait(file, &ssif_bmc->wait_queue, wait); spin_lock_irq(&ssif_bmc->lock); /* The request is available, userspace application can get the request */ if (ssif_bmc->request_available) mask |= EPOLLIN; spin_unlock_irq(&ssif_bmc->lock); return mask; } static int ssif_bmc_release(struct inode *inode, struct file *file) { struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file); spin_lock_irq(&ssif_bmc->lock); ssif_bmc->running = 0; spin_unlock_irq(&ssif_bmc->lock); return 0; } /* * System calls to device interface for user apps */ static const struct file_operations ssif_bmc_fops = { .owner = THIS_MODULE, .open = ssif_bmc_open, .read = ssif_bmc_read, .write = ssif_bmc_write, .release = ssif_bmc_release, .poll = ssif_bmc_poll, }; /* Called with ssif_bmc->lock held. 
*/ static void complete_response(struct ssif_bmc_ctx *ssif_bmc) { /* Invalidate response in buffer to denote it having been sent. */ ssif_bmc->response.len = 0; ssif_bmc->response_in_progress = false; ssif_bmc->nbytes_processed = 0; ssif_bmc->remain_len = 0; ssif_bmc->busy = false; memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer)); wake_up_all(&ssif_bmc->wait_queue); } static void response_timeout(struct timer_list *t) { struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer); unsigned long flags; spin_lock_irqsave(&ssif_bmc->lock, flags); /* Do nothing if the response is in progress */ if (!ssif_bmc->response_in_progress) { /* Recover ssif_bmc from busy */ ssif_bmc->busy = false; ssif_bmc->response_timer_inited = false; /* Set aborting flag */ ssif_bmc->aborting = true; } spin_unlock_irqrestore(&ssif_bmc->lock, flags); } /* Called with ssif_bmc->lock held. */ static void handle_request(struct ssif_bmc_ctx *ssif_bmc) { /* set ssif_bmc to busy waiting for response */ ssif_bmc->busy = true; /* Request message is available to process */ ssif_bmc->request_available = true; /* Clean old response buffer */ memset(&ssif_bmc->response, 0, sizeof(struct ipmi_ssif_msg)); /* This is the new READ request.*/ wake_up_all(&ssif_bmc->wait_queue); /* Armed timer to recover slave from busy state in case of no response */ if (!ssif_bmc->response_timer_inited) { timer_setup(&ssif_bmc->response_timer, response_timeout, 0); ssif_bmc->response_timer_inited = true; } mod_timer(&ssif_bmc->response_timer, jiffies + msecs_to_jiffies(RESPONSE_TIMEOUT)); } static void calculate_response_part_pec(struct ssif_part_buffer *part) { u8 addr = part->address; /* PEC - Start Read Address */ part->pec = i2c_smbus_pec(0, &addr, 1); /* PEC - SSIF Command */ part->pec = i2c_smbus_pec(part->pec, &part->smbus_cmd, 1); /* PEC - Restart Write Address */ addr = addr | 0x01; part->pec = i2c_smbus_pec(part->pec, &addr, 1); part->pec = i2c_smbus_pec(part->pec, &part->length, 1); if (part->length) part->pec = i2c_smbus_pec(part->pec, part->payload, part->length); } static void set_singlepart_response_buffer(struct ssif_bmc_ctx *ssif_bmc) { struct ssif_part_buffer *part = &ssif_bmc->part_buf; part->address = GET_8BIT_ADDR(ssif_bmc->client->addr); part->length = (u8)ssif_bmc->response.len; /* Clear the rest to 0 */ memset(part->payload + part->length, 0, MAX_PAYLOAD_PER_TRANSACTION - part->length); memcpy(&part->payload[0], &ssif_bmc->response.payload[0], part->length); } static void set_multipart_response_buffer(struct ssif_bmc_ctx *ssif_bmc) { struct ssif_part_buffer *part = &ssif_bmc->part_buf; u8 part_len = 0; part->address = GET_8BIT_ADDR(ssif_bmc->client->addr); switch (part->smbus_cmd) { case SSIF_IPMI_MULTIPART_READ_START: /* * Read Start length is 32 bytes. * Read Start transfer first 30 bytes of IPMI response * and 2 special code 0x00, 0x01. */ ssif_bmc->nbytes_processed = 0; ssif_bmc->block_num = 0; part->length = MAX_PAYLOAD_PER_TRANSACTION; part_len = MAX_IPMI_DATA_PER_START_TRANSACTION; ssif_bmc->remain_len = ssif_bmc->response.len - part_len; part->payload[0] = 0x00; /* Start Flag */ part->payload[1] = 0x01; /* Start Flag */ memcpy(&part->payload[2], &ssif_bmc->response.payload[0], part_len); break; case SSIF_IPMI_MULTIPART_READ_MIDDLE: /* * IPMI READ Middle or READ End messages can carry up to 31 bytes * IPMI data plus block number byte. 
*/ if (ssif_bmc->remain_len <= MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION) { /* * This is READ End message * Return length is the remaining response data length * plus block number * Block number 0xFF is to indicate this is last message * */ /* Clean the buffer */ memset(&part->payload[0], 0, MAX_PAYLOAD_PER_TRANSACTION); part->length = ssif_bmc->remain_len + 1; part_len = ssif_bmc->remain_len; ssif_bmc->block_num = 0xFF; part->payload[0] = ssif_bmc->block_num; } else { /* * This is READ Middle message * Response length is the maximum SMBUS transfer length * Block number byte is incremented * Return length is maximum SMBUS transfer length */ part->length = MAX_PAYLOAD_PER_TRANSACTION; part_len = MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION; part->payload[0] = ssif_bmc->block_num; ssif_bmc->block_num++; } ssif_bmc->remain_len -= part_len; memcpy(&part->payload[1], ssif_bmc->response.payload + ssif_bmc->nbytes_processed, part_len); break; default: /* Do not expect to go to this case */ dev_err(&ssif_bmc->client->dev, "%s: Unexpected SMBus command 0x%x\n", __func__, part->smbus_cmd); break; } ssif_bmc->nbytes_processed += part_len; } static bool supported_read_cmd(u8 cmd) { if (cmd == SSIF_IPMI_SINGLEPART_READ || cmd == SSIF_IPMI_MULTIPART_READ_START || cmd == SSIF_IPMI_MULTIPART_READ_MIDDLE) return true; return false; } static bool supported_write_cmd(u8 cmd) { if (cmd == SSIF_IPMI_SINGLEPART_WRITE || cmd == SSIF_IPMI_MULTIPART_WRITE_START || cmd == SSIF_IPMI_MULTIPART_WRITE_MIDDLE || cmd == SSIF_IPMI_MULTIPART_WRITE_END) return true; return false; } /* Process the IPMI response that will be read by master */ static void handle_read_processed(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { struct ssif_part_buffer *part = &ssif_bmc->part_buf; /* msg_idx start from 0 */ if (part->index < part->length) *val = part->payload[part->index]; else if (part->index == part->length && ssif_bmc->pec_support) *val = part->pec; else *val = 0; part->index++; } static void handle_write_received(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { /* * The msg_idx must be 1 when first enter SSIF_REQ_RECVING state * And it would never exceeded 36 bytes included the 32 bytes max payload + * the address + the command + the len and the PEC. */ if (ssif_bmc->msg_idx < 1 || ssif_bmc->msg_idx > MAX_TRANSACTION) return; if (ssif_bmc->msg_idx == 1) { ssif_bmc->part_buf.length = *val; ssif_bmc->part_buf.index = 0; } else { ssif_bmc->part_buf.payload[ssif_bmc->part_buf.index++] = *val; } ssif_bmc->msg_idx++; } static bool validate_request_part(struct ssif_bmc_ctx *ssif_bmc) { struct ssif_part_buffer *part = &ssif_bmc->part_buf; bool ret = true; u8 cpec; u8 addr; if (part->index == part->length) { /* PEC is not included */ ssif_bmc->pec_support = false; ret = true; goto exit; } if (part->index != part->length + 1) { ret = false; goto exit; } /* PEC is included */ ssif_bmc->pec_support = true; part->pec = part->payload[part->length]; addr = GET_8BIT_ADDR(ssif_bmc->client->addr); cpec = i2c_smbus_pec(0, &addr, 1); cpec = i2c_smbus_pec(cpec, &part->smbus_cmd, 1); cpec = i2c_smbus_pec(cpec, &part->length, 1); /* * As SMBus specification does not allow the length * (byte count) in the Write-Block protocol to be zero. * Therefore, it is illegal to have the last Middle * transaction in the sequence carry 32-byte and have * a length of ‘0’ in the End transaction. * But some users may try to use this way and we should * prevent ssif_bmc driver broken in this case. 
*/ if (part->length) cpec = i2c_smbus_pec(cpec, part->payload, part->length); if (cpec != part->pec) ret = false; exit: return ret; } static void process_request_part(struct ssif_bmc_ctx *ssif_bmc) { struct ssif_part_buffer *part = &ssif_bmc->part_buf; unsigned int len; switch (part->smbus_cmd) { case SSIF_IPMI_SINGLEPART_WRITE: /* save the whole part to request*/ ssif_bmc->request.len = part->length; memcpy(ssif_bmc->request.payload, part->payload, part->length); break; case SSIF_IPMI_MULTIPART_WRITE_START: ssif_bmc->request.len = 0; fallthrough; case SSIF_IPMI_MULTIPART_WRITE_MIDDLE: case SSIF_IPMI_MULTIPART_WRITE_END: len = ssif_bmc->request.len + part->length; /* Do the bound check here, not allow the request len exceed 254 bytes */ if (len > IPMI_SSIF_PAYLOAD_MAX) { dev_warn(&ssif_bmc->client->dev, "Warn: Request exceeded 254 bytes, aborting"); /* Request too long, aborting */ ssif_bmc->aborting = true; } else { memcpy(ssif_bmc->request.payload + ssif_bmc->request.len, part->payload, part->length); ssif_bmc->request.len += part->length; } break; default: /* Do not expect to go to this case */ dev_err(&ssif_bmc->client->dev, "%s: Unexpected SMBus command 0x%x\n", __func__, part->smbus_cmd); break; } } static void process_smbus_cmd(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { /* SMBUS command can vary (single or multi-part) */ ssif_bmc->part_buf.smbus_cmd = *val; ssif_bmc->msg_idx = 1; memset(&ssif_bmc->part_buf.payload[0], 0, MAX_PAYLOAD_PER_TRANSACTION); if (*val == SSIF_IPMI_SINGLEPART_WRITE || *val == SSIF_IPMI_MULTIPART_WRITE_START) { /* * The response maybe not come in-time, causing host SSIF driver * to timeout and resend a new request. In such case check for * pending response and clear it */ if (ssif_bmc->response_in_progress) complete_response(ssif_bmc); /* This is new request, flip aborting flag if set */ if (ssif_bmc->aborting) ssif_bmc->aborting = false; } } static void on_read_requested_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_START || ssif_bmc->state == SSIF_REQ_RECVING || ssif_bmc->state == SSIF_RES_SENDING) { dev_warn(&ssif_bmc->client->dev, "Warn: %s unexpected READ REQUESTED in state=%s\n", __func__, state_to_string(ssif_bmc->state)); ssif_bmc->state = SSIF_ABORTING; *val = 0; return; } else if (ssif_bmc->state == SSIF_SMBUS_CMD) { if (!supported_read_cmd(ssif_bmc->part_buf.smbus_cmd)) { dev_warn(&ssif_bmc->client->dev, "Warn: Unknown SMBus read command=0x%x", ssif_bmc->part_buf.smbus_cmd); ssif_bmc->aborting = true; } if (ssif_bmc->aborting) ssif_bmc->state = SSIF_ABORTING; else ssif_bmc->state = SSIF_RES_SENDING; } ssif_bmc->msg_idx = 0; /* Send 0 if there is nothing to send */ if (!ssif_bmc->response_in_progress || ssif_bmc->state == SSIF_ABORTING) { *val = 0; return; } if (ssif_bmc->is_singlepart_read) set_singlepart_response_buffer(ssif_bmc); else set_multipart_response_buffer(ssif_bmc); calculate_response_part_pec(&ssif_bmc->part_buf); ssif_bmc->part_buf.index = 0; *val = ssif_bmc->part_buf.length; } static void on_read_processed_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_START || ssif_bmc->state == SSIF_REQ_RECVING || ssif_bmc->state == SSIF_SMBUS_CMD) { dev_warn(&ssif_bmc->client->dev, "Warn: %s unexpected READ PROCESSED in state=%s\n", __func__, state_to_string(ssif_bmc->state)); ssif_bmc->state = SSIF_ABORTING; *val = 0; return; } /* Send 0 if there is nothing to send */ if (!ssif_bmc->response_in_progress || ssif_bmc->state == 
SSIF_ABORTING) { *val = 0; return; } handle_read_processed(ssif_bmc, val); } static void on_write_requested_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_SMBUS_CMD) { ssif_bmc->state = SSIF_START; } else if (ssif_bmc->state == SSIF_START || ssif_bmc->state == SSIF_REQ_RECVING || ssif_bmc->state == SSIF_RES_SENDING) { dev_warn(&ssif_bmc->client->dev, "Warn: %s unexpected WRITE REQUEST in state=%s\n", __func__, state_to_string(ssif_bmc->state)); ssif_bmc->state = SSIF_ABORTING; return; } ssif_bmc->msg_idx = 0; ssif_bmc->part_buf.address = *val; } static void on_write_received_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_RES_SENDING) { dev_warn(&ssif_bmc->client->dev, "Warn: %s unexpected WRITE RECEIVED in state=%s\n", __func__, state_to_string(ssif_bmc->state)); ssif_bmc->state = SSIF_ABORTING; } else if (ssif_bmc->state == SSIF_START) { ssif_bmc->state = SSIF_SMBUS_CMD; } else if (ssif_bmc->state == SSIF_SMBUS_CMD) { if (!supported_write_cmd(ssif_bmc->part_buf.smbus_cmd)) { dev_warn(&ssif_bmc->client->dev, "Warn: Unknown SMBus write command=0x%x", ssif_bmc->part_buf.smbus_cmd); ssif_bmc->aborting = true; } if (ssif_bmc->aborting) ssif_bmc->state = SSIF_ABORTING; else ssif_bmc->state = SSIF_REQ_RECVING; } /* This is response sending state */ if (ssif_bmc->state == SSIF_REQ_RECVING) handle_write_received(ssif_bmc, val); else if (ssif_bmc->state == SSIF_SMBUS_CMD) process_smbus_cmd(ssif_bmc, val); } static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val) { if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_START || ssif_bmc->state == SSIF_SMBUS_CMD || ssif_bmc->state == SSIF_ABORTING) { dev_warn(&ssif_bmc->client->dev, "Warn: %s unexpected SLAVE STOP in state=%s\n", __func__, state_to_string(ssif_bmc->state)); ssif_bmc->state = SSIF_READY; } else if (ssif_bmc->state == SSIF_REQ_RECVING) { if (validate_request_part(ssif_bmc)) { process_request_part(ssif_bmc); if (ssif_bmc->part_buf.smbus_cmd == SSIF_IPMI_SINGLEPART_WRITE || ssif_bmc->part_buf.smbus_cmd == SSIF_IPMI_MULTIPART_WRITE_END) handle_request(ssif_bmc); ssif_bmc->state = SSIF_READY; } else { /* * A BMC that receives an invalid request drop the data for the write * transaction and any further transactions (read or write) until * the next valid read or write Start transaction is received */ dev_err(&ssif_bmc->client->dev, "Error: invalid pec\n"); ssif_bmc->aborting = true; } } else if (ssif_bmc->state == SSIF_RES_SENDING) { if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) /* Invalidate response buffer to denote it is sent */ complete_response(ssif_bmc); ssif_bmc->state = SSIF_READY; } /* Reset message index */ ssif_bmc->msg_idx = 0; } /* * Callback function to handle I2C slave events */ static int ssif_bmc_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { unsigned long flags; struct ssif_bmc_ctx *ssif_bmc = i2c_get_clientdata(client); int ret = 0; spin_lock_irqsave(&ssif_bmc->lock, flags); switch (event) { case I2C_SLAVE_READ_REQUESTED: on_read_requested_event(ssif_bmc, val); break; case I2C_SLAVE_WRITE_REQUESTED: on_write_requested_event(ssif_bmc, val); break; case I2C_SLAVE_READ_PROCESSED: on_read_processed_event(ssif_bmc, val); break; case I2C_SLAVE_WRITE_RECEIVED: on_write_received_event(ssif_bmc, val); break; case I2C_SLAVE_STOP: on_stop_event(ssif_bmc, val); break; default: dev_warn(&ssif_bmc->client->dev, "Warn: Unknown i2c slave event\n"); break; } if 
(!ssif_bmc->aborting && ssif_bmc->busy) ret = -EBUSY; spin_unlock_irqrestore(&ssif_bmc->lock, flags); return ret; } static int ssif_bmc_probe(struct i2c_client *client) { struct ssif_bmc_ctx *ssif_bmc; int ret; ssif_bmc = devm_kzalloc(&client->dev, sizeof(*ssif_bmc), GFP_KERNEL); if (!ssif_bmc) return -ENOMEM; spin_lock_init(&ssif_bmc->lock); init_waitqueue_head(&ssif_bmc->wait_queue); ssif_bmc->request_available = false; ssif_bmc->response_in_progress = false; ssif_bmc->busy = false; ssif_bmc->response_timer_inited = false; /* Register misc device interface */ ssif_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; ssif_bmc->miscdev.name = DEVICE_NAME; ssif_bmc->miscdev.fops = &ssif_bmc_fops; ssif_bmc->miscdev.parent = &client->dev; ret = misc_register(&ssif_bmc->miscdev); if (ret) return ret; ssif_bmc->client = client; ssif_bmc->client->flags |= I2C_CLIENT_SLAVE; /* Register I2C slave */ i2c_set_clientdata(client, ssif_bmc); ret = i2c_slave_register(client, ssif_bmc_cb); if (ret) misc_deregister(&ssif_bmc->miscdev); return ret; } static void ssif_bmc_remove(struct i2c_client *client) { struct ssif_bmc_ctx *ssif_bmc = i2c_get_clientdata(client); i2c_slave_unregister(client); misc_deregister(&ssif_bmc->miscdev); } static const struct of_device_id ssif_bmc_match[] = { { .compatible = "ssif-bmc" }, { }, }; MODULE_DEVICE_TABLE(of, ssif_bmc_match); static const struct i2c_device_id ssif_bmc_id[] = { { DEVICE_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ssif_bmc_id); static struct i2c_driver ssif_bmc_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = ssif_bmc_match, }, .probe = ssif_bmc_probe, .remove = ssif_bmc_remove, .id_table = ssif_bmc_id, }; module_i2c_driver(ssif_bmc_driver); MODULE_AUTHOR("Quan Nguyen <[email protected]>"); MODULE_AUTHOR("Chuong Tran <[email protected]>"); MODULE_DESCRIPTION("Linux device driver of the BMC IPMI SSIF interface."); MODULE_LICENSE("GPL");
linux-master
drivers/char/ipmi/ssif_bmc.c
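/*
 * Illustrative user-space sketch only (not part of the file above): a
 * minimal consumer of the /dev/ipmi-ssif-host miscdevice created by this
 * driver. It assumes struct ipmi_ssif_msg (a len field plus payload, as
 * used by ssif_bmc_read/ssif_bmc_write) comes from <linux/ipmi_ssif_bmc.h>.
 * A real BMC stack would decode the IPMI request and build a proper
 * response; this sketch just answers every request with completion code
 * 0xc1 ("invalid command").
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/ipmi_ssif_bmc.h>

int main(void)
{
	struct ipmi_ssif_msg req, rsp;
	int fd = open("/dev/ipmi-ssif-host", O_RDWR);

	if (fd < 0)
		return 1;

	for (;;) {
		/* Blocks until the SMBus host side has sent a full request. */
		if (read(fd, &req, sizeof(req)) < 0)
			break;

		/* Response: netfn/lun with the response bit, echoed cmd, completion code. */
		memset(&rsp, 0, sizeof(rsp));
		rsp.len = 3;
		rsp.payload[0] = req.payload[0] | 0x04;
		rsp.payload[1] = req.payload[1];
		rsp.payload[2] = 0xc1;

		/* Must arrive before the driver's 500 ms response timeout. */
		if (write(fd, &rsp, sizeof(rsp.len) + rsp.len) < 0)
			break;
	}

	close(fd);
	return 0;
}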
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_si_platform.c * * Handling for platform devices in IPMI (ACPI, OF, and things * coming from the platform. */ #define pr_fmt(fmt) "ipmi_platform: " fmt #define dev_fmt pr_fmt #include <linux/types.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/acpi.h> #include "ipmi_si.h" #include "ipmi_dmi.h" static bool platform_registered; static bool si_tryplatform = true; #ifdef CONFIG_ACPI static bool si_tryacpi = true; #endif #ifdef CONFIG_OF static bool si_tryopenfirmware = true; #endif #ifdef CONFIG_DMI static bool si_trydmi = true; #else static bool si_trydmi = false; #endif module_param_named(tryplatform, si_tryplatform, bool, 0); MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the default scan of the interfaces identified via platform interfaces besides ACPI, OpenFirmware, and DMI"); #ifdef CONFIG_ACPI module_param_named(tryacpi, si_tryacpi, bool, 0); MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI"); #endif #ifdef CONFIG_OF module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0); MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the default scan of the interfaces identified via OpenFirmware"); #endif #ifdef CONFIG_DMI module_param_named(trydmi, si_trydmi, bool, 0); MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI"); #endif #ifdef CONFIG_ACPI /* For GPE-type interrupts. */ static u32 ipmi_acpi_gpe(acpi_handle gpe_device, u32 gpe_number, void *context) { struct si_sm_io *io = context; ipmi_si_irq_handler(io->irq, io->irq_handler_data); return ACPI_INTERRUPT_HANDLED; } static void acpi_gpe_irq_cleanup(struct si_sm_io *io) { if (!io->irq) return; ipmi_irq_start_cleanup(io); acpi_remove_gpe_handler(NULL, io->irq, &ipmi_acpi_gpe); } static int acpi_gpe_irq_setup(struct si_sm_io *io) { acpi_status status; if (!io->irq) return 0; status = acpi_install_gpe_handler(NULL, io->irq, ACPI_GPE_LEVEL_TRIGGERED, &ipmi_acpi_gpe, io); if (ACPI_FAILURE(status)) { dev_warn(io->dev, "Unable to claim ACPI GPE %d, running polled\n", io->irq); io->irq = 0; return -EINVAL; } io->irq_cleanup = acpi_gpe_irq_cleanup; ipmi_irq_finish_setup(io); dev_info(io->dev, "Using ACPI GPE %d\n", io->irq); return 0; } #endif static void ipmi_set_addr_data_and_space(struct resource *r, struct si_sm_io *io) { if (resource_type(r) == IORESOURCE_IO) io->addr_space = IPMI_IO_ADDR_SPACE; else io->addr_space = IPMI_MEM_ADDR_SPACE; io->addr_data = r->start; } static struct resource * ipmi_get_info_from_resources(struct platform_device *pdev, struct si_sm_io *io) { struct resource *res, *res_second; res = platform_get_mem_or_io(pdev, 0); if (!res) { dev_err(&pdev->dev, "no I/O or memory address\n"); return NULL; } ipmi_set_addr_data_and_space(res, io); io->regspacing = DEFAULT_REGSPACING; res_second = platform_get_mem_or_io(pdev, 1); if (res_second && resource_type(res_second) == resource_type(res)) { if (res_second->start > io->addr_data) io->regspacing = res_second->start - io->addr_data; } return res; } static int platform_ipmi_probe(struct platform_device *pdev) { struct si_sm_io io; u8 type, slave_addr, addr_source, regsize, regshift; int rv; rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source); if (rv) addr_source = SI_PLATFORM; if (addr_source >= SI_LAST) return -EINVAL; if (addr_source 
== SI_SMBIOS) { if (!si_trydmi) return -ENODEV; } else if (addr_source != SI_HARDCODED) { if (!si_tryplatform) return -ENODEV; } rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type); if (rv) return -ENODEV; memset(&io, 0, sizeof(io)); io.addr_source = addr_source; dev_info(&pdev->dev, "probing via %s\n", ipmi_addr_src_to_str(addr_source)); switch (type) { case SI_KCS: case SI_SMIC: case SI_BT: io.si_type = type; break; case SI_TYPE_INVALID: /* User disabled this in hardcode. */ return -ENODEV; default: dev_err(&pdev->dev, "ipmi-type property is invalid\n"); return -EINVAL; } io.regsize = DEFAULT_REGSIZE; rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize); if (!rv) io.regsize = regsize; io.regshift = 0; rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift); if (!rv) io.regshift = regshift; if (!ipmi_get_info_from_resources(pdev, &io)) return -EINVAL; rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr); if (rv) io.slave_addr = 0x20; else io.slave_addr = slave_addr; io.irq = platform_get_irq_optional(pdev, 0); if (io.irq > 0) io.irq_setup = ipmi_std_irq_setup; else io.irq = 0; io.dev = &pdev->dev; pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n", ipmi_addr_src_to_str(addr_source), (io.addr_space == IPMI_IO_ADDR_SPACE) ? "io" : "mem", io.addr_data, io.regsize, io.regspacing, io.irq); ipmi_si_add_smi(&io); return 0; } #ifdef CONFIG_OF static const struct of_device_id of_ipmi_match[] = { { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS }, { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC }, { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT }, {}, }; MODULE_DEVICE_TABLE(of, of_ipmi_match); static int of_ipmi_probe(struct platform_device *pdev) { const struct of_device_id *match; struct si_sm_io io; struct resource resource; const __be32 *regsize, *regspacing, *regshift; struct device_node *np = pdev->dev.of_node; int ret; int proplen; if (!si_tryopenfirmware) return -ENODEV; dev_info(&pdev->dev, "probing via device tree\n"); match = of_match_device(of_ipmi_match, &pdev->dev); if (!match) return -ENODEV; if (!of_device_is_available(np)) return -EINVAL; ret = of_address_to_resource(np, 0, &resource); if (ret) { dev_warn(&pdev->dev, "invalid address from OF\n"); return ret; } regsize = of_get_property(np, "reg-size", &proplen); if (regsize && proplen != 4) { dev_warn(&pdev->dev, "invalid regsize from OF\n"); return -EINVAL; } regspacing = of_get_property(np, "reg-spacing", &proplen); if (regspacing && proplen != 4) { dev_warn(&pdev->dev, "invalid regspacing from OF\n"); return -EINVAL; } regshift = of_get_property(np, "reg-shift", &proplen); if (regshift && proplen != 4) { dev_warn(&pdev->dev, "invalid regshift from OF\n"); return -EINVAL; } memset(&io, 0, sizeof(io)); io.si_type = (unsigned long) match->data; io.addr_source = SI_DEVICETREE; io.irq_setup = ipmi_std_irq_setup; ipmi_set_addr_data_and_space(&resource, &io); io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE; io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING; io.regshift = regshift ? 
be32_to_cpup(regshift) : 0; io.irq = irq_of_parse_and_map(pdev->dev.of_node, 0); io.dev = &pdev->dev; dev_dbg(&pdev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", io.addr_data, io.regsize, io.regspacing, io.irq); return ipmi_si_add_smi(&io); } #else #define of_ipmi_match NULL static int of_ipmi_probe(struct platform_device *dev) { return -ENODEV; } #endif #ifdef CONFIG_ACPI static int find_slave_address(struct si_sm_io *io, int slave_addr) { #ifdef CONFIG_IPMI_DMI_DECODE if (!slave_addr) slave_addr = ipmi_dmi_get_slave_addr(io->si_type, io->addr_space, io->addr_data); #endif return slave_addr; } static int acpi_ipmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct si_sm_io io; acpi_handle handle; acpi_status status; unsigned long long tmp; struct resource *res; if (!si_tryacpi) return -ENODEV; handle = ACPI_HANDLE(dev); if (!handle) return -ENODEV; memset(&io, 0, sizeof(io)); io.addr_source = SI_ACPI; dev_info(dev, "probing via ACPI\n"); io.addr_info.acpi_info.acpi_handle = handle; /* _IFT tells us the interface type: KCS, BT, etc */ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); if (ACPI_FAILURE(status)) { dev_err(dev, "Could not find ACPI IPMI interface type\n"); return -EINVAL; } switch (tmp) { case 1: io.si_type = SI_KCS; break; case 2: io.si_type = SI_SMIC; break; case 3: io.si_type = SI_BT; break; case 4: /* SSIF, just ignore */ return -ENODEV; default: dev_info(dev, "unknown IPMI type %lld\n", tmp); return -EINVAL; } io.dev = dev; io.regsize = DEFAULT_REGSIZE; io.regshift = 0; res = ipmi_get_info_from_resources(pdev, &io); if (!res) return -EINVAL; /* If _GPE exists, use it; otherwise use standard interrupts */ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); if (ACPI_SUCCESS(status)) { io.irq = tmp; io.irq_setup = acpi_gpe_irq_setup; } else { int irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { io.irq = irq; io.irq_setup = ipmi_std_irq_setup; } } io.slave_addr = find_slave_address(&io, io.slave_addr); dev_info(dev, "%pR regsize %d spacing %d irq %d\n", res, io.regsize, io.regspacing, io.irq); request_module_nowait("acpi_ipmi"); return ipmi_si_add_smi(&io); } static const struct acpi_device_id acpi_ipmi_match[] = { { "IPI0001", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match); #else static int acpi_ipmi_probe(struct platform_device *dev) { return -ENODEV; } #endif static int ipmi_probe(struct platform_device *pdev) { if (pdev->dev.of_node && of_ipmi_probe(pdev) == 0) return 0; if (acpi_ipmi_probe(pdev) == 0) return 0; return platform_ipmi_probe(pdev); } static int ipmi_remove(struct platform_device *pdev) { ipmi_si_remove_by_dev(&pdev->dev); return 0; } static int pdev_match_name(struct device *dev, const void *data) { struct platform_device *pdev = to_platform_device(dev); const char *name = data; return strcmp(pdev->name, name) == 0; } void ipmi_remove_platform_device_by_name(char *name) { struct device *dev; while ((dev = bus_find_device(&platform_bus_type, NULL, name, pdev_match_name))) { struct platform_device *pdev = to_platform_device(dev); platform_device_unregister(pdev); put_device(dev); } } static const struct platform_device_id si_plat_ids[] = { { "dmi-ipmi-si", 0 }, { "hardcode-ipmi-si", 0 }, { "hotmod-ipmi-si", 0 }, { } }; struct platform_driver ipmi_platform_driver = { .driver = { .name = SI_DEVICE_NAME, .of_match_table = of_ipmi_match, .acpi_match_table = ACPI_PTR(acpi_ipmi_match), }, .probe = ipmi_probe, .remove = ipmi_remove, .id_table = si_plat_ids }; void ipmi_si_platform_init(void) { int rv 
= platform_driver_register(&ipmi_platform_driver); if (rv) pr_err("Unable to register driver: %d\n", rv); else platform_registered = true; } void ipmi_si_platform_shutdown(void) { if (platform_registered) platform_driver_unregister(&ipmi_platform_driver); }
linux-master
drivers/char/ipmi/ipmi_si_platform.c
// SPDX-License-Identifier: GPL-2.0+ #include <linux/module.h> #include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <asm/parisc-device.h> #include "ipmi_si.h" static bool parisc_registered; static int __init ipmi_parisc_probe(struct parisc_device *dev) { struct si_sm_io io; memset(&io, 0, sizeof(io)); io.si_type = SI_KCS; io.addr_source = SI_DEVICETREE; io.addr_space = IPMI_MEM_ADDR_SPACE; io.addr_data = dev->hpa.start; io.regsize = 1; io.regspacing = 1; io.regshift = 0; io.irq = 0; /* no interrupt */ io.irq_setup = NULL; io.dev = &dev->dev; dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data); return ipmi_si_add_smi(&io); } static void __exit ipmi_parisc_remove(struct parisc_device *dev) { ipmi_si_remove_by_dev(&dev->dev); } static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = { { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 }, { 0, } }; MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl); static struct parisc_driver ipmi_parisc_driver __refdata = { .name = "ipmi", .id_table = ipmi_parisc_tbl, .probe = ipmi_parisc_probe, .remove = __exit_p(ipmi_parisc_remove), }; void ipmi_si_parisc_init(void) { register_parisc_driver(&ipmi_parisc_driver); parisc_registered = true; } void ipmi_si_parisc_shutdown(void) { if (parisc_registered) unregister_parisc_driver(&ipmi_parisc_driver); }
linux-master
drivers/char/ipmi/ipmi_si_parisc.c
// SPDX-License-Identifier: GPL-2.0 /* * IPMB driver to receive a request and send a response * * Copyright (C) 2019 Mellanox Techologies, Ltd. * * This was inspired by Brendan Higgins' ipmi-bmc-bt-i2c driver. */ #include <linux/acpi.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/wait.h> #define MAX_MSG_LEN 240 #define IPMB_REQUEST_LEN_MIN 7 #define NETFN_RSP_BIT_MASK 0x4 #define REQUEST_QUEUE_MAX_LEN 256 #define IPMB_MSG_LEN_IDX 0 #define RQ_SA_8BIT_IDX 1 #define NETFN_LUN_IDX 2 #define GET_7BIT_ADDR(addr_8bit) (addr_8bit >> 1) #define GET_8BIT_ADDR(addr_7bit) ((addr_7bit << 1) & 0xff) #define IPMB_MSG_PAYLOAD_LEN_MAX (MAX_MSG_LEN - IPMB_REQUEST_LEN_MIN - 1) #define SMBUS_MSG_HEADER_LENGTH 2 #define SMBUS_MSG_IDX_OFFSET (SMBUS_MSG_HEADER_LENGTH + 1) struct ipmb_msg { u8 len; u8 rs_sa; u8 netfn_rs_lun; u8 checksum1; u8 rq_sa; u8 rq_seq_rq_lun; u8 cmd; u8 payload[IPMB_MSG_PAYLOAD_LEN_MAX]; /* checksum2 is included in payload */ } __packed; struct ipmb_request_elem { struct list_head list; struct ipmb_msg request; }; struct ipmb_dev { struct i2c_client *client; struct miscdevice miscdev; struct ipmb_msg request; struct list_head request_queue; atomic_t request_queue_len; size_t msg_idx; spinlock_t lock; wait_queue_head_t wait_queue; struct mutex file_mutex; bool is_i2c_protocol; }; static inline struct ipmb_dev *to_ipmb_dev(struct file *file) { return container_of(file->private_data, struct ipmb_dev, miscdev); } static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); struct ipmb_request_elem *queue_elem; struct ipmb_msg msg; ssize_t ret = 0; memset(&msg, 0, sizeof(msg)); spin_lock_irq(&ipmb_dev->lock); while (list_empty(&ipmb_dev->request_queue)) { spin_unlock_irq(&ipmb_dev->lock); if (file->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(ipmb_dev->wait_queue, !list_empty(&ipmb_dev->request_queue)); if (ret) return ret; spin_lock_irq(&ipmb_dev->lock); } queue_elem = list_first_entry(&ipmb_dev->request_queue, struct ipmb_request_elem, list); memcpy(&msg, &queue_elem->request, sizeof(msg)); list_del(&queue_elem->list); kfree(queue_elem); atomic_dec(&ipmb_dev->request_queue_len); spin_unlock_irq(&ipmb_dev->lock); count = min_t(size_t, count, msg.len + 1); if (copy_to_user(buf, &msg, count)) ret = -EFAULT; return ret < 0 ? 
ret : count; } static int ipmb_i2c_write(struct i2c_client *client, u8 *msg, u8 addr) { struct i2c_msg i2c_msg; /* * subtract 1 byte (rq_sa) from the length of the msg passed to * raw i2c_transfer */ i2c_msg.len = msg[IPMB_MSG_LEN_IDX] - 1; /* Assign message to buffer except first 2 bytes (length and address) */ i2c_msg.buf = msg + 2; i2c_msg.addr = addr; i2c_msg.flags = client->flags & I2C_CLIENT_PEC; return i2c_transfer(client->adapter, &i2c_msg, 1); } static ssize_t ipmb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); u8 rq_sa, netf_rq_lun, msg_len; struct i2c_client *temp_client; u8 msg[MAX_MSG_LEN]; ssize_t ret; if (count > sizeof(msg)) return -EINVAL; if (copy_from_user(&msg, buf, count)) return -EFAULT; if (count < msg[0]) return -EINVAL; rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]); netf_rq_lun = msg[NETFN_LUN_IDX]; /* Check i2c block transfer vs smbus */ if (ipmb_dev->is_i2c_protocol) { ret = ipmb_i2c_write(ipmb_dev->client, msg, rq_sa); return (ret == 1) ? count : ret; } /* * subtract rq_sa and netf_rq_lun from the length of the msg. Fill the * temporary client. Note that its use is an exception for IPMI. */ msg_len = msg[IPMB_MSG_LEN_IDX] - SMBUS_MSG_HEADER_LENGTH; temp_client = kmemdup(ipmb_dev->client, sizeof(*temp_client), GFP_KERNEL); if (!temp_client) return -ENOMEM; temp_client->addr = rq_sa; ret = i2c_smbus_write_block_data(temp_client, netf_rq_lun, msg_len, msg + SMBUS_MSG_IDX_OFFSET); kfree(temp_client); return ret < 0 ? ret : count; } static __poll_t ipmb_poll(struct file *file, poll_table *wait) { struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); __poll_t mask = EPOLLOUT; mutex_lock(&ipmb_dev->file_mutex); poll_wait(file, &ipmb_dev->wait_queue, wait); if (atomic_read(&ipmb_dev->request_queue_len)) mask |= EPOLLIN; mutex_unlock(&ipmb_dev->file_mutex); return mask; } static const struct file_operations ipmb_fops = { .owner = THIS_MODULE, .read = ipmb_read, .write = ipmb_write, .poll = ipmb_poll, }; /* Called with ipmb_dev->lock held. */ static void ipmb_handle_request(struct ipmb_dev *ipmb_dev) { struct ipmb_request_elem *queue_elem; if (atomic_read(&ipmb_dev->request_queue_len) >= REQUEST_QUEUE_MAX_LEN) return; queue_elem = kmalloc(sizeof(*queue_elem), GFP_ATOMIC); if (!queue_elem) return; memcpy(&queue_elem->request, &ipmb_dev->request, sizeof(struct ipmb_msg)); list_add(&queue_elem->list, &ipmb_dev->request_queue); atomic_inc(&ipmb_dev->request_queue_len); wake_up_all(&ipmb_dev->wait_queue); } static u8 ipmb_verify_checksum1(struct ipmb_dev *ipmb_dev, u8 rs_sa) { /* The 8 lsb of the sum is 0 when the checksum is valid */ return (rs_sa + ipmb_dev->request.netfn_rs_lun + ipmb_dev->request.checksum1); } /* * Verify if message has proper ipmb header with minimum length * and correct checksum byte. */ static bool is_ipmb_msg(struct ipmb_dev *ipmb_dev, u8 rs_sa) { if ((ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) && (!ipmb_verify_checksum1(ipmb_dev, rs_sa))) return true; return false; } /* * The IPMB protocol only supports I2C Writes so there is no need * to support I2C_SLAVE_READ* events. * This i2c callback function only monitors IPMB request messages * and adds them in a queue, so that they can be handled by * receive_ipmb_request. 
*/ static int ipmb_slave_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client); u8 *buf = (u8 *)&ipmb_dev->request; unsigned long flags; spin_lock_irqsave(&ipmb_dev->lock, flags); switch (event) { case I2C_SLAVE_WRITE_REQUESTED: memset(&ipmb_dev->request, 0, sizeof(ipmb_dev->request)); ipmb_dev->msg_idx = 0; /* * At index 0, ipmb_msg stores the length of msg, * skip it for now. * The len will be populated once the whole * buf is populated. * * The I2C bus driver's responsibility is to pass the * data bytes to the backend driver; it does not * forward the i2c slave address. * Since the first byte in the IPMB message is the * address of the responder, it is the responsibility * of the IPMB driver to format the message properly. * So this driver prepends the address of the responder * to the received i2c data before the request message * is handled in userland. */ buf[++ipmb_dev->msg_idx] = GET_8BIT_ADDR(client->addr); break; case I2C_SLAVE_WRITE_RECEIVED: if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1) break; buf[++ipmb_dev->msg_idx] = *val; break; case I2C_SLAVE_STOP: ipmb_dev->request.len = ipmb_dev->msg_idx; if (is_ipmb_msg(ipmb_dev, GET_8BIT_ADDR(client->addr))) ipmb_handle_request(ipmb_dev); break; default: break; } spin_unlock_irqrestore(&ipmb_dev->lock, flags); return 0; } static int ipmb_probe(struct i2c_client *client) { struct ipmb_dev *ipmb_dev; int ret; ipmb_dev = devm_kzalloc(&client->dev, sizeof(*ipmb_dev), GFP_KERNEL); if (!ipmb_dev) return -ENOMEM; spin_lock_init(&ipmb_dev->lock); init_waitqueue_head(&ipmb_dev->wait_queue); atomic_set(&ipmb_dev->request_queue_len, 0); INIT_LIST_HEAD(&ipmb_dev->request_queue); mutex_init(&ipmb_dev->file_mutex); ipmb_dev->miscdev.minor = MISC_DYNAMIC_MINOR; ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s%d", "ipmb-", client->adapter->nr); ipmb_dev->miscdev.fops = &ipmb_fops; ipmb_dev->miscdev.parent = &client->dev; ret = misc_register(&ipmb_dev->miscdev); if (ret) return ret; ipmb_dev->is_i2c_protocol = device_property_read_bool(&client->dev, "i2c-protocol"); ipmb_dev->client = client; i2c_set_clientdata(client, ipmb_dev); ret = i2c_slave_register(client, ipmb_slave_cb); if (ret) { misc_deregister(&ipmb_dev->miscdev); return ret; } return 0; } static void ipmb_remove(struct i2c_client *client) { struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client); i2c_slave_unregister(client); misc_deregister(&ipmb_dev->miscdev); } static const struct i2c_device_id ipmb_id[] = { { "ipmb-dev", 0 }, {}, }; MODULE_DEVICE_TABLE(i2c, ipmb_id); static const struct acpi_device_id acpi_ipmb_id[] = { { "IPMB0001", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id); static struct i2c_driver ipmb_driver = { .driver = { .name = "ipmb-dev", .acpi_match_table = ACPI_PTR(acpi_ipmb_id), }, .probe = ipmb_probe, .remove = ipmb_remove, .id_table = ipmb_id, }; module_i2c_driver(ipmb_driver); MODULE_AUTHOR("Mellanox Technologies"); MODULE_DESCRIPTION("IPMB driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/char/ipmi/ipmb_dev_int.c
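From user space, the miscdevice registered by ipmb_probe() appears as /dev/ipmb-<N>, where N is the I2C adapter number. A read() returns the next queued request prefixed by its length byte (the layout mirrors struct ipmb_msg), and poll() reports EPOLLIN once a request has been queued by the slave callback. The following is a rough receiver sketch under those assumptions; the node name /dev/ipmb-1 is a made-up example.

/*
 * Editor's sketch: wait for one IPMB request on the character device
 * exported by ipmb_dev_int and dump its header fields. The device
 * node name is hypothetical.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[240];		/* MAX_MSG_LEN in the driver */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/ipmb-1", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	/* The driver wakes the queue when a valid request arrives. */
	if (poll(&pfd, 1, -1) < 0) {
		perror("poll");
		return 1;
	}

	/* Returns the length byte followed by the message body. */
	n = read(pfd.fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		return 1;
	}
	if (n < 7) {
		fprintf(stderr, "short read: %zd bytes\n", n);
		return 1;
	}

	printf("got %zd bytes: len=%u rs_sa=0x%02x netfn/lun=0x%02x cmd=0x%02x\n",
	       n, buf[0], buf[1], buf[2], buf[6]);

	close(pfd.fd);
	return 0;
}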
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_si.c * * The interface to the IPMI driver for the system interfaces (KCS, SMIC, * BT). * * Author: MontaVista Software, Inc. * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002 MontaVista Software Inc. * Copyright 2006 IBM Corp., Christian Krafft <[email protected]> */ /* * This file holds the "policy" for the interface to the SMI state * machine. It does the configuration, handles timers and interrupts, * and drives the real SMI state machine. */ #define pr_fmt(fmt) "ipmi_si: " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <asm/irq.h> #include <linux/interrupt.h> #include <linux/rcupdate.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include "ipmi_si.h" #include "ipmi_si_sm.h" #include <linux/string.h> #include <linux/ctype.h> /* Measure times between events in the driver. */ #undef DEBUG_TIMING /* Call every 10 ms. */ #define SI_TIMEOUT_TIME_USEC 10000 #define SI_USEC_PER_JIFFY (1000000/HZ) #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a short timeout */ enum si_intf_state { SI_NORMAL, SI_GETTING_FLAGS, SI_GETTING_EVENTS, SI_CLEARING_FLAGS, SI_GETTING_MESSAGES, SI_CHECKING_ENABLES, SI_SETTING_ENABLES /* FIXME - add watchdog stuff. */ }; /* Some BT-specific defines we need here. */ #define IPMI_BT_INTMASK_REG 2 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 /* 'invalid' to allow a firmware-specified interface to be disabled */ const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL }; static bool initialized; /* * Indexes into stats[] in smi_info below. */ enum si_stat_indexes { /* * Number of times the driver requested a timer while an operation * was in progress. */ SI_STAT_short_timeouts = 0, /* * Number of times the driver requested a timer while nothing was in * progress. */ SI_STAT_long_timeouts, /* Number of times the interface was idle while being polled. */ SI_STAT_idles, /* Number of interrupts the driver handled. */ SI_STAT_interrupts, /* Number of time the driver got an ATTN from the hardware. */ SI_STAT_attentions, /* Number of times the driver requested flags from the hardware. */ SI_STAT_flag_fetches, /* Number of times the hardware didn't follow the state machine. */ SI_STAT_hosed_count, /* Number of completed messages. */ SI_STAT_complete_transactions, /* Number of IPMI events received from the hardware. */ SI_STAT_events, /* Number of watchdog pretimeouts. */ SI_STAT_watchdog_pretimeouts, /* Number of asynchronous messages received. */ SI_STAT_incoming_messages, /* This *must* remain last, add new values above this. */ SI_NUM_STATS }; struct smi_info { int si_num; struct ipmi_smi *intf; struct si_sm_data *si_sm; const struct si_sm_handlers *handlers; spinlock_t si_lock; struct ipmi_smi_msg *waiting_msg; struct ipmi_smi_msg *curr_msg; enum si_intf_state si_state; /* * Used to handle the various types of I/O that can occur with * IPMI */ struct si_sm_io io; /* * Per-OEM handler, called from handle_flags(). Returns 1 * when handle_flags() needs to be re-run or 0 indicating it * set si_state itself. 
*/ int (*oem_data_avail_handler)(struct smi_info *smi_info); /* * Flags from the last GET_MSG_FLAGS command, used when an ATTN * is set to hold the flags until we are done handling everything * from the flags. */ #define RECEIVE_MSG_AVAIL 0x01 #define EVENT_MSG_BUFFER_FULL 0x02 #define WDT_PRE_TIMEOUT_INT 0x08 #define OEM0_DATA_AVAIL 0x20 #define OEM1_DATA_AVAIL 0x40 #define OEM2_DATA_AVAIL 0x80 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ OEM1_DATA_AVAIL | \ OEM2_DATA_AVAIL) unsigned char msg_flags; /* Does the BMC have an event buffer? */ bool has_event_buffer; /* * If set to true, this will request events the next time the * state machine is idle. */ atomic_t req_events; /* * If true, run the state machine to completion on every send * call. Generally used after a panic to make sure stuff goes * out. */ bool run_to_completion; /* The timer for this si. */ struct timer_list si_timer; /* This flag is set, if the timer can be set */ bool timer_can_start; /* This flag is set, if the timer is running (timer_pending() isn't enough) */ bool timer_running; /* The time (in jiffies) the last timeout occurred at. */ unsigned long last_timeout_jiffies; /* Are we waiting for the events, pretimeouts, received msgs? */ atomic_t need_watch; /* * The driver will disable interrupts when it gets into a * situation where it cannot handle messages due to lack of * memory. Once that situation clears up, it will re-enable * interrupts. */ bool interrupt_disabled; /* * Does the BMC support events? */ bool supports_event_msg_buff; /* * Can we disable interrupts the global enables receive irq * bit? There are currently two forms of brokenness, some * systems cannot disable the bit (which is technically within * the spec but a bad idea) and some systems have the bit * forced to zero even though interrupts work (which is * clearly outside the spec). The next bool tells which form * of brokenness is present. */ bool cannot_disable_irq; /* * Some systems are broken and cannot set the irq enable * bit, even if they support interrupts. */ bool irq_enable_broken; /* Is the driver in maintenance mode? */ bool in_maintenance_mode; /* * Did we get an attention that we did not handle? */ bool got_attn; /* From the get device id response... */ struct ipmi_device_id device_id; /* Have we added the device group to the device? */ bool dev_group_added; /* Counters and things for the proc filesystem. 
*/ atomic_t stats[SI_NUM_STATS]; struct task_struct *thread; struct list_head link; }; #define smi_inc_stat(smi, stat) \ atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) #define smi_get_stat(smi, stat) \ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) #define IPMI_MAX_INTFS 4 static int force_kipmid[IPMI_MAX_INTFS]; static int num_force_kipmid; static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS]; static int num_max_busy_us; static bool unload_when_empty = true; static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *smi_info); static void cleanup_ipmi_si(void); #ifdef DEBUG_TIMING void debug_timestamp(struct smi_info *smi_info, char *msg) { struct timespec64 t; ktime_get_ts64(&t); dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec); } #else #define debug_timestamp(smi_info, x) #endif static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&xaction_notifier_list, nb); } static void deliver_recv_msg(struct smi_info *smi_info, struct ipmi_smi_msg *msg) { /* Deliver the message to the upper layer. */ ipmi_smi_msg_received(smi_info->intf, msg); } static void return_hosed_msg(struct smi_info *smi_info, int cCode) { struct ipmi_smi_msg *msg = smi_info->curr_msg; if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) cCode = IPMI_ERR_UNSPECIFIED; /* else use it as is */ /* Make it a response */ msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = cCode; msg->rsp_size = 3; smi_info->curr_msg = NULL; deliver_recv_msg(smi_info, msg); } static enum si_sm_result start_next_msg(struct smi_info *smi_info) { int rv; if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; rv = SI_SM_IDLE; } else { int err; smi_info->curr_msg = smi_info->waiting_msg; smi_info->waiting_msg = NULL; debug_timestamp(smi_info, "Start2"); err = atomic_notifier_call_chain(&xaction_notifier_list, 0, smi_info); if (err & NOTIFY_STOP_MASK) { rv = SI_SM_CALL_WITHOUT_DELAY; goto out; } err = smi_info->handlers->start_transaction( smi_info->si_sm, smi_info->curr_msg->data, smi_info->curr_msg->data_size); if (err) return_hosed_msg(smi_info, err); rv = SI_SM_CALL_WITHOUT_DELAY; } out: return rv; } static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { if (!smi_info->timer_can_start) return; smi_info->last_timeout_jiffies = jiffies; mod_timer(&smi_info->si_timer, new_val); smi_info->timer_running = true; } /* * Start a new message and (re)start the timer and thread. */ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, unsigned int size) { smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); if (smi_info->thread) wake_up_process(smi_info->thread); smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); } static void start_check_enables(struct smi_info *smi_info) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_CHECKING_ENABLES; } static void start_clear_flags(struct smi_info *smi_info) { unsigned char msg[3]; /* Make sure the watchdog pre-timeout flag is not set at startup. 
*/ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; start_new_msg(smi_info, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } static void start_getting_msg_queue(struct smi_info *smi_info) { smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; smi_info->curr_msg->data_size = 2; start_new_msg(smi_info, smi_info->curr_msg->data, smi_info->curr_msg->data_size); smi_info->si_state = SI_GETTING_MESSAGES; } static void start_getting_events(struct smi_info *smi_info) { smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; smi_info->curr_msg->data_size = 2; start_new_msg(smi_info, smi_info->curr_msg->data, smi_info->curr_msg->data_size); smi_info->si_state = SI_GETTING_EVENTS; } /* * When we have a situtaion where we run out of memory and cannot * allocate messages, we just leave them in the BMC and run the system * polled until we can allocate some memory. Once we have some * memory, we will re-enable the interrupt. * * Note that we cannot just use disable_irq(), since the interrupt may * be shared. */ static inline bool disable_si_irq(struct smi_info *smi_info) { if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = true; start_check_enables(smi_info); return true; } return false; } static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = false; start_check_enables(smi_info); return true; } return false; } /* * Allocate a message. If unable to allocate, start the interrupt * disable process and return NULL. If able to allocate but * interrupts are disabled, free the message and return NULL after * starting the interrupt enable process. */ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) { struct ipmi_smi_msg *msg; msg = ipmi_alloc_smi_msg(); if (!msg) { if (!disable_si_irq(smi_info)) smi_info->si_state = SI_NORMAL; } else if (enable_si_irq(smi_info)) { ipmi_free_smi_msg(msg); msg = NULL; } return msg; } static void handle_flags(struct smi_info *smi_info) { retry: if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { /* Watchdog pre-timeout */ smi_inc_stat(smi_info, watchdog_pretimeouts); start_clear_flags(smi_info); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; ipmi_smi_watchdog_pretimeout(smi_info->intf); } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { /* Messages available. */ smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) return; start_getting_msg_queue(smi_info); } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { /* Events available. */ smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) return; start_getting_events(smi_info); } else if (smi_info->msg_flags & OEM_DATA_AVAIL && smi_info->oem_data_avail_handler) { if (smi_info->oem_data_avail_handler(smi_info)) goto retry; } else smi_info->si_state = SI_NORMAL; } /* * Global enables we care about. 
*/ #define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ IPMI_BMC_EVT_MSG_INTR) static u8 current_global_enables(struct smi_info *smi_info, u8 base, bool *irq_on) { u8 enables = 0; if (smi_info->supports_event_msg_buff) enables |= IPMI_BMC_EVT_MSG_BUFF; if (((smi_info->io.irq && !smi_info->interrupt_disabled) || smi_info->cannot_disable_irq) && !smi_info->irq_enable_broken) enables |= IPMI_BMC_RCV_MSG_INTR; if (smi_info->supports_event_msg_buff && smi_info->io.irq && !smi_info->interrupt_disabled && !smi_info->irq_enable_broken) enables |= IPMI_BMC_EVT_MSG_INTR; *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); return enables; } static void check_bt_irq(struct smi_info *smi_info, bool irq_on) { u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT; if ((bool)irqstate == irq_on) return; if (irq_on) smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_ENABLE_IRQ_BIT); else smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); } static void handle_transaction_done(struct smi_info *smi_info) { struct ipmi_smi_msg *msg; debug_timestamp(smi_info, "Done"); switch (smi_info->si_state) { case SI_NORMAL: if (!smi_info->curr_msg) break; smi_info->curr_msg->rsp_size = smi_info->handlers->get_result( smi_info->si_sm, smi_info->curr_msg->rsp, IPMI_MAX_MSG_LENGTH); /* * Do this here becase deliver_recv_msg() releases the * lock, and a new message can be put in during the * time the lock is released. */ msg = smi_info->curr_msg; smi_info->curr_msg = NULL; deliver_recv_msg(smi_info, msg); break; case SI_GETTING_FLAGS: { unsigned char msg[4]; unsigned int len; /* We got the flags from the SMI, now handle them. */ len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { /* Error fetching flags, just give up for now. */ smi_info->si_state = SI_NORMAL; } else if (len < 4) { /* * Hmm, no flags. That's technically illegal, but * don't use uninitialized data. */ smi_info->si_state = SI_NORMAL; } else { smi_info->msg_flags = msg[3]; handle_flags(smi_info); } break; } case SI_CLEARING_FLAGS: { unsigned char msg[3]; /* We cleared the flags. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 3); if (msg[2] != 0) { /* Error clearing flags */ dev_warn_ratelimited(smi_info->io.dev, "Error clearing flags: %2.2x\n", msg[2]); } smi_info->si_state = SI_NORMAL; break; } case SI_GETTING_EVENTS: { smi_info->curr_msg->rsp_size = smi_info->handlers->get_result( smi_info->si_sm, smi_info->curr_msg->rsp, IPMI_MAX_MSG_LENGTH); /* * Do this here becase deliver_recv_msg() releases the * lock, and a new message can be put in during the * time the lock is released. */ msg = smi_info->curr_msg; smi_info->curr_msg = NULL; if (msg->rsp[2] != 0) { /* Error getting event, probably done. */ msg->done(msg); /* Take off the event flag. */ smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; handle_flags(smi_info); } else { smi_inc_stat(smi_info, events); /* * Do this before we deliver the message * because delivering the message releases the * lock and something else can mess with the * state. */ handle_flags(smi_info); deliver_recv_msg(smi_info, msg); } break; } case SI_GETTING_MESSAGES: { smi_info->curr_msg->rsp_size = smi_info->handlers->get_result( smi_info->si_sm, smi_info->curr_msg->rsp, IPMI_MAX_MSG_LENGTH); /* * Do this here becase deliver_recv_msg() releases the * lock, and a new message can be put in during the * time the lock is released. 
*/ msg = smi_info->curr_msg; smi_info->curr_msg = NULL; if (msg->rsp[2] != 0) { /* Error getting event, probably done. */ msg->done(msg); /* Take off the msg flag. */ smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; handle_flags(smi_info); } else { smi_inc_stat(smi_info, incoming_messages); /* * Do this before we deliver the message * because delivering the message releases the * lock and something else can mess with the * state. */ handle_flags(smi_info); deliver_recv_msg(smi_info, msg); } break; } case SI_CHECKING_ENABLES: { unsigned char msg[4]; u8 enables; bool irq_on; /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { dev_warn_ratelimited(smi_info->io.dev, "Couldn't get irq info: %x,\n" "Maybe ok, but ipmi might run very slowly.\n", msg[2]); smi_info->si_state = SI_NORMAL; break; } enables = current_global_enables(smi_info, 0, &irq_on); if (smi_info->io.si_type == SI_BT) /* BT has its own interrupt enable bit. */ check_bt_irq(smi_info, irq_on); if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { /* Enables are not correct, fix them. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK); smi_info->handlers->start_transaction( smi_info->si_sm, msg, 3); smi_info->si_state = SI_SETTING_ENABLES; } else if (smi_info->supports_event_msg_buff) { smi_info->curr_msg = ipmi_alloc_smi_msg(); if (!smi_info->curr_msg) { smi_info->si_state = SI_NORMAL; break; } start_getting_events(smi_info); } else { smi_info->si_state = SI_NORMAL; } break; } case SI_SETTING_ENABLES: { unsigned char msg[4]; smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) dev_warn_ratelimited(smi_info->io.dev, "Could not set the global enables: 0x%x.\n", msg[2]); if (smi_info->supports_event_msg_buff) { smi_info->curr_msg = ipmi_alloc_smi_msg(); if (!smi_info->curr_msg) { smi_info->si_state = SI_NORMAL; break; } start_getting_events(smi_info); } else { smi_info->si_state = SI_NORMAL; } break; } } } /* * Called on timeouts and events. Timeouts should pass the elapsed * time, interrupts should pass in zero. Must be called with * si_lock held and interrupts disabled. */ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, int time) { enum si_sm_result si_sm_result; restart: /* * There used to be a loop here that waited a little while * (around 25us) before giving up. That turned out to be * pointless, the minimum delays I was seeing were in the 300us * range, which is far too long to wait in an interrupt. So * we just run until the state machine tells us something * happened or it needs a delay. */ si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); time = 0; while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { smi_inc_stat(smi_info, complete_transactions); handle_transaction_done(smi_info); goto restart; } else if (si_sm_result == SI_SM_HOSED) { smi_inc_stat(smi_info, hosed_count); /* * Do the before return_hosed_msg, because that * releases the lock. */ smi_info->si_state = SI_NORMAL; if (smi_info->curr_msg != NULL) { /* * If we were handling a user message, format * a response to send to the upper layer to * tell it about the error. */ return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); } goto restart; } /* * We prefer handling attn over new messages. But don't do * this if there is not yet an upper layer to handle anything. 
*/ if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { unsigned char msg[2]; if (smi_info->si_state != SI_NORMAL) { /* * We got an ATTN, but we are doing something else. * Handle the ATTN later. */ smi_info->got_attn = true; } else { smi_info->got_attn = false; smi_inc_stat(smi_info, attentions); /* * Got a attn, send down a get message flags to see * what's causing it. It would be better to handle * this in the upper layer, but due to the way * interrupts work with the SMI, that's not really * possible. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_MSG_FLAGS_CMD; start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_GETTING_FLAGS; goto restart; } } /* If we are currently idle, try to start the next message. */ if (si_sm_result == SI_SM_IDLE) { smi_inc_stat(smi_info, idles); si_sm_result = start_next_msg(smi_info); if (si_sm_result != SI_SM_IDLE) goto restart; } if ((si_sm_result == SI_SM_IDLE) && (atomic_read(&smi_info->req_events))) { /* * We are idle and the upper layer requested that I fetch * events, so do so. */ atomic_set(&smi_info->req_events, 0); /* * Take this opportunity to check the interrupt and * message enable state for the BMC. The BMC can be * asynchronously reset, and may thus get interrupts * disable and messages disabled. */ if (smi_info->supports_event_msg_buff || smi_info->io.irq) { start_check_enables(smi_info); } else { smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) goto out; start_getting_events(smi_info); } goto restart; } if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { /* Ok it if fails, the timer will just go off. */ if (del_timer(&smi_info->si_timer)) smi_info->timer_running = false; } out: return si_sm_result; } static void check_start_timer_thread(struct smi_info *smi_info) { if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); if (smi_info->thread) wake_up_process(smi_info->thread); start_next_msg(smi_info); smi_event_handler(smi_info, 0); } } static void flush_messages(void *send_info) { struct smi_info *smi_info = send_info; enum si_sm_result result; /* * Currently, this function is called only in run-to-completion * mode. This means we are single-threaded, no need for locks. */ result = smi_event_handler(smi_info, 0); while (result != SI_SM_IDLE) { udelay(SI_SHORT_TIMEOUT_USEC); result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); } } static void sender(void *send_info, struct ipmi_smi_msg *msg) { struct smi_info *smi_info = send_info; unsigned long flags; debug_timestamp(smi_info, "Enqueue"); if (smi_info->run_to_completion) { /* * If we are running to completion, start it. Upper * layer will call flush_messages to clear it out. */ smi_info->waiting_msg = msg; return; } spin_lock_irqsave(&smi_info->si_lock, flags); /* * The following two lines don't need to be under the lock for * the lock's sake, but they do need SMP memory barriers to * avoid getting things out of order. We are already claiming * the lock, anyway, so just do it under the lock to avoid the * ordering problem. 
*/ BUG_ON(smi_info->waiting_msg); smi_info->waiting_msg = msg; check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); } static void set_run_to_completion(void *send_info, bool i_run_to_completion) { struct smi_info *smi_info = send_info; smi_info->run_to_completion = i_run_to_completion; if (i_run_to_completion) flush_messages(smi_info); } /* * Use -1 as a special constant to tell that we are spinning in kipmid * looking for something and not delaying between checks */ #define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull) static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result, const struct smi_info *smi_info, ktime_t *busy_until) { unsigned int max_busy_us = 0; if (smi_info->si_num < num_max_busy_us) max_busy_us = kipmid_max_busy_us[smi_info->si_num]; if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) *busy_until = IPMI_TIME_NOT_BUSY; else if (*busy_until == IPMI_TIME_NOT_BUSY) { *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC; } else { if (unlikely(ktime_get() > *busy_until)) { *busy_until = IPMI_TIME_NOT_BUSY; return false; } } return true; } /* * A busy-waiting loop for speeding up IPMI operation. * * Lousy hardware makes this hard. This is only enabled for systems * that are not BT and do not have interrupts. It starts spinning * when an operation is complete or until max_busy tells it to stop * (if that is enabled). See the paragraph on kimid_max_busy_us in * Documentation/driver-api/ipmi.rst for details. */ static int ipmi_thread(void *data) { struct smi_info *smi_info = data; unsigned long flags; enum si_sm_result smi_result; ktime_t busy_until = IPMI_TIME_NOT_BUSY; set_user_nice(current, MAX_NICE); while (!kthread_should_stop()) { int busy_wait; spin_lock_irqsave(&(smi_info->si_lock), flags); smi_result = smi_event_handler(smi_info, 0); /* * If the driver is doing something, there is a possible * race with the timer. If the timer handler see idle, * and the thread here sees something else, the timer * handler won't restart the timer even though it is * required. So start it here if necessary. */ if (smi_result != SI_SM_IDLE && !smi_info->timer_running) smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); spin_unlock_irqrestore(&(smi_info->si_lock), flags); busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, &busy_until); if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { ; /* do nothing */ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) { /* * In maintenance mode we run as fast as * possible to allow firmware updates to * complete as fast as possible, but normally * don't bang on the scheduler. */ if (smi_info->in_maintenance_mode) schedule(); else usleep_range(100, 200); } else if (smi_result == SI_SM_IDLE) { if (atomic_read(&smi_info->need_watch)) { schedule_timeout_interruptible(100); } else { /* Wait to be woken up when we are needed. */ __set_current_state(TASK_INTERRUPTIBLE); schedule(); } } else { schedule_timeout_interruptible(1); } } return 0; } static void poll(void *send_info) { struct smi_info *smi_info = send_info; unsigned long flags = 0; bool run_to_completion = smi_info->run_to_completion; /* * Make sure there is some delay in the poll loop so we can * drive time forward and timeout things. 
*/ udelay(10); if (!run_to_completion) spin_lock_irqsave(&smi_info->si_lock, flags); smi_event_handler(smi_info, 10); if (!run_to_completion) spin_unlock_irqrestore(&smi_info->si_lock, flags); } static void request_events(void *send_info) { struct smi_info *smi_info = send_info; if (!smi_info->has_event_buffer) return; atomic_set(&smi_info->req_events, 1); } static void set_need_watch(void *send_info, unsigned int watch_mask) { struct smi_info *smi_info = send_info; unsigned long flags; int enable; enable = !!watch_mask; atomic_set(&smi_info->need_watch, enable); spin_lock_irqsave(&smi_info->si_lock, flags); check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); } static void smi_timeout(struct timer_list *t) { struct smi_info *smi_info = from_timer(smi_info, t, si_timer); enum si_sm_result smi_result; unsigned long flags; unsigned long jiffies_now; long time_diff; long timeout; spin_lock_irqsave(&(smi_info->si_lock), flags); debug_timestamp(smi_info, "Timer"); jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); smi_result = smi_event_handler(smi_info, time_diff); if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { /* Running with interrupts, only do long timeouts. */ timeout = jiffies + SI_TIMEOUT_JIFFIES; smi_inc_stat(smi_info, long_timeouts); goto do_mod_timer; } /* * If the state machine asks for a short delay, then shorten * the timer timeout. */ if (smi_result == SI_SM_CALL_WITH_DELAY) { smi_inc_stat(smi_info, short_timeouts); timeout = jiffies + 1; } else { smi_inc_stat(smi_info, long_timeouts); timeout = jiffies + SI_TIMEOUT_JIFFIES; } do_mod_timer: if (smi_result != SI_SM_IDLE) smi_mod_timer(smi_info, timeout); else smi_info->timer_running = false; spin_unlock_irqrestore(&(smi_info->si_lock), flags); } irqreturn_t ipmi_si_irq_handler(int irq, void *data) { struct smi_info *smi_info = data; unsigned long flags; if (smi_info->io.si_type == SI_BT) /* We need to clear the IRQ flag for the BT interface. */ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_CLEAR_IRQ_BIT | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); spin_lock_irqsave(&(smi_info->si_lock), flags); smi_inc_stat(smi_info, interrupts); debug_timestamp(smi_info, "Interrupt"); smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); return IRQ_HANDLED; } static int smi_start_processing(void *send_info, struct ipmi_smi *intf) { struct smi_info *new_smi = send_info; int enable = 0; new_smi->intf = intf; /* Set up the timer that drives the interface. */ timer_setup(&new_smi->si_timer, smi_timeout, 0); new_smi->timer_can_start = true; smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); /* Try to claim any interrupts. */ if (new_smi->io.irq_setup) { new_smi->io.irq_handler_data = new_smi; new_smi->io.irq_setup(&new_smi->io); } /* * Check if the user forcefully enabled the daemon. */ if (new_smi->si_num < num_force_kipmid) enable = force_kipmid[new_smi->si_num]; /* * The BT interface is efficient enough to not need a thread, * and there is no need for a thread if we have interrupts. 
*/ else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq)) enable = 1; if (enable) { new_smi->thread = kthread_run(ipmi_thread, new_smi, "kipmi%d", new_smi->si_num); if (IS_ERR(new_smi->thread)) { dev_notice(new_smi->io.dev, "Could not start kernel thread due to error %ld, only using timers to drive the interface\n", PTR_ERR(new_smi->thread)); new_smi->thread = NULL; } } return 0; } static int get_smi_info(void *send_info, struct ipmi_smi_info *data) { struct smi_info *smi = send_info; data->addr_src = smi->io.addr_source; data->dev = smi->io.dev; data->addr_info = smi->io.addr_info; get_device(smi->io.dev); return 0; } static void set_maintenance_mode(void *send_info, bool enable) { struct smi_info *smi_info = send_info; if (!enable) atomic_set(&smi_info->req_events, 0); smi_info->in_maintenance_mode = enable; } static void shutdown_smi(void *send_info); static const struct ipmi_smi_handlers handlers = { .owner = THIS_MODULE, .start_processing = smi_start_processing, .shutdown = shutdown_smi, .get_smi_info = get_smi_info, .sender = sender, .request_events = request_events, .set_need_watch = set_need_watch, .set_maintenance_mode = set_maintenance_mode, .set_run_to_completion = set_run_to_completion, .flush_messages = flush_messages, .poll = poll, }; static LIST_HEAD(smi_infos); static DEFINE_MUTEX(smi_infos_lock); static int smi_num; /* Used to sequence the SMIs */ static const char * const addr_space_to_str[] = { "i/o", "mem" }; module_param_array(force_kipmid, int, &num_force_kipmid, 0); MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or disabled(0). Normally the IPMI driver auto-detects this, but the value may be overridden by this parm."); module_param(unload_when_empty, bool, 0); MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are specified or found, default is 1. Setting to 0 is useful for hot add of devices using hotmod."); module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644); MODULE_PARM_DESC(kipmid_max_busy_us, "Max time (in microseconds) to busy-wait for IPMI data before sleeping. 0 (default) means to wait forever. Set to 100-500 if kipmid is using up a lot of CPU time."); void ipmi_irq_finish_setup(struct si_sm_io *io) { if (io->si_type == SI_BT) /* Enable the interrupt in the BT interface. */ io->outputb(io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_ENABLE_IRQ_BIT); } void ipmi_irq_start_cleanup(struct si_sm_io *io) { if (io->si_type == SI_BT) /* Disable the interrupt in the BT interface. 
*/ io->outputb(io, IPMI_BT_INTMASK_REG, 0); } static void std_irq_cleanup(struct si_sm_io *io) { ipmi_irq_start_cleanup(io); free_irq(io->irq, io->irq_handler_data); } int ipmi_std_irq_setup(struct si_sm_io *io) { int rv; if (!io->irq) return 0; rv = request_irq(io->irq, ipmi_si_irq_handler, IRQF_SHARED, SI_DEVICE_NAME, io->irq_handler_data); if (rv) { dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n", SI_DEVICE_NAME, io->irq); io->irq = 0; } else { io->irq_cleanup = std_irq_cleanup; ipmi_irq_finish_setup(io); dev_info(io->dev, "Using irq %d\n", io->irq); } return rv; } static int wait_for_msg_done(struct smi_info *smi_info) { enum si_sm_result smi_result; smi_result = smi_info->handlers->event(smi_info->si_sm, 0); for (;;) { if (smi_result == SI_SM_CALL_WITH_DELAY || smi_result == SI_SM_CALL_WITH_TICK_DELAY) { schedule_timeout_uninterruptible(1); smi_result = smi_info->handlers->event( smi_info->si_sm, jiffies_to_usecs(1)); } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { smi_result = smi_info->handlers->event( smi_info->si_sm, 0); } else break; } if (smi_result == SI_SM_HOSED) /* * We couldn't get the state machine to run, so whatever's at * the port is probably not an IPMI SMI interface. */ return -ENODEV; return 0; } static int try_get_dev_id(struct smi_info *smi_info) { unsigned char msg[2]; unsigned char *resp; unsigned long resp_len; int rv = 0; unsigned int retry_count = 0; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) return -ENOMEM; /* * Do a Get Device ID command, since it comes back with some * useful info. */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_DEVICE_ID_CMD; retry: smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); rv = wait_for_msg_done(smi_info); if (rv) goto out; resp_len = smi_info->handlers->get_result(smi_info->si_sm, resp, IPMI_MAX_MSG_LENGTH); /* Check and record info from the get device id, in case we need it. */ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1], resp + 2, resp_len - 2, &smi_info->device_id); if (rv) { /* record completion code */ unsigned char cc = *(resp + 2); if (cc != IPMI_CC_NO_ERROR && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { dev_warn_ratelimited(smi_info->io.dev, "BMC returned 0x%2.2x, retry get bmc device id\n", cc); goto retry; } } out: kfree(resp); return rv; } static int get_global_enables(struct smi_info *smi_info, u8 *enables) { unsigned char msg[3]; unsigned char *resp; unsigned long resp_len; int rv; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) return -ENOMEM; msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); rv = wait_for_msg_done(smi_info); if (rv) { dev_warn(smi_info->io.dev, "Error getting response from get global enables command: %d\n", rv); goto out; } resp_len = smi_info->handlers->get_result(smi_info->si_sm, resp, IPMI_MAX_MSG_LENGTH); if (resp_len < 4 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || resp[2] != 0) { dev_warn(smi_info->io.dev, "Invalid return from get global enables command: %ld %x %x %x\n", resp_len, resp[0], resp[1], resp[2]); rv = -EINVAL; goto out; } else { *enables = resp[3]; } out: kfree(resp); return rv; } /* * Returns 1 if it gets an error from the command. 
*/ static int set_global_enables(struct smi_info *smi_info, u8 enables) { unsigned char msg[3]; unsigned char *resp; unsigned long resp_len; int rv; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) return -ENOMEM; msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = enables; smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); rv = wait_for_msg_done(smi_info); if (rv) { dev_warn(smi_info->io.dev, "Error getting response from set global enables command: %d\n", rv); goto out; } resp_len = smi_info->handlers->get_result(smi_info->si_sm, resp, IPMI_MAX_MSG_LENGTH); if (resp_len < 3 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { dev_warn(smi_info->io.dev, "Invalid return from set global enables command: %ld %x %x\n", resp_len, resp[0], resp[1]); rv = -EINVAL; goto out; } if (resp[2] != 0) rv = 1; out: kfree(resp); return rv; } /* * Some BMCs do not support clearing the receive irq bit in the global * enables (even if they don't support interrupts on the BMC). Check * for this and handle it properly. */ static void check_clr_rcv_irq(struct smi_info *smi_info) { u8 enables = 0; int rv; rv = get_global_enables(smi_info, &enables); if (!rv) { if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0) /* Already clear, should work ok. */ return; enables &= ~IPMI_BMC_RCV_MSG_INTR; rv = set_global_enables(smi_info, enables); } if (rv < 0) { dev_err(smi_info->io.dev, "Cannot check clearing the rcv irq: %d\n", rv); return; } if (rv) { /* * An error when setting the event buffer bit means * clearing the bit is not supported. */ dev_warn(smi_info->io.dev, "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n"); smi_info->cannot_disable_irq = true; } } /* * Some BMCs do not support setting the interrupt bits in the global * enables even if they support interrupts. Clearly bad, but we can * compensate. */ static void check_set_rcv_irq(struct smi_info *smi_info) { u8 enables = 0; int rv; if (!smi_info->io.irq) return; rv = get_global_enables(smi_info, &enables); if (!rv) { enables |= IPMI_BMC_RCV_MSG_INTR; rv = set_global_enables(smi_info, enables); } if (rv < 0) { dev_err(smi_info->io.dev, "Cannot check setting the rcv irq: %d\n", rv); return; } if (rv) { /* * An error when setting the event buffer bit means * setting the bit is not supported. */ dev_warn(smi_info->io.dev, "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n"); smi_info->cannot_disable_irq = true; smi_info->irq_enable_broken = true; } } static int try_enable_event_buffer(struct smi_info *smi_info) { unsigned char msg[3]; unsigned char *resp; unsigned long resp_len; int rv = 0; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) return -ENOMEM; msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); rv = wait_for_msg_done(smi_info); if (rv) { pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n"); goto out; } resp_len = smi_info->handlers->get_result(smi_info->si_sm, resp, IPMI_MAX_MSG_LENGTH); if (resp_len < 4 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || resp[2] != 0) { pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n"); rv = -EINVAL; goto out; } if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { /* buffer is already enabled, nothing to do. 
*/ smi_info->supports_event_msg_buff = true; goto out; } msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); rv = wait_for_msg_done(smi_info); if (rv) { pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n"); goto out; } resp_len = smi_info->handlers->get_result(smi_info->si_sm, resp, IPMI_MAX_MSG_LENGTH); if (resp_len < 3 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { pr_warn("Invalid return from get global, enables command, not enable the event buffer\n"); rv = -EINVAL; goto out; } if (resp[2] != 0) /* * An error when setting the event buffer bit means * that the event buffer is not supported. */ rv = -ENOENT; else smi_info->supports_event_msg_buff = true; out: kfree(resp); return rv; } #define IPMI_SI_ATTR(name) \ static ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct smi_info *smi_info = dev_get_drvdata(dev); \ \ return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \ } \ static DEVICE_ATTR_RO(name) static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct smi_info *smi_info = dev_get_drvdata(dev); return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]); } static DEVICE_ATTR_RO(type); static ssize_t interrupts_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { struct smi_info *smi_info = dev_get_drvdata(dev); int enabled = smi_info->io.irq && !smi_info->interrupt_disabled; return sysfs_emit(buf, "%d\n", enabled); } static DEVICE_ATTR_RO(interrupts_enabled); IPMI_SI_ATTR(short_timeouts); IPMI_SI_ATTR(long_timeouts); IPMI_SI_ATTR(idles); IPMI_SI_ATTR(interrupts); IPMI_SI_ATTR(attentions); IPMI_SI_ATTR(flag_fetches); IPMI_SI_ATTR(hosed_count); IPMI_SI_ATTR(complete_transactions); IPMI_SI_ATTR(events); IPMI_SI_ATTR(watchdog_pretimeouts); IPMI_SI_ATTR(incoming_messages); static ssize_t params_show(struct device *dev, struct device_attribute *attr, char *buf) { struct smi_info *smi_info = dev_get_drvdata(dev); return sysfs_emit(buf, "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", si_to_str[smi_info->io.si_type], addr_space_to_str[smi_info->io.addr_space], smi_info->io.addr_data, smi_info->io.regspacing, smi_info->io.regsize, smi_info->io.regshift, smi_info->io.irq, smi_info->io.slave_addr); } static DEVICE_ATTR_RO(params); static struct attribute *ipmi_si_dev_attrs[] = { &dev_attr_type.attr, &dev_attr_interrupts_enabled.attr, &dev_attr_short_timeouts.attr, &dev_attr_long_timeouts.attr, &dev_attr_idles.attr, &dev_attr_interrupts.attr, &dev_attr_attentions.attr, &dev_attr_flag_fetches.attr, &dev_attr_hosed_count.attr, &dev_attr_complete_transactions.attr, &dev_attr_events.attr, &dev_attr_watchdog_pretimeouts.attr, &dev_attr_incoming_messages.attr, &dev_attr_params.attr, NULL }; static const struct attribute_group ipmi_si_dev_attr_group = { .attrs = ipmi_si_dev_attrs, }; /* * oem_data_avail_to_receive_msg_avail * @info - smi_info structure with msg_flags set * * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL * Returns 1 indicating need to re-run handle_flags(). 
*/ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) { smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL); return 1; } /* * setup_dell_poweredge_oem_data_handler * @info - smi_info.device_id must be populated * * Systems that match, but have firmware version < 1.40 may assert * OEM0_DATA_AVAIL on their own, without being told via Set Flags that * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags * as RECEIVE_MSG_AVAIL instead. * * As Dell has no plans to release IPMI 1.5 firmware that *ever* * assert the OEM[012] bits, and if it did, the driver would have to * change to handle that properly, we don't actually check for the * firmware version. * Device ID = 0x20 BMC on PowerEdge 8G servers * Device Revision = 0x80 * Firmware Revision1 = 0x01 BMC version 1.40 * Firmware Revision2 = 0x40 BCD encoded * IPMI Version = 0x51 IPMI 1.5 * Manufacturer ID = A2 02 00 Dell IANA * * Additionally, PowerEdge systems with IPMI < 1.5 may also assert * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. * */ #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 #define DELL_IANA_MFR_ID 0x0002a2 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; if (id->manufacturer_id == DELL_IANA_MFR_ID) { if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { smi_info->oem_data_avail_handler = oem_data_avail_to_receive_msg_avail; } else if (ipmi_version_major(id) < 1 || (ipmi_version_major(id) == 1 && ipmi_version_minor(id) < 5)) { smi_info->oem_data_avail_handler = oem_data_avail_to_receive_msg_avail; } } } #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA static void return_hosed_msg_badsize(struct smi_info *smi_info) { struct ipmi_smi_msg *msg = smi_info->curr_msg; /* Make it a response */ msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; msg->rsp_size = 3; smi_info->curr_msg = NULL; deliver_recv_msg(smi_info, msg); } /* * dell_poweredge_bt_xaction_handler * @info - smi_info.device_id must be populated * * Dell PowerEdge servers with the BT interface (x6xx and 1750) will * not respond to a Get SDR command if the length of the data * requested is exactly 0x3A, which leads to command timeouts and no * data returned. This intercepts such commands, and causes userspace * callers to try again with a different-sized buffer, which succeeds. */ #define STORAGE_NETFN 0x0A #define STORAGE_CMD_GET_SDR 0x23 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, unsigned long unused, void *in) { struct smi_info *smi_info = in; unsigned char *data = smi_info->curr_msg->data; unsigned int size = smi_info->curr_msg->data_size; if (size >= 8 && (data[0]>>2) == STORAGE_NETFN && data[1] == STORAGE_CMD_GET_SDR && data[7] == 0x3A) { return_hosed_msg_badsize(smi_info); return NOTIFY_STOP; } return NOTIFY_DONE; } static struct notifier_block dell_poweredge_bt_xaction_notifier = { .notifier_call = dell_poweredge_bt_xaction_handler, }; /* * setup_dell_poweredge_bt_xaction_handler * @info - smi_info.device_id must be filled in already * * Fills in smi_info.device_id.start_transaction_pre_hook * when we know what function to use there. 
*/ static void setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; if (id->manufacturer_id == DELL_IANA_MFR_ID && smi_info->io.si_type == SI_BT) register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); } /* * setup_oem_data_handler * @info - smi_info.device_id must be filled in already * * Fills in smi_info.device_id.oem_data_available_handler * when we know what function to use there. */ static void setup_oem_data_handler(struct smi_info *smi_info) { setup_dell_poweredge_oem_data_handler(smi_info); } static void setup_xaction_handlers(struct smi_info *smi_info) { setup_dell_poweredge_bt_xaction_handler(smi_info); } static void check_for_broken_irqs(struct smi_info *smi_info) { check_clr_rcv_irq(smi_info); check_set_rcv_irq(smi_info); } static inline void stop_timer_and_thread(struct smi_info *smi_info) { if (smi_info->thread != NULL) { kthread_stop(smi_info->thread); smi_info->thread = NULL; } smi_info->timer_can_start = false; del_timer_sync(&smi_info->si_timer); } static struct smi_info *find_dup_si(struct smi_info *info) { struct smi_info *e; list_for_each_entry(e, &smi_infos, link) { if (e->io.addr_space != info->io.addr_space) continue; if (e->io.addr_data == info->io.addr_data) { /* * This is a cheap hack, ACPI doesn't have a defined * slave address but SMBIOS does. Pick it up from * any source that has it available. */ if (info->io.slave_addr && !e->io.slave_addr) e->io.slave_addr = info->io.slave_addr; return e; } } return NULL; } int ipmi_si_add_smi(struct si_sm_io *io) { int rv = 0; struct smi_info *new_smi, *dup; /* * If the user gave us a hard-coded device at the same * address, they presumably want us to use it and not what is * in the firmware. */ if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD && ipmi_si_hardcode_match(io->addr_space, io->addr_data)) { dev_info(io->dev, "Hard-coded device at this address already exists"); return -ENODEV; } if (!io->io_setup) { if (io->addr_space == IPMI_IO_ADDR_SPACE) { io->io_setup = ipmi_si_port_setup; } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { io->io_setup = ipmi_si_mem_setup; } else { return -EINVAL; } } new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL); if (!new_smi) return -ENOMEM; spin_lock_init(&new_smi->si_lock); new_smi->io = *io; mutex_lock(&smi_infos_lock); dup = find_dup_si(new_smi); if (dup) { if (new_smi->io.addr_source == SI_ACPI && dup->io.addr_source == SI_SMBIOS) { /* We prefer ACPI over SMBIOS. */ dev_info(dup->io.dev, "Removing SMBIOS-specified %s state machine in favor of ACPI\n", si_to_str[new_smi->io.si_type]); cleanup_one_si(dup); } else { dev_info(new_smi->io.dev, "%s-specified %s state machine: duplicate\n", ipmi_addr_src_to_str(new_smi->io.addr_source), si_to_str[new_smi->io.si_type]); rv = -EBUSY; kfree(new_smi); goto out_err; } } pr_info("Adding %s-specified %s state machine\n", ipmi_addr_src_to_str(new_smi->io.addr_source), si_to_str[new_smi->io.si_type]); list_add_tail(&new_smi->link, &smi_infos); if (initialized) rv = try_smi_init(new_smi); out_err: mutex_unlock(&smi_infos_lock); return rv; } /* * Try to start up an interface. Must be called with smi_infos_lock * held, primarily to keep smi_num consistent, we only one to do these * one at a time. 
*/ static int try_smi_init(struct smi_info *new_smi) { int rv = 0; int i; pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n", ipmi_addr_src_to_str(new_smi->io.addr_source), si_to_str[new_smi->io.si_type], addr_space_to_str[new_smi->io.addr_space], new_smi->io.addr_data, new_smi->io.slave_addr, new_smi->io.irq); switch (new_smi->io.si_type) { case SI_KCS: new_smi->handlers = &kcs_smi_handlers; break; case SI_SMIC: new_smi->handlers = &smic_smi_handlers; break; case SI_BT: new_smi->handlers = &bt_smi_handlers; break; default: /* No support for anything else yet. */ rv = -EIO; goto out_err; } new_smi->si_num = smi_num; /* Do this early so it's available for logs. */ if (!new_smi->io.dev) { pr_err("IPMI interface added with no device\n"); rv = -EIO; goto out_err; } /* Allocate the state machine's data and initialize it. */ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); if (!new_smi->si_sm) { rv = -ENOMEM; goto out_err; } new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm, &new_smi->io); /* Now that we know the I/O size, we can set up the I/O. */ rv = new_smi->io.io_setup(&new_smi->io); if (rv) { dev_err(new_smi->io.dev, "Could not set up I/O space\n"); goto out_err; } /* Do low-level detection first. */ if (new_smi->handlers->detect(new_smi->si_sm)) { if (new_smi->io.addr_source) dev_err(new_smi->io.dev, "Interface detection failed\n"); rv = -ENODEV; goto out_err; } /* * Attempt a get device id command. If it fails, we probably * don't have a BMC here. */ rv = try_get_dev_id(new_smi); if (rv) { if (new_smi->io.addr_source) dev_err(new_smi->io.dev, "There appears to be no BMC at this location\n"); goto out_err; } setup_oem_data_handler(new_smi); setup_xaction_handlers(new_smi); check_for_broken_irqs(new_smi); new_smi->waiting_msg = NULL; new_smi->curr_msg = NULL; atomic_set(&new_smi->req_events, 0); new_smi->run_to_completion = false; for (i = 0; i < SI_NUM_STATS; i++) atomic_set(&new_smi->stats[i], 0); new_smi->interrupt_disabled = true; atomic_set(&new_smi->need_watch, 0); rv = try_enable_event_buffer(new_smi); if (rv == 0) new_smi->has_event_buffer = true; /* * Start clearing the flags before we enable interrupts or the * timer to avoid racing with the timer. */ start_clear_flags(new_smi); /* * IRQ is defined to be set when non-zero. req_events will * cause a global flags check that will enable interrupts. */ if (new_smi->io.irq) { new_smi->interrupt_disabled = false; atomic_set(&new_smi->req_events, 1); } dev_set_drvdata(new_smi->io.dev, new_smi); rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group); if (rv) { dev_err(new_smi->io.dev, "Unable to add device attributes: error %d\n", rv); goto out_err; } new_smi->dev_group_added = true; rv = ipmi_register_smi(&handlers, new_smi, new_smi->io.dev, new_smi->io.slave_addr); if (rv) { dev_err(new_smi->io.dev, "Unable to register device: error %d\n", rv); goto out_err; } /* Don't increment till we know we have succeeded. 
*/ smi_num++; dev_info(new_smi->io.dev, "IPMI %s interface initialized\n", si_to_str[new_smi->io.si_type]); WARN_ON(new_smi->io.dev->init_name != NULL); out_err: if (rv && new_smi->io.io_cleanup) { new_smi->io.io_cleanup(&new_smi->io); new_smi->io.io_cleanup = NULL; } if (rv && new_smi->si_sm) { kfree(new_smi->si_sm); new_smi->si_sm = NULL; } return rv; } static int __init init_ipmi_si(void) { struct smi_info *e; enum ipmi_addr_src type = SI_INVALID; if (initialized) return 0; ipmi_hardcode_init(); pr_info("IPMI System Interface driver\n"); ipmi_si_platform_init(); ipmi_si_pci_init(); ipmi_si_parisc_init(); /* We prefer devices with interrupts, but in the case of a machine with multiple BMCs we assume that there will be several instances of a given type so if we succeed in registering a type then also try to register everything else of the same type */ mutex_lock(&smi_infos_lock); list_for_each_entry(e, &smi_infos, link) { /* Try to register a device if it has an IRQ and we either haven't successfully registered a device yet or this device has the same type as one we successfully registered */ if (e->io.irq && (!type || e->io.addr_source == type)) { if (!try_smi_init(e)) { type = e->io.addr_source; } } } /* type will only have been set if we successfully registered an si */ if (type) goto skip_fallback_noirq; /* Fall back to the preferred device */ list_for_each_entry(e, &smi_infos, link) { if (!e->io.irq && (!type || e->io.addr_source == type)) { if (!try_smi_init(e)) { type = e->io.addr_source; } } } skip_fallback_noirq: initialized = true; mutex_unlock(&smi_infos_lock); if (type) return 0; mutex_lock(&smi_infos_lock); if (unload_when_empty && list_empty(&smi_infos)) { mutex_unlock(&smi_infos_lock); cleanup_ipmi_si(); pr_warn("Unable to find any System Interface(s)\n"); return -ENODEV; } else { mutex_unlock(&smi_infos_lock); return 0; } } module_init(init_ipmi_si); static void wait_msg_processed(struct smi_info *smi_info) { unsigned long jiffies_now; long time_diff; while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); smi_event_handler(smi_info, time_diff); schedule_timeout_uninterruptible(1); } } static void shutdown_smi(void *send_info) { struct smi_info *smi_info = send_info; if (smi_info->dev_group_added) { device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); smi_info->dev_group_added = false; } if (smi_info->io.dev) dev_set_drvdata(smi_info->io.dev, NULL); /* * Make sure that interrupts, the timer and the thread are * stopped and will not run again. */ smi_info->interrupt_disabled = true; if (smi_info->io.irq_cleanup) { smi_info->io.irq_cleanup(&smi_info->io); smi_info->io.irq_cleanup = NULL; } stop_timer_and_thread(smi_info); /* * Wait until we know that we are out of any interrupt * handlers might have been running before we freed the * interrupt. */ synchronize_rcu(); /* * Timeouts are stopped, now make sure the interrupts are off * in the BMC. Note that timers and CPU interrupts are off, * so no need for locks. 
*/ wait_msg_processed(smi_info); if (smi_info->handlers) disable_si_irq(smi_info); wait_msg_processed(smi_info); if (smi_info->handlers) smi_info->handlers->cleanup(smi_info->si_sm); if (smi_info->io.io_cleanup) { smi_info->io.io_cleanup(&smi_info->io); smi_info->io.io_cleanup = NULL; } kfree(smi_info->si_sm); smi_info->si_sm = NULL; smi_info->intf = NULL; } /* * Must be called with smi_infos_lock held, to serialize the * smi_info->intf check. */ static void cleanup_one_si(struct smi_info *smi_info) { if (!smi_info) return; list_del(&smi_info->link); ipmi_unregister_smi(smi_info->intf); kfree(smi_info); } void ipmi_si_remove_by_dev(struct device *dev) { struct smi_info *e; mutex_lock(&smi_infos_lock); list_for_each_entry(e, &smi_infos, link) { if (e->io.dev == dev) { cleanup_one_si(e); break; } } mutex_unlock(&smi_infos_lock); } struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type, unsigned long addr) { /* remove */ struct smi_info *e, *tmp_e; struct device *dev = NULL; mutex_lock(&smi_infos_lock); list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { if (e->io.addr_space != addr_space) continue; if (e->io.si_type != si_type) continue; if (e->io.addr_data == addr) { dev = get_device(e->io.dev); cleanup_one_si(e); } } mutex_unlock(&smi_infos_lock); return dev; } static void cleanup_ipmi_si(void) { struct smi_info *e, *tmp_e; if (!initialized) return; ipmi_si_pci_shutdown(); ipmi_si_parisc_shutdown(); ipmi_si_platform_shutdown(); mutex_lock(&smi_infos_lock); list_for_each_entry_safe(e, tmp_e, &smi_infos, link) cleanup_one_si(e); mutex_unlock(&smi_infos_lock); ipmi_si_hardcode_exit(); ipmi_si_hotmod_exit(); } module_exit(cleanup_ipmi_si); MODULE_ALIAS("platform:dmi-ipmi-si"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <[email protected]>"); MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
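For illustration only (not from the kernel tree): the entry point above, ipmi_si_add_smi(), expects the caller to hand it a filled-in struct si_sm_io. A minimal sketch of how a hypothetical platform glue might do that follows; the probe function, the 0xca2 port, the "ipmi_si.h" include and the regspacing/regsize/regshift defaults are assumptions made for the example.

#include <linux/platform_device.h>
#include <linux/string.h>

#include "ipmi_si.h"	/* assumed: the driver-local header declaring si_sm_io and ipmi_si_add_smi() */

static int example_ipmi_probe(struct platform_device *pdev)
{
	struct si_sm_io io;

	memset(&io, 0, sizeof(io));
	io.addr_source = SI_PLATFORM;		/* assumed source tag for platform-described BMCs */
	io.si_type     = SI_KCS;		/* KCS, SMIC or BT, as handled by try_smi_init() */
	io.addr_space  = IPMI_IO_ADDR_SPACE;	/* or IPMI_MEM_ADDR_SPACE for memory-mapped BMCs */
	io.addr_data   = 0xca2;			/* illustrative KCS I/O port */
	io.regspacing  = 1;			/* assumed layout fields: one byte per register, no shift */
	io.regsize     = 1;
	io.regshift    = 0;
	io.slave_addr  = 0x20;			/* BMC slave address */
	io.irq         = 0;			/* 0 selects polled operation */
	io.dev         = &pdev->dev;		/* try_smi_init() rejects an interface with no device */

	/* io_setup can stay NULL; ipmi_si_add_smi() picks the port or memory setup routine. */
	return ipmi_si_add_smi(&io);
}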
linux-master
drivers/char/ipmi/ipmi_si_intf.c
// SPDX-License-Identifier: GPL-2.0 /* * Driver to talk to a remote management controller on IPMB. */ #include <linux/acpi.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/semaphore.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/ipmi_msgdefs.h> #include <linux/ipmi_smi.h> #define DEVICE_NAME "ipmi-ipmb" static int bmcaddr = 0x20; module_param(bmcaddr, int, 0644); MODULE_PARM_DESC(bmcaddr, "Address to use for BMC."); static unsigned int retry_time_ms = 250; module_param(retry_time_ms, uint, 0644); MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds."); static unsigned int max_retries = 1; module_param(max_retries, uint, 0644); MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out."); /* Add room for the two slave addresses, two checksums, and rqSeq. */ #define IPMB_MAX_MSG_LEN (IPMI_MAX_MSG_LENGTH + 5) struct ipmi_ipmb_dev { struct ipmi_smi *intf; struct i2c_client *client; struct i2c_client *slave; struct ipmi_smi_handlers handlers; bool ready; u8 curr_seq; u8 bmcaddr; u32 retry_time_ms; u32 max_retries; struct ipmi_smi_msg *next_msg; struct ipmi_smi_msg *working_msg; /* Transmit thread. */ struct task_struct *thread; struct semaphore wake_thread; struct semaphore got_rsp; spinlock_t lock; bool stopping; u8 xmitmsg[IPMB_MAX_MSG_LEN]; unsigned int xmitlen; u8 rcvmsg[IPMB_MAX_MSG_LEN]; unsigned int rcvlen; bool overrun; }; static bool valid_ipmb(struct ipmi_ipmb_dev *iidev) { u8 *msg = iidev->rcvmsg; u8 netfn; if (iidev->overrun) return false; /* Minimum message size. */ if (iidev->rcvlen < 7) return false; /* Is it a response? */ netfn = msg[1] >> 2; if (netfn & 1) { /* Response messages have an added completion code. */ if (iidev->rcvlen < 8) return false; } if (ipmb_checksum(msg, 3) != 0) return false; if (ipmb_checksum(msg + 3, iidev->rcvlen - 3) != 0) return false; return true; } static void ipmi_ipmb_check_msg_done(struct ipmi_ipmb_dev *iidev) { struct ipmi_smi_msg *imsg = NULL; u8 *msg = iidev->rcvmsg; bool is_cmd; unsigned long flags; if (iidev->rcvlen == 0) return; if (!valid_ipmb(iidev)) goto done; is_cmd = ((msg[1] >> 2) & 1) == 0; if (is_cmd) { /* Ignore commands until we are up. */ if (!iidev->ready) goto done; /* It's a command, allocate a message for it. */ imsg = ipmi_alloc_smi_msg(); if (!imsg) goto done; imsg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; imsg->data_size = 0; } else { spin_lock_irqsave(&iidev->lock, flags); if (iidev->working_msg) { u8 seq = msg[4] >> 2; bool xmit_rsp = (iidev->working_msg->data[0] >> 2) & 1; /* * Responses should carry the sequence we sent * them with. If it's a transmitted response, * ignore it. And if the message hasn't been * transmitted, ignore it. */ if (!xmit_rsp && seq == iidev->curr_seq) { iidev->curr_seq = (iidev->curr_seq + 1) & 0x3f; imsg = iidev->working_msg; iidev->working_msg = NULL; } } spin_unlock_irqrestore(&iidev->lock, flags); } if (!imsg) goto done; if (imsg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { imsg->rsp[0] = msg[1]; /* NetFn/LUN */ /* * Keep the source address, rqSeq. Drop the trailing * checksum. */ memcpy(imsg->rsp + 1, msg + 3, iidev->rcvlen - 4); imsg->rsp_size = iidev->rcvlen - 3; } else { imsg->rsp[0] = msg[1]; /* NetFn/LUN */ /* * Skip the source address, rqSeq. Drop the trailing * checksum. 
*/ memcpy(imsg->rsp + 1, msg + 5, iidev->rcvlen - 6); imsg->rsp_size = iidev->rcvlen - 5; } ipmi_smi_msg_received(iidev->intf, imsg); if (!is_cmd) up(&iidev->got_rsp); done: iidev->overrun = false; iidev->rcvlen = 0; } /* * The IPMB protocol only supports i2c writes so there is no need to * support I2C_SLAVE_READ* events, except to know if the other end has * issued a read without going to stop mode. */ static int ipmi_ipmb_slave_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client); switch (event) { case I2C_SLAVE_WRITE_REQUESTED: ipmi_ipmb_check_msg_done(iidev); /* * First byte is the slave address, to ease the checksum * calculation. */ iidev->rcvmsg[0] = client->addr << 1; iidev->rcvlen = 1; break; case I2C_SLAVE_WRITE_RECEIVED: if (iidev->rcvlen >= sizeof(iidev->rcvmsg)) iidev->overrun = true; else iidev->rcvmsg[iidev->rcvlen++] = *val; break; case I2C_SLAVE_READ_REQUESTED: case I2C_SLAVE_STOP: ipmi_ipmb_check_msg_done(iidev); break; case I2C_SLAVE_READ_PROCESSED: break; } return 0; } static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev, struct ipmi_smi_msg *msg, u8 cc) { if ((msg->data[0] >> 2) & 1) { /* * It's a response being sent, we need to return a * response to the response. Fake a send msg command * response with channel 0. This will always be ipmb * direct. */ msg->data[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2; msg->data[3] = IPMI_SEND_MSG_CMD; msg->data[4] = cc; msg->data_size = 5; } msg->rsp[0] = msg->data[0] | (1 << 2); if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { msg->rsp[1] = msg->data[1]; msg->rsp[2] = msg->data[2]; msg->rsp[3] = msg->data[3]; msg->rsp[4] = cc; msg->rsp_size = 5; } else { msg->rsp[1] = msg->data[1]; msg->rsp[2] = cc; msg->rsp_size = 3; } ipmi_smi_msg_received(iidev->intf, msg); } static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev, struct ipmi_smi_msg *msg) { if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { iidev->xmitmsg[0] = msg->data[1]; iidev->xmitmsg[1] = msg->data[0]; memcpy(iidev->xmitmsg + 4, msg->data + 2, msg->data_size - 2); iidev->xmitlen = msg->data_size + 2; } else { iidev->xmitmsg[0] = iidev->bmcaddr; iidev->xmitmsg[1] = msg->data[0]; iidev->xmitmsg[4] = 0; memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1); iidev->xmitlen = msg->data_size + 4; } iidev->xmitmsg[3] = iidev->slave->addr << 1; if (((msg->data[0] >> 2) & 1) == 0) /* If it's a command, put in our own sequence number. */ iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) | (iidev->curr_seq << 2)); /* Now add on the final checksums. 
*/ iidev->xmitmsg[2] = ipmb_checksum(iidev->xmitmsg, 2); iidev->xmitmsg[iidev->xmitlen] = ipmb_checksum(iidev->xmitmsg + 3, iidev->xmitlen - 3); iidev->xmitlen++; } static int ipmi_ipmb_thread(void *data) { struct ipmi_ipmb_dev *iidev = data; while (!kthread_should_stop()) { long ret; struct i2c_msg i2c_msg; struct ipmi_smi_msg *msg = NULL; unsigned long flags; unsigned int retries = 0; /* Wait for a message to send */ ret = down_interruptible(&iidev->wake_thread); if (iidev->stopping) break; if (ret) continue; spin_lock_irqsave(&iidev->lock, flags); if (iidev->next_msg) { msg = iidev->next_msg; iidev->next_msg = NULL; } spin_unlock_irqrestore(&iidev->lock, flags); if (!msg) continue; ipmi_ipmb_format_for_xmit(iidev, msg); retry: i2c_msg.len = iidev->xmitlen - 1; if (i2c_msg.len > 32) { ipmi_ipmb_send_response(iidev, msg, IPMI_REQ_LEN_EXCEEDED_ERR); continue; } i2c_msg.addr = iidev->xmitmsg[0] >> 1; i2c_msg.flags = 0; i2c_msg.buf = iidev->xmitmsg + 1; /* Rely on i2c_transfer for a barrier. */ iidev->working_msg = msg; ret = i2c_transfer(iidev->client->adapter, &i2c_msg, 1); if ((msg->data[0] >> 2) & 1) { /* * It's a response, nothing will be returned * by the other end. */ iidev->working_msg = NULL; ipmi_ipmb_send_response(iidev, msg, ret < 0 ? IPMI_BUS_ERR : 0); continue; } if (ret < 0) { iidev->working_msg = NULL; ipmi_ipmb_send_response(iidev, msg, IPMI_BUS_ERR); continue; } /* A command was sent, wait for its response. */ ret = down_timeout(&iidev->got_rsp, msecs_to_jiffies(iidev->retry_time_ms)); /* * Grab the message if we can. If the handler hasn't * already handled it, the message will still be there. */ spin_lock_irqsave(&iidev->lock, flags); msg = iidev->working_msg; iidev->working_msg = NULL; spin_unlock_irqrestore(&iidev->lock, flags); if (!msg && ret) { /* * If working_msg is not set and we timed out, * that means the message grabbed by * check_msg_done before we could grab it * here. Wait again for check_msg_done to up * the semaphore. */ down(&iidev->got_rsp); } else if (msg && ++retries <= iidev->max_retries) { spin_lock_irqsave(&iidev->lock, flags); iidev->working_msg = msg; spin_unlock_irqrestore(&iidev->lock, flags); goto retry; } if (msg) ipmi_ipmb_send_response(iidev, msg, IPMI_TIMEOUT_ERR); } if (iidev->next_msg) /* Return an unspecified error. */ ipmi_ipmb_send_response(iidev, iidev->next_msg, 0xff); return 0; } static int ipmi_ipmb_start_processing(void *send_info, struct ipmi_smi *new_intf) { struct ipmi_ipmb_dev *iidev = send_info; iidev->intf = new_intf; iidev->ready = true; return 0; } static void ipmi_ipmb_stop_thread(struct ipmi_ipmb_dev *iidev) { if (iidev->thread) { struct task_struct *t = iidev->thread; iidev->thread = NULL; iidev->stopping = true; up(&iidev->wake_thread); up(&iidev->got_rsp); kthread_stop(t); } } static void ipmi_ipmb_shutdown(void *send_info) { struct ipmi_ipmb_dev *iidev = send_info; ipmi_ipmb_stop_thread(iidev); } static void ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg) { struct ipmi_ipmb_dev *iidev = send_info; unsigned long flags; spin_lock_irqsave(&iidev->lock, flags); BUG_ON(iidev->next_msg); iidev->next_msg = msg; spin_unlock_irqrestore(&iidev->lock, flags); up(&iidev->wake_thread); } static void ipmi_ipmb_request_events(void *send_info) { /* We don't fetch events here. 
*/ } static void ipmi_ipmb_cleanup(struct ipmi_ipmb_dev *iidev) { if (iidev->slave) { i2c_slave_unregister(iidev->slave); if (iidev->slave != iidev->client) i2c_unregister_device(iidev->slave); } iidev->slave = NULL; iidev->client = NULL; ipmi_ipmb_stop_thread(iidev); } static void ipmi_ipmb_remove(struct i2c_client *client) { struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client); ipmi_ipmb_cleanup(iidev); ipmi_unregister_smi(iidev->intf); } static int ipmi_ipmb_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ipmi_ipmb_dev *iidev; struct device_node *slave_np; struct i2c_adapter *slave_adap = NULL; struct i2c_client *slave = NULL; int rv; iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL); if (!iidev) return -ENOMEM; if (of_property_read_u8(dev->of_node, "bmcaddr", &iidev->bmcaddr) != 0) iidev->bmcaddr = bmcaddr; if (iidev->bmcaddr == 0 || iidev->bmcaddr & 1) { /* Can't have the write bit set. */ dev_notice(&client->dev, "Invalid bmc address value %2.2x\n", iidev->bmcaddr); return -EINVAL; } if (of_property_read_u32(dev->of_node, "retry-time", &iidev->retry_time_ms) != 0) iidev->retry_time_ms = retry_time_ms; if (of_property_read_u32(dev->of_node, "max-retries", &iidev->max_retries) != 0) iidev->max_retries = max_retries; slave_np = of_parse_phandle(dev->of_node, "slave-dev", 0); if (slave_np) { slave_adap = of_get_i2c_adapter_by_node(slave_np); of_node_put(slave_np); if (!slave_adap) { dev_notice(&client->dev, "Could not find slave adapter\n"); return -EINVAL; } } iidev->client = client; if (slave_adap) { struct i2c_board_info binfo; memset(&binfo, 0, sizeof(binfo)); strscpy(binfo.type, "ipmb-slave", I2C_NAME_SIZE); binfo.addr = client->addr; binfo.flags = I2C_CLIENT_SLAVE; slave = i2c_new_client_device(slave_adap, &binfo); i2c_put_adapter(slave_adap); if (IS_ERR(slave)) { rv = PTR_ERR(slave); dev_notice(&client->dev, "Could not allocate slave device: %d\n", rv); return rv; } i2c_set_clientdata(slave, iidev); } else { slave = client; } i2c_set_clientdata(client, iidev); slave->flags |= I2C_CLIENT_SLAVE; rv = i2c_slave_register(slave, ipmi_ipmb_slave_cb); if (rv) goto out_err; iidev->slave = slave; slave = NULL; iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT; iidev->handlers.start_processing = ipmi_ipmb_start_processing; iidev->handlers.shutdown = ipmi_ipmb_shutdown; iidev->handlers.sender = ipmi_ipmb_sender; iidev->handlers.request_events = ipmi_ipmb_request_events; spin_lock_init(&iidev->lock); sema_init(&iidev->wake_thread, 0); sema_init(&iidev->got_rsp, 0); iidev->thread = kthread_run(ipmi_ipmb_thread, iidev, "kipmb%4.4x", client->addr); if (IS_ERR(iidev->thread)) { rv = PTR_ERR(iidev->thread); dev_notice(&client->dev, "Could not start kernel thread: error %d\n", rv); goto out_err; } rv = ipmi_register_smi(&iidev->handlers, iidev, &client->dev, iidev->bmcaddr); if (rv) goto out_err; return 0; out_err: if (slave && slave != client) i2c_unregister_device(slave); ipmi_ipmb_cleanup(iidev); return rv; } #ifdef CONFIG_OF static const struct of_device_id of_ipmi_ipmb_match[] = { { .type = "ipmi", .compatible = DEVICE_NAME }, {}, }; MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match); #else #define of_ipmi_ipmb_match NULL #endif static const struct i2c_device_id ipmi_ipmb_id[] = { { DEVICE_NAME, 0 }, {}, }; MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id); static struct i2c_driver ipmi_ipmb_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = DEVICE_NAME, .of_match_table = of_ipmi_ipmb_match, }, .probe = ipmi_ipmb_probe, .remove = ipmi_ipmb_remove, 
.id_table = ipmi_ipmb_id, }; module_i2c_driver(ipmi_ipmb_driver); MODULE_AUTHOR("Corey Minyard"); MODULE_DESCRIPTION("IPMI IPMB driver"); MODULE_LICENSE("GPL v2");
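As a side illustration, the following is a self-contained userspace sketch (not kernel code) of the IPMB frame that ipmi_ipmb_format_for_xmit() builds and valid_ipmb() checks; the checksum helper mirrors the driver's 2's-complement ipmb_checksum(), and the 0x20 BMC address, the 0x12 requester address and the Get Device ID command are illustrative values.

#include <stdint.h>
#include <stdio.h>

/* 2's-complement checksum: returns 0 when the range already ends in its checksum byte. */
static uint8_t csum(const uint8_t *d, int n)
{
	uint8_t s = 0;

	while (n--)
		s += *d++;
	return (uint8_t)-s;
}

int main(void)
{
	uint8_t f[16];
	int len = 0;

	f[len++] = 0x20;		/* rsSA: responder (BMC) slave address, i.e. bmcaddr */
	f[len++] = 0x06 << 2;		/* NetFn (App request) / rsLUN 0 */
	f[len++] = csum(f, 2);		/* checksum 1 covers rsSA and NetFn/rsLUN */
	f[len++] = 0x12 << 1;		/* rqSA: requester slave address << 1 */
	f[len++] = 0x00 << 2;		/* rqSeq 0 / rqLUN 0 */
	f[len++] = 0x01;		/* cmd: Get Device ID */
	f[len] = csum(f + 3, len - 3);	/* checksum 2 covers rqSA through the last data byte */
	len++;

	/* Same two tests valid_ipmb() applies: both covered ranges must sum to zero. */
	printf("header csum ok: %d, body csum ok: %d, frame len: %d\n",
	       csum(f, 3) == 0, csum(f + 3, len - 3) == 0, len);
	return 0;
}

On the bus the driver sends everything after byte 0 and uses byte 0 shifted right as the I2C destination, which is what the i2c_msg setup in ipmi_ipmb_thread() does.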
linux-master
drivers/char/ipmi/ipmi_ipmb.c
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_bt_sm.c * * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part * of the driver architecture at http://sourceforge.net/projects/openipmi * * Author: Rocky Craig <[email protected]> */ #define DEBUG /* So dev_dbg() is always available. */ #include <linux/kernel.h> /* For printk. */ #include <linux/string.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" #define BT_DEBUG_OFF 0 /* Used in production */ #define BT_DEBUG_ENABLE 1 /* Generic messages */ #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ /* * BT_DEBUG_OFF must be zero to correspond to the default uninitialized * value */ static int bt_debug; /* 0 == BT_DEBUG_OFF */ module_param(bt_debug, int, 0644); MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); /* * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, * and 64 byte buffers. However, one HP implementation wants 255 bytes of * buffer (with a documented message of 160 bytes) so go for the max. * Since the Open IPMI architecture is single-message oriented at this * stage, the queue depth of BT is of no concern. */ #define BT_NORMAL_TIMEOUT 5 /* seconds */ #define BT_NORMAL_RETRY_LIMIT 2 #define BT_RESET_DELAY 6 /* seconds after warm reset */ /* * States are written in chronological order and usually cover * multiple rows of the state table discussion in the IPMI spec. */ enum bt_states { BT_STATE_IDLE = 0, /* Order is critical in this list */ BT_STATE_XACTION_START, BT_STATE_WRITE_BYTES, BT_STATE_WRITE_CONSUME, BT_STATE_READ_WAIT, BT_STATE_CLEAR_B2H, BT_STATE_READ_BYTES, BT_STATE_RESET1, /* These must come last */ BT_STATE_RESET2, BT_STATE_RESET3, BT_STATE_RESTART, BT_STATE_PRINTME, BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ }; /* * Macros seen at the end of state "case" blocks. They help with legibility * and debugging. */ #define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int write_count; unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int read_count; int truncated; long timeout; /* microseconds countdown */ int error_retries; /* end of "common" fields */ int nonzero_status; /* hung BMCs stay all 0 */ enum bt_states complete; /* to divert the state machine */ long BT_CAP_req2rsp; int BT_CAP_retries; /* Recommended retries */ }; #define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */ #define BT_CLR_RD_PTR 0x02 #define BT_H2B_ATN 0x04 #define BT_B2H_ATN 0x08 #define BT_SMS_ATN 0x10 #define BT_OEM0 0x20 #define BT_H_BUSY 0x40 #define BT_B_BUSY 0x80 /* * Some bits are toggled on each write: write once to set it, once * more to clear it; writing a zero does nothing. To absolutely * clear it, check its state and write if set. This avoids the "get * current then use as mask" scheme to modify one bit. Note that the * variable "bt" is hardcoded into these macros. 
*/ #define BT_STATUS bt->io->inputb(bt->io, 0) #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) #define BMC2HOST bt->io->inputb(bt->io, 1) #define HOST2BMC(x) bt->io->outputb(bt->io, 1, x) #define BT_INTMASK_R bt->io->inputb(bt->io, 2) #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) /* * Convenience routines for debugging. These are not multi-open safe! * Note the macros have hardcoded variables in them. */ static char *state2txt(unsigned char state) { switch (state) { case BT_STATE_IDLE: return("IDLE"); case BT_STATE_XACTION_START: return("XACTION"); case BT_STATE_WRITE_BYTES: return("WR_BYTES"); case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); case BT_STATE_READ_WAIT: return("RD_WAIT"); case BT_STATE_CLEAR_B2H: return("CLEAR_B2H"); case BT_STATE_READ_BYTES: return("RD_BYTES"); case BT_STATE_RESET1: return("RESET1"); case BT_STATE_RESET2: return("RESET2"); case BT_STATE_RESET3: return("RESET3"); case BT_STATE_RESTART: return("RESTART"); case BT_STATE_LONG_BUSY: return("LONG_BUSY"); } return("BAD STATE"); } #define STATE2TXT state2txt(bt->state) static char *status2txt(unsigned char status) { /* * This cannot be called by two threads at the same time and * the buffer is always consumed immediately, so the static is * safe to use. */ static char buf[40]; strcpy(buf, "[ "); if (status & BT_B_BUSY) strcat(buf, "B_BUSY "); if (status & BT_H_BUSY) strcat(buf, "H_BUSY "); if (status & BT_OEM0) strcat(buf, "OEM0 "); if (status & BT_SMS_ATN) strcat(buf, "SMS "); if (status & BT_B2H_ATN) strcat(buf, "B2H "); if (status & BT_H2B_ATN) strcat(buf, "H2B "); strcat(buf, "]"); return buf; } #define STATUS2TXT status2txt(status) /* called externally at insmod time, and internally on cleanup */ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) { memset(bt, 0, sizeof(struct si_sm_data)); if (bt->io != io) { /* external: one-time only things */ bt->io = io; bt->seq = 0; } bt->state = BT_STATE_IDLE; /* start here */ bt->complete = BT_STATE_IDLE; /* end here */ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; return 3; /* We claim 3 bytes of space; ought to check SPMI table */ } /* Jam a completion code (probably an error) into a response */ static void force_result(struct si_sm_data *bt, unsigned char completion_code) { bt->read_data[0] = 4; /* # following bytes */ bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */ bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */ bt->read_data[3] = bt->write_data[3]; /* Command */ bt->read_data[4] = completion_code; bt->read_count = 5; } /* The upper state machine starts here */ static int bt_start_transaction(struct si_sm_data *bt, unsigned char *data, unsigned int size) { unsigned int i; if (size < 2) return IPMI_REQ_LEN_INVALID_ERR; if (size > IPMI_MAX_MSG_LENGTH) return IPMI_REQ_LEN_EXCEEDED_ERR; if (bt->state == BT_STATE_LONG_BUSY) return IPMI_NODE_BUSY_ERR; if (bt->state != BT_STATE_IDLE) { dev_warn(bt->io->dev, "BT in invalid state %d\n", bt->state); return IPMI_NOT_IN_MY_STATE_ERR; } if (bt_debug & BT_DEBUG_MSG) { dev_dbg(bt->io->dev, "+++++++++++++++++ New command\n"); dev_dbg(bt->io->dev, "NetFn/LUN CMD [%d data]:", size - 2); for (i = 0; i < size; i ++) pr_cont(" %02x", data[i]); pr_cont("\n"); } bt->write_data[0] = size + 1; /* all data plus seq byte */ bt->write_data[1] = *data; /* NetFn/LUN */ bt->write_data[2] = bt->seq++; memcpy(bt->write_data + 3, data + 1, size - 1); bt->write_count = size + 2; bt->error_retries = 0; bt->nonzero_status = 0; 
bt->truncated = 0; bt->state = BT_STATE_XACTION_START; bt->timeout = bt->BT_CAP_req2rsp; force_result(bt, IPMI_ERR_UNSPECIFIED); return 0; } /* * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE * it calls this. Strip out the length and seq bytes. */ static int bt_get_result(struct si_sm_data *bt, unsigned char *data, unsigned int length) { int i, msg_len; msg_len = bt->read_count - 2; /* account for length & seq */ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { force_result(bt, IPMI_ERR_UNSPECIFIED); msg_len = 3; } data[0] = bt->read_data[1]; data[1] = bt->read_data[3]; if (length < msg_len || bt->truncated) { data[2] = IPMI_ERR_MSG_TRUNCATED; msg_len = 3; } else memcpy(data + 2, bt->read_data + 4, msg_len - 2); if (bt_debug & BT_DEBUG_MSG) { dev_dbg(bt->io->dev, "result %d bytes:", msg_len); for (i = 0; i < msg_len; i++) pr_cont(" %02x", data[i]); pr_cont("\n"); } return msg_len; } /* This bit's functionality is optional */ #define BT_BMC_HWRST 0x80 static void reset_flags(struct si_sm_data *bt) { if (bt_debug) dev_dbg(bt->io->dev, "flag reset %s\n", status2txt(BT_STATUS)); if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* force clear */ BT_CONTROL(BT_CLR_WR_PTR); /* always reset */ BT_CONTROL(BT_SMS_ATN); /* always clear */ BT_INTMASK_W(BT_BMC_HWRST); } /* * Get rid of an unwanted/stale response. This should only be needed for * BMCs that support multiple outstanding requests. */ static void drain_BMC2HOST(struct si_sm_data *bt) { int i, size; if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */ return; BT_CONTROL(BT_H_BUSY); /* now set */ BT_CONTROL(BT_B2H_ATN); /* always clear */ BT_STATUS; /* pause */ BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */ BT_CONTROL(BT_CLR_RD_PTR); /* always reset */ if (bt_debug) dev_dbg(bt->io->dev, "stale response %s; ", status2txt(BT_STATUS)); size = BMC2HOST; for (i = 0; i < size ; i++) BMC2HOST; BT_CONTROL(BT_H_BUSY); /* now clear */ if (bt_debug) pr_cont("drained %d bytes\n", size + 1); } static inline void write_all_bytes(struct si_sm_data *bt) { int i; if (bt_debug & BT_DEBUG_MSG) { dev_dbg(bt->io->dev, "write %d bytes seq=0x%02X", bt->write_count, bt->seq); for (i = 0; i < bt->write_count; i++) pr_cont(" %02x", bt->write_data[i]); pr_cont("\n"); } for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]); } static inline int read_all_bytes(struct si_sm_data *bt) { unsigned int i; /* * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. * Keep layout of first four bytes aligned with write_data[] */ bt->read_data[0] = BMC2HOST; bt->read_count = bt->read_data[0]; if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { if (bt_debug & BT_DEBUG_MSG) dev_dbg(bt->io->dev, "bad raw rsp len=%d\n", bt->read_count); bt->truncated = 1; return 1; /* let next XACTION START clean it up */ } for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST; bt->read_count++; /* Account internally for length byte */ if (bt_debug & BT_DEBUG_MSG) { int max = bt->read_count; dev_dbg(bt->io->dev, "got %d bytes seq=0x%02X", max, bt->read_data[2]); if (max > 16) max = 16; for (i = 0; i < max; i++) pr_cont(" %02x", bt->read_data[i]); pr_cont("%s\n", bt->read_count == max ? 
"" : " ..."); } /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ if ((bt->read_data[3] == bt->write_data[3]) && (bt->read_data[2] == bt->write_data[2]) && ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) return 1; if (bt_debug & BT_DEBUG_MSG) dev_dbg(bt->io->dev, "IPMI BT: bad packet: want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], bt->read_data[1], bt->read_data[2], bt->read_data[3]); return 0; } /* Restart if retries are left, or return an error completion code */ static enum si_sm_result error_recovery(struct si_sm_data *bt, unsigned char status, unsigned char cCode) { char *reason; bt->timeout = bt->BT_CAP_req2rsp; switch (cCode) { case IPMI_TIMEOUT_ERR: reason = "timeout"; break; default: reason = "internal error"; break; } dev_warn(bt->io->dev, "IPMI BT: %s in %s %s ", /* open-ended line */ reason, STATE2TXT, STATUS2TXT); /* * Per the IPMI spec, retries are based on the sequence number * known only to this module, so manage a restart here. */ (bt->error_retries)++; if (bt->error_retries < bt->BT_CAP_retries) { pr_cont("%d retries left\n", bt->BT_CAP_retries - bt->error_retries); bt->state = BT_STATE_RESTART; return SI_SM_CALL_WITHOUT_DELAY; } dev_warn(bt->io->dev, "failed %d retries, sending error response\n", bt->BT_CAP_retries); if (!bt->nonzero_status) dev_err(bt->io->dev, "stuck, try power cycle\n"); /* this is most likely during insmod */ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { dev_warn(bt->io->dev, "BT reset (takes 5 secs)\n"); bt->state = BT_STATE_RESET1; return SI_SM_CALL_WITHOUT_DELAY; } /* * Concoct a useful error message, set up the next state, and * be done with this sequence. */ bt->state = BT_STATE_IDLE; switch (cCode) { case IPMI_TIMEOUT_ERR: if (status & BT_B_BUSY) { cCode = IPMI_NODE_BUSY_ERR; bt->state = BT_STATE_LONG_BUSY; } break; default: break; } force_result(bt, cCode); return SI_SM_TRANSACTION_COMPLETE; } /* Check status and (usually) take action and change this state machine. */ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) { unsigned char status; static enum bt_states last_printed = BT_STATE_PRINTME; int i; status = BT_STATUS; bt->nonzero_status |= status; if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) { dev_dbg(bt->io->dev, "BT: %s %s TO=%ld - %ld\n", STATE2TXT, STATUS2TXT, bt->timeout, time); last_printed = bt->state; } /* * Commands that time out may still (eventually) provide a response. * This stale response will get in the way of a new response so remove * it if possible (hopefully during IDLE). Even if it comes up later * it will be rejected by its (now-forgotten) seq number. */ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { drain_BMC2HOST(bt); BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); } if ((bt->state != BT_STATE_IDLE) && (bt->state < BT_STATE_PRINTME)) { /* check timeout */ bt->timeout -= time; if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) return error_recovery(bt, status, IPMI_TIMEOUT_ERR); } switch (bt->state) { /* * Idle state first checks for asynchronous messages from another * channel, then does some opportunistic housekeeping. 
*/ case BT_STATE_IDLE: if (status & BT_SMS_ATN) { BT_CONTROL(BT_SMS_ATN); /* clear it */ return SI_SM_ATTN; } if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ BT_CONTROL(BT_H_BUSY); BT_SI_SM_RETURN(SI_SM_IDLE); case BT_STATE_XACTION_START: if (status & (BT_B_BUSY | BT_H2B_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* force clear */ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_WRITE_BYTES: if (status & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* clear */ BT_CONTROL(BT_CLR_WR_PTR); write_all_bytes(bt); BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_WRITE_CONSUME: if (status & (BT_B_BUSY | BT_H2B_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); BT_STATE_CHANGE(BT_STATE_READ_WAIT, SI_SM_CALL_WITHOUT_DELAY); /* Spinning hard can suppress B2H_ATN and force a timeout */ case BT_STATE_READ_WAIT: if (!(status & BT_B2H_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); BT_CONTROL(BT_H_BUSY); /* set */ /* * Uncached, ordered writes should just proceed serially but * some BMCs don't clear B2H_ATN with one hit. Fast-path a * workaround without too much penalty to the general case. */ BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_CLEAR_B2H: if (status & BT_B2H_ATN) { /* keep hitting it */ BT_CONTROL(BT_B2H_ATN); BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); } BT_STATE_CHANGE(BT_STATE_READ_BYTES, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_READ_BYTES: if (!(status & BT_H_BUSY)) /* check in case of retry */ BT_CONTROL(BT_H_BUSY); BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ i = read_all_bytes(bt); /* true == packet seq match */ BT_CONTROL(BT_H_BUSY); /* NOW clear */ if (!i) /* Not my message */ BT_STATE_CHANGE(BT_STATE_READ_WAIT, SI_SM_CALL_WITHOUT_DELAY); bt->state = bt->complete; return bt->state == BT_STATE_IDLE ? /* where to next? */ SI_SM_TRANSACTION_COMPLETE : /* normal */ SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */ case BT_STATE_LONG_BUSY: /* For example: after FW update */ if (!(status & BT_B_BUSY)) { reset_flags(bt); /* next state is now IDLE */ bt_init_data(bt, bt->io); } return SI_SM_CALL_WITH_DELAY; /* No repeat printing */ case BT_STATE_RESET1: reset_flags(bt); drain_BMC2HOST(bt); BT_STATE_CHANGE(BT_STATE_RESET2, SI_SM_CALL_WITH_DELAY); case BT_STATE_RESET2: /* Send a soft reset */ BT_CONTROL(BT_CLR_WR_PTR); HOST2BMC(3); /* number of bytes following */ HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */ HOST2BMC(42); /* Sequence number */ HOST2BMC(3); /* Cmd == Soft reset */ BT_CONTROL(BT_H2B_ATN); bt->timeout = BT_RESET_DELAY * USEC_PER_SEC; BT_STATE_CHANGE(BT_STATE_RESET3, SI_SM_CALL_WITH_DELAY); case BT_STATE_RESET3: /* Hold off everything for a bit */ if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY; drain_BMC2HOST(bt); BT_STATE_CHANGE(BT_STATE_RESTART, SI_SM_CALL_WITH_DELAY); case BT_STATE_RESTART: /* don't reset retries or seq! 
*/ bt->read_count = 0; bt->nonzero_status = 0; bt->timeout = bt->BT_CAP_req2rsp; BT_STATE_CHANGE(BT_STATE_XACTION_START, SI_SM_CALL_WITH_DELAY); default: /* should never occur */ return error_recovery(bt, status, IPMI_ERR_UNSPECIFIED); } return SI_SM_CALL_WITH_DELAY; } static int bt_detect(struct si_sm_data *bt) { unsigned char GetBT_CAP[] = { 0x18, 0x36 }; unsigned char BT_CAP[8]; enum si_sm_result smi_result; int rv; /* * It's impossible for the BT status and interrupt registers to be * all 1's, (assuming a properly functioning, self-initialized BMC) * but that's what you get from reading a bogus address, so we * test that first. The calling routine uses negative logic. */ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1; reset_flags(bt); /* * Try getting the BT capabilities here. */ rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); if (rv) { dev_warn(bt->io->dev, "Can't start capabilities transaction: %d\n", rv); goto out_no_bt_cap; } smi_result = SI_SM_CALL_WITHOUT_DELAY; for (;;) { if (smi_result == SI_SM_CALL_WITH_DELAY || smi_result == SI_SM_CALL_WITH_TICK_DELAY) { schedule_timeout_uninterruptible(1); smi_result = bt_event(bt, jiffies_to_usecs(1)); } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { smi_result = bt_event(bt, 0); } else break; } rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); bt_init_data(bt, bt->io); if (rv < 8) { dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv); goto out_no_bt_cap; } if (BT_CAP[2]) { dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]); out_no_bt_cap: dev_warn(bt->io->dev, "using default values\n"); } else { bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; bt->BT_CAP_retries = BT_CAP[7]; } dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n", bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); return 0; } static void bt_cleanup(struct si_sm_data *bt) { } static int bt_size(void) { return sizeof(struct si_sm_data); } const struct si_sm_handlers bt_smi_handlers = { .init_data = bt_init_data, .start_transaction = bt_start_transaction, .get_result = bt_get_result, .event = bt_event, .detect = bt_detect, .cleanup = bt_cleanup, .size = bt_size, };
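The state machine above spreads one BT transaction across several states so bt_event() can be driven from the ipmi_si timer, kernel thread or interrupt without blocking. Purely to make the register sequence visible, here is a hypothetical blocking helper (not in the kernel tree) that flattens the same steps; it reuses the BT_* flag values and the si_sm_io inputb/outputb accessors from the file above and omits the H_BUSY housekeeping, timeout, retry and reset handling the real code provides.

static int bt_send_blocking(struct si_sm_io *io, const unsigned char *msg,
			    unsigned int len, unsigned char *rsp,
			    unsigned int rsp_max)
{
	unsigned int i, rlen;

	/* Wait for the BMC to be idle: B_BUSY and H2B_ATN must both be clear. */
	while (io->inputb(io, 0) & (BT_B_BUSY | BT_H2B_ATN))
		cpu_relax();

	io->outputb(io, 0, BT_CLR_WR_PTR);	/* reset the host->BMC write pointer */
	for (i = 0; i < len; i++)		/* msg[] holds length, NetFn/LUN, seq, cmd, data... */
		io->outputb(io, 1, msg[i]);
	io->outputb(io, 0, BT_H2B_ATN);		/* tell the BMC a request is waiting */

	/* Wait for the response, claim the interface, and ACK the BMC. */
	while (!(io->inputb(io, 0) & BT_B2H_ATN))
		cpu_relax();
	io->outputb(io, 0, BT_H_BUSY);		/* toggle H_BUSY on */
	io->outputb(io, 0, BT_B2H_ATN);		/* clear B2H_ATN */
	io->outputb(io, 0, BT_CLR_RD_PTR);	/* read from the start of the BMC->host buffer */

	rlen = io->inputb(io, 1);		/* first byte is the count of bytes that follow */
	for (i = 0; i < rlen && i < rsp_max; i++)
		rsp[i] = io->inputb(io, 1);	/* NetFn/LUN, seq, cmd, completion code, data... */
	io->outputb(io, 0, BT_H_BUSY);		/* toggle H_BUSY back off */

	return rlen;
}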
linux-master
drivers/char/ipmi/ipmi_bt_sm.c
// SPDX-License-Identifier: GPL-2.0+ /* * A hack to create a platform device from a DMI entry. This will * allow autoloading of the IPMI drive based on SMBIOS entries. */ #define pr_fmt(fmt) "%s" fmt, "ipmi:dmi: " #define dev_fmt pr_fmt #include <linux/ipmi.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/platform_device.h> #include <linux/property.h> #include "ipmi_dmi.h" #include "ipmi_plat_data.h" #define IPMI_DMI_TYPE_KCS 0x01 #define IPMI_DMI_TYPE_SMIC 0x02 #define IPMI_DMI_TYPE_BT 0x03 #define IPMI_DMI_TYPE_SSIF 0x04 struct ipmi_dmi_info { enum si_type si_type; unsigned int space; /* addr space for si, intf# for ssif */ unsigned long addr; u8 slave_addr; struct ipmi_dmi_info *next; }; static struct ipmi_dmi_info *ipmi_dmi_infos; static int ipmi_dmi_nr __initdata; static void __init dmi_add_platform_ipmi(unsigned long base_addr, unsigned int space, u8 slave_addr, int irq, int offset, int type) { const char *name; struct ipmi_dmi_info *info; struct ipmi_plat_data p; memset(&p, 0, sizeof(p)); name = "dmi-ipmi-si"; p.iftype = IPMI_PLAT_IF_SI; switch (type) { case IPMI_DMI_TYPE_SSIF: name = "dmi-ipmi-ssif"; p.iftype = IPMI_PLAT_IF_SSIF; p.type = SI_TYPE_INVALID; break; case IPMI_DMI_TYPE_BT: p.type = SI_BT; break; case IPMI_DMI_TYPE_KCS: p.type = SI_KCS; break; case IPMI_DMI_TYPE_SMIC: p.type = SI_SMIC; break; default: pr_err("Invalid IPMI type: %d\n", type); return; } p.addr = base_addr; p.space = space; p.regspacing = offset; p.irq = irq; p.slave_addr = slave_addr; p.addr_source = SI_SMBIOS; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { pr_warn("Could not allocate dmi info\n"); } else { info->si_type = p.type; info->space = space; info->addr = base_addr; info->slave_addr = slave_addr; info->next = ipmi_dmi_infos; ipmi_dmi_infos = info; } if (ipmi_platform_add(name, ipmi_dmi_nr, &p)) ipmi_dmi_nr++; } /* * Look up the slave address for a given interface. This is here * because ACPI doesn't have a slave address while SMBIOS does, but we * prefer using ACPI so the ACPI code can use the IPMI namespace. * This function allows an ACPI-specified IPMI device to look up the * slave address from the DMI table. */ int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space, unsigned long base_addr) { struct ipmi_dmi_info *info = ipmi_dmi_infos; while (info) { if (info->si_type == si_type && info->space == space && info->addr == base_addr) return info->slave_addr; info = info->next; } return 0; } EXPORT_SYMBOL(ipmi_dmi_get_slave_addr); #define DMI_IPMI_MIN_LENGTH 0x10 #define DMI_IPMI_VER2_LENGTH 0x12 #define DMI_IPMI_TYPE 4 #define DMI_IPMI_SLAVEADDR 6 #define DMI_IPMI_ADDR 8 #define DMI_IPMI_ACCESS 0x10 #define DMI_IPMI_IRQ 0x11 #define DMI_IPMI_IO_MASK 0xfffe static void __init dmi_decode_ipmi(const struct dmi_header *dm) { const u8 *data = (const u8 *) dm; int space = IPMI_IO_ADDR_SPACE; unsigned long base_addr; u8 len = dm->length; u8 slave_addr; int irq = 0, offset = 0; int type; if (len < DMI_IPMI_MIN_LENGTH) return; type = data[DMI_IPMI_TYPE]; slave_addr = data[DMI_IPMI_SLAVEADDR]; memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long)); if (!base_addr) { pr_err("Base address is zero, assuming no IPMI interface\n"); return; } if (len >= DMI_IPMI_VER2_LENGTH) { if (type == IPMI_DMI_TYPE_SSIF) { space = 0; /* Match I2C interface 0. */ base_addr = data[DMI_IPMI_ADDR] >> 1; if (base_addr == 0) { /* * Some broken systems put the I2C address in * the slave address field. We try to * accommodate them here. 
*/ base_addr = data[DMI_IPMI_SLAVEADDR] >> 1; slave_addr = 0; } } else { if (base_addr & 1) { /* I/O */ base_addr &= DMI_IPMI_IO_MASK; } else { /* Memory */ space = IPMI_MEM_ADDR_SPACE; } /* * If bit 4 of byte 0x10 is set, then the lsb * for the address is odd. */ base_addr |= (data[DMI_IPMI_ACCESS] >> 4) & 1; irq = data[DMI_IPMI_IRQ]; /* * The top two bits of byte 0x10 hold the * register spacing. */ switch ((data[DMI_IPMI_ACCESS] >> 6) & 3) { case 0: /* Byte boundaries */ offset = 1; break; case 1: /* 32-bit boundaries */ offset = 4; break; case 2: /* 16-byte boundaries */ offset = 16; break; default: pr_err("Invalid offset: 0\n"); return; } } } else { /* Old DMI spec. */ /* * Note that technically, the lower bit of the base * address should be 1 if the address is I/O and 0 if * the address is in memory. So many systems get that * wrong (and all that I have seen are I/O) so we just * ignore that bit and assume I/O. Systems that use * memory should use the newer spec, anyway. */ base_addr = base_addr & DMI_IPMI_IO_MASK; offset = 1; } dmi_add_platform_ipmi(base_addr, space, slave_addr, irq, offset, type); } static int __init scan_for_dmi_ipmi(void) { const struct dmi_device *dev = NULL; while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) dmi_decode_ipmi((const struct dmi_header *) dev->device_data); return 0; } subsys_initcall(scan_for_dmi_ipmi);
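To make the type-38 field offsets concrete, here is a self-contained userspace sketch (not kernel code) that decodes a hand-made SMBIOS record the same way dmi_decode_ipmi() above does; the sample bytes are illustrative and a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* type 38, length 0x12, handle, then the IPMI-specific fields */
	uint8_t rec[0x12] = {
		38, 0x12, 0x00, 0x00,
		0x01,				/* [4] interface type: KCS */
		0x00, 0x20, 0x00,		/* [5] spec rev, [6] slave address 0x20, [7] NV storage */
		0xa3, 0x0c, 0, 0, 0, 0, 0, 0,	/* [8..15] base address 0xca2 with bit 0 set = I/O */
		0x40,				/* [0x10] access info: spacing bits = 1 (32-bit boundaries) */
		0x00				/* [0x11] IRQ: none */
	};
	uint64_t base;
	int space_is_io, spacing;

	memcpy(&base, rec + 8, sizeof(base));	/* little-endian host assumed */
	space_is_io = base & 1;			/* bit 0 set means port I/O, clear means memory */
	if (space_is_io)
		base &= 0xfffe;			/* mirrors DMI_IPMI_IO_MASK */
	base |= (rec[0x10] >> 4) & 1;		/* bit 4 of the access byte carries the true address LSB */

	switch ((rec[0x10] >> 6) & 3) {		/* top two bits select the register spacing */
	case 0: spacing = 1; break;		/* byte boundaries */
	case 1: spacing = 4; break;		/* 32-bit boundaries */
	case 2: spacing = 16; break;		/* 16-byte boundaries */
	default: spacing = 0; break;		/* reserved encoding */
	}

	printf("KCS at %s 0x%llx, register spacing %d, slave address 0x%x, irq %d\n",
	       space_is_io ? "I/O" : "mem", (unsigned long long)base,
	       spacing, rec[6], rec[0x11]);
	return 0;
}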
linux-master
drivers/char/ipmi/ipmi_dmi.c
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_msghandler.c * * Incoming and outgoing message routing for an IPMI interface. * * Author: MontaVista Software, Inc. * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002 MontaVista Software Inc. */ #define pr_fmt(fmt) "IPMI message handler: " fmt #define dev_fmt(fmt) pr_fmt(fmt) #include <linux/module.h> #include <linux/errno.h> #include <linux/panic_notifier.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include <linux/notifier.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/workqueue.h> #include <linux/uuid.h> #include <linux/nospec.h> #include <linux/vmalloc.h> #include <linux/delay.h> #define IPMI_DRIVER_VERSION "39.2" static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); static void smi_recv_tasklet(struct tasklet_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); static void need_waiter(struct ipmi_smi *intf); static int handle_one_recv_msg(struct ipmi_smi *intf, struct ipmi_smi_msg *msg); static bool initialized; static bool drvregistered; /* Numbers in this enumerator should be mapped to ipmi_panic_event_str */ enum ipmi_panic_event_op { IPMI_SEND_PANIC_EVENT_NONE, IPMI_SEND_PANIC_EVENT, IPMI_SEND_PANIC_EVENT_STRING, IPMI_SEND_PANIC_EVENT_MAX }; /* Indices in this array should be mapped to enum ipmi_panic_event_op */ static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL }; #ifdef CONFIG_IPMI_PANIC_STRING #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING #elif defined(CONFIG_IPMI_PANIC_EVENT) #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT #else #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE #endif static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT; static int panic_op_write_handler(const char *val, const struct kernel_param *kp) { char valcp[16]; int e; strscpy(valcp, val, sizeof(valcp)); e = match_string(ipmi_panic_event_str, -1, strstrip(valcp)); if (e < 0) return e; ipmi_send_panic_event = e; return 0; } static int panic_op_read_handler(char *buffer, const struct kernel_param *kp) { const char *event_str; if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX) event_str = "???"; else event_str = ipmi_panic_event_str[ipmi_send_panic_event]; return sprintf(buffer, "%s\n", event_str); } static const struct kernel_param_ops panic_op_ops = { .set = panic_op_write_handler, .get = panic_op_read_handler }; module_param_cb(panic_op, &panic_op_ops, NULL, 0600); MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events."); #define MAX_EVENTS_IN_QUEUE 25 /* Remain in auto-maintenance mode for this amount of time (in ms). */ static unsigned long maintenance_mode_timeout_ms = 30000; module_param(maintenance_mode_timeout_ms, ulong, 0644); MODULE_PARM_DESC(maintenance_mode_timeout_ms, "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode."); /* * Don't let a message sit in a queue forever, always time it with at lest * the max message timer. This is in milliseconds. 
*/ #define MAX_MSG_TIMEOUT 60000 /* * Timeout times below are in milliseconds, and are done off a 1 * second timer. So setting the value to 1000 would mean anything * between 0 and 1000ms. So really the only reasonable minimum * setting is 2000ms, which is between 1 and 2 seconds. */ /* The default timeout for message retries. */ static unsigned long default_retry_ms = 2000; module_param(default_retry_ms, ulong, 0644); MODULE_PARM_DESC(default_retry_ms, "The time (milliseconds) between retry sends"); /* The default timeout for maintenance mode message retries. */ static unsigned long default_maintenance_retry_ms = 3000; module_param(default_maintenance_retry_ms, ulong, 0644); MODULE_PARM_DESC(default_maintenance_retry_ms, "The time (milliseconds) between retry sends in maintenance mode"); /* The default maximum number of retries */ static unsigned int default_max_retries = 4; module_param(default_max_retries, uint, 0644); MODULE_PARM_DESC(default_max_retries, "The maximum number of times to retry sending a message before failing it"); /* The default maximum number of users that may register. */ static unsigned int max_users = 30; module_param(max_users, uint, 0644); MODULE_PARM_DESC(max_users, "The most users that may use the IPMI stack at one time."); /* The default maximum number of messages a user may have outstanding. */ static unsigned int max_msgs_per_user = 100; module_param(max_msgs_per_user, uint, 0644); MODULE_PARM_DESC(max_msgs_per_user, "The most messages a user may have outstanding."); /* Call every ~1000 ms. */ #define IPMI_TIMEOUT_TIME 1000 /* How many jiffies does it take to get to the timeout time. */ #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) /* * Request events from the queue every second (this is the number of * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the * future, IPMI will add a way to know immediately if an event is in * the queue and this silliness can go away. */ #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) /* How long should we cache dynamic device IDs? */ #define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ) /* * The main "user" data structure. */ struct ipmi_user { struct list_head link; /* * Set to NULL when the user is destroyed, a pointer to myself * so srcu_dereference can be used on it. */ struct ipmi_user *self; struct srcu_struct release_barrier; struct kref refcount; /* The upper layer that handles receive messages. */ const struct ipmi_user_hndl *handler; void *handler_data; /* The interface this user is bound to. */ struct ipmi_smi *intf; /* Does this interface receive IPMI events? */ bool gets_events; atomic_t nr_msgs; /* Free must run in process context for RCU cleanup. */ struct work_struct remove_work; }; static struct workqueue_struct *remove_work_wq; static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) __acquires(user->release_barrier) { struct ipmi_user *ruser; *index = srcu_read_lock(&user->release_barrier); ruser = srcu_dereference(user->self, &user->release_barrier); if (!ruser) srcu_read_unlock(&user->release_barrier, *index); return ruser; } static void release_ipmi_user(struct ipmi_user *user, int index) { srcu_read_unlock(&user->release_barrier, index); } struct cmd_rcvr { struct list_head link; struct ipmi_user *user; unsigned char netfn; unsigned char cmd; unsigned int chans; /* * This is used to form a linked list during mass deletion. * Since this is in an RCU list, we cannot use the link above * or change any data until the RCU period completes.
So we * use this next variable during mass deletion so we can have * a list and don't have to wait and restart the search on * every individual deletion of a command. */ struct cmd_rcvr *next; }; struct seq_table { unsigned int inuse : 1; unsigned int broadcast : 1; unsigned long timeout; unsigned long orig_timeout; unsigned int retries_left; /* * To verify on an incoming send message response that this is * the message that the response is for, we keep a sequence id * and increment it every time we send a message. */ long seqid; /* * This is held so we can properly respond to the message on a * timeout, and it is used to hold the temporary data for * retransmission, too. */ struct ipmi_recv_msg *recv_msg; }; /* * Store the information in a msgid (long) to allow us to find a * sequence table entry from the msgid. */ #define STORE_SEQ_IN_MSGID(seq, seqid) \ ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff)) #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ do { \ seq = (((msgid) >> 26) & 0x3f); \ seqid = ((msgid) & 0x3ffffff); \ } while (0) #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff) #define IPMI_MAX_CHANNELS 16 struct ipmi_channel { unsigned char medium; unsigned char protocol; }; struct ipmi_channel_set { struct ipmi_channel c[IPMI_MAX_CHANNELS]; }; struct ipmi_my_addrinfo { /* * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, * but may be changed by the user. */ unsigned char address; /* * My LUN. This should generally stay the SMS LUN, but just in * case... */ unsigned char lun; }; /* * Note that the product id, manufacturer id, guid, and device id are * immutable in this structure, so dyn_mutex is not required for * accessing those. If those change on a BMC, a new BMC is allocated. */ struct bmc_device { struct platform_device pdev; struct list_head intfs; /* Interfaces on this BMC. */ struct ipmi_device_id id; struct ipmi_device_id fetch_id; int dyn_id_set; unsigned long dyn_id_expiry; struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */ guid_t guid; guid_t fetch_guid; int dyn_guid_set; struct kref usecount; struct work_struct remove_work; unsigned char cc; /* completion code */ }; #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, struct ipmi_device_id *id, bool *guid_set, guid_t *guid); /* * Various statistics for IPMI, these index stats[] in the ipmi_smi * structure. */ enum ipmi_stat_indexes { /* Commands we got from the user that were invalid. */ IPMI_STAT_sent_invalid_commands = 0, /* Commands we sent to the MC. */ IPMI_STAT_sent_local_commands, /* Responses from the MC that were delivered to a user. */ IPMI_STAT_handled_local_responses, /* Responses from the MC that were not delivered to a user. */ IPMI_STAT_unhandled_local_responses, /* Commands we sent out to the IPMB bus. */ IPMI_STAT_sent_ipmb_commands, /* Commands sent on the IPMB that had errors on the SEND CMD */ IPMI_STAT_sent_ipmb_command_errs, /* Each retransmit increments this count. */ IPMI_STAT_retransmitted_ipmb_commands, /* * When a message times out (runs out of retransmits) this is * incremented. */ IPMI_STAT_timed_out_ipmb_commands, /* * This is like above, but for broadcasts. Broadcasts are * *not* included in the above count (they are expected to * time out). */ IPMI_STAT_timed_out_ipmb_broadcasts, /* Responses I have sent to the IPMB bus. */ IPMI_STAT_sent_ipmb_responses, /* The response was delivered to the user. 
*/ IPMI_STAT_handled_ipmb_responses, /* The response had invalid data in it. */ IPMI_STAT_invalid_ipmb_responses, /* The response didn't have anyone waiting for it. */ IPMI_STAT_unhandled_ipmb_responses, /* Commands we sent out to the IPMB bus. */ IPMI_STAT_sent_lan_commands, /* Commands sent on the IPMB that had errors on the SEND CMD */ IPMI_STAT_sent_lan_command_errs, /* Each retransmit increments this count. */ IPMI_STAT_retransmitted_lan_commands, /* * When a message times out (runs out of retransmits) this is * incremented. */ IPMI_STAT_timed_out_lan_commands, /* Responses I have sent to the IPMB bus. */ IPMI_STAT_sent_lan_responses, /* The response was delivered to the user. */ IPMI_STAT_handled_lan_responses, /* The response had invalid data in it. */ IPMI_STAT_invalid_lan_responses, /* The response didn't have anyone waiting for it. */ IPMI_STAT_unhandled_lan_responses, /* The command was delivered to the user. */ IPMI_STAT_handled_commands, /* The command had invalid data in it. */ IPMI_STAT_invalid_commands, /* The command didn't have anyone waiting for it. */ IPMI_STAT_unhandled_commands, /* Invalid data in an event. */ IPMI_STAT_invalid_events, /* Events that were received with the proper format. */ IPMI_STAT_events, /* Retransmissions on IPMB that failed. */ IPMI_STAT_dropped_rexmit_ipmb_commands, /* Retransmissions on LAN that failed. */ IPMI_STAT_dropped_rexmit_lan_commands, /* This *must* remain last, add new values above this. */ IPMI_NUM_STATS }; #define IPMI_IPMB_NUM_SEQ 64 struct ipmi_smi { struct module *owner; /* What interface number are we? */ int intf_num; struct kref refcount; /* Set when the interface is being unregistered. */ bool in_shutdown; /* Used for a list of interfaces. */ struct list_head link; /* * The list of upper layers that are using me. seq_lock write * protects this. Read protection is with srcu. */ struct list_head users; struct srcu_struct users_srcu; atomic_t nr_users; struct device_attribute nr_users_devattr; struct device_attribute nr_msgs_devattr; /* Used for wake ups at startup. */ wait_queue_head_t waitq; /* * Prevents the interface from being unregistered when the * interface is used by being looked up through the BMC * structure. */ struct mutex bmc_reg_mutex; struct bmc_device tmp_bmc; struct bmc_device *bmc; bool bmc_registered; struct list_head bmc_link; char *my_dev_name; bool in_bmc_register; /* Handle recursive situations. Yuck. */ struct work_struct bmc_reg_work; const struct ipmi_smi_handlers *handlers; void *send_info; /* Driver-model device for the system interface. */ struct device *si_dev; /* * A table of sequence numbers for this interface. We use the * sequence numbers for IPMB messages that go out of the * interface to match them up with their responses. A routine * is called periodically to time the items in this list. */ spinlock_t seq_lock; struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; int curr_seq; /* * Messages queued for delivery. If delivery fails (out of memory * for instance), They will stay in here to be processed later in a * periodic timer interrupt. The tasklet is for handling received * messages directly from the handler. */ spinlock_t waiting_rcv_msgs_lock; struct list_head waiting_rcv_msgs; atomic_t watchdog_pretimeouts_to_deliver; struct tasklet_struct recv_tasklet; spinlock_t xmit_msgs_lock; struct list_head xmit_msgs; struct ipmi_smi_msg *curr_msg; struct list_head hp_xmit_msgs; /* * The list of command receivers that are registered for commands * on this interface. 
*/ struct mutex cmd_rcvrs_mutex; struct list_head cmd_rcvrs; /* * Events that were queues because no one was there to receive * them. */ spinlock_t events_lock; /* For dealing with event stuff. */ struct list_head waiting_events; unsigned int waiting_events_count; /* How many events in queue? */ char delivering_events; char event_msg_printed; /* How many users are waiting for events? */ atomic_t event_waiters; unsigned int ticks_to_req_ev; spinlock_t watch_lock; /* For dealing with watch stuff below. */ /* How many users are waiting for commands? */ unsigned int command_waiters; /* How many users are waiting for watchdogs? */ unsigned int watchdog_waiters; /* How many users are waiting for message responses? */ unsigned int response_waiters; /* * Tells what the lower layer has last been asked to watch for, * messages and/or watchdogs. Protected by watch_lock. */ unsigned int last_watch_mask; /* * The event receiver for my BMC, only really used at panic * shutdown as a place to store this. */ unsigned char event_receiver; unsigned char event_receiver_lun; unsigned char local_sel_device; unsigned char local_event_generator; /* For handling of maintenance mode. */ int maintenance_mode; bool maintenance_mode_enable; int auto_maintenance_timeout; spinlock_t maintenance_mode_lock; /* Used in a timer... */ /* * If we are doing maintenance on something on IPMB, extend * the timeout time to avoid timeouts writing firmware and * such. */ int ipmb_maintenance_mode_timeout; /* * A cheap hack, if this is non-null and a message to an * interface comes in with a NULL user, call this routine with * it. Note that the message will still be freed by the * caller. This only works on the system interface. * * Protected by bmc_reg_mutex. */ void (*null_user_handler)(struct ipmi_smi *intf, struct ipmi_recv_msg *msg); /* * When we are scanning the channels for an SMI, this will * tell which channel we are scanning. */ int curr_channel; /* Channel information */ struct ipmi_channel_set *channel_list; unsigned int curr_working_cset; /* First index into the following. */ struct ipmi_channel_set wchannels[2]; struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS]; bool channels_ready; atomic_t stats[IPMI_NUM_STATS]; /* * run_to_completion duplicate of smb_info, smi_info * and ipmi_serial_info structures. Used to decrease numbers of * parameters passed by "low" level IPMI code. */ int run_to_completion; }; #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) static void __get_guid(struct ipmi_smi *intf); static void __ipmi_bmc_unregister(struct ipmi_smi *intf); static int __ipmi_bmc_register(struct ipmi_smi *intf, struct ipmi_device_id *id, bool guid_set, guid_t *guid, int intf_num); static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); /* * The driver model view of the IPMI messaging driver. */ static struct platform_driver ipmidriver = { .driver = { .name = "ipmi", .bus = &platform_bus_type } }; /* * This mutex keeps us from adding the same BMC twice. */ static DEFINE_MUTEX(ipmidriver_mutex); static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); #define ipmi_interfaces_mutex_held() \ lockdep_is_held(&ipmi_interfaces_mutex) static struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. 
*/ static LIST_HEAD(smi_watchers); static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_inc_stat(intf, stat) \ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) static const char * const addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", "device-tree", "platform" }; const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) { if (src >= SI_LAST) src = 0; /* Invalid */ return addr_src_to_str[src]; } EXPORT_SYMBOL(ipmi_addr_src_to_str); static int is_lan_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_LAN_ADDR_TYPE; } static int is_ipmb_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_IPMB_ADDR_TYPE; } static int is_ipmb_bcast_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE; } static int is_ipmb_direct_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE; } static void free_recv_msg_list(struct list_head *q) { struct ipmi_recv_msg *msg, *msg2; list_for_each_entry_safe(msg, msg2, q, link) { list_del(&msg->link); ipmi_free_recv_msg(msg); } } static void free_smi_msg_list(struct list_head *q) { struct ipmi_smi_msg *msg, *msg2; list_for_each_entry_safe(msg, msg2, q, link) { list_del(&msg->link); ipmi_free_smi_msg(msg); } } static void clean_up_interface_data(struct ipmi_smi *intf) { int i; struct cmd_rcvr *rcvr, *rcvr2; struct list_head list; tasklet_kill(&intf->recv_tasklet); free_smi_msg_list(&intf->waiting_rcv_msgs); free_recv_msg_list(&intf->waiting_events); /* * Wholesale remove all the entries from the list in the * interface and wait for RCU to know that none are in use. */ mutex_lock(&intf->cmd_rcvrs_mutex); INIT_LIST_HEAD(&list); list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); mutex_unlock(&intf->cmd_rcvrs_mutex); list_for_each_entry_safe(rcvr, rcvr2, &list, link) kfree(rcvr); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if ((intf->seq_table[i].inuse) && (intf->seq_table[i].recv_msg)) ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } static void intf_free(struct kref *ref) { struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); clean_up_interface_data(intf); kfree(intf); } int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { struct ipmi_smi *intf; int index, rv; /* * Make sure the driver is actually initialized, this handles * problems with initialization order. */ rv = ipmi_init_msghandler(); if (rv) return rv; mutex_lock(&smi_watchers_mutex); list_add(&watcher->link, &smi_watchers); index = srcu_read_lock(&ipmi_interfaces_srcu); list_for_each_entry_rcu(intf, &ipmi_interfaces, link, lockdep_is_held(&smi_watchers_mutex)) { int intf_num = READ_ONCE(intf->intf_num); if (intf_num == -1) continue; watcher->new_smi(intf_num, intf->si_dev); } srcu_read_unlock(&ipmi_interfaces_srcu, index); mutex_unlock(&smi_watchers_mutex); return 0; } EXPORT_SYMBOL(ipmi_smi_watcher_register); int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) { mutex_lock(&smi_watchers_mutex); list_del(&watcher->link); mutex_unlock(&smi_watchers_mutex); return 0; } EXPORT_SYMBOL(ipmi_smi_watcher_unregister); /* * Must be called with smi_watchers_mutex held. 
*/ static void call_smi_watchers(int i, struct device *dev) { struct ipmi_smi_watcher *w; mutex_lock(&smi_watchers_mutex); list_for_each_entry(w, &smi_watchers, link) { if (try_module_get(w->owner)) { w->new_smi(i, dev); module_put(w->owner); } } mutex_unlock(&smi_watchers_mutex); } static int ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) { if (addr1->addr_type != addr2->addr_type) return 0; if (addr1->channel != addr2->channel) return 0; if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { struct ipmi_system_interface_addr *smi_addr1 = (struct ipmi_system_interface_addr *) addr1; struct ipmi_system_interface_addr *smi_addr2 = (struct ipmi_system_interface_addr *) addr2; return (smi_addr1->lun == smi_addr2->lun); } if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { struct ipmi_ipmb_addr *ipmb_addr1 = (struct ipmi_ipmb_addr *) addr1; struct ipmi_ipmb_addr *ipmb_addr2 = (struct ipmi_ipmb_addr *) addr2; return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) && (ipmb_addr1->lun == ipmb_addr2->lun)); } if (is_ipmb_direct_addr(addr1)) { struct ipmi_ipmb_direct_addr *daddr1 = (struct ipmi_ipmb_direct_addr *) addr1; struct ipmi_ipmb_direct_addr *daddr2 = (struct ipmi_ipmb_direct_addr *) addr2; return daddr1->slave_addr == daddr2->slave_addr && daddr1->rq_lun == daddr2->rq_lun && daddr1->rs_lun == daddr2->rs_lun; } if (is_lan_addr(addr1)) { struct ipmi_lan_addr *lan_addr1 = (struct ipmi_lan_addr *) addr1; struct ipmi_lan_addr *lan_addr2 = (struct ipmi_lan_addr *) addr2; return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) && (lan_addr1->local_SWID == lan_addr2->local_SWID) && (lan_addr1->session_handle == lan_addr2->session_handle) && (lan_addr1->lun == lan_addr2->lun)); } return 1; } int ipmi_validate_addr(struct ipmi_addr *addr, int len) { if (len < sizeof(struct ipmi_system_interface_addr)) return -EINVAL; if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { if (addr->channel != IPMI_BMC_CHANNEL) return -EINVAL; return 0; } if ((addr->channel == IPMI_BMC_CHANNEL) || (addr->channel >= IPMI_MAX_CHANNELS) || (addr->channel < 0)) return -EINVAL; if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { if (len < sizeof(struct ipmi_ipmb_addr)) return -EINVAL; return 0; } if (is_ipmb_direct_addr(addr)) { struct ipmi_ipmb_direct_addr *daddr = (void *) addr; if (addr->channel != 0) return -EINVAL; if (len < sizeof(struct ipmi_ipmb_direct_addr)) return -EINVAL; if (daddr->slave_addr & 0x01) return -EINVAL; if (daddr->rq_lun >= 4) return -EINVAL; if (daddr->rs_lun >= 4) return -EINVAL; return 0; } if (is_lan_addr(addr)) { if (len < sizeof(struct ipmi_lan_addr)) return -EINVAL; return 0; } return -EINVAL; } EXPORT_SYMBOL(ipmi_validate_addr); unsigned int ipmi_addr_length(int addr_type) { if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) return sizeof(struct ipmi_system_interface_addr); if ((addr_type == IPMI_IPMB_ADDR_TYPE) || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) return sizeof(struct ipmi_ipmb_addr); if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE) return sizeof(struct ipmi_ipmb_direct_addr); if (addr_type == IPMI_LAN_ADDR_TYPE) return sizeof(struct ipmi_lan_addr); return 0; } EXPORT_SYMBOL(ipmi_addr_length); static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { int rv = 0; if (!msg->user) { /* Special handling for NULL users. */ if (intf->null_user_handler) { intf->null_user_handler(intf, msg); } else { /* No handler, so give up. 
*/ rv = -EINVAL; } ipmi_free_recv_msg(msg); } else if (oops_in_progress) { /* * If we are running in the panic context, calling the * receive handler doesn't much meaning and has a deadlock * risk. At this moment, simply skip it in that case. */ ipmi_free_recv_msg(msg); atomic_dec(&msg->user->nr_msgs); } else { int index; struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); if (user) { atomic_dec(&user->nr_msgs); user->handler->ipmi_recv_hndl(msg, user->handler_data); release_ipmi_user(user, index); } else { /* User went away, give up. */ ipmi_free_recv_msg(msg); rv = -EINVAL; } } return rv; } static void deliver_local_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { if (deliver_response(intf, msg)) ipmi_inc_stat(intf, unhandled_local_responses); else ipmi_inc_stat(intf, handled_local_responses); } static void deliver_err_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg, int err) { msg->recv_type = IPMI_RESPONSE_RECV_TYPE; msg->msg_data[0] = err; msg->msg.netfn |= 1; /* Convert to a response. */ msg->msg.data_len = 1; msg->msg.data = msg->msg_data; deliver_local_response(intf, msg); } static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags) { unsigned long iflags; if (!intf->handlers->set_need_watch) return; spin_lock_irqsave(&intf->watch_lock, iflags); if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) intf->response_waiters++; if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) intf->watchdog_waiters++; if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) intf->command_waiters++; if ((intf->last_watch_mask & flags) != flags) { intf->last_watch_mask |= flags; intf->handlers->set_need_watch(intf->send_info, intf->last_watch_mask); } spin_unlock_irqrestore(&intf->watch_lock, iflags); } static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags) { unsigned long iflags; if (!intf->handlers->set_need_watch) return; spin_lock_irqsave(&intf->watch_lock, iflags); if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) intf->response_waiters--; if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) intf->watchdog_waiters--; if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) intf->command_waiters--; flags = 0; if (intf->response_waiters) flags |= IPMI_WATCH_MASK_CHECK_MESSAGES; if (intf->watchdog_waiters) flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG; if (intf->command_waiters) flags |= IPMI_WATCH_MASK_CHECK_COMMANDS; if (intf->last_watch_mask != flags) { intf->last_watch_mask = flags; intf->handlers->set_need_watch(intf->send_info, intf->last_watch_mask); } spin_unlock_irqrestore(&intf->watch_lock, iflags); } /* * Find the next sequence number not being used and add the given * message with the given timeout to the sequence table. This must be * called with the interface's seq_lock held. */ static int intf_next_seq(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, unsigned long timeout, int retries, int broadcast, unsigned char *seq, long *seqid) { int rv = 0; unsigned int i; if (timeout == 0) timeout = default_retry_ms; if (retries < 0) retries = default_max_retries; for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; i = (i+1)%IPMI_IPMB_NUM_SEQ) { if (!intf->seq_table[i].inuse) break; } if (!intf->seq_table[i].inuse) { intf->seq_table[i].recv_msg = recv_msg; /* * Start with the maximum timeout, when the send response * comes in we will start the real timer. 
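 * ("Real timer" means intf_start_seq_timer() below: once the BMC
 * acknowledges the send-message request, that routine resets
 * ent->timeout to ent->orig_timeout, so MAX_MSG_TIMEOUT only has to
 * cover the window until the send is acknowledged.)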
*/ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; intf->seq_table[i].orig_timeout = timeout; intf->seq_table[i].retries_left = retries; intf->seq_table[i].broadcast = broadcast; intf->seq_table[i].inuse = 1; intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); *seq = i; *seqid = intf->seq_table[i].seqid; intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); need_waiter(intf); } else { rv = -EAGAIN; } return rv; } /* * Return the receive message for the given sequence number and * release the sequence number so it can be reused. Some other data * is passed in to be sure the message matches up correctly (to help * guard against message coming in after their timeout and the * sequence number being reused). */ static int intf_find_seq(struct ipmi_smi *intf, unsigned char seq, short channel, unsigned char cmd, unsigned char netfn, struct ipmi_addr *addr, struct ipmi_recv_msg **recv_msg) { int rv = -ENODEV; unsigned long flags; if (seq >= IPMI_IPMB_NUM_SEQ) return -EINVAL; spin_lock_irqsave(&intf->seq_lock, flags); if (intf->seq_table[seq].inuse) { struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) && (msg->msg.netfn == netfn) && (ipmi_addr_equal(addr, &msg->addr))) { *recv_msg = msg; intf->seq_table[seq].inuse = 0; smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); rv = 0; } } spin_unlock_irqrestore(&intf->seq_lock, flags); return rv; } /* Start the timer for a specific sequence table entry. */ static int intf_start_seq_timer(struct ipmi_smi *intf, long msgid) { int rv = -ENODEV; unsigned long flags; unsigned char seq; unsigned long seqid; GET_SEQ_FROM_MSGID(msgid, seq, seqid); spin_lock_irqsave(&intf->seq_lock, flags); /* * We do this verification because the user can be deleted * while a message is outstanding. */ if ((intf->seq_table[seq].inuse) && (intf->seq_table[seq].seqid == seqid)) { struct seq_table *ent = &intf->seq_table[seq]; ent->timeout = ent->orig_timeout; rv = 0; } spin_unlock_irqrestore(&intf->seq_lock, flags); return rv; } /* Got an error for the send message for a specific sequence number. */ static int intf_err_seq(struct ipmi_smi *intf, long msgid, unsigned int err) { int rv = -ENODEV; unsigned long flags; unsigned char seq; unsigned long seqid; struct ipmi_recv_msg *msg = NULL; GET_SEQ_FROM_MSGID(msgid, seq, seqid); spin_lock_irqsave(&intf->seq_lock, flags); /* * We do this verification because the user can be deleted * while a message is outstanding. */ if ((intf->seq_table[seq].inuse) && (intf->seq_table[seq].seqid == seqid)) { struct seq_table *ent = &intf->seq_table[seq]; ent->inuse = 0; smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); msg = ent->recv_msg; rv = 0; } spin_unlock_irqrestore(&intf->seq_lock, flags); if (msg) deliver_err_response(intf, msg, err); return rv; } static void free_user_work(struct work_struct *work) { struct ipmi_user *user = container_of(work, struct ipmi_user, remove_work); cleanup_srcu_struct(&user->release_barrier); vfree(user); } int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, struct ipmi_user **user) { unsigned long flags; struct ipmi_user *new_user; int rv, index; struct ipmi_smi *intf; /* * There is no module usecount here, because it's not * required. Since this can only be used by and called from * other modules, they will implicitly use this module, and * thus this can't be removed unless the other modules are * removed. 
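 * A rough caller-side sketch, assuming the ipmi_user_hndl layout from
 * the public ipmi header (the receive handler is the only callback the
 * message handler requires; the names below are illustrative):
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *	{
 *		// inspect msg->recv_type, msg->msg, msg->msgid ...
 *		ipmi_free_recv_msg(msg);	// the receiver owns the message
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	...
 *	ipmi_destroy_user(user);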
*/ if (handler == NULL) return -EINVAL; /* * Make sure the driver is actually initialized, this handles * problems with initialization order. */ rv = ipmi_init_msghandler(); if (rv) return rv; new_user = vzalloc(sizeof(*new_user)); if (!new_user) return -ENOMEM; index = srcu_read_lock(&ipmi_interfaces_srcu); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found; } /* Not found, return an error */ rv = -EINVAL; goto out_kfree; found: if (atomic_add_return(1, &intf->nr_users) > max_users) { rv = -EBUSY; goto out_kfree; } INIT_WORK(&new_user->remove_work, free_user_work); rv = init_srcu_struct(&new_user->release_barrier); if (rv) goto out_kfree; if (!try_module_get(intf->owner)) { rv = -ENODEV; goto out_kfree; } /* Note that each existing user holds a refcount to the interface. */ kref_get(&intf->refcount); atomic_set(&new_user->nr_msgs, 0); kref_init(&new_user->refcount); new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; new_user->gets_events = false; rcu_assign_pointer(new_user->self, new_user); spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); if (handler->ipmi_watchdog_pretimeout) /* User wants pretimeouts, so make sure to watch for them. */ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); srcu_read_unlock(&ipmi_interfaces_srcu, index); *user = new_user; return 0; out_kfree: atomic_dec(&intf->nr_users); srcu_read_unlock(&ipmi_interfaces_srcu, index); vfree(new_user); return rv; } EXPORT_SYMBOL(ipmi_create_user); int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) { int rv, index; struct ipmi_smi *intf; index = srcu_read_lock(&ipmi_interfaces_srcu); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found; } srcu_read_unlock(&ipmi_interfaces_srcu, index); /* Not found, return an error */ return -EINVAL; found: if (!intf->handlers->get_smi_info) rv = -ENOTTY; else rv = intf->handlers->get_smi_info(intf->send_info, data); srcu_read_unlock(&ipmi_interfaces_srcu, index); return rv; } EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); /* SRCU cleanup must happen in task context. */ queue_work(remove_work_wq, &user->remove_work); } static void _ipmi_destroy_user(struct ipmi_user *user) { struct ipmi_smi *intf = user->intf; int i; unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; struct module *owner; if (!acquire_ipmi_user(user, &i)) { /* * The user has already been cleaned up, just make sure * nothing is using it and return. */ synchronize_srcu(&user->release_barrier); return; } rcu_assign_pointer(user->self, NULL); release_ipmi_user(user, i); synchronize_srcu(&user->release_barrier); if (user->handler->shutdown) user->handler->shutdown(user->handler_data); if (user->handler->ipmi_watchdog_pretimeout) smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); if (user->gets_events) atomic_dec(&intf->event_waiters); /* Remove the user from the interface's sequence table. 
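 * Any sequence-table slots still holding in-flight messages for this
 * user are released and their receive messages freed, so a late
 * response can never be delivered to a user that no longer exists.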
*/ spin_lock_irqsave(&intf->seq_lock, flags); list_del_rcu(&user->link); atomic_dec(&intf->nr_users); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { intf->seq_table[i].inuse = 0; smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } spin_unlock_irqrestore(&intf->seq_lock, flags); /* * Remove the user from the command receiver's table. First * we build a list of everything (not using the standard link, * since other things may be using it till we do * synchronize_srcu()) then free everything in that list. */ mutex_lock(&intf->cmd_rcvrs_mutex); list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if (rcvr->user == user) { list_del_rcu(&rcvr->link); rcvr->next = rcvrs; rcvrs = rcvr; } } mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); while (rcvrs) { rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); } owner = intf->owner; kref_put(&intf->refcount, intf_free); module_put(owner); } int ipmi_destroy_user(struct ipmi_user *user) { _ipmi_destroy_user(user); kref_put(&user->refcount, free_user); return 0; } EXPORT_SYMBOL(ipmi_destroy_user); int ipmi_get_version(struct ipmi_user *user, unsigned char *major, unsigned char *minor) { struct ipmi_device_id id; int rv, index; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL); if (!rv) { *major = ipmi_version_major(&id); *minor = ipmi_version_minor(&id); } release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_get_version); int ipmi_set_my_address(struct ipmi_user *user, unsigned int channel, unsigned char address) { int index, rv = 0; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; } else { channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].address = address; } release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_set_my_address); int ipmi_get_my_address(struct ipmi_user *user, unsigned int channel, unsigned char *address) { int index, rv = 0; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; } else { channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].address; } release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_get_my_address); int ipmi_set_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char LUN) { int index, rv = 0; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; } else { channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].lun = LUN & 0x3; } release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_set_my_LUN); int ipmi_get_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char *address) { int index, rv = 0; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; } else { channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].lun; } release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_get_my_LUN); int ipmi_get_maintenance_mode(struct ipmi_user *user) { int mode, index; unsigned long flags; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; 
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); mode = user->intf->maintenance_mode; spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); release_ipmi_user(user, index); return mode; } EXPORT_SYMBOL(ipmi_get_maintenance_mode); static void maintenance_mode_update(struct ipmi_smi *intf) { if (intf->handlers->set_maintenance_mode) intf->handlers->set_maintenance_mode( intf->send_info, intf->maintenance_mode_enable); } int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) { int rv = 0, index; unsigned long flags; struct ipmi_smi *intf = user->intf; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; spin_lock_irqsave(&intf->maintenance_mode_lock, flags); if (intf->maintenance_mode != mode) { switch (mode) { case IPMI_MAINTENANCE_MODE_AUTO: intf->maintenance_mode_enable = (intf->auto_maintenance_timeout > 0); break; case IPMI_MAINTENANCE_MODE_OFF: intf->maintenance_mode_enable = false; break; case IPMI_MAINTENANCE_MODE_ON: intf->maintenance_mode_enable = true; break; default: rv = -EINVAL; goto out_unlock; } intf->maintenance_mode = mode; maintenance_mode_update(intf); } out_unlock: spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_set_maintenance_mode); int ipmi_set_gets_events(struct ipmi_user *user, bool val) { unsigned long flags; struct ipmi_smi *intf = user->intf; struct ipmi_recv_msg *msg, *msg2; struct list_head msgs; int index; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; INIT_LIST_HEAD(&msgs); spin_lock_irqsave(&intf->events_lock, flags); if (user->gets_events == val) goto out; user->gets_events = val; if (val) { if (atomic_inc_return(&intf->event_waiters) == 1) need_waiter(intf); } else { atomic_dec(&intf->event_waiters); } if (intf->delivering_events) /* * Another thread is delivering events for this, so * let it handle any new events. */ goto out; /* Deliver any queued events. 
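 * The events_lock is dropped while the handlers run, so the loop below
 * re-checks waiting_events afterwards in case new events were queued in
 * the meantime; delivering_events keeps a second caller from racing
 * with us while the lock is released.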
*/ while (user->gets_events && !list_empty(&intf->waiting_events)) { list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) list_move_tail(&msg->link, &msgs); intf->waiting_events_count = 0; if (intf->event_msg_printed) { dev_warn(intf->si_dev, "Event queue no longer full\n"); intf->event_msg_printed = 0; } intf->delivering_events = 1; spin_unlock_irqrestore(&intf->events_lock, flags); list_for_each_entry_safe(msg, msg2, &msgs, link) { msg->user = user; kref_get(&user->refcount); deliver_local_response(intf, msg); } spin_lock_irqsave(&intf->events_lock, flags); intf->delivering_events = 0; } out: spin_unlock_irqrestore(&intf->events_lock, flags); release_ipmi_user(user, index); return 0; } EXPORT_SYMBOL(ipmi_set_gets_events); static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf, unsigned char netfn, unsigned char cmd, unsigned char chan) { struct cmd_rcvr *rcvr; list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & (1 << chan))) return rcvr; } return NULL; } static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf, unsigned char netfn, unsigned char cmd, unsigned int chans) { struct cmd_rcvr *rcvr; list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & chans)) return 0; } return 1; } int ipmi_register_for_cmd(struct ipmi_user *user, unsigned char netfn, unsigned char cmd, unsigned int chans) { struct ipmi_smi *intf = user->intf; struct cmd_rcvr *rcvr; int rv = 0, index; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); if (!rcvr) { rv = -ENOMEM; goto out_release; } rcvr->cmd = cmd; rcvr->netfn = netfn; rcvr->chans = chans; rcvr->user = user; mutex_lock(&intf->cmd_rcvrs_mutex); /* Make sure the command/netfn is not already registered. 
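 * is_cmd_rcvr_exclusive() returns 0 if an existing receiver already
 * claims this netfn/cmd on any of the requested channels, in which
 * case we fail with -EBUSY rather than silently stealing commands from
 * the earlier user.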
*/ if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) { rv = -EBUSY; goto out_unlock; } smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); out_unlock: mutex_unlock(&intf->cmd_rcvrs_mutex); if (rv) kfree(rcvr); out_release: release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_register_for_cmd); int ipmi_unregister_for_cmd(struct ipmi_user *user, unsigned char netfn, unsigned char cmd, unsigned int chans) { struct ipmi_smi *intf = user->intf; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; int i, rv = -ENOENT, index; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; mutex_lock(&intf->cmd_rcvrs_mutex); for (i = 0; i < IPMI_NUM_CHANNELS; i++) { if (((1 << i) & chans) == 0) continue; rcvr = find_cmd_rcvr(intf, netfn, cmd, i); if (rcvr == NULL) continue; if (rcvr->user == user) { rv = 0; rcvr->chans &= ~chans; if (rcvr->chans == 0) { list_del_rcu(&rcvr->link); rcvr->next = rcvrs; rcvrs = rcvr; } } } mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); release_ipmi_user(user, index); while (rcvrs) { smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); } return rv; } EXPORT_SYMBOL(ipmi_unregister_for_cmd); unsigned char ipmb_checksum(unsigned char *data, int size) { unsigned char csum = 0; for (; size > 0; size--, data++) csum += *data; return -csum; } EXPORT_SYMBOL(ipmb_checksum); static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, struct kernel_ipmi_msg *msg, struct ipmi_ipmb_addr *ipmb_addr, long msgid, unsigned char ipmb_seq, int broadcast, unsigned char source_address, unsigned char source_lun) { int i = broadcast; /* Format the IPMB header data. */ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_msg->data[1] = IPMI_SEND_MSG_CMD; smi_msg->data[2] = ipmb_addr->channel; if (broadcast) smi_msg->data[3] = 0; smi_msg->data[i+3] = ipmb_addr->slave_addr; smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2); smi_msg->data[i+6] = source_address; smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; smi_msg->data[i+8] = msg->cmd; /* Now tack on the data to the message. */ if (msg->data_len > 0) memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 9; /* Now calculate the checksum and tack it on. */ smi_msg->data[i+smi_msg->data_size] = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6); /* * Add on the checksum size and the offset from the * broadcast. */ smi_msg->data_size += 1 + i; smi_msg->msgid = msgid; } static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, struct kernel_ipmi_msg *msg, struct ipmi_lan_addr *lan_addr, long msgid, unsigned char ipmb_seq, unsigned char source_lun) { /* Format the IPMB header data. */ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_msg->data[1] = IPMI_SEND_MSG_CMD; smi_msg->data[2] = lan_addr->channel; smi_msg->data[3] = lan_addr->session_handle; smi_msg->data[4] = lan_addr->remote_SWID; smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2); smi_msg->data[7] = lan_addr->local_SWID; smi_msg->data[8] = (ipmb_seq << 2) | source_lun; smi_msg->data[9] = msg->cmd; /* Now tack on the data to the message. */ if (msg->data_len > 0) memcpy(&smi_msg->data[10], msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 10; /* Now calculate the checksum and tack it on. 
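 * ipmb_checksum() above is the usual IPMI two's-complement checksum:
 * it returns the byte that makes the covered bytes sum to zero modulo
 * 256.  A tiny self-contained illustration (plain C, not part of this
 * driver):
 *
 *	unsigned char buf[] = { 0x20, 0x18 };	// rsSA, netFn/rsLUN
 *	unsigned char csum = ipmb_checksum(buf, 2);
 *	// (0x20 + 0x18 + csum) & 0xff is always 0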
*/ smi_msg->data[smi_msg->data_size] = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7); /* * Add on the checksum size and the offset from the * broadcast. */ smi_msg->data_size += 1; smi_msg->msgid = msgid; } static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf, struct ipmi_smi_msg *smi_msg, int priority) { if (intf->curr_msg) { if (priority > 0) list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); else list_add_tail(&smi_msg->link, &intf->xmit_msgs); smi_msg = NULL; } else { intf->curr_msg = smi_msg; } return smi_msg; } static void smi_send(struct ipmi_smi *intf, const struct ipmi_smi_handlers *handlers, struct ipmi_smi_msg *smi_msg, int priority) { int run_to_completion = intf->run_to_completion; unsigned long flags = 0; if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); smi_msg = smi_add_send_msg(intf, smi_msg, priority); if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (smi_msg) handlers->sender(intf->send_info, smi_msg); } static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg) { return (((msg->netfn == IPMI_NETFN_APP_REQUEST) && ((msg->cmd == IPMI_COLD_RESET_CMD) || (msg->cmd == IPMI_WARM_RESET_CMD))) || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)); } static int i_ipmi_req_sysintf(struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, int retries, unsigned int retry_time_ms) { struct ipmi_system_interface_addr *smi_addr; if (msg->netfn & 1) /* Responses are not allowed to the SMI. */ return -EINVAL; smi_addr = (struct ipmi_system_interface_addr *) addr; if (smi_addr->lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); if ((msg->netfn == IPMI_NETFN_APP_REQUEST) && ((msg->cmd == IPMI_SEND_MSG_CMD) || (msg->cmd == IPMI_GET_MSG_CMD) || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { /* * We don't let the user do these, since we manage * the sequence numbers. 
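 * Any other request aimed at the local BMC is fine.  For reference, a
 * caller builds a system-interface request much like the internal
 * send_get_device_id_cmd() later in this file, roughly:
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg req = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *)&si,
 *				  msgid, &req, NULL, 0, -1, 0);
 *
 * where msgid is a caller-chosen cookie echoed back in the response and
 * -1/0 select the default retry count and retry interval.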
*/ ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } if (is_maintenance_mode_cmd(msg)) { unsigned long flags; spin_lock_irqsave(&intf->maintenance_mode_lock, flags); intf->auto_maintenance_timeout = maintenance_mode_timeout_ms; if (!intf->maintenance_mode && !intf->maintenance_mode_enable) { intf->maintenance_mode_enable = true; maintenance_mode_update(intf); } spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); } if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); return -EMSGSIZE; } smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); smi_msg->data[1] = msg->cmd; smi_msg->msgid = msgid; smi_msg->user_data = recv_msg; if (msg->data_len > 0) memcpy(&smi_msg->data[2], msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 2; ipmi_inc_stat(intf, sent_local_commands); return 0; } static int i_ipmi_req_ipmb(struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, unsigned char source_address, unsigned char source_lun, int retries, unsigned int retry_time_ms) { struct ipmi_ipmb_addr *ipmb_addr; unsigned char ipmb_seq; long seqid; int broadcast = 0; struct ipmi_channel *chans; int rv = 0; if (addr->channel >= IPMI_MAX_CHANNELS) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } chans = READ_ONCE(intf->channel_list)->c; if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { /* * Broadcasts add a zero at the beginning of the * message, but otherwise is the same as an IPMB * address. */ addr->addr_type = IPMI_IPMB_ADDR_TYPE; broadcast = 1; retries = 0; /* Don't retry broadcasts. */ } /* * 9 for the header and 1 for the checksum, plus * possibly one for the broadcast. */ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); return -EMSGSIZE; } ipmb_addr = (struct ipmi_ipmb_addr *) addr; if (ipmb_addr->lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); if (recv_msg->msg.netfn & 0x1) { /* * It's a response, so use the user's sequence * from msgid. */ ipmi_inc_stat(intf, sent_ipmb_responses); format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, msgid, broadcast, source_address, source_lun); /* * Save the receive message so we can use it * to deliver the response. */ smi_msg->user_data = recv_msg; } else { /* It's a command, so get a sequence for it. */ unsigned long flags; spin_lock_irqsave(&intf->seq_lock, flags); if (is_maintenance_mode_cmd(msg)) intf->ipmb_maintenance_mode_timeout = maintenance_mode_timeout_ms; if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) /* Different default in maintenance mode */ retry_time_ms = default_maintenance_retry_ms; /* * Create a sequence number with a 1 second * timeout and 4 retries. */ rv = intf_next_seq(intf, recv_msg, retry_time_ms, retries, broadcast, &ipmb_seq, &seqid); if (rv) /* * We have used up all the sequence numbers, * probably, so abort. */ goto out_err; ipmi_inc_stat(intf, sent_ipmb_commands); /* * Store the sequence number in the message, * so that when the send message response * comes back we can start the timer. 
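 * (STORE_SEQ_IN_MSGID() and its counterpart GET_SEQ_FROM_MSGID(), used
 * by intf_start_seq_timer() and intf_err_seq() above, are defined
 * earlier in this file; they pack the sequence-table index together
 * with the per-slot seqid into the msgid carried by the message so the
 * response can be matched back to its slot.)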
*/ format_ipmb_msg(smi_msg, msg, ipmb_addr, STORE_SEQ_IN_MSGID(ipmb_seq, seqid), ipmb_seq, broadcast, source_address, source_lun); /* * Copy the message into the recv message data, so we * can retransmit it later if necessary. */ memcpy(recv_msg->msg_data, smi_msg->data, smi_msg->data_size); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = smi_msg->data_size; /* * We don't unlock until here, because we need * to copy the completed message into the * recv_msg before we release the lock. * Otherwise, race conditions may bite us. I * know that's pretty paranoid, but I prefer * to be correct. */ out_err: spin_unlock_irqrestore(&intf->seq_lock, flags); } return rv; } static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, unsigned char source_lun) { struct ipmi_ipmb_direct_addr *daddr; bool is_cmd = !(recv_msg->msg.netfn & 0x1); if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) return -EAFNOSUPPORT; /* Responses must have a completion code. */ if (!is_cmd && msg->data_len < 1) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); return -EMSGSIZE; } daddr = (struct ipmi_ipmb_direct_addr *) addr; if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; smi_msg->msgid = msgid; if (is_cmd) { smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; } else { smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; } smi_msg->data[1] = daddr->slave_addr; smi_msg->data[3] = msg->cmd; memcpy(smi_msg->data + 4, msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 4; smi_msg->user_data = recv_msg; return 0; } static int i_ipmi_req_lan(struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, unsigned char source_lun, int retries, unsigned int retry_time_ms) { struct ipmi_lan_addr *lan_addr; unsigned char ipmb_seq; long seqid; struct ipmi_channel *chans; int rv = 0; if (addr->channel >= IPMI_MAX_CHANNELS) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } chans = READ_ONCE(intf->channel_list)->c; if ((chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_8023LAN) && (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_ASYNC)) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } /* 11 for the header and 1 for the checksum. */ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { ipmi_inc_stat(intf, sent_invalid_commands); return -EMSGSIZE; } lan_addr = (struct ipmi_lan_addr *) addr; if (lan_addr->lun > 3) { ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL; } memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); if (recv_msg->msg.netfn & 0x1) { /* * It's a response, so use the user's sequence * from msgid. */ ipmi_inc_stat(intf, sent_lan_responses); format_lan_msg(smi_msg, msg, lan_addr, msgid, msgid, source_lun); /* * Save the receive message so we can use it * to deliver the response. */ smi_msg->user_data = recv_msg; } else { /* It's a command, so get a sequence for it. */ unsigned long flags; spin_lock_irqsave(&intf->seq_lock, flags); /* * Create a sequence number with a 1 second * timeout and 4 retries. 
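 * (More precisely, the timeout and retry count come from the
 * retry_time_ms and retries arguments; intf_next_seq() falls back to
 * the module-wide default_retry_ms and default_max_retries when they
 * are 0 and negative, respectively.)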
*/ rv = intf_next_seq(intf, recv_msg, retry_time_ms, retries, 0, &ipmb_seq, &seqid); if (rv) /* * We have used up all the sequence numbers, * probably, so abort. */ goto out_err; ipmi_inc_stat(intf, sent_lan_commands); /* * Store the sequence number in the message, * so that when the send message response * comes back we can start the timer. */ format_lan_msg(smi_msg, msg, lan_addr, STORE_SEQ_IN_MSGID(ipmb_seq, seqid), ipmb_seq, source_lun); /* * Copy the message into the recv message data, so we * can retransmit it later if necessary. */ memcpy(recv_msg->msg_data, smi_msg->data, smi_msg->data_size); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = smi_msg->data_size; /* * We don't unlock until here, because we need * to copy the completed message into the * recv_msg before we release the lock. * Otherwise, race conditions may bite us. I * know that's pretty paranoid, but I prefer * to be correct. */ out_err: spin_unlock_irqrestore(&intf->seq_lock, flags); } return rv; } /* * Separate from ipmi_request so that the user does not have to be * supplied in certain circumstances (mainly at panic time). If * messages are supplied, they will be freed, even if an error * occurs. */ static int i_ipmi_request(struct ipmi_user *user, struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority, unsigned char source_address, unsigned char source_lun, int retries, unsigned int retry_time_ms) { struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; int rv = 0; if (user) { if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { /* Decrement will happen at the end of the routine. */ rv = -EBUSY; goto out; } } if (supplied_recv) recv_msg = supplied_recv; else { recv_msg = ipmi_alloc_recv_msg(); if (recv_msg == NULL) { rv = -ENOMEM; goto out; } } recv_msg->user_msg_data = user_msg_data; if (supplied_smi) smi_msg = supplied_smi; else { smi_msg = ipmi_alloc_smi_msg(); if (smi_msg == NULL) { if (!supplied_recv) ipmi_free_recv_msg(recv_msg); rv = -ENOMEM; goto out; } } rcu_read_lock(); if (intf->in_shutdown) { rv = -ENODEV; goto out_err; } recv_msg->user = user; if (user) /* The put happens when the message is freed. */ kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout * responses can get the proper response data. */ recv_msg->msg = *msg; if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, recv_msg, retries, retry_time_ms); } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, source_address, source_lun, retries, retry_time_ms); } else if (is_ipmb_direct_addr(addr)) { rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, recv_msg, source_lun); } else if (is_lan_addr(addr)) { rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, source_lun, retries, retry_time_ms); } else { /* Unknown address type. 
*/ ipmi_inc_stat(intf, sent_invalid_commands); rv = -EINVAL; } if (rv) { out_err: ipmi_free_smi_msg(smi_msg); ipmi_free_recv_msg(recv_msg); } else { dev_dbg(intf->si_dev, "Send: %*ph\n", smi_msg->data_size, smi_msg->data); smi_send(intf, intf->handlers, smi_msg, priority); } rcu_read_unlock(); out: if (rv && user) atomic_dec(&user->nr_msgs); return rv; } static int check_addr(struct ipmi_smi *intf, struct ipmi_addr *addr, unsigned char *saddr, unsigned char *lun) { if (addr->channel >= IPMI_MAX_CHANNELS) return -EINVAL; addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); *lun = intf->addrinfo[addr->channel].lun; *saddr = intf->addrinfo[addr->channel].address; return 0; } int ipmi_request_settime(struct ipmi_user *user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, int priority, int retries, unsigned int retry_time_ms) { unsigned char saddr = 0, lun = 0; int rv, index; if (!user) return -EINVAL; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; rv = check_addr(user->intf, addr, &saddr, &lun); if (!rv) rv = i_ipmi_request(user, user->intf, addr, msgid, msg, user_msg_data, NULL, NULL, priority, saddr, lun, retries, retry_time_ms); release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_request_settime); int ipmi_request_supply_msgs(struct ipmi_user *user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority) { unsigned char saddr = 0, lun = 0; int rv, index; if (!user) return -EINVAL; user = acquire_ipmi_user(user, &index); if (!user) return -ENODEV; rv = check_addr(user->intf, addr, &saddr, &lun); if (!rv) rv = i_ipmi_request(user, user->intf, addr, msgid, msg, user_msg_data, supplied_smi, supplied_recv, priority, saddr, lun, -1, 0); release_ipmi_user(user, index); return rv; } EXPORT_SYMBOL(ipmi_request_supply_msgs); static void bmc_device_id_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { int rv; if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { dev_warn(intf->si_dev, "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); return; } if (msg->msg.data[0]) { dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", msg->msg.data[0]); intf->bmc->dyn_id_set = 0; goto out; } rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); if (rv) { dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); /* record completion code when error */ intf->bmc->cc = msg->msg.data[0]; intf->bmc->dyn_id_set = 0; } else { /* * Make sure the id data is available before setting * dyn_id_set. 
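 * The smp_wmb() below pairs with the smp_rmb() in __get_device_id(),
 * which waits on dyn_id_set and must not observe the flag before the
 * freshly written fetch_id contents.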
*/ smp_wmb(); intf->bmc->dyn_id_set = 1; } out: wake_up(&intf->waitq); } static int send_get_device_id_cmd(struct ipmi_smi *intf) { struct ipmi_system_interface_addr si; struct kernel_ipmi_msg msg; si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si.channel = IPMI_BMC_CHANNEL; si.lun = 0; msg.netfn = IPMI_NETFN_APP_REQUEST; msg.cmd = IPMI_GET_DEVICE_ID_CMD; msg.data = NULL; msg.data_len = 0; return i_ipmi_request(NULL, intf, (struct ipmi_addr *) &si, 0, &msg, intf, NULL, NULL, 0, intf->addrinfo[0].address, intf->addrinfo[0].lun, -1, 0); } static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) { int rv; unsigned int retry_count = 0; intf->null_user_handler = bmc_device_id_handler; retry: bmc->cc = 0; bmc->dyn_id_set = 2; rv = send_get_device_id_cmd(intf); if (rv) goto out_reset_handler; wait_event(intf->waitq, bmc->dyn_id_set != 2); if (!bmc->dyn_id_set) { if (bmc->cc != IPMI_CC_NO_ERROR && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { msleep(500); dev_warn(intf->si_dev, "BMC returned 0x%2.2x, retry get bmc device id\n", bmc->cc); goto retry; } rv = -EIO; /* Something went wrong in the fetch. */ } /* dyn_id_set makes the id data available. */ smp_rmb(); out_reset_handler: intf->null_user_handler = NULL; return rv; } /* * Fetch the device id for the bmc/interface. You must pass in either * bmc or intf, this code will get the other one. If the data has * been recently fetched, this will just use the cached data. Otherwise * it will run a new fetch. * * Except for the first time this is called (in ipmi_add_smi()), * this will always return good data; */ static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, struct ipmi_device_id *id, bool *guid_set, guid_t *guid, int intf_num) { int rv = 0; int prev_dyn_id_set, prev_guid_set; bool intf_set = intf != NULL; if (!intf) { mutex_lock(&bmc->dyn_mutex); retry_bmc_lock: if (list_empty(&bmc->intfs)) { mutex_unlock(&bmc->dyn_mutex); return -ENOENT; } intf = list_first_entry(&bmc->intfs, struct ipmi_smi, bmc_link); kref_get(&intf->refcount); mutex_unlock(&bmc->dyn_mutex); mutex_lock(&intf->bmc_reg_mutex); mutex_lock(&bmc->dyn_mutex); if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, bmc_link)) { mutex_unlock(&intf->bmc_reg_mutex); kref_put(&intf->refcount, intf_free); goto retry_bmc_lock; } } else { mutex_lock(&intf->bmc_reg_mutex); bmc = intf->bmc; mutex_lock(&bmc->dyn_mutex); kref_get(&intf->refcount); } /* If we have a valid and current ID, just return that. */ if (intf->in_bmc_register || (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) goto out_noprocessing; prev_guid_set = bmc->dyn_guid_set; __get_guid(intf); prev_dyn_id_set = bmc->dyn_id_set; rv = __get_device_id(intf, bmc); if (rv) goto out; /* * The guid, device id, manufacturer id, and product id should * not change on a BMC. If it does we have to do some dancing. */ if (!intf->bmc_registered || (!prev_guid_set && bmc->dyn_guid_set) || (!prev_dyn_id_set && bmc->dyn_id_set) || (prev_guid_set && bmc->dyn_guid_set && !guid_equal(&bmc->guid, &bmc->fetch_guid)) || bmc->id.device_id != bmc->fetch_id.device_id || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id || bmc->id.product_id != bmc->fetch_id.product_id) { struct ipmi_device_id id = bmc->fetch_id; int guid_set = bmc->dyn_guid_set; guid_t guid; guid = bmc->fetch_guid; mutex_unlock(&bmc->dyn_mutex); __ipmi_bmc_unregister(intf); /* Fill in the temporary BMC for good measure. 
*/ intf->bmc->id = id; intf->bmc->dyn_guid_set = guid_set; intf->bmc->guid = guid; if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) need_waiter(intf); /* Retry later on an error. */ else __scan_channels(intf, &id); if (!intf_set) { /* * We weren't given the interface on the * command line, so restart the operation on * the next interface for the BMC. */ mutex_unlock(&intf->bmc_reg_mutex); mutex_lock(&bmc->dyn_mutex); goto retry_bmc_lock; } /* We have a new BMC, set it up. */ bmc = intf->bmc; mutex_lock(&bmc->dyn_mutex); goto out_noprocessing; } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) /* Version info changes, scan the channels again. */ __scan_channels(intf, &bmc->fetch_id); bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; out: if (rv && prev_dyn_id_set) { rv = 0; /* Ignore failures if we have previous data. */ bmc->dyn_id_set = prev_dyn_id_set; } if (!rv) { bmc->id = bmc->fetch_id; if (bmc->dyn_guid_set) bmc->guid = bmc->fetch_guid; else if (prev_guid_set) /* * The guid used to be valid and it failed to fetch, * just use the cached value. */ bmc->dyn_guid_set = prev_guid_set; } out_noprocessing: if (!rv) { if (id) *id = bmc->id; if (guid_set) *guid_set = bmc->dyn_guid_set; if (guid && bmc->dyn_guid_set) *guid = bmc->guid; } mutex_unlock(&bmc->dyn_mutex); mutex_unlock(&intf->bmc_reg_mutex); kref_put(&intf->refcount, intf_free); return rv; } static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, struct ipmi_device_id *id, bool *guid_set, guid_t *guid) { return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); } static ssize_t device_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "%u\n", id.device_id); } static DEVICE_ATTR_RO(device_id); static ssize_t provides_device_sdrs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); } static DEVICE_ATTR_RO(provides_device_sdrs); static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); } static DEVICE_ATTR_RO(revision); static ssize_t firmware_revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, id.firmware_revision_2); } static DEVICE_ATTR_RO(firmware_revision); static ssize_t ipmi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "%u.%u\n", ipmi_version_major(&id), ipmi_version_minor(&id)); } static DEVICE_ATTR_RO(ipmi_version); static ssize_t add_dev_support_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; 
int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); } static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL); static ssize_t manufacturer_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); } static DEVICE_ATTR_RO(manufacturer_id); static ssize_t product_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "0x%4.4x\n", id.product_id); } static DEVICE_ATTR_RO(product_id); static ssize_t aux_firmware_rev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); struct ipmi_device_id id; int rv; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); if (rv) return rv; return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", id.aux_firmware_revision[3], id.aux_firmware_revision[2], id.aux_firmware_revision[1], id.aux_firmware_revision[0]); } static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bmc_device *bmc = to_bmc_device(dev); bool guid_set; guid_t guid; int rv; rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); if (rv) return rv; if (!guid_set) return -ENOENT; return sysfs_emit(buf, "%pUl\n", &guid); } static DEVICE_ATTR_RO(guid); static struct attribute *bmc_dev_attrs[] = { &dev_attr_device_id.attr, &dev_attr_provides_device_sdrs.attr, &dev_attr_revision.attr, &dev_attr_firmware_revision.attr, &dev_attr_ipmi_version.attr, &dev_attr_additional_device_support.attr, &dev_attr_manufacturer_id.attr, &dev_attr_product_id.attr, &dev_attr_aux_firmware_revision.attr, &dev_attr_guid.attr, NULL }; static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); struct bmc_device *bmc = to_bmc_device(dev); umode_t mode = attr->mode; int rv; if (attr == &dev_attr_aux_firmware_revision.attr) { struct ipmi_device_id id; rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); return (!rv && id.aux_firmware_revision_set) ? mode : 0; } if (attr == &dev_attr_guid.attr) { bool guid_set; rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); return (!rv && guid_set) ? mode : 0; } return mode; } static const struct attribute_group bmc_dev_attr_group = { .attrs = bmc_dev_attrs, .is_visible = bmc_dev_attr_is_visible, }; static const struct attribute_group *bmc_dev_attr_groups[] = { &bmc_dev_attr_group, NULL }; static const struct device_type bmc_device_type = { .groups = bmc_dev_attr_groups, }; static int __find_bmc_guid(struct device *dev, const void *data) { const guid_t *guid = data; struct bmc_device *bmc; int rv; if (dev->type != &bmc_device_type) return 0; bmc = to_bmc_device(dev); rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); if (rv) rv = kref_get_unless_zero(&bmc->usecount); return rv; } /* * Returns with the bmc's usecount incremented, if it is non-NULL. 
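 * The caller is expected to drop that reference with
 * kref_put(&bmc->usecount, cleanup_bmc_device) once it is done with
 * the bmc_device; see __ipmi_bmc_unregister() and the error paths in
 * __ipmi_bmc_register().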
*/ static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, guid_t *guid) { struct device *dev; struct bmc_device *bmc = NULL; dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); if (dev) { bmc = to_bmc_device(dev); put_device(dev); } return bmc; } struct prod_dev_id { unsigned int product_id; unsigned char device_id; }; static int __find_bmc_prod_dev_id(struct device *dev, const void *data) { const struct prod_dev_id *cid = data; struct bmc_device *bmc; int rv; if (dev->type != &bmc_device_type) return 0; bmc = to_bmc_device(dev); rv = (bmc->id.product_id == cid->product_id && bmc->id.device_id == cid->device_id); if (rv) rv = kref_get_unless_zero(&bmc->usecount); return rv; } /* * Returns with the bmc's usecount incremented, if it is non-NULL. */ static struct bmc_device *ipmi_find_bmc_prod_dev_id( struct device_driver *drv, unsigned int product_id, unsigned char device_id) { struct prod_dev_id id = { .product_id = product_id, .device_id = device_id, }; struct device *dev; struct bmc_device *bmc = NULL; dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); if (dev) { bmc = to_bmc_device(dev); put_device(dev); } return bmc; } static DEFINE_IDA(ipmi_bmc_ida); static void release_bmc_device(struct device *dev) { kfree(to_bmc_device(dev)); } static void cleanup_bmc_work(struct work_struct *work) { struct bmc_device *bmc = container_of(work, struct bmc_device, remove_work); int id = bmc->pdev.id; /* Unregister overwrites id */ platform_device_unregister(&bmc->pdev); ida_simple_remove(&ipmi_bmc_ida, id); } static void cleanup_bmc_device(struct kref *ref) { struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); /* * Remove the platform device in a work queue to avoid issues * with removing the device attributes while reading a device * attribute. */ queue_work(remove_work_wq, &bmc->remove_work); } /* * Must be called with intf->bmc_reg_mutex held. */ static void __ipmi_bmc_unregister(struct ipmi_smi *intf) { struct bmc_device *bmc = intf->bmc; if (!intf->bmc_registered) return; sysfs_remove_link(&intf->si_dev->kobj, "bmc"); sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); kfree(intf->my_dev_name); intf->my_dev_name = NULL; mutex_lock(&bmc->dyn_mutex); list_del(&intf->bmc_link); mutex_unlock(&bmc->dyn_mutex); intf->bmc = &intf->tmp_bmc; kref_put(&bmc->usecount, cleanup_bmc_device); intf->bmc_registered = false; } static void ipmi_bmc_unregister(struct ipmi_smi *intf) { mutex_lock(&intf->bmc_reg_mutex); __ipmi_bmc_unregister(intf); mutex_unlock(&intf->bmc_reg_mutex); } /* * Must be called with intf->bmc_reg_mutex held. */ static int __ipmi_bmc_register(struct ipmi_smi *intf, struct ipmi_device_id *id, bool guid_set, guid_t *guid, int intf_num) { int rv; struct bmc_device *bmc; struct bmc_device *old_bmc; /* * platform_device_register() can cause bmc_reg_mutex to * be claimed because of the is_visible functions of * the attributes. Eliminate possible recursion and * release the lock. 
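 * The in_bmc_register flag set below is what actually breaks the
 * recursion: __bmc_get_device_id() checks it and returns the cached
 * data without re-entering the registration path while the device
 * attributes are being evaluated.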
*/ intf->in_bmc_register = true; mutex_unlock(&intf->bmc_reg_mutex); /* * Try to find if there is an bmc_device struct * representing the interfaced BMC already */ mutex_lock(&ipmidriver_mutex); if (guid_set) old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); else old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, id->product_id, id->device_id); /* * If there is already an bmc_device, free the new one, * otherwise register the new BMC device */ if (old_bmc) { bmc = old_bmc; /* * Note: old_bmc already has usecount incremented by * the BMC find functions. */ intf->bmc = old_bmc; mutex_lock(&bmc->dyn_mutex); list_add_tail(&intf->bmc_link, &bmc->intfs); mutex_unlock(&bmc->dyn_mutex); dev_info(intf->si_dev, "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", bmc->id.manufacturer_id, bmc->id.product_id, bmc->id.device_id); } else { bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); if (!bmc) { rv = -ENOMEM; goto out; } INIT_LIST_HEAD(&bmc->intfs); mutex_init(&bmc->dyn_mutex); INIT_WORK(&bmc->remove_work, cleanup_bmc_work); bmc->id = *id; bmc->dyn_id_set = 1; bmc->dyn_guid_set = guid_set; bmc->guid = *guid; bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; bmc->pdev.name = "ipmi_bmc"; rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); if (rv < 0) { kfree(bmc); goto out; } bmc->pdev.dev.driver = &ipmidriver.driver; bmc->pdev.id = rv; bmc->pdev.dev.release = release_bmc_device; bmc->pdev.dev.type = &bmc_device_type; kref_init(&bmc->usecount); intf->bmc = bmc; mutex_lock(&bmc->dyn_mutex); list_add_tail(&intf->bmc_link, &bmc->intfs); mutex_unlock(&bmc->dyn_mutex); rv = platform_device_register(&bmc->pdev); if (rv) { dev_err(intf->si_dev, "Unable to register bmc device: %d\n", rv); goto out_list_del; } dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", bmc->id.manufacturer_id, bmc->id.product_id, bmc->id.device_id); } /* * create symlink from system interface device to bmc device * and back. 
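 * The interface's device gets a "bmc" link pointing at the BMC
 * platform device, and the BMC device gets a link named "ipmi<n>"
 * (built from intf_num below) pointing back at the interface, so user
 * space can walk the association in either direction.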
*/ rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); if (rv) { dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); goto out_put_bmc; } if (intf_num == -1) intf_num = intf->intf_num; intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); if (!intf->my_dev_name) { rv = -ENOMEM; dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", rv); goto out_unlink1; } rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", rv); goto out_free_my_dev_name; } intf->bmc_registered = true; out: mutex_unlock(&ipmidriver_mutex); mutex_lock(&intf->bmc_reg_mutex); intf->in_bmc_register = false; return rv; out_free_my_dev_name: kfree(intf->my_dev_name); intf->my_dev_name = NULL; out_unlink1: sysfs_remove_link(&intf->si_dev->kobj, "bmc"); out_put_bmc: mutex_lock(&bmc->dyn_mutex); list_del(&intf->bmc_link); mutex_unlock(&bmc->dyn_mutex); intf->bmc = &intf->tmp_bmc; kref_put(&bmc->usecount, cleanup_bmc_device); goto out; out_list_del: mutex_lock(&bmc->dyn_mutex); list_del(&intf->bmc_link); mutex_unlock(&bmc->dyn_mutex); intf->bmc = &intf->tmp_bmc; put_device(&bmc->pdev.dev); goto out; } static int send_guid_cmd(struct ipmi_smi *intf, int chan) { struct kernel_ipmi_msg msg; struct ipmi_system_interface_addr si; si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si.channel = IPMI_BMC_CHANNEL; si.lun = 0; msg.netfn = IPMI_NETFN_APP_REQUEST; msg.cmd = IPMI_GET_DEVICE_GUID_CMD; msg.data = NULL; msg.data_len = 0; return i_ipmi_request(NULL, intf, (struct ipmi_addr *) &si, 0, &msg, intf, NULL, NULL, 0, intf->addrinfo[0].address, intf->addrinfo[0].lun, -1, 0); } static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { struct bmc_device *bmc = intf->bmc; if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) /* Not for me */ return; if (msg->msg.data[0] != 0) { /* Error from getting the GUID, the BMC doesn't have one. */ bmc->dyn_guid_set = 0; goto out; } if (msg->msg.data_len < UUID_SIZE + 1) { bmc->dyn_guid_set = 0; dev_warn(intf->si_dev, "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", msg->msg.data_len, UUID_SIZE + 1); goto out; } import_guid(&bmc->fetch_guid, msg->msg.data + 1); /* * Make sure the guid data is available before setting * dyn_guid_set. */ smp_wmb(); bmc->dyn_guid_set = 1; out: wake_up(&intf->waitq); } static void __get_guid(struct ipmi_smi *intf) { int rv; struct bmc_device *bmc = intf->bmc; bmc->dyn_guid_set = 2; intf->null_user_handler = guid_handler; rv = send_guid_cmd(intf, 0); if (rv) /* Send failed, no GUID available. */ bmc->dyn_guid_set = 0; else wait_event(intf->waitq, bmc->dyn_guid_set != 2); /* dyn_guid_set makes the guid data available. 
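 *
 * The pairing here is: guid_handler() stores the GUID, issues smp_wmb(),
 * and only then sets dyn_guid_set to 1, while this side waits for the
 * flag and issues smp_rmb() before it may look at bmc->fetch_guid.  A
 * stripped-down sketch of the same publish/consume ordering (the names
 * "payload", "ready" and "wq" are illustrative, not fields of this
 * driver):
 *
 *     // writer (the message handler)
 *     payload = value;
 *     smp_wmb();
 *     ready = 1;
 *     wake_up(&wq);
 *
 *     // reader (the waiter)
 *     wait_event(wq, ready != 0);
 *     smp_rmb();
 *     use(payload);
 *
 * Without the barrier pair a CPU could observe ready == 1 and still see
 * a stale payload.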
*/ smp_rmb(); intf->null_user_handler = NULL; } static int send_channel_info_cmd(struct ipmi_smi *intf, int chan) { struct kernel_ipmi_msg msg; unsigned char data[1]; struct ipmi_system_interface_addr si; si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si.channel = IPMI_BMC_CHANNEL; si.lun = 0; msg.netfn = IPMI_NETFN_APP_REQUEST; msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; msg.data = data; msg.data_len = 1; data[0] = chan; return i_ipmi_request(NULL, intf, (struct ipmi_addr *) &si, 0, &msg, intf, NULL, NULL, 0, intf->addrinfo[0].address, intf->addrinfo[0].lun, -1, 0); } static void channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { int rv = 0; int ch; unsigned int set = intf->curr_working_cset; struct ipmi_channel *chans; if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { /* It's the one we want */ if (msg->msg.data[0] != 0) { /* Got an error from the channel, just go on. */ if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { /* * If the MC does not support this * command, that is legal. We just * assume it has one IPMB at channel * zero. */ intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; intf->channel_list = intf->wchannels + set; intf->channels_ready = true; wake_up(&intf->waitq); goto out; } goto next_channel; } if (msg->msg.data_len < 4) { /* Message not big enough, just go on. */ goto next_channel; } ch = intf->curr_channel; chans = intf->wchannels[set].c; chans[ch].medium = msg->msg.data[2] & 0x7f; chans[ch].protocol = msg->msg.data[3] & 0x1f; next_channel: intf->curr_channel++; if (intf->curr_channel >= IPMI_MAX_CHANNELS) { intf->channel_list = intf->wchannels + set; intf->channels_ready = true; wake_up(&intf->waitq); } else { intf->channel_list = intf->wchannels + set; intf->channels_ready = true; rv = send_channel_info_cmd(intf, intf->curr_channel); } if (rv) { /* Got an error somehow, just give up. */ dev_warn(intf->si_dev, "Error sending channel information for channel %d: %d\n", intf->curr_channel, rv); intf->channel_list = intf->wchannels + set; intf->channels_ready = true; wake_up(&intf->waitq); } } out: return; } /* * Must be holding intf->bmc_reg_mutex to call this. */ static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) { int rv; if (ipmi_version_major(id) > 1 || (ipmi_version_major(id) == 1 && ipmi_version_minor(id) >= 5)) { unsigned int set; /* * Start scanning the channels to see what is * available. */ set = !intf->curr_working_cset; intf->curr_working_cset = set; memset(&intf->wchannels[set], 0, sizeof(struct ipmi_channel_set)); intf->null_user_handler = channel_handler; intf->curr_channel = 0; rv = send_channel_info_cmd(intf, 0); if (rv) { dev_warn(intf->si_dev, "Error sending channel information for channel 0, %d\n", rv); intf->null_user_handler = NULL; return -EIO; } /* Wait for the channel info to be read. */ wait_event(intf->waitq, intf->channels_ready); intf->null_user_handler = NULL; } else { unsigned int set = intf->curr_working_cset; /* Assume a single IPMB channel at zero. 
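 *
 * Either way the result is published the same way: the scan (or this
 * fallback) fills the wchannels[] copy that is not currently in use,
 * and only when it is complete does channel_list get pointed at it and
 * channels_ready set, so readers doing READ_ONCE(intf->channel_list)
 * never see a half-initialized table.  Condensed sketch of the idea,
 * with a hypothetical table[2]/cur pair standing in for the real
 * fields:
 *
 *     new = !cur;                      // pick the unpublished copy
 *     memset(&table[new], 0, sizeof(table[new]));
 *     fill(&table[new]);               // may take many request cycles
 *     published = &table[new];         // one pointer store publishes it
 *     cur = new;
 *
 * Readers only ever dereference "published", never table[] directly.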
*/ intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; intf->channel_list = intf->wchannels + set; intf->channels_ready = true; } return 0; } static void ipmi_poll(struct ipmi_smi *intf) { if (intf->handlers->poll) intf->handlers->poll(intf->send_info); /* In case something came in */ handle_new_recv_msgs(intf); } void ipmi_poll_interface(struct ipmi_user *user) { ipmi_poll(user->intf); } EXPORT_SYMBOL(ipmi_poll_interface); static ssize_t nr_users_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipmi_smi *intf = container_of(attr, struct ipmi_smi, nr_users_devattr); return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); } static DEVICE_ATTR_RO(nr_users); static ssize_t nr_msgs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipmi_smi *intf = container_of(attr, struct ipmi_smi, nr_msgs_devattr); struct ipmi_user *user; int index; unsigned int count = 0; index = srcu_read_lock(&intf->users_srcu); list_for_each_entry_rcu(user, &intf->users, link) count += atomic_read(&user->nr_msgs); srcu_read_unlock(&intf->users_srcu, index); return sysfs_emit(buf, "%u\n", count); } static DEVICE_ATTR_RO(nr_msgs); static void redo_bmc_reg(struct work_struct *work) { struct ipmi_smi *intf = container_of(work, struct ipmi_smi, bmc_reg_work); if (!intf->in_shutdown) bmc_get_device_id(intf, NULL, NULL, NULL, NULL); kref_put(&intf->refcount, intf_free); } int ipmi_add_smi(struct module *owner, const struct ipmi_smi_handlers *handlers, void *send_info, struct device *si_dev, unsigned char slave_addr) { int i, j; int rv; struct ipmi_smi *intf, *tintf; struct list_head *link; struct ipmi_device_id id; /* * Make sure the driver is actually initialized, this handles * problems with initialization order. */ rv = ipmi_init_msghandler(); if (rv) return rv; intf = kzalloc(sizeof(*intf), GFP_KERNEL); if (!intf) return -ENOMEM; rv = init_srcu_struct(&intf->users_srcu); if (rv) { kfree(intf); return rv; } intf->owner = owner; intf->bmc = &intf->tmp_bmc; INIT_LIST_HEAD(&intf->bmc->intfs); mutex_init(&intf->bmc->dyn_mutex); INIT_LIST_HEAD(&intf->bmc_link); mutex_init(&intf->bmc_reg_mutex); intf->intf_num = -1; /* Mark it invalid for now. 
*/ kref_init(&intf->refcount); INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); intf->si_dev = si_dev; for (j = 0; j < IPMI_MAX_CHANNELS; j++) { intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; intf->addrinfo[j].lun = 2; } if (slave_addr != 0) intf->addrinfo[0].address = slave_addr; INIT_LIST_HEAD(&intf->users); atomic_set(&intf->nr_users, 0); intf->handlers = handlers; intf->send_info = send_info; spin_lock_init(&intf->seq_lock); for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { intf->seq_table[j].inuse = 0; intf->seq_table[j].seqid = 0; } intf->curr_seq = 0; spin_lock_init(&intf->waiting_rcv_msgs_lock); INIT_LIST_HEAD(&intf->waiting_rcv_msgs); tasklet_setup(&intf->recv_tasklet, smi_recv_tasklet); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); spin_lock_init(&intf->xmit_msgs_lock); INIT_LIST_HEAD(&intf->xmit_msgs); INIT_LIST_HEAD(&intf->hp_xmit_msgs); spin_lock_init(&intf->events_lock); spin_lock_init(&intf->watch_lock); atomic_set(&intf->event_waiters, 0); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; INIT_LIST_HEAD(&intf->waiting_events); intf->waiting_events_count = 0; mutex_init(&intf->cmd_rcvrs_mutex); spin_lock_init(&intf->maintenance_mode_lock); INIT_LIST_HEAD(&intf->cmd_rcvrs); init_waitqueue_head(&intf->waitq); for (i = 0; i < IPMI_NUM_STATS; i++) atomic_set(&intf->stats[i], 0); mutex_lock(&ipmi_interfaces_mutex); /* Look for a hole in the numbers. */ i = 0; link = &ipmi_interfaces; list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, ipmi_interfaces_mutex_held()) { if (tintf->intf_num != i) { link = &tintf->link; break; } i++; } /* Add the new interface in numeric order. */ if (i == 0) list_add_rcu(&intf->link, &ipmi_interfaces); else list_add_tail_rcu(&intf->link, link); rv = handlers->start_processing(send_info, intf); if (rv) goto out_err; rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); if (rv) { dev_err(si_dev, "Unable to get the device id: %d\n", rv); goto out_err_started; } mutex_lock(&intf->bmc_reg_mutex); rv = __scan_channels(intf, &id); mutex_unlock(&intf->bmc_reg_mutex); if (rv) goto out_err_bmc_reg; intf->nr_users_devattr = dev_attr_nr_users; sysfs_attr_init(&intf->nr_users_devattr.attr); rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); if (rv) goto out_err_bmc_reg; intf->nr_msgs_devattr = dev_attr_nr_msgs; sysfs_attr_init(&intf->nr_msgs_devattr.attr); rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); if (rv) { device_remove_file(intf->si_dev, &intf->nr_users_devattr); goto out_err_bmc_reg; } /* * Keep memory order straight for RCU readers. Make * sure everything else is committed to memory before * setting intf_num to mark the interface valid. */ smp_wmb(); intf->intf_num = i; mutex_unlock(&ipmi_interfaces_mutex); /* After this point the interface is legal to use. */ call_smi_watchers(i, intf->si_dev); return 0; out_err_bmc_reg: ipmi_bmc_unregister(intf); out_err_started: if (intf->handlers->shutdown) intf->handlers->shutdown(intf->send_info); out_err: list_del_rcu(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); synchronize_srcu(&ipmi_interfaces_srcu); cleanup_srcu_struct(&intf->users_srcu); kref_put(&intf->refcount, intf_free); return rv; } EXPORT_SYMBOL(ipmi_add_smi); static void deliver_smi_err_response(struct ipmi_smi *intf, struct ipmi_smi_msg *msg, unsigned char err) { int rv; msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = err; msg->rsp_size = 3; /* This will never requeue, but it may ask us to free the message. 
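 *
 * The three bytes written above follow the usual system-interface
 * framing: byte 0 is (netfn << 2) | LUN, and a response uses the
 * request's netfn + 1, so OR-ing in 4 (bit 2, the low bit of the netfn
 * field) turns the saved request header into a response header.  Sketch
 * of the same construction for an arbitrary request buffer (the req/rsp
 * names are illustrative):
 *
 *     rsp[0] = req[0] | (1 << 2);   // even request netfn -> odd response
 *     rsp[1] = req[1];              // same command code
 *     rsp[2] = completion_code;     // e.g. IPMI_ERR_UNSPECIFIED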
*/ rv = handle_one_recv_msg(intf, msg); if (rv == 0) ipmi_free_smi_msg(msg); } static void cleanup_smi_msgs(struct ipmi_smi *intf) { int i; struct seq_table *ent; struct ipmi_smi_msg *msg; struct list_head *entry; struct list_head tmplist; /* Clear out our transmit queues and hold the messages. */ INIT_LIST_HEAD(&tmplist); list_splice_tail(&intf->hp_xmit_msgs, &tmplist); list_splice_tail(&intf->xmit_msgs, &tmplist); /* Current message first, to preserve order */ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { /* Wait for the message to clear out. */ schedule_timeout(1); } /* No need for locks, the interface is down. */ /* * Return errors for all pending messages in queue and in the * tables waiting for remote responses. */ while (!list_empty(&tmplist)) { entry = tmplist.next; list_del(entry); msg = list_entry(entry, struct ipmi_smi_msg, link); deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); } for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { ent = &intf->seq_table[i]; if (!ent->inuse) continue; deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); } } void ipmi_unregister_smi(struct ipmi_smi *intf) { struct ipmi_smi_watcher *w; int intf_num, index; if (!intf) return; intf_num = intf->intf_num; mutex_lock(&ipmi_interfaces_mutex); intf->intf_num = -1; intf->in_shutdown = true; list_del_rcu(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); synchronize_srcu(&ipmi_interfaces_srcu); /* At this point no users can be added to the interface. */ device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); device_remove_file(intf->si_dev, &intf->nr_users_devattr); /* * Call all the watcher interfaces to tell them that * an interface is going away. */ mutex_lock(&smi_watchers_mutex); list_for_each_entry(w, &smi_watchers, link) w->smi_gone(intf_num); mutex_unlock(&smi_watchers_mutex); index = srcu_read_lock(&intf->users_srcu); while (!list_empty(&intf->users)) { struct ipmi_user *user = container_of(list_next_rcu(&intf->users), struct ipmi_user, link); _ipmi_destroy_user(user); } srcu_read_unlock(&intf->users_srcu, index); if (intf->handlers->shutdown) intf->handlers->shutdown(intf->send_info); cleanup_smi_msgs(intf); ipmi_bmc_unregister(intf); cleanup_srcu_struct(&intf->users_srcu); kref_put(&intf->refcount, intf_free); } EXPORT_SYMBOL(ipmi_unregister_smi); static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct ipmi_ipmb_addr ipmb_addr; struct ipmi_recv_msg *recv_msg; /* * This is 11, not 10, because the response must contain a * completion code. */ if (msg->rsp_size < 11) { /* Message not big enough, just ignore it. */ ipmi_inc_stat(intf, invalid_ipmb_responses); return 0; } if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ return 0; } ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; ipmb_addr.slave_addr = msg->rsp[6]; ipmb_addr.channel = msg->rsp[3] & 0x0f; ipmb_addr.lun = msg->rsp[7] & 3; /* * It's a response from a remote entity. Look up the sequence * number and handle the response. */ if (intf_find_seq(intf, msg->rsp[7] >> 2, msg->rsp[3] & 0x0f, msg->rsp[8], (msg->rsp[4] >> 2) & (~1), (struct ipmi_addr *) &ipmb_addr, &recv_msg)) { /* * We were unable to find the sequence number, * so just nuke the message. */ ipmi_inc_stat(intf, unhandled_ipmb_responses); return 0; } memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); /* * The other fields matched, so no need to set them, except * for netfn, which needs to be the response that was * returned, not the request value. 
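 *
 * For reference, the rsp[] indices used above follow the Get Message
 * response layout as this handler consumes it (summarized from the
 * parsing code here rather than quoted from the specification):
 *
 *     rsp[0]   netfn/LUN of the Get Message response itself
 *     rsp[1]   the Get Message command code
 *     rsp[2]   completion code
 *     rsp[3]   channel number (low nibble)
 *     rsp[4]   encapsulated netfn (bits 7:2) and LUN (bits 1:0)
 *     rsp[5]   IPMB header checksum
 *     rsp[6]   remote slave address
 *     rsp[7]   sequence number (bits 7:2) and LUN (bits 1:0)
 *     rsp[8]   encapsulated command
 *     rsp[9]+  message data, with the trailing IPMB checksum last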
*/ recv_msg->msg.netfn = msg->rsp[4] >> 2; recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 10; recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; if (deliver_response(intf, recv_msg)) ipmi_inc_stat(intf, unhandled_ipmb_responses); else ipmi_inc_stat(intf, handled_ipmb_responses); return 0; } static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct cmd_rcvr *rcvr; int rv = 0; unsigned char netfn; unsigned char cmd; unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_ipmb_addr *ipmb_addr; struct ipmi_recv_msg *recv_msg; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ ipmi_inc_stat(intf, invalid_commands); return 0; } if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ return 0; } netfn = msg->rsp[4] >> 2; cmd = msg->rsp[8]; chan = msg->rsp[3] & 0xf; rcu_read_lock(); rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; kref_get(&user->refcount); } else user = NULL; rcu_read_unlock(); if (user == NULL) { /* We didn't find a user, deliver an error response. */ ipmi_inc_stat(intf, unhandled_commands); msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); msg->data[1] = IPMI_SEND_MSG_CMD; msg->data[2] = msg->rsp[3]; msg->data[3] = msg->rsp[6]; msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); msg->data[5] = ipmb_checksum(&msg->data[3], 2); msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; /* rqseq/lun */ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); msg->data[8] = msg->rsp[8]; /* cmd */ msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; msg->data[10] = ipmb_checksum(&msg->data[6], 4); msg->data_size = 11; dev_dbg(intf->si_dev, "Invalid command: %*ph\n", msg->data_size, msg->data); rcu_read_lock(); if (!intf->in_shutdown) { smi_send(intf, intf->handlers, msg, 0); /* * We used the message, so return the value * that causes it to not be freed or * queued. */ rv = -1; } rcu_read_unlock(); } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; kref_put(&user->refcount, free_user); } else { /* Extract the source address from the data. */ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; ipmb_addr->slave_addr = msg->rsp[6]; ipmb_addr->lun = msg->rsp[7] & 3; ipmb_addr->channel = msg->rsp[3] & 0xf; /* * Extract the rest of the message information * from the IPMB header. */ recv_msg->user = user; recv_msg->recv_type = IPMI_CMD_RECV_TYPE; recv_msg->msgid = msg->rsp[7] >> 2; recv_msg->msg.netfn = msg->rsp[4] >> 2; recv_msg->msg.cmd = msg->rsp[8]; recv_msg->msg.data = recv_msg->msg_data; /* * We chop off 10, not 9 bytes because the checksum * at the end also needs to be removed. */ recv_msg->msg.data_len = msg->rsp_size - 10; memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 10); if (deliver_response(intf, recv_msg)) ipmi_inc_stat(intf, unhandled_commands); else ipmi_inc_stat(intf, handled_commands); } } return rv; } static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct cmd_rcvr *rcvr; int rv = 0; struct ipmi_user *user = NULL; struct ipmi_ipmb_direct_addr *daddr; struct ipmi_recv_msg *recv_msg; unsigned char netfn = msg->rsp[0] >> 2; unsigned char cmd = msg->rsp[3]; rcu_read_lock(); /* We always use channel 0 for direct messages. 
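 *
 * (The error reply assembled in handle_ipmb_get_msg_cmd() above leans on
 * ipmb_checksum(), defined earlier in this file.  IPMB checksums are
 * plain two's-complement sums: the checksum byte is chosen so that the
 * covered bytes plus the checksum add up to zero modulo 256.  A generic
 * sketch of such a helper, not this file's implementation verbatim:
 *
 *     static u8 example_ipmb_csum(const u8 *data, int len)
 *     {
 *             u8 sum = 0;
 *
 *             while (len-- > 0)
 *                     sum += *data++;
 *             return -sum;    // sum of data plus csum is 0 mod 256
 *     }
 *
 * which is why a receiver can validate a header simply by summing it,
 * checksum included, and comparing the result against zero.)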
*/ rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); if (rcvr) { user = rcvr->user; kref_get(&user->refcount); } else user = NULL; rcu_read_unlock(); if (user == NULL) { /* We didn't find a user, deliver an error response. */ ipmi_inc_stat(intf, unhandled_commands); msg->data[0] = (netfn + 1) << 2; msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ msg->data[1] = msg->rsp[1]; /* Addr */ msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ msg->data[3] = cmd; msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; msg->data_size = 5; rcu_read_lock(); if (!intf->in_shutdown) { smi_send(intf, intf->handlers, msg, 0); /* * We used the message, so return the value * that causes it to not be freed or * queued. */ rv = -1; } rcu_read_unlock(); } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; kref_put(&user->refcount, free_user); } else { /* Extract the source address from the data. */ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; daddr->channel = 0; daddr->slave_addr = msg->rsp[1]; daddr->rs_lun = msg->rsp[0] & 3; daddr->rq_lun = msg->rsp[2] & 3; /* * Extract the rest of the message information * from the IPMB header. */ recv_msg->user = user; recv_msg->recv_type = IPMI_CMD_RECV_TYPE; recv_msg->msgid = (msg->rsp[2] >> 2); recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[3]; recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 4; memcpy(recv_msg->msg_data, msg->rsp + 4, msg->rsp_size - 4); if (deliver_response(intf, recv_msg)) ipmi_inc_stat(intf, unhandled_commands); else ipmi_inc_stat(intf, handled_commands); } } return rv; } static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct ipmi_recv_msg *recv_msg; struct ipmi_ipmb_direct_addr *daddr; recv_msg = msg->user_data; if (recv_msg == NULL) { dev_warn(intf->si_dev, "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); return 0; } recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; recv_msg->msgid = msg->msgid; daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; daddr->channel = 0; daddr->slave_addr = msg->rsp[1]; daddr->rq_lun = msg->rsp[0] & 3; daddr->rs_lun = msg->rsp[2] & 3; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[3]; memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 4; deliver_local_response(intf, recv_msg); return 0; } static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct ipmi_lan_addr lan_addr; struct ipmi_recv_msg *recv_msg; /* * This is 13, not 12, because the response must contain a * completion code. */ if (msg->rsp_size < 13) { /* Message not big enough, just ignore it. */ ipmi_inc_stat(intf, invalid_lan_responses); return 0; } if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ return 0; } lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; lan_addr.session_handle = msg->rsp[4]; lan_addr.remote_SWID = msg->rsp[8]; lan_addr.local_SWID = msg->rsp[5]; lan_addr.channel = msg->rsp[3] & 0x0f; lan_addr.privilege = msg->rsp[3] >> 4; lan_addr.lun = msg->rsp[9] & 3; /* * It's a response from a remote entity. 
Look up the sequence * number and handle the response. */ if (intf_find_seq(intf, msg->rsp[9] >> 2, msg->rsp[3] & 0x0f, msg->rsp[10], (msg->rsp[6] >> 2) & (~1), (struct ipmi_addr *) &lan_addr, &recv_msg)) { /* * We were unable to find the sequence number, * so just nuke the message. */ ipmi_inc_stat(intf, unhandled_lan_responses); return 0; } memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); /* * The other fields matched, so no need to set them, except * for netfn, which needs to be the response that was * returned, not the request value. */ recv_msg->msg.netfn = msg->rsp[6] >> 2; recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 12; recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; if (deliver_response(intf, recv_msg)) ipmi_inc_stat(intf, unhandled_lan_responses); else ipmi_inc_stat(intf, handled_lan_responses); return 0; } static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct cmd_rcvr *rcvr; int rv = 0; unsigned char netfn; unsigned char cmd; unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_lan_addr *lan_addr; struct ipmi_recv_msg *recv_msg; if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. */ ipmi_inc_stat(intf, invalid_commands); return 0; } if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ return 0; } netfn = msg->rsp[6] >> 2; cmd = msg->rsp[10]; chan = msg->rsp[3] & 0xf; rcu_read_lock(); rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; kref_get(&user->refcount); } else user = NULL; rcu_read_unlock(); if (user == NULL) { /* We didn't find a user, just give up. */ ipmi_inc_stat(intf, unhandled_commands); /* * Don't do anything with these messages, just allow * them to be freed. */ rv = 0; } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling later. */ rv = 1; kref_put(&user->refcount, free_user); } else { /* Extract the source address from the data. */ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; lan_addr->session_handle = msg->rsp[4]; lan_addr->remote_SWID = msg->rsp[8]; lan_addr->local_SWID = msg->rsp[5]; lan_addr->lun = msg->rsp[9] & 3; lan_addr->channel = msg->rsp[3] & 0xf; lan_addr->privilege = msg->rsp[3] >> 4; /* * Extract the rest of the message information * from the IPMB header. */ recv_msg->user = user; recv_msg->recv_type = IPMI_CMD_RECV_TYPE; recv_msg->msgid = msg->rsp[9] >> 2; recv_msg->msg.netfn = msg->rsp[6] >> 2; recv_msg->msg.cmd = msg->rsp[10]; recv_msg->msg.data = recv_msg->msg_data; /* * We chop off 12, not 11 bytes because the checksum * at the end also needs to be removed. */ recv_msg->msg.data_len = msg->rsp_size - 12; memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 12); if (deliver_response(intf, recv_msg)) ipmi_inc_stat(intf, unhandled_commands); else ipmi_inc_stat(intf, handled_commands); } } return rv; } /* * This routine will handle "Get Message" command responses with * channels that use an OEM Medium. The message format belongs to * the OEM. See IPMI 2.0 specification, Chapter 6 and * Chapter 22, sections 22.6 and 22.24 for more details. 
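 *
 * In practice such commands only reach anyone if a client registered
 * for them beforehand via ipmi_register_for_cmd().  A minimal sketch of
 * that registration (the "user", my_oem_netfn and my_oem_cmd names are
 * placeholders, and the 0xffffffff mask assumes the caller wants every
 * channel):
 *
 *     // Deliver matching commands on any channel to this user.
 *     rv = ipmi_register_for_cmd(user, my_oem_netfn, my_oem_cmd,
 *                                0xffffffff);
 *     if (rv)
 *             pr_err("cannot register OEM command handler: %d\n", rv);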
*/ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct cmd_rcvr *rcvr; int rv = 0; unsigned char netfn; unsigned char cmd; unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_system_interface_addr *smi_addr; struct ipmi_recv_msg *recv_msg; /* * We expect the OEM SW to perform error checking * so we just do some basic sanity checks */ if (msg->rsp_size < 4) { /* Message not big enough, just ignore it. */ ipmi_inc_stat(intf, invalid_commands); return 0; } if (msg->rsp[2] != 0) { /* An error getting the response, just ignore it. */ return 0; } /* * This is an OEM Message so the OEM needs to know how * handle the message. We do no interpretation. */ netfn = msg->rsp[0] >> 2; cmd = msg->rsp[1]; chan = msg->rsp[3] & 0xf; rcu_read_lock(); rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; kref_get(&user->refcount); } else user = NULL; rcu_read_unlock(); if (user == NULL) { /* We didn't find a user, just give up. */ ipmi_inc_stat(intf, unhandled_commands); /* * Don't do anything with these messages, just allow * them to be freed. */ rv = 0; } else { recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; kref_put(&user->refcount, free_user); } else { /* * OEM Messages are expected to be delivered via * the system interface to SMS software. We might * need to visit this again depending on OEM * requirements */ smi_addr = ((struct ipmi_system_interface_addr *) &recv_msg->addr); smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr->channel = IPMI_BMC_CHANNEL; smi_addr->lun = msg->rsp[0] & 3; recv_msg->user = user; recv_msg->user_msg_data = NULL; recv_msg->recv_type = IPMI_OEM_RECV_TYPE; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[1]; recv_msg->msg.data = recv_msg->msg_data; /* * The message starts at byte 4 which follows the * Channel Byte in the "GET MESSAGE" command */ recv_msg->msg.data_len = msg->rsp_size - 4; memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); if (deliver_response(intf, recv_msg)) ipmi_inc_stat(intf, unhandled_commands); else ipmi_inc_stat(intf, handled_commands); } } return rv; } static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, struct ipmi_smi_msg *msg) { struct ipmi_system_interface_addr *smi_addr; recv_msg->msgid = 0; smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr->channel = IPMI_BMC_CHANNEL; smi_addr->lun = msg->rsp[0] & 3; recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[1]; memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 3; } static int handle_read_event_rsp(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct ipmi_recv_msg *recv_msg, *recv_msg2; struct list_head msgs; struct ipmi_user *user; int rv = 0, deliver_count = 0, index; unsigned long flags; if (msg->rsp_size < 19) { /* Message is too small to be an IPMB event. */ ipmi_inc_stat(intf, invalid_events); return 0; } if (msg->rsp[2] != 0) { /* An error getting the event, just ignore it. */ return 0; } INIT_LIST_HEAD(&msgs); spin_lock_irqsave(&intf->events_lock, flags); ipmi_inc_stat(intf, events); /* * Allocate and fill in one message for every user that is * getting events. 
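 *
 * Every recipient gets its own copy because an ipmi_recv_msg is handed
 * off to, and eventually freed by, exactly one user; sharing a single
 * message between readers would make ownership of the free ambiguous.
 * If any allocation fails, the copies built so far are discarded and
 * the SMI message is requeued, so no user ever sees a partial
 * broadcast.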
*/ index = srcu_read_lock(&intf->users_srcu); list_for_each_entry_rcu(user, &intf->users, link) { if (!user->gets_events) continue; recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { rcu_read_unlock(); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { list_del(&recv_msg->link); ipmi_free_recv_msg(recv_msg); } /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; goto out; } deliver_count++; copy_event_into_recv_msg(recv_msg, msg); recv_msg->user = user; kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } srcu_read_unlock(&intf->users_srcu, index); if (deliver_count) { /* Now deliver all the messages. */ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { list_del(&recv_msg->link); deliver_local_response(intf, recv_msg); } } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { /* * No one to receive the message, put it in queue if there's * not already too many things in the queue. */ recv_msg = ipmi_alloc_recv_msg(); if (!recv_msg) { /* * We couldn't allocate memory for the * message, so requeue it for handling * later. */ rv = 1; goto out; } copy_event_into_recv_msg(recv_msg, msg); list_add_tail(&recv_msg->link, &intf->waiting_events); intf->waiting_events_count++; } else if (!intf->event_msg_printed) { /* * There's too many things in the queue, discard this * message. */ dev_warn(intf->si_dev, "Event queue full, discarding incoming events\n"); intf->event_msg_printed = 1; } out: spin_unlock_irqrestore(&intf->events_lock, flags); return rv; } static int handle_bmc_rsp(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { struct ipmi_recv_msg *recv_msg; struct ipmi_system_interface_addr *smi_addr; recv_msg = msg->user_data; if (recv_msg == NULL) { dev_warn(intf->si_dev, "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); return 0; } recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; recv_msg->msgid = msg->msgid; smi_addr = ((struct ipmi_system_interface_addr *) &recv_msg->addr); smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; smi_addr->channel = IPMI_BMC_CHANNEL; smi_addr->lun = msg->rsp[0] & 3; recv_msg->msg.netfn = msg->rsp[0] >> 2; recv_msg->msg.cmd = msg->rsp[1]; memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg.data_len = msg->rsp_size - 2; deliver_local_response(intf, recv_msg); return 0; } /* * Handle a received message. Return 1 if the message should be requeued, * 0 if the message should be freed, or -1 if the message should not * be freed or requeued. */ static int handle_one_recv_msg(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { int requeue = 0; int chan; unsigned char cc; bool is_cmd = !((msg->rsp[0] >> 2) & 1); dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp); if (msg->rsp_size < 2) { /* Message is too small to be correct. */ dev_warn(intf->si_dev, "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); return_unspecified: /* Generate an error response for the message. */ msg->rsp[0] = msg->data[0] | (1 << 2); msg->rsp[1] = msg->data[1]; msg->rsp[2] = IPMI_ERR_UNSPECIFIED; msg->rsp_size = 3; } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { /* commands must have at least 4 bytes, responses 5. 
*/ if (is_cmd && (msg->rsp_size < 4)) { ipmi_inc_stat(intf, invalid_commands); goto out; } if (!is_cmd && (msg->rsp_size < 5)) { ipmi_inc_stat(intf, invalid_ipmb_responses); /* Construct a valid error response. */ msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ msg->rsp[0] |= (1 << 2); /* Make it a response */ msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ msg->rsp[1] = msg->data[1]; /* Addr */ msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ msg->rsp[3] = msg->data[3]; /* Cmd */ msg->rsp[4] = IPMI_ERR_UNSPECIFIED; msg->rsp_size = 5; } } else if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) && (msg->user_data == NULL)) { if (intf->in_shutdown) goto out; /* * This is the local response to a command send, start * the timer for these. The user_data will not be * NULL if this is a response send, and we will let * response sends just go through. */ /* * Check for errors, if we get certain errors (ones * that mean basically we can try again later), we * ignore them and start the timer. Otherwise we * report the error immediately. */ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) && (msg->rsp[2] != IPMI_BUS_ERR) && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { int ch = msg->rsp[3] & 0xf; struct ipmi_channel *chans; /* Got an error sending the message, handle it. */ chans = READ_ONCE(intf->channel_list)->c; if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) ipmi_inc_stat(intf, sent_lan_command_errs); else ipmi_inc_stat(intf, sent_ipmb_command_errs); intf_err_seq(intf, msg->msgid, msg->rsp[2]); } else /* The message was sent, start the timer. */ intf_start_seq_timer(intf, msg->msgid); requeue = 0; goto out; } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) || (msg->rsp[1] != msg->data[1])) { /* * The NetFN and Command in the response is not even * marginally correct. */ dev_warn(intf->si_dev, "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp[0] >> 2, msg->rsp[1]); goto return_unspecified; } if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { if ((msg->data[0] >> 2) & 1) { /* It's a response to a sent response. */ chan = 0; cc = msg->rsp[4]; goto process_response_response; } if (is_cmd) requeue = handle_ipmb_direct_rcv_cmd(intf, msg); else requeue = handle_ipmb_direct_rcv_rsp(intf, msg); } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_SEND_MSG_CMD) && (msg->user_data != NULL)) { /* * It's a response to a response we sent. For this we * deliver a send message response to the user. */ struct ipmi_recv_msg *recv_msg; chan = msg->data[2] & 0x0f; if (chan >= IPMI_MAX_CHANNELS) /* Invalid channel number */ goto out; cc = msg->rsp[2]; process_response_response: recv_msg = msg->user_data; requeue = 0; if (!recv_msg) goto out; recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; recv_msg->msg.data = recv_msg->msg_data; recv_msg->msg_data[0] = cc; recv_msg->msg.data_len = 1; deliver_local_response(intf, recv_msg); } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { struct ipmi_channel *chans; /* It's from the receive queue. 
*/ chan = msg->rsp[3] & 0xf; if (chan >= IPMI_MAX_CHANNELS) { /* Invalid channel number */ requeue = 0; goto out; } /* * We need to make sure the channels have been initialized. * The channel_handler routine will set the "curr_channel" * equal to or greater than IPMI_MAX_CHANNELS when all the * channels for this interface have been initialized. */ if (!intf->channels_ready) { requeue = 0; /* Throw the message away */ goto out; } chans = READ_ONCE(intf->channel_list)->c; switch (chans[chan].medium) { case IPMI_CHANNEL_MEDIUM_IPMB: if (msg->rsp[4] & 0x04) { /* * It's a response, so find the * requesting message and send it up. */ requeue = handle_ipmb_get_msg_rsp(intf, msg); } else { /* * It's a command to the SMS from some other * entity. Handle that. */ requeue = handle_ipmb_get_msg_cmd(intf, msg); } break; case IPMI_CHANNEL_MEDIUM_8023LAN: case IPMI_CHANNEL_MEDIUM_ASYNC: if (msg->rsp[6] & 0x04) { /* * It's a response, so find the * requesting message and send it up. */ requeue = handle_lan_get_msg_rsp(intf, msg); } else { /* * It's a command to the SMS from some other * entity. Handle that. */ requeue = handle_lan_get_msg_cmd(intf, msg); } break; default: /* Check for OEM Channels. Clients had better register for these commands. */ if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) && (chans[chan].medium <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { requeue = handle_oem_get_msg_cmd(intf, msg); } else { /* * We don't handle the channel type, so just * free the message. */ requeue = 0; } } } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { /* It's an asynchronous event. */ requeue = handle_read_event_rsp(intf, msg); } else { /* It's a response from the local BMC. */ requeue = handle_bmc_rsp(intf, msg); } out: return requeue; } /* * If there are messages in the queue or pretimeouts, handle them. */ static void handle_new_recv_msgs(struct ipmi_smi *intf) { struct ipmi_smi_msg *smi_msg; unsigned long flags = 0; int rv; int run_to_completion = intf->run_to_completion; /* See if any waiting messages need to be processed. */ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); while (!list_empty(&intf->waiting_rcv_msgs)) { smi_msg = list_entry(intf->waiting_rcv_msgs.next, struct ipmi_smi_msg, link); list_del(&smi_msg->link); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); rv = handle_one_recv_msg(intf, smi_msg); if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); if (rv > 0) { /* * To preserve message order, quit if we * can't handle a message. Add the message * back at the head, this is safe because this * tasklet is the only thing that pulls the * messages. */ list_add(&smi_msg->link, &intf->waiting_rcv_msgs); break; } else { if (rv == 0) /* Message handled */ ipmi_free_smi_msg(smi_msg); /* If rv < 0, fatal error, del but don't free. */ } } if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); /* * If the pretimout count is non-zero, decrement one from it and * deliver pretimeouts to all the users. 
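 *
 * atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)
 * decrements the counter and returns true only if it was not already
 * zero, so concurrent tasklet runs can neither drive it negative nor
 * deliver the same pretimeout twice.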
*/ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { struct ipmi_user *user; int index; index = srcu_read_lock(&intf->users_srcu); list_for_each_entry_rcu(user, &intf->users, link) { if (user->handler->ipmi_watchdog_pretimeout) user->handler->ipmi_watchdog_pretimeout( user->handler_data); } srcu_read_unlock(&intf->users_srcu, index); } } static void smi_recv_tasklet(struct tasklet_struct *t) { unsigned long flags = 0; /* keep us warning-free. */ struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); int run_to_completion = intf->run_to_completion; struct ipmi_smi_msg *newmsg = NULL; /* * Start the next message if available. * * Do this here, not in the actual receiver, because we may deadlock * because the lower layer is allowed to hold locks while calling * message delivery. */ rcu_read_lock(); if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { struct list_head *entry = NULL; /* Pick the high priority queue first. */ if (!list_empty(&intf->hp_xmit_msgs)) entry = intf->hp_xmit_msgs.next; else if (!list_empty(&intf->xmit_msgs)) entry = intf->xmit_msgs.next; if (entry) { list_del(entry); newmsg = list_entry(entry, struct ipmi_smi_msg, link); intf->curr_msg = newmsg; } } if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (newmsg) intf->handlers->sender(intf->send_info, newmsg); rcu_read_unlock(); handle_new_recv_msgs(intf); } /* Handle a new message from the lower layer. */ void ipmi_smi_msg_received(struct ipmi_smi *intf, struct ipmi_smi_msg *msg) { unsigned long flags = 0; /* keep us warning-free. */ int run_to_completion = intf->run_to_completion; /* * To preserve message order, we keep a queue and deliver from * a tasklet. */ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); list_add_tail(&msg->link, &intf->waiting_rcv_msgs); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); /* * We can get an asynchronous event or receive message in addition * to commands we send. */ if (msg == intf->curr_msg) intf->curr_msg = NULL; if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (run_to_completion) smi_recv_tasklet(&intf->recv_tasklet); else tasklet_schedule(&intf->recv_tasklet); } EXPORT_SYMBOL(ipmi_smi_msg_received); void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) { if (intf->in_shutdown) return; atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); tasklet_schedule(&intf->recv_tasklet); } EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); static struct ipmi_smi_msg * smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, unsigned char seq, long seqid) { struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); if (!smi_msg) /* * If we can't allocate the message, then just return, we * get 4 retries, so this should be ok. 
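 *
 * The msgid written a few lines below via STORE_SEQ_IN_MSGID() is what
 * lets the eventual completion find the right seq_table slot again: the
 * slot index and the per-slot generation counter (seqid) are packed
 * into the one long that travels with the message, conceptually
 *
 *     msgid = (slot << SHIFT) | (seqid & MASK);
 *
 * where SHIFT/MASK stand in for the driver's real constants, and the
 * receive path unpacks them with GET_SEQ_FROM_MSGID() and
 * GET_SEQIDX_FROM_MSGID() before matching against the table entry.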
*/ return NULL; memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); smi_msg->data_size = recv_msg->msg.data_len; smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); dev_dbg(intf->si_dev, "Resend: %*ph\n", smi_msg->data_size, smi_msg->data); return smi_msg; } static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, struct list_head *timeouts, unsigned long timeout_period, int slot, unsigned long *flags, bool *need_timer) { struct ipmi_recv_msg *msg; if (intf->in_shutdown) return; if (!ent->inuse) return; if (timeout_period < ent->timeout) { ent->timeout -= timeout_period; *need_timer = true; return; } if (ent->retries_left == 0) { /* The message has used all its retries. */ ent->inuse = 0; smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); msg = ent->recv_msg; list_add_tail(&msg->link, timeouts); if (ent->broadcast) ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); else if (is_lan_addr(&ent->recv_msg->addr)) ipmi_inc_stat(intf, timed_out_lan_commands); else ipmi_inc_stat(intf, timed_out_ipmb_commands); } else { struct ipmi_smi_msg *smi_msg; /* More retries, send again. */ *need_timer = true; /* * Start with the max timer, set to normal timer after * the message is sent. */ ent->timeout = MAX_MSG_TIMEOUT; ent->retries_left--; smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, ent->seqid); if (!smi_msg) { if (is_lan_addr(&ent->recv_msg->addr)) ipmi_inc_stat(intf, dropped_rexmit_lan_commands); else ipmi_inc_stat(intf, dropped_rexmit_ipmb_commands); return; } spin_unlock_irqrestore(&intf->seq_lock, *flags); /* * Send the new message. We send with a zero * priority. It timed out, I doubt time is that * critical now, and high priority messages are really * only for messages to the local MC, which don't get * resent. */ if (intf->handlers) { if (is_lan_addr(&ent->recv_msg->addr)) ipmi_inc_stat(intf, retransmitted_lan_commands); else ipmi_inc_stat(intf, retransmitted_ipmb_commands); smi_send(intf, intf->handlers, smi_msg, 0); } else ipmi_free_smi_msg(smi_msg); spin_lock_irqsave(&intf->seq_lock, *flags); } } static bool ipmi_timeout_handler(struct ipmi_smi *intf, unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; unsigned long flags; int i; bool need_timer = false; if (!intf->bmc_registered) { kref_get(&intf->refcount); if (!schedule_work(&intf->bmc_reg_work)) { kref_put(&intf->refcount, intf_free); need_timer = true; } } /* * Go through the seq table and find any messages that * have timed out, putting them in the timeouts * list. */ INIT_LIST_HEAD(&timeouts); spin_lock_irqsave(&intf->seq_lock, flags); if (intf->ipmb_maintenance_mode_timeout) { if (intf->ipmb_maintenance_mode_timeout <= timeout_period) intf->ipmb_maintenance_mode_timeout = 0; else intf->ipmb_maintenance_mode_timeout -= timeout_period; } for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) check_msg_timeout(intf, &intf->seq_table[i], &timeouts, timeout_period, i, &flags, &need_timer); spin_unlock_irqrestore(&intf->seq_lock, flags); list_for_each_entry_safe(msg, msg2, &timeouts, link) deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); /* * Maintenance mode handling. Check the timeout * optimistically before we claim the lock. It may * mean a timeout gets missed occasionally, but that * only means the timeout gets extended by one period * in that case. No big deal, and it avoids the lock * most of the time. 
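 *
 * This is the usual check-lock-recheck shape: the unlocked test only
 * exists to skip the lock in the common idle case, and the test is
 * repeated under the lock before anything is modified.  Sketch of the
 * pattern, with the same field and lock as the code just below:
 *
 *     if (timeout > 0) {                      // cheap, racy pre-check
 *             spin_lock_irqsave(&lock, flags);
 *             if (timeout > 0) {              // authoritative re-check
 *                     // adjust the timeout, possibly drop out of
 *                     // maintenance mode
 *             }
 *             spin_unlock_irqrestore(&lock, flags);
 *     }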
*/ if (intf->auto_maintenance_timeout > 0) { spin_lock_irqsave(&intf->maintenance_mode_lock, flags); if (intf->auto_maintenance_timeout > 0) { intf->auto_maintenance_timeout -= timeout_period; if (!intf->maintenance_mode && (intf->auto_maintenance_timeout <= 0)) { intf->maintenance_mode_enable = false; maintenance_mode_update(intf); } } spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); } tasklet_schedule(&intf->recv_tasklet); return need_timer; } static void ipmi_request_event(struct ipmi_smi *intf) { /* No event requests when in maintenance mode. */ if (intf->maintenance_mode_enable) return; if (!intf->in_shutdown) intf->handlers->request_events(intf->send_info); } static struct timer_list ipmi_timer; static atomic_t stop_operation; static void ipmi_timeout(struct timer_list *unused) { struct ipmi_smi *intf; bool need_timer = false; int index; if (atomic_read(&stop_operation)) return; index = srcu_read_lock(&ipmi_interfaces_srcu); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (atomic_read(&intf->event_waiters)) { intf->ticks_to_req_ev--; if (intf->ticks_to_req_ev == 0) { ipmi_request_event(intf); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; } need_timer = true; } need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); } srcu_read_unlock(&ipmi_interfaces_srcu, index); if (need_timer) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } static void need_waiter(struct ipmi_smi *intf) { /* Racy, but worst case we start the timer twice. */ if (!timer_pending(&ipmi_timer)) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); static void free_smi_msg(struct ipmi_smi_msg *msg) { atomic_dec(&smi_msg_inuse_count); /* Try to keep as much stuff out of the panic path as possible. */ if (!oops_in_progress) kfree(msg); } struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) { struct ipmi_smi_msg *rv; rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); if (rv) { rv->done = free_smi_msg; rv->user_data = NULL; rv->type = IPMI_SMI_MSG_TYPE_NORMAL; atomic_inc(&smi_msg_inuse_count); } return rv; } EXPORT_SYMBOL(ipmi_alloc_smi_msg); static void free_recv_msg(struct ipmi_recv_msg *msg) { atomic_dec(&recv_msg_inuse_count); /* Try to keep as much stuff out of the panic path as possible. */ if (!oops_in_progress) kfree(msg); } static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) { struct ipmi_recv_msg *rv; rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); if (rv) { rv->user = NULL; rv->done = free_recv_msg; atomic_inc(&recv_msg_inuse_count); } return rv; } void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { if (msg->user && !oops_in_progress) kref_put(&msg->user->refcount, free_user); msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); static atomic_t panic_done_count = ATOMIC_INIT(0); static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) { atomic_dec(&panic_done_count); } static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) { atomic_dec(&panic_done_count); } /* * Inside a panic, send a message and wait for a response. 
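 *
 * Because the kernel may be in an arbitrary state at this point,
 * nothing is allocated: the smi/recv message structures live on the
 * stack, their "done" handlers only decrement panic_done_count, and
 * completion is driven by polling the interface rather than by
 * interrupts or the scheduler.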
*/ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, struct ipmi_addr *addr, struct kernel_ipmi_msg *msg) { struct ipmi_smi_msg smi_msg; struct ipmi_recv_msg recv_msg; int rv; smi_msg.done = dummy_smi_done_handler; recv_msg.done = dummy_recv_done_handler; atomic_add(2, &panic_done_count); rv = i_ipmi_request(NULL, intf, addr, 0, msg, intf, &smi_msg, &recv_msg, 0, intf->addrinfo[0].address, intf->addrinfo[0].lun, 0, 1); /* Don't retry, and don't wait. */ if (rv) atomic_sub(2, &panic_done_count); else if (intf->handlers->flush_messages) intf->handlers->flush_messages(intf->send_info); while (atomic_read(&panic_done_count) != 0) ipmi_poll(intf); } static void event_receiver_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { /* A get event receiver command, save it. */ intf->event_receiver = msg->msg.data[1]; intf->event_receiver_lun = msg->msg.data[2] & 0x3; } } static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { /* * A get device id command, save if we are an event * receiver or generator. */ intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; } } static void send_panic_events(struct ipmi_smi *intf, char *str) { struct kernel_ipmi_msg msg; unsigned char data[16]; struct ipmi_system_interface_addr *si; struct ipmi_addr addr; char *p = str; struct ipmi_ipmb_addr *ipmb; int j; if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) return; si = (struct ipmi_system_interface_addr *) &addr; si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si->channel = IPMI_BMC_CHANNEL; si->lun = 0; /* Fill in an event telling that we have failed. */ msg.netfn = 0x04; /* Sensor or Event. */ msg.cmd = 2; /* Platform event command. */ msg.data = data; msg.data_len = 8; data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ data[1] = 0x03; /* This is for IPMI 1.0. */ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ /* * Put a few breadcrumbs in. Hopefully later we can add more things * to make the panic events more useful. */ if (str) { data[3] = str[0]; data[6] = str[1]; data[7] = str[2]; } /* Send the event announcing the panic. */ ipmi_panic_request_and_wait(intf, &addr, &msg); /* * On every interface, dump a bunch of OEM event holding the * string. */ if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) return; /* * intf_num is used as an marker to tell if the * interface is valid. Thus we need a read barrier to * make sure data fetched before checking intf_num * won't be used. */ smp_rmb(); /* * First job here is to figure out where to send the * OEM events. There's no way in IPMI to send OEM * events using an event send command, so we have to * find the SEL to put them in and stick them in * there. */ /* Get capabilities from the get device id. */ intf->local_sel_device = 0; intf->local_event_generator = 0; intf->event_receiver = 0; /* Request the device info from the local MC. 
*/ msg.netfn = IPMI_NETFN_APP_REQUEST; msg.cmd = IPMI_GET_DEVICE_ID_CMD; msg.data = NULL; msg.data_len = 0; intf->null_user_handler = device_id_fetcher; ipmi_panic_request_and_wait(intf, &addr, &msg); if (intf->local_event_generator) { /* Request the event receiver from the local MC. */ msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; msg.data = NULL; msg.data_len = 0; intf->null_user_handler = event_receiver_fetcher; ipmi_panic_request_and_wait(intf, &addr, &msg); } intf->null_user_handler = NULL; /* * Validate the event receiver. The low bit must not * be 1 (it must be a valid IPMB address), it cannot * be zero, and it must not be my address. */ if (((intf->event_receiver & 1) == 0) && (intf->event_receiver != 0) && (intf->event_receiver != intf->addrinfo[0].address)) { /* * The event receiver is valid, send an IPMB * message. */ ipmb = (struct ipmi_ipmb_addr *) &addr; ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; ipmb->channel = 0; /* FIXME - is this right? */ ipmb->lun = intf->event_receiver_lun; ipmb->slave_addr = intf->event_receiver; } else if (intf->local_sel_device) { /* * The event receiver was not valid (or was * me), but I am an SEL device, just dump it * in my SEL. */ si = (struct ipmi_system_interface_addr *) &addr; si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si->channel = IPMI_BMC_CHANNEL; si->lun = 0; } else return; /* No where to send the event. */ msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; msg.data = data; msg.data_len = 16; j = 0; while (*p) { int size = strlen(p); if (size > 11) size = 11; data[0] = 0; data[1] = 0; data[2] = 0xf0; /* OEM event without timestamp. */ data[3] = intf->addrinfo[0].address; data[4] = j++; /* sequence # */ /* * Always give 11 bytes, so strncpy will fill * it with zeroes for me. */ strncpy(data+5, p, 11); p += size; ipmi_panic_request_and_wait(intf, &addr, &msg); } } static int has_panicked; static int panic_event(struct notifier_block *this, unsigned long event, void *ptr) { struct ipmi_smi *intf; struct ipmi_user *user; if (has_panicked) return NOTIFY_DONE; has_panicked = 1; /* For every registered interface, set it to run to completion. */ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { if (!intf->handlers || intf->intf_num == -1) /* Interface is not ready. */ continue; if (!intf->handlers->poll) continue; /* * If we were interrupted while locking xmit_msgs_lock or * waiting_rcv_msgs_lock, the corresponding list may be * corrupted. In this case, drop items on the list for * the safety. */ if (!spin_trylock(&intf->xmit_msgs_lock)) { INIT_LIST_HEAD(&intf->xmit_msgs); INIT_LIST_HEAD(&intf->hp_xmit_msgs); } else spin_unlock(&intf->xmit_msgs_lock); if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) INIT_LIST_HEAD(&intf->waiting_rcv_msgs); else spin_unlock(&intf->waiting_rcv_msgs_lock); intf->run_to_completion = 1; if (intf->handlers->set_run_to_completion) intf->handlers->set_run_to_completion(intf->send_info, 1); list_for_each_entry_rcu(user, &intf->users, link) { if (user->handler->ipmi_panic_handler) user->handler->ipmi_panic_handler( user->handler_data); } send_panic_events(intf, ptr); } return NOTIFY_DONE; } /* Must be called with ipmi_interfaces_mutex held. 
*/ static int ipmi_register_driver(void) { int rv; if (drvregistered) return 0; rv = driver_register(&ipmidriver.driver); if (rv) pr_err("Could not register IPMI driver\n"); else drvregistered = true; return rv; } static struct notifier_block panic_block = { .notifier_call = panic_event, .next = NULL, .priority = 200 /* priority: INT_MAX >= x >= 0 */ }; static int ipmi_init_msghandler(void) { int rv; mutex_lock(&ipmi_interfaces_mutex); rv = ipmi_register_driver(); if (rv) goto out; if (initialized) goto out; rv = init_srcu_struct(&ipmi_interfaces_srcu); if (rv) goto out; remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); if (!remove_work_wq) { pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); rv = -ENOMEM; goto out_wq; } timer_setup(&ipmi_timer, ipmi_timeout, 0); mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); atomic_notifier_chain_register(&panic_notifier_list, &panic_block); initialized = true; out_wq: if (rv) cleanup_srcu_struct(&ipmi_interfaces_srcu); out: mutex_unlock(&ipmi_interfaces_mutex); return rv; } static int __init ipmi_init_msghandler_mod(void) { int rv; pr_info("version " IPMI_DRIVER_VERSION "\n"); mutex_lock(&ipmi_interfaces_mutex); rv = ipmi_register_driver(); mutex_unlock(&ipmi_interfaces_mutex); return rv; } static void __exit cleanup_ipmi(void) { int count; if (initialized) { destroy_workqueue(remove_work_wq); atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); /* * This can't be called if any interfaces exist, so no worry * about shutting down the interfaces. */ /* * Tell the timer to stop, then wait for it to stop. This * avoids problems with race conditions removing the timer * here. */ atomic_set(&stop_operation, 1); del_timer_sync(&ipmi_timer); initialized = false; /* Check for buffer leaks. */ count = atomic_read(&smi_msg_inuse_count); if (count != 0) pr_warn("SMI message count %d at exit\n", count); count = atomic_read(&recv_msg_inuse_count); if (count != 0) pr_warn("recv message count %d at exit\n", count); cleanup_srcu_struct(&ipmi_interfaces_srcu); } if (drvregistered) driver_unregister(&ipmidriver.driver); } module_exit(cleanup_ipmi); module_init(ipmi_init_msghandler_mod); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <[email protected]>"); MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); MODULE_VERSION(IPMI_DRIVER_VERSION); MODULE_SOFTDEP("post: ipmi_devintf");
linux-master
drivers/char/ipmi/ipmi_msghandler.c
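Editor's note: the panic path in the file above chops the panic string into 16-byte OEM SEL records, 11 payload bytes at a time, each tagged with the local controller address and a running sequence number. The userspace sketch below mirrors only that chunking so the record layout is easy to see; it is an illustration, not kernel code, and the fill_sel_record() helper and the hard-coded 0x20 address are assumptions for the example.

/* Standalone sketch of the 16-byte OEM SEL record chunking used by the
 * panic-event path above; fill_sel_record() is a hypothetical helper. */
#include <stdio.h>
#include <string.h>

static void fill_sel_record(unsigned char rec[16], unsigned char my_addr,
			    unsigned char seq, const char *chunk)
{
	memset(rec, 0, 16);
	rec[2] = 0xf0;                       /* OEM event without timestamp */
	rec[3] = my_addr;                    /* reporting controller address */
	rec[4] = seq;                        /* sequence number of this chunk */
	strncpy((char *)rec + 5, chunk, 11); /* up to 11 bytes of panic text */
}

int main(void)
{
	const char *p = "Kernel panic - not syncing: example";
	unsigned char rec[16], my_addr = 0x20; /* assumed local address */
	unsigned char seq = 0;

	while (*p) {
		size_t size = strlen(p);

		if (size > 11)
			size = 11;
		fill_sel_record(rec, my_addr, seq++, p);
		p += size;

		for (int i = 0; i < 16; i++)
			printf("%02x ", rec[i]);
		printf("\n");
	}
	return 0;
}

Each iteration emits one SEL entry, so a long panic string simply becomes a numbered series of records that can be reassembled from the sequence bytes.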
// SPDX-License-Identifier: GPL-2.0+ /* * PowerNV OPAL IPMI driver * * Copyright 2014 IBM Corp. */ #define pr_fmt(fmt) "ipmi-powernv: " fmt #include <linux/ipmi_smi.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/interrupt.h> #include <asm/opal.h> struct ipmi_smi_powernv { u64 interface_id; struct ipmi_smi *intf; unsigned int irq; /** * We assume that there can only be one outstanding request, so * keep the pending message in cur_msg. We protect this from concurrent * updates through send & recv calls, (and consequently opal_msg, which * is in-use when cur_msg is set) with msg_lock */ spinlock_t msg_lock; struct ipmi_smi_msg *cur_msg; struct opal_ipmi_msg *opal_msg; }; static int ipmi_powernv_start_processing(void *send_info, struct ipmi_smi *intf) { struct ipmi_smi_powernv *smi = send_info; smi->intf = intf; return 0; } static void send_error_reply(struct ipmi_smi_powernv *smi, struct ipmi_smi_msg *msg, u8 completion_code) { msg->rsp[0] = msg->data[0] | 0x4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = completion_code; msg->rsp_size = 3; ipmi_smi_msg_received(smi->intf, msg); } static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) { struct ipmi_smi_powernv *smi = send_info; struct opal_ipmi_msg *opal_msg; unsigned long flags; int comp, rc; size_t size; /* ensure data_len will fit in the opal_ipmi_msg buffer... */ if (msg->data_size > IPMI_MAX_MSG_LENGTH) { comp = IPMI_REQ_LEN_EXCEEDED_ERR; goto err; } /* ... and that we at least have netfn and cmd bytes */ if (msg->data_size < 2) { comp = IPMI_REQ_LEN_INVALID_ERR; goto err; } spin_lock_irqsave(&smi->msg_lock, flags); if (smi->cur_msg) { comp = IPMI_NODE_BUSY_ERR; goto err_unlock; } /* format our data for the OPAL API */ opal_msg = smi->opal_msg; opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1; opal_msg->netfn = msg->data[0]; opal_msg->cmd = msg->data[1]; if (msg->data_size > 2) memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2); /* data_size already includes the netfn and cmd bytes */ size = sizeof(*opal_msg) + msg->data_size - 2; pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__, smi->interface_id, opal_msg, size); rc = opal_ipmi_send(smi->interface_id, opal_msg, size); pr_devel("%s: -> %d\n", __func__, rc); if (!rc) { smi->cur_msg = msg; spin_unlock_irqrestore(&smi->msg_lock, flags); return; } comp = IPMI_ERR_UNSPECIFIED; err_unlock: spin_unlock_irqrestore(&smi->msg_lock, flags); err: send_error_reply(smi, msg, comp); } static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) { struct opal_ipmi_msg *opal_msg; struct ipmi_smi_msg *msg; unsigned long flags; uint64_t size; int rc; pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__, smi->interface_id); spin_lock_irqsave(&smi->msg_lock, flags); if (!smi->cur_msg) { spin_unlock_irqrestore(&smi->msg_lock, flags); pr_warn("no current message?\n"); return 0; } msg = smi->cur_msg; opal_msg = smi->opal_msg; size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH); rc = opal_ipmi_recv(smi->interface_id, opal_msg, &size); size = be64_to_cpu(size); pr_devel("%s: -> %d (size %lld)\n", __func__, rc, rc == 0 ? 
size : 0); if (rc) { /* If came via the poll, and response was not yet ready */ if (rc == OPAL_EMPTY) { spin_unlock_irqrestore(&smi->msg_lock, flags); return 0; } smi->cur_msg = NULL; spin_unlock_irqrestore(&smi->msg_lock, flags); send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED); return 0; } if (size < sizeof(*opal_msg)) { spin_unlock_irqrestore(&smi->msg_lock, flags); pr_warn("unexpected IPMI message size %lld\n", size); return 0; } if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) { spin_unlock_irqrestore(&smi->msg_lock, flags); pr_warn("unexpected IPMI message format (version %d)\n", opal_msg->version); return 0; } msg->rsp[0] = opal_msg->netfn; msg->rsp[1] = opal_msg->cmd; if (size > sizeof(*opal_msg)) memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg)); msg->rsp_size = 2 + size - sizeof(*opal_msg); smi->cur_msg = NULL; spin_unlock_irqrestore(&smi->msg_lock, flags); ipmi_smi_msg_received(smi->intf, msg); return 0; } static void ipmi_powernv_request_events(void *send_info) { } static void ipmi_powernv_set_run_to_completion(void *send_info, bool run_to_completion) { } static void ipmi_powernv_poll(void *send_info) { struct ipmi_smi_powernv *smi = send_info; ipmi_powernv_recv(smi); } static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { .owner = THIS_MODULE, .start_processing = ipmi_powernv_start_processing, .sender = ipmi_powernv_send, .request_events = ipmi_powernv_request_events, .set_run_to_completion = ipmi_powernv_set_run_to_completion, .poll = ipmi_powernv_poll, }; static irqreturn_t ipmi_opal_event(int irq, void *data) { struct ipmi_smi_powernv *smi = data; ipmi_powernv_recv(smi); return IRQ_HANDLED; } static int ipmi_powernv_probe(struct platform_device *pdev) { struct ipmi_smi_powernv *ipmi; struct device *dev; u32 prop; int rc; if (!pdev || !pdev->dev.of_node) return -ENODEV; dev = &pdev->dev; ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL); if (!ipmi) return -ENOMEM; spin_lock_init(&ipmi->msg_lock); rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id", &prop); if (rc) { dev_warn(dev, "No interface ID property\n"); goto err_free; } ipmi->interface_id = prop; rc = of_property_read_u32(dev->of_node, "interrupts", &prop); if (rc) { dev_warn(dev, "No interrupts property\n"); goto err_free; } ipmi->irq = irq_of_parse_and_map(dev->of_node, 0); if (!ipmi->irq) { dev_info(dev, "Unable to map irq from device tree\n"); ipmi->irq = opal_event_request(prop); } rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH, "opal-ipmi", ipmi); if (rc) { dev_warn(dev, "Unable to request irq\n"); goto err_dispose; } ipmi->opal_msg = devm_kmalloc(dev, sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!ipmi->opal_msg) { rc = -ENOMEM; goto err_unregister; } rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0); if (rc) { dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc); goto err_free_msg; } dev_set_drvdata(dev, ipmi); return 0; err_free_msg: devm_kfree(dev, ipmi->opal_msg); err_unregister: free_irq(ipmi->irq, ipmi); err_dispose: irq_dispose_mapping(ipmi->irq); err_free: devm_kfree(dev, ipmi); return rc; } static int ipmi_powernv_remove(struct platform_device *pdev) { struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev); ipmi_unregister_smi(smi->intf); free_irq(smi->irq, smi); irq_dispose_mapping(smi->irq); return 0; } static const struct of_device_id ipmi_powernv_match[] = { { .compatible = "ibm,opal-ipmi" }, { }, }; static struct platform_driver powernv_ipmi_driver = { .driver = { .name = "ipmi-powernv", 
.of_match_table = ipmi_powernv_match, }, .probe = ipmi_powernv_probe, .remove = ipmi_powernv_remove, }; module_platform_driver(powernv_ipmi_driver); MODULE_DEVICE_TABLE(of, ipmi_powernv_match); MODULE_DESCRIPTION("powernv IPMI driver"); MODULE_AUTHOR("Jeremy Kerr <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/char/ipmi/ipmi_powernv.c
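Editor's note: send_error_reply() in the file above builds a three-byte local response by setting bit 2 in the request's first byte, echoing the command, and appending a completion code. A minimal sketch of that encoding, assuming the usual (netfn << 2) | lun packing of the first message byte, where OR-ing 0x4 turns an even request netfn into the odd response netfn:

/* Sketch of the 3-byte locally generated error reply; standalone, with the
 * standard Node Busy completion code used as the example. */
#include <stdio.h>

#define IPMI_NODE_BUSY_ERR 0xc0 /* standard IPMI completion code */

int main(void)
{
	/* request: netfn 0x06 (App), lun 0, cmd 0x01 (Get Device ID) */
	unsigned char data[2] = { (0x06 << 2) | 0x0, 0x01 };
	unsigned char rsp[3];

	rsp[0] = data[0] | 0x4;        /* request netfn + 1 => response netfn */
	rsp[1] = data[1];              /* echo the command */
	rsp[2] = IPMI_NODE_BUSY_ERR;   /* completion code */

	printf("rsp: %02x %02x %02x\n", rsp[0], rsp[1], rsp[2]);
	return 0;
}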
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_kcs_sm.c * * State machine for handling IPMI KCS interfaces. * * Author: MontaVista Software, Inc. * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002 MontaVista Software Inc. */ /* * This state machine is taken from the state machine in the IPMI spec, * pretty much verbatim. If you have questions about the states, see * that document. */ #define DEBUG /* So dev_dbg() is always available. */ #include <linux/kernel.h> /* For printk. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/jiffies.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" /* kcs_debug is a bit-field * KCS_DEBUG_ENABLE - turned on for now * KCS_DEBUG_MSG - commands and their responses * KCS_DEBUG_STATES - state machine */ #define KCS_DEBUG_STATES 4 #define KCS_DEBUG_MSG 2 #define KCS_DEBUG_ENABLE 1 static int kcs_debug; module_param(kcs_debug, int, 0644); MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); /* The states the KCS driver may be in. */ enum kcs_states { /* The KCS interface is currently doing nothing. */ KCS_IDLE, /* * We are starting an operation. The data is in the output * buffer, but nothing has been done to the interface yet. This * was added to the state machine in the spec to wait for the * initial IBF. */ KCS_START_OP, /* We have written a write cmd to the interface. */ KCS_WAIT_WRITE_START, /* We are writing bytes to the interface. */ KCS_WAIT_WRITE, /* * We have written the write end cmd to the interface, and * still need to write the last byte. */ KCS_WAIT_WRITE_END, /* We are waiting to read data from the interface. */ KCS_WAIT_READ, /* * State to transition to the error handler, this was added to * the state machine in the spec to be sure IBF was there. */ KCS_ERROR0, /* * First stage error handler, wait for the interface to * respond. */ KCS_ERROR1, /* * The abort cmd has been written, wait for the interface to * respond. */ KCS_ERROR2, /* * We wrote some data to the interface, wait for it to switch * to read mode. */ KCS_ERROR3, /* The hardware failed to follow the state machine. */ KCS_HOSED }; #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH #define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH /* Timeouts in microseconds. */ #define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC) #define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC) #define MAX_ERROR_RETRIES 10 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) struct si_sm_data { enum kcs_states state; struct si_sm_io *io; unsigned char write_data[MAX_KCS_WRITE_SIZE]; int write_pos; int write_count; int orig_write_count; unsigned char read_data[MAX_KCS_READ_SIZE]; int read_pos; int truncated; unsigned int error_retries; long ibf_timeout; long obf_timeout; unsigned long error0_timeout; }; static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, struct si_sm_io *io, enum kcs_states state) { kcs->state = state; kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; kcs->orig_write_count = 0; kcs->read_pos = 0; kcs->error_retries = 0; kcs->truncated = 0; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; /* Reserve 2 I/O bytes. 
*/ return 2; } static unsigned int init_kcs_data(struct si_sm_data *kcs, struct si_sm_io *io) { return init_kcs_data_with_state(kcs, io, KCS_IDLE); } static inline unsigned char read_status(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 1); } static inline unsigned char read_data(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 0); } static inline void write_cmd(struct si_sm_data *kcs, unsigned char data) { kcs->io->outputb(kcs->io, 1, data); } static inline void write_data(struct si_sm_data *kcs, unsigned char data) { kcs->io->outputb(kcs->io, 0, data); } /* Control codes. */ #define KCS_GET_STATUS_ABORT 0x60 #define KCS_WRITE_START 0x61 #define KCS_WRITE_END 0x62 #define KCS_READ_BYTE 0x68 /* Status bits. */ #define GET_STATUS_STATE(status) (((status) >> 6) & 0x03) #define KCS_IDLE_STATE 0 #define KCS_READ_STATE 1 #define KCS_WRITE_STATE 2 #define KCS_ERROR_STATE 3 #define GET_STATUS_ATN(status) ((status) & 0x04) #define GET_STATUS_IBF(status) ((status) & 0x02) #define GET_STATUS_OBF(status) ((status) & 0x01) static inline void write_next_byte(struct si_sm_data *kcs) { write_data(kcs, kcs->write_data[kcs->write_pos]); (kcs->write_pos)++; (kcs->write_count)--; } static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) { (kcs->error_retries)++; if (kcs->error_retries > MAX_ERROR_RETRIES) { if (kcs_debug & KCS_DEBUG_ENABLE) dev_dbg(kcs->io->dev, "ipmi_kcs_sm: kcs hosed: %s\n", reason); kcs->state = KCS_HOSED; } else { kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; kcs->state = KCS_ERROR0; } } static inline void read_next_byte(struct si_sm_data *kcs) { if (kcs->read_pos >= MAX_KCS_READ_SIZE) { /* Throw the data away and mark it truncated. */ read_data(kcs); kcs->truncated = 1; } else { kcs->read_data[kcs->read_pos] = read_data(kcs); (kcs->read_pos)++; } write_data(kcs, KCS_READ_BYTE); } static inline int check_ibf(struct si_sm_data *kcs, unsigned char status, long time) { if (GET_STATUS_IBF(status)) { kcs->ibf_timeout -= time; if (kcs->ibf_timeout < 0) { start_error_recovery(kcs, "IBF not ready in time"); kcs->ibf_timeout = IBF_RETRY_TIMEOUT; return 1; } return 0; } kcs->ibf_timeout = IBF_RETRY_TIMEOUT; return 1; } static inline int check_obf(struct si_sm_data *kcs, unsigned char status, long time) { if (!GET_STATUS_OBF(status)) { kcs->obf_timeout -= time; if (kcs->obf_timeout < 0) { kcs->obf_timeout = OBF_RETRY_TIMEOUT; start_error_recovery(kcs, "OBF not ready in time"); return 1; } return 0; } kcs->obf_timeout = OBF_RETRY_TIMEOUT; return 1; } static void clear_obf(struct si_sm_data *kcs, unsigned char status) { if (GET_STATUS_OBF(status)) read_data(kcs); } static void restart_kcs_transaction(struct si_sm_data *kcs) { kcs->write_count = kcs->orig_write_count; kcs->write_pos = 0; kcs->read_pos = 0; kcs->state = KCS_WAIT_WRITE_START; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; write_cmd(kcs, KCS_WRITE_START); } static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, unsigned int size) { unsigned int i; if (size < 2) return IPMI_REQ_LEN_INVALID_ERR; if (size > MAX_KCS_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; if (kcs->state != KCS_IDLE) { dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state); return IPMI_NOT_IN_MY_STATE_ERR; } if (kcs_debug & KCS_DEBUG_MSG) { dev_dbg(kcs->io->dev, "%s -", __func__); for (i = 0; i < size; i++) pr_cont(" %02x", data[i]); pr_cont("\n"); } kcs->error_retries = 0; memcpy(kcs->write_data, data, size); kcs->write_count = size; kcs->orig_write_count = 
size; kcs->write_pos = 0; kcs->read_pos = 0; kcs->state = KCS_START_OP; kcs->ibf_timeout = IBF_RETRY_TIMEOUT; kcs->obf_timeout = OBF_RETRY_TIMEOUT; return 0; } static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, unsigned int length) { if (length < kcs->read_pos) { kcs->read_pos = length; kcs->truncated = 1; } memcpy(data, kcs->read_data, kcs->read_pos); if ((length >= 3) && (kcs->read_pos < 3)) { /* Guarantee that we return at least 3 bytes, with an error in the third byte if it is too short. */ data[2] = IPMI_ERR_UNSPECIFIED; kcs->read_pos = 3; } if (kcs->truncated) { /* * Report a truncated error. We might overwrite * another error, but that's too bad, the user needs * to know it was truncated. */ data[2] = IPMI_ERR_MSG_TRUNCATED; kcs->truncated = 0; } return kcs->read_pos; } /* * This implements the state machine defined in the IPMI manual, see * that for details on how this works. Divide that flowchart into * sections delimited by "Wait for IBF" and this will become clear. */ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) { unsigned char status; unsigned char state; status = read_status(kcs); if (kcs_debug & KCS_DEBUG_STATES) dev_dbg(kcs->io->dev, "KCS: State = %d, %x\n", kcs->state, status); /* All states wait for ibf, so just do it here. */ if (!check_ibf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; /* Just about everything looks at the KCS state, so grab that, too. */ state = GET_STATUS_STATE(status); switch (kcs->state) { case KCS_IDLE: /* If there's and interrupt source, turn it off. */ clear_obf(kcs, status); if (GET_STATUS_ATN(status)) return SI_SM_ATTN; else return SI_SM_IDLE; case KCS_START_OP: if (state != KCS_IDLE_STATE) { start_error_recovery(kcs, "State machine not idle at start"); break; } clear_obf(kcs, status); write_cmd(kcs, KCS_WRITE_START); kcs->state = KCS_WAIT_WRITE_START; break; case KCS_WAIT_WRITE_START: if (state != KCS_WRITE_STATE) { start_error_recovery( kcs, "Not in write state at write start"); break; } read_data(kcs); if (kcs->write_count == 1) { write_cmd(kcs, KCS_WRITE_END); kcs->state = KCS_WAIT_WRITE_END; } else { write_next_byte(kcs); kcs->state = KCS_WAIT_WRITE; } break; case KCS_WAIT_WRITE: if (state != KCS_WRITE_STATE) { start_error_recovery(kcs, "Not in write state for write"); break; } clear_obf(kcs, status); if (kcs->write_count == 1) { write_cmd(kcs, KCS_WRITE_END); kcs->state = KCS_WAIT_WRITE_END; } else { write_next_byte(kcs); } break; case KCS_WAIT_WRITE_END: if (state != KCS_WRITE_STATE) { start_error_recovery(kcs, "Not in write state" " for write end"); break; } clear_obf(kcs, status); write_next_byte(kcs); kcs->state = KCS_WAIT_READ; break; case KCS_WAIT_READ: if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) { start_error_recovery( kcs, "Not in read or idle in read state"); break; } if (state == KCS_READ_STATE) { if (!check_obf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; read_next_byte(kcs); } else { /* * We don't implement this exactly like the state * machine in the spec. Some broken hardware * does not write the final dummy byte to the * read register. Thus obf will never go high * here. We just go straight to idle, and we * handle clearing out obf in idle state if it * happens to come in. 
*/ clear_obf(kcs, status); kcs->orig_write_count = 0; kcs->state = KCS_IDLE; return SI_SM_TRANSACTION_COMPLETE; } break; case KCS_ERROR0: clear_obf(kcs, status); status = read_status(kcs); if (GET_STATUS_OBF(status)) /* controller isn't responding */ if (time_before(jiffies, kcs->error0_timeout)) return SI_SM_CALL_WITH_TICK_DELAY; write_cmd(kcs, KCS_GET_STATUS_ABORT); kcs->state = KCS_ERROR1; break; case KCS_ERROR1: clear_obf(kcs, status); write_data(kcs, 0); kcs->state = KCS_ERROR2; break; case KCS_ERROR2: if (state != KCS_READ_STATE) { start_error_recovery(kcs, "Not in read state for error2"); break; } if (!check_obf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; clear_obf(kcs, status); write_data(kcs, KCS_READ_BYTE); kcs->state = KCS_ERROR3; break; case KCS_ERROR3: if (state != KCS_IDLE_STATE) { start_error_recovery(kcs, "Not in idle state for error3"); break; } if (!check_obf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; clear_obf(kcs, status); if (kcs->orig_write_count) { restart_kcs_transaction(kcs); } else { kcs->state = KCS_IDLE; return SI_SM_TRANSACTION_COMPLETE; } break; case KCS_HOSED: break; } if (kcs->state == KCS_HOSED) { init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0); return SI_SM_HOSED; } return SI_SM_CALL_WITHOUT_DELAY; } static int kcs_size(void) { return sizeof(struct si_sm_data); } static int kcs_detect(struct si_sm_data *kcs) { /* * It's impossible for the KCS status register to be all 1's, * (assuming a properly functioning, self-initialized BMC) * but that's what you get from reading a bogus address, so we * test that first. */ if (read_status(kcs) == 0xff) return 1; return 0; } static void kcs_cleanup(struct si_sm_data *kcs) { } const struct si_sm_handlers kcs_smi_handlers = { .init_data = init_kcs_data, .start_transaction = start_kcs_transaction, .get_result = get_kcs_result, .event = kcs_event, .detect = kcs_detect, .cleanup = kcs_cleanup, .size = kcs_size, };
linux-master
drivers/char/ipmi/ipmi_kcs_sm.c
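Editor's note: the KCS state machine above keys nearly every transition off the status register, with the interface state in bits 6-7 and the ATN/IBF/OBF flags in bits 2/1/0, exactly as the GET_STATUS_* macros encode. A standalone sketch that decodes a few made-up status bytes with those same macros:

/* Standalone decode of a KCS status byte using the bit layout of the
 * GET_STATUS_* macros in ipmi_kcs_sm.c; sample values are arbitrary. */
#include <stdio.h>

#define GET_STATUS_STATE(s) (((s) >> 6) & 0x03) /* 0=idle 1=read 2=write 3=error */
#define GET_STATUS_ATN(s)   ((s) & 0x04)
#define GET_STATUS_IBF(s)   ((s) & 0x02)
#define GET_STATUS_OBF(s)   ((s) & 0x01)

static const char *state_name[] = { "IDLE", "READ", "WRITE", "ERROR" };

int main(void)
{
	unsigned char samples[] = { 0x00, 0x41, 0x82, 0xc5 };

	for (unsigned i = 0; i < sizeof(samples); i++) {
		unsigned char s = samples[i];

		printf("status %02x: state=%s atn=%d ibf=%d obf=%d\n", s,
		       state_name[GET_STATUS_STATE(s)],
		       !!GET_STATUS_ATN(s), !!GET_STATUS_IBF(s),
		       !!GET_STATUS_OBF(s));
	}
	return 0;
}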
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_si_pci.c * * Handling for IPMI devices on the PCI bus. */ #define pr_fmt(fmt) "ipmi_pci: " fmt #include <linux/module.h> #include <linux/pci.h> #include "ipmi_si.h" static bool pci_registered; static bool si_trypci = true; module_param_named(trypci, si_trypci, bool, 0); MODULE_PARM_DESC(trypci, "Setting this to zero will disable the default scan of the interfaces identified via pci"); #define PCI_DEVICE_ID_HP_MMC 0x121A static int ipmi_pci_probe_regspacing(struct si_sm_io *io) { if (io->si_type == SI_KCS) { unsigned char status; int regspacing; io->regsize = DEFAULT_REGSIZE; io->regshift = 0; /* detect 1, 4, 16byte spacing */ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { io->regspacing = regspacing; if (io->io_setup(io)) { dev_err(io->dev, "Could not setup I/O space\n"); return DEFAULT_REGSPACING; } /* write invalid cmd */ io->outputb(io, 1, 0x10); /* read status back */ status = io->inputb(io, 1); io->io_cleanup(io); if (status) return regspacing; regspacing *= 4; } } return DEFAULT_REGSPACING; } static struct pci_device_id ipmi_pci_blacklist[] = { /* * This is a "Virtual IPMI device", whatever that is. It appears * as a KCS device by the class, but it is not one. */ { PCI_VDEVICE(REALTEK, 0x816c) }, { 0, } }; static int ipmi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int rv; struct si_sm_io io; if (pci_match_id(ipmi_pci_blacklist, pdev)) return -ENODEV; memset(&io, 0, sizeof(io)); io.addr_source = SI_PCI; dev_info(&pdev->dev, "probing via PCI"); switch (pdev->class) { case PCI_CLASS_SERIAL_IPMI_SMIC: io.si_type = SI_SMIC; break; case PCI_CLASS_SERIAL_IPMI_KCS: io.si_type = SI_KCS; break; case PCI_CLASS_SERIAL_IPMI_BT: io.si_type = SI_BT; break; default: dev_info(&pdev->dev, "Unknown IPMI class: %x\n", pdev->class); return -ENOMEM; } rv = pcim_enable_device(pdev); if (rv) { dev_err(&pdev->dev, "couldn't enable PCI device\n"); return rv; } if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { io.addr_space = IPMI_IO_ADDR_SPACE; io.io_setup = ipmi_si_port_setup; } else { io.addr_space = IPMI_MEM_ADDR_SPACE; io.io_setup = ipmi_si_mem_setup; } io.addr_data = pci_resource_start(pdev, 0); io.dev = &pdev->dev; io.regspacing = ipmi_pci_probe_regspacing(&io); io.regsize = DEFAULT_REGSIZE; io.regshift = 0; io.irq = pdev->irq; if (io.irq) io.irq_setup = ipmi_std_irq_setup; dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", &pdev->resource[0], io.regsize, io.regspacing, io.irq); return ipmi_si_add_smi(&io); } static void ipmi_pci_remove(struct pci_dev *pdev) { ipmi_si_remove_by_dev(&pdev->dev); } static const struct pci_device_id ipmi_pci_devices[] = { { PCI_VDEVICE(HP, PCI_DEVICE_ID_HP_MMC) }, { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_SMIC, ~0) }, { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_KCS, ~0) }, { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_BT, ~0) }, { 0, } }; MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); static struct pci_driver ipmi_pci_driver = { .name = SI_DEVICE_NAME, .id_table = ipmi_pci_devices, .probe = ipmi_pci_probe, .remove = ipmi_pci_remove, }; void ipmi_si_pci_init(void) { if (si_trypci) { int rv = pci_register_driver(&ipmi_pci_driver); if (rv) pr_err("Unable to register PCI driver: %d\n", rv); else pci_registered = true; } } void ipmi_si_pci_shutdown(void) { if (pci_registered) pci_unregister_driver(&ipmi_pci_driver); }
linux-master
drivers/char/ipmi/ipmi_si_pci.c
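Editor's note: ipmi_pci_probe_regspacing() above walks candidate register spacings (1, 4, 16 bytes), writes an invalid command at each spacing and accepts the first spacing for which the status register reads back non-zero. The sketch below reproduces that probe loop against a mocked register file; fake_outputb()/fake_inputb() and the simulated 4-byte spacing are assumptions for the example, not hardware behaviour.

/* Standalone sketch of the KCS register-spacing probe loop; the mocked I/O
 * pretends the device decodes its registers at a 4-byte spacing. */
#include <stdio.h>

#define DEFAULT_REGSPACING 1
#define REAL_SPACING       4 /* what the fake hardware actually uses */

static unsigned char fake_status;

static void fake_outputb(int spacing, int reg, unsigned char val)
{
	/* Only a write that lands on the real command register "sticks". */
	if (spacing == REAL_SPACING && reg == 1)
		fake_status = 0xc0; /* error state: invalid command seen */
}

static unsigned char fake_inputb(int spacing, int reg)
{
	return (spacing == REAL_SPACING && reg == 1) ? fake_status : 0;
}

int main(void)
{
	int regspacing;

	for (regspacing = DEFAULT_REGSPACING; regspacing <= 16; regspacing *= 4) {
		fake_status = 0;
		fake_outputb(regspacing, 1, 0x10); /* write invalid cmd */
		if (fake_inputb(regspacing, 1)) {  /* read status back */
			printf("detected regspacing %d\n", regspacing);
			return 0;
		}
	}
	printf("fell back to default spacing %d\n", DEFAULT_REGSPACING);
	return 0;
}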
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO driver support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <[email protected]> */ #include <linux/init.h> #include <linux/module.h> #include <linux/rio.h> #include <linux/rio_ids.h> #include <linux/rio_drv.h> #include "rio.h" /** * rio_match_device - Tell if a RIO device has a matching RIO device id structure * @id: the RIO device id structure to match against * @rdev: the RIO device structure to match against * * Used from driver probe and bus matching to check whether a RIO device * matches a device id structure provided by a RIO driver. Returns the * matching &struct rio_device_id or %NULL if there is no match. */ static const struct rio_device_id *rio_match_device(const struct rio_device_id *id, const struct rio_dev *rdev) { while (id->vid || id->asm_vid) { if (((id->vid == RIO_ANY_ID) || (id->vid == rdev->vid)) && ((id->did == RIO_ANY_ID) || (id->did == rdev->did)) && ((id->asm_vid == RIO_ANY_ID) || (id->asm_vid == rdev->asm_vid)) && ((id->asm_did == RIO_ANY_ID) || (id->asm_did == rdev->asm_did))) return id; id++; } return NULL; } /** * rio_dev_get - Increments the reference count of the RIO device structure * * @rdev: RIO device being referenced * * Each live reference to a device should be refcounted. * * Drivers for RIO devices should normally record such references in * their probe() methods, when they bind to a device, and release * them by calling rio_dev_put(), in their disconnect() methods. */ struct rio_dev *rio_dev_get(struct rio_dev *rdev) { if (rdev) get_device(&rdev->dev); return rdev; } /** * rio_dev_put - Release a use of the RIO device structure * * @rdev: RIO device being disconnected * * Must be called when a user of a device is finished with it. * When the last user of the device calls this function, the * memory of the device is freed. */ void rio_dev_put(struct rio_dev *rdev) { if (rdev) put_device(&rdev->dev); } /** * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure * @dev: the RIO device structure to match against * * return 0 and set rio_dev->driver when drv claims rio_dev, else error */ static int rio_device_probe(struct device *dev) { struct rio_driver *rdrv = to_rio_driver(dev->driver); struct rio_dev *rdev = to_rio_dev(dev); int error = -ENODEV; const struct rio_device_id *id; if (!rdev->driver && rdrv->probe) { if (!rdrv->id_table) return error; id = rio_match_device(rdrv->id_table, rdev); rio_dev_get(rdev); if (id) error = rdrv->probe(rdev, id); if (error >= 0) { rdev->driver = rdrv; error = 0; } else rio_dev_put(rdev); } return error; } /** * rio_device_remove - Remove a RIO device from the system * * @dev: the RIO device structure to match against * * Remove a RIO device from the system. If it has an associated * driver, then run the driver remove() method. Then update * the reference count. */ static void rio_device_remove(struct device *dev) { struct rio_dev *rdev = to_rio_dev(dev); struct rio_driver *rdrv = rdev->driver; if (rdrv) { if (rdrv->remove) rdrv->remove(rdev); rdev->driver = NULL; } rio_dev_put(rdev); } static void rio_device_shutdown(struct device *dev) { struct rio_dev *rdev = to_rio_dev(dev); struct rio_driver *rdrv = rdev->driver; dev_dbg(dev, "RIO: %s\n", __func__); if (rdrv && rdrv->shutdown) rdrv->shutdown(rdev); } /** * rio_register_driver - register a new RIO driver * @rdrv: the RIO driver structure to register * * Adds a &struct rio_driver to the list of registered drivers. 
* Returns a negative value on error, otherwise 0. If no error * occurred, the driver remains registered even if no device * was claimed during registration. */ int rio_register_driver(struct rio_driver *rdrv) { /* initialize common driver fields */ rdrv->driver.name = rdrv->name; rdrv->driver.bus = &rio_bus_type; /* register with core */ return driver_register(&rdrv->driver); } /** * rio_unregister_driver - unregister a RIO driver * @rdrv: the RIO driver structure to unregister * * Deletes the &struct rio_driver from the list of registered RIO * drivers, gives it a chance to clean up by calling its remove() * function for each device it was responsible for, and marks those * devices as driverless. */ void rio_unregister_driver(struct rio_driver *rdrv) { driver_unregister(&rdrv->driver); } void rio_attach_device(struct rio_dev *rdev) { rdev->dev.bus = &rio_bus_type; } EXPORT_SYMBOL_GPL(rio_attach_device); /** * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure * @dev: the standard device structure to match against * @drv: the standard driver structure containing the ids to match against * * Used by a driver to check whether a RIO device present in the * system is in its list of supported devices. Returns 1 if * there is a matching &struct rio_device_id or 0 if there is * no match. */ static int rio_match_bus(struct device *dev, struct device_driver *drv) { struct rio_dev *rdev = to_rio_dev(dev); struct rio_driver *rdrv = to_rio_driver(drv); const struct rio_device_id *id = rdrv->id_table; const struct rio_device_id *found_id; if (!id) goto out; found_id = rio_match_device(id, rdev); if (found_id) return 1; out:return 0; } static int rio_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct rio_dev *rdev; if (!dev) return -ENODEV; rdev = to_rio_dev(dev); if (!rdev) return -ENODEV; if (add_uevent_var(env, "MODALIAS=rapidio:v%04Xd%04Xav%04Xad%04X", rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did)) return -ENOMEM; return 0; } struct class rio_mport_class = { .name = "rapidio_port", .dev_groups = rio_mport_groups, }; EXPORT_SYMBOL_GPL(rio_mport_class); struct bus_type rio_bus_type = { .name = "rapidio", .match = rio_match_bus, .dev_groups = rio_dev_groups, .bus_groups = rio_bus_groups, .probe = rio_device_probe, .remove = rio_device_remove, .shutdown = rio_device_shutdown, .uevent = rio_uevent, }; /** * rio_bus_init - Register the RapidIO bus with the device model * * Registers the RIO mport device class and RIO bus type with the Linux * device model. */ static int __init rio_bus_init(void) { int ret; ret = class_register(&rio_mport_class); if (!ret) { ret = bus_register(&rio_bus_type); if (ret) class_unregister(&rio_mport_class); } return ret; } postcore_initcall(rio_bus_init); EXPORT_SYMBOL_GPL(rio_register_driver); EXPORT_SYMBOL_GPL(rio_unregister_driver); EXPORT_SYMBOL_GPL(rio_bus_type); EXPORT_SYMBOL_GPL(rio_dev_get); EXPORT_SYMBOL_GPL(rio_dev_put);
linux-master
drivers/rapidio/rio-driver.c
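Editor's note: rio_match_device() above walks a driver's id table until it reaches the all-zero terminator, treating RIO_ANY_ID as a wildcard in each of the four fields. A self-contained sketch of that matching loop, with trimmed stand-in structs for the kernel types and arbitrary example vendor/device values:

/* Standalone sketch of the RIO id-table wildcard match. */
#include <stdio.h>
#include <stdint.h>

#define RIO_ANY_ID 0xffff

struct rio_device_id { uint16_t did, vid, asm_did, asm_vid; };
struct rio_dev       { uint16_t did, vid, asm_did, asm_vid; };

static const struct rio_device_id *
match_device(const struct rio_device_id *id, const struct rio_dev *rdev)
{
	while (id->vid || id->asm_vid) { /* {0,0,0,0} terminates the table */
		if ((id->vid == RIO_ANY_ID || id->vid == rdev->vid) &&
		    (id->did == RIO_ANY_ID || id->did == rdev->did) &&
		    (id->asm_vid == RIO_ANY_ID || id->asm_vid == rdev->asm_vid) &&
		    (id->asm_did == RIO_ANY_ID || id->asm_did == rdev->asm_did))
			return id;
		id++;
	}
	return NULL;
}

int main(void)
{
	static const struct rio_device_id table[] = {
		{ .vid = 0x0038, .did = 0x0374,
		  .asm_vid = RIO_ANY_ID, .asm_did = RIO_ANY_ID },
		{ 0 },
	};
	struct rio_dev dev = { .vid = 0x0038, .did = 0x0374,
			       .asm_vid = 0x1234, .asm_did = 0x5678 };

	printf("match: %s\n", match_device(table, &dev) ? "yes" : "no");
	return 0;
}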
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO sysfs attributes and support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <[email protected]> */ #include <linux/kernel.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/stat.h> #include <linux/capability.h> #include "rio.h" /* Sysfs support */ #define rio_config_attr(field, format_string) \ static ssize_t \ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct rio_dev *rdev = to_rio_dev(dev); \ \ return sprintf(buf, format_string, rdev->field); \ } \ static DEVICE_ATTR_RO(field); rio_config_attr(did, "0x%04x\n"); rio_config_attr(vid, "0x%04x\n"); rio_config_attr(device_rev, "0x%08x\n"); rio_config_attr(asm_did, "0x%04x\n"); rio_config_attr(asm_vid, "0x%04x\n"); rio_config_attr(asm_rev, "0x%04x\n"); rio_config_attr(destid, "0x%04x\n"); rio_config_attr(hopcount, "0x%02x\n"); static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); char *str = buf; int i; for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); i++) { if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE) continue; str += sprintf(str, "%04x %02x\n", i, rdev->rswitch->route_table[i]); } return (str - buf); } static DEVICE_ATTR_RO(routes); static ssize_t lprev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); return sprintf(buf, "%s\n", (rdev->prev) ? rio_name(rdev->prev) : "root"); } static DEVICE_ATTR_RO(lprev); static ssize_t lnext_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); char *str = buf; int i; if (rdev->pef & RIO_PEF_SWITCH) { for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) { if (rdev->rswitch->nextdev[i]) str += sprintf(str, "%s\n", rio_name(rdev->rswitch->nextdev[i])); else str += sprintf(str, "null\n"); } } return str - buf; } static DEVICE_ATTR_RO(lnext); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n", rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did); } static DEVICE_ATTR_RO(modalias); static struct attribute *rio_dev_attrs[] = { &dev_attr_did.attr, &dev_attr_vid.attr, &dev_attr_device_rev.attr, &dev_attr_asm_did.attr, &dev_attr_asm_vid.attr, &dev_attr_asm_rev.attr, &dev_attr_lprev.attr, &dev_attr_destid.attr, &dev_attr_modalias.attr, /* Switch-only attributes */ &dev_attr_routes.attr, &dev_attr_lnext.attr, &dev_attr_hopcount.attr, NULL, }; static ssize_t rio_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj)); unsigned int size = 0x100; loff_t init_off = off; u8 *data = (u8 *) buf; /* Several chips lock up trying to read undefined config space */ if (capable(CAP_SYS_ADMIN)) size = RIO_MAINT_SPACE_SZ; if (off >= size) return 0; if (off + count > size) { size -= off; count = size; } else { size = count; } if ((off & 1) && size) { u8 val; rio_read_config_8(dev, off, &val); data[off - init_off] = val; off++; size--; } if ((off & 3) && size > 2) { u16 val; rio_read_config_16(dev, off, &val); data[off - init_off] = (val >> 8) & 0xff; data[off - init_off + 1] = val & 0xff; off += 2; size -= 2; } while (size > 3) { u32 val; rio_read_config_32(dev, off, &val); data[off - init_off] = (val >> 24) & 0xff; data[off - init_off + 
1] = (val >> 16) & 0xff; data[off - init_off + 2] = (val >> 8) & 0xff; data[off - init_off + 3] = val & 0xff; off += 4; size -= 4; } if (size >= 2) { u16 val; rio_read_config_16(dev, off, &val); data[off - init_off] = (val >> 8) & 0xff; data[off - init_off + 1] = val & 0xff; off += 2; size -= 2; } if (size > 0) { u8 val; rio_read_config_8(dev, off, &val); data[off - init_off] = val; off++; --size; } return count; } static ssize_t rio_write_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj)); unsigned int size = count; loff_t init_off = off; u8 *data = (u8 *) buf; if (off >= RIO_MAINT_SPACE_SZ) return 0; if (off + count > RIO_MAINT_SPACE_SZ) { size = RIO_MAINT_SPACE_SZ - off; count = size; } if ((off & 1) && size) { rio_write_config_8(dev, off, data[off - init_off]); off++; size--; } if ((off & 3) && (size > 2)) { u16 val = data[off - init_off + 1]; val |= (u16) data[off - init_off] << 8; rio_write_config_16(dev, off, val); off += 2; size -= 2; } while (size > 3) { u32 val = data[off - init_off + 3]; val |= (u32) data[off - init_off + 2] << 8; val |= (u32) data[off - init_off + 1] << 16; val |= (u32) data[off - init_off] << 24; rio_write_config_32(dev, off, val); off += 4; size -= 4; } if (size >= 2) { u16 val = data[off - init_off + 1]; val |= (u16) data[off - init_off] << 8; rio_write_config_16(dev, off, val); off += 2; size -= 2; } if (size) { rio_write_config_8(dev, off, data[off - init_off]); off++; --size; } return count; } static struct bin_attribute rio_config_attr = { .attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, .size = RIO_MAINT_SPACE_SZ, .read = rio_read_config, .write = rio_write_config, }; static struct bin_attribute *rio_dev_bin_attrs[] = { &rio_config_attr, NULL, }; static umode_t rio_dev_is_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct rio_dev *rdev = to_rio_dev(kobj_to_dev(kobj)); umode_t mode = attr->mode; if (!(rdev->pef & RIO_PEF_SWITCH) && (attr == &dev_attr_routes.attr || attr == &dev_attr_lnext.attr || attr == &dev_attr_hopcount.attr)) { /* * Hide switch-specific attributes for a non-switch device. 
*/ mode = 0; } return mode; } static const struct attribute_group rio_dev_group = { .attrs = rio_dev_attrs, .is_visible = rio_dev_is_attr_visible, .bin_attrs = rio_dev_bin_attrs, }; const struct attribute_group *rio_dev_groups[] = { &rio_dev_group, NULL, }; static ssize_t scan_store(const struct bus_type *bus, const char *buf, size_t count) { long val; int rc; if (kstrtol(buf, 0, &val) < 0) return -EINVAL; if (val == RIO_MPORT_ANY) { rc = rio_init_mports(); goto exit; } if (val < 0 || val >= RIO_MAX_MPORTS) return -EINVAL; rc = rio_mport_scan((int)val); exit: if (!rc) rc = count; return rc; } static BUS_ATTR_WO(scan); static struct attribute *rio_bus_attrs[] = { &bus_attr_scan.attr, NULL, }; static const struct attribute_group rio_bus_group = { .attrs = rio_bus_attrs, }; const struct attribute_group *rio_bus_groups[] = { &rio_bus_group, NULL, }; static ssize_t port_destid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_mport *mport = to_rio_mport(dev); if (mport) return sprintf(buf, "0x%04x\n", mport->host_deviceid); else return -ENODEV; } static DEVICE_ATTR_RO(port_destid); static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_mport *mport = to_rio_mport(dev); if (mport) return sprintf(buf, "%u\n", mport->sys_size); else return -ENODEV; } static DEVICE_ATTR_RO(sys_size); static struct attribute *rio_mport_attrs[] = { &dev_attr_port_destid.attr, &dev_attr_sys_size.attr, NULL, }; static const struct attribute_group rio_mport_group = { .attrs = rio_mport_attrs, }; const struct attribute_group *rio_mport_groups[] = { &rio_mport_group, NULL, };
linux-master
drivers/rapidio/rio-sysfs.c
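Editor's note: rio_read_config() above widens maintenance accesses as alignment allows: a leading byte read if the offset is odd, a 16-bit read to reach 32-bit alignment, 32-bit reads for the bulk, then 16-bit and 8-bit reads for the tail, unpacking each value big-endian into the output buffer. The sketch below runs the same alignment ladder over an in-memory "config space"; the cfg_read_* helpers are hypothetical stand-ins for the rio_read_config_8/16/32 accessors.

/* Standalone sketch of the alignment ladder used by rio_read_config(). */
#include <stdio.h>
#include <stdint.h>

static const uint8_t cfg_space[16] = {
	0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
	0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
};

static void cfg_read_8(unsigned off, uint8_t *val)  { *val = cfg_space[off]; }
static void cfg_read_16(unsigned off, uint16_t *val)
{
	*val = ((uint16_t)cfg_space[off] << 8) | cfg_space[off + 1];
}
static void cfg_read_32(unsigned off, uint32_t *val)
{
	*val = ((uint32_t)cfg_space[off] << 24) | ((uint32_t)cfg_space[off + 1] << 16) |
	       ((uint32_t)cfg_space[off + 2] << 8) | cfg_space[off + 3];
}

static size_t read_config(uint8_t *data, unsigned off, size_t count)
{
	unsigned init_off = off;
	size_t size = count;

	if ((off & 1) && size) {            /* leading byte to even offset */
		uint8_t v; cfg_read_8(off, &v);
		data[off - init_off] = v; off++; size--;
	}
	if ((off & 3) && size > 2) {        /* 16-bit read to 32-bit alignment */
		uint16_t v; cfg_read_16(off, &v);
		data[off - init_off] = v >> 8;
		data[off - init_off + 1] = v & 0xff;
		off += 2; size -= 2;
	}
	while (size > 3) {                  /* aligned 32-bit bulk reads */
		uint32_t v; cfg_read_32(off, &v);
		data[off - init_off] = v >> 24;
		data[off - init_off + 1] = (v >> 16) & 0xff;
		data[off - init_off + 2] = (v >> 8) & 0xff;
		data[off - init_off + 3] = v & 0xff;
		off += 4; size -= 4;
	}
	if (size >= 2) {                    /* 16-bit tail */
		uint16_t v; cfg_read_16(off, &v);
		data[off - init_off] = v >> 8;
		data[off - init_off + 1] = v & 0xff;
		off += 2; size -= 2;
	}
	if (size) {                         /* final odd byte */
		uint8_t v; cfg_read_8(off, &v);
		data[off - init_off] = v;
	}
	return count;
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	size_t n = read_config(buf, 1, 9);  /* odd offset, odd length */

	for (size_t i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}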
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO interconnect services * (RapidIO Interconnect Specification, http://www.rapidio.org) * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <[email protected]> * * Copyright 2009 - 2013 Integrated Device Technology, Inc. * Alex Bounine <[email protected]> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/rio_regs.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/interrupt.h> #include "rio.h" /* * struct rio_pwrite - RIO portwrite event * @node: Node in list of doorbell events * @pwcback: Doorbell event callback * @context: Handler specific context to pass on event */ struct rio_pwrite { struct list_head node; int (*pwcback)(struct rio_mport *mport, void *context, union rio_pw_msg *msg, int step); void *context; }; MODULE_DESCRIPTION("RapidIO Subsystem Core"); MODULE_AUTHOR("Matt Porter <[email protected]>"); MODULE_AUTHOR("Alexandre Bounine <[email protected]>"); MODULE_LICENSE("GPL"); static int hdid[RIO_MAX_MPORTS]; static int ids_num; module_param_array(hdid, int, &ids_num, 0); MODULE_PARM_DESC(hdid, "Destination ID assignment to local RapidIO controllers"); static LIST_HEAD(rio_devices); static LIST_HEAD(rio_nets); static DEFINE_SPINLOCK(rio_global_list_lock); static LIST_HEAD(rio_mports); static LIST_HEAD(rio_scans); static DEFINE_MUTEX(rio_mport_list_lock); static unsigned char next_portid; static DEFINE_SPINLOCK(rio_mmap_lock); /** * rio_local_get_device_id - Get the base/extended device id for a port * @port: RIO master port from which to get the deviceid * * Reads the base/extended device id from the local device * implementing the master port. Returns the 8/16-bit device * id. */ u16 rio_local_get_device_id(struct rio_mport *port) { u32 result; rio_local_read_config_32(port, RIO_DID_CSR, &result); return (RIO_GET_DID(port->sys_size, result)); } EXPORT_SYMBOL_GPL(rio_local_get_device_id); /** * rio_query_mport - Query mport device attributes * @port: mport device to query * @mport_attr: mport attributes data structure * * Returns attributes of specified mport through the * pointer to attributes data structure. */ int rio_query_mport(struct rio_mport *port, struct rio_mport_attr *mport_attr) { if (!port->ops->query_mport) return -ENODATA; return port->ops->query_mport(port, mport_attr); } EXPORT_SYMBOL(rio_query_mport); /** * rio_alloc_net- Allocate and initialize a new RIO network data structure * @mport: Master port associated with the RIO network * * Allocates a RIO network structure, initializes per-network * list heads, and adds the associated master port to the * network list of associated master ports. Returns a * RIO network pointer on success or %NULL on failure. 
*/ struct rio_net *rio_alloc_net(struct rio_mport *mport) { struct rio_net *net = kzalloc(sizeof(*net), GFP_KERNEL); if (net) { INIT_LIST_HEAD(&net->node); INIT_LIST_HEAD(&net->devices); INIT_LIST_HEAD(&net->switches); INIT_LIST_HEAD(&net->mports); mport->net = net; } return net; } EXPORT_SYMBOL_GPL(rio_alloc_net); int rio_add_net(struct rio_net *net) { int err; err = device_register(&net->dev); if (err) return err; spin_lock(&rio_global_list_lock); list_add_tail(&net->node, &rio_nets); spin_unlock(&rio_global_list_lock); return 0; } EXPORT_SYMBOL_GPL(rio_add_net); void rio_free_net(struct rio_net *net) { spin_lock(&rio_global_list_lock); if (!list_empty(&net->node)) list_del(&net->node); spin_unlock(&rio_global_list_lock); if (net->release) net->release(net); device_unregister(&net->dev); } EXPORT_SYMBOL_GPL(rio_free_net); /** * rio_local_set_device_id - Set the base/extended device id for a port * @port: RIO master port * @did: Device ID value to be written * * Writes the base/extended device id from a device. */ void rio_local_set_device_id(struct rio_mport *port, u16 did) { rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(port->sys_size, did)); } EXPORT_SYMBOL_GPL(rio_local_set_device_id); /** * rio_add_device- Adds a RIO device to the device model * @rdev: RIO device * * Adds the RIO device to the global device list and adds the RIO * device to the RIO device list. Creates the generic sysfs nodes * for an RIO device. */ int rio_add_device(struct rio_dev *rdev) { int err; atomic_set(&rdev->state, RIO_DEVICE_RUNNING); err = device_register(&rdev->dev); if (err) return err; spin_lock(&rio_global_list_lock); list_add_tail(&rdev->global_list, &rio_devices); if (rdev->net) { list_add_tail(&rdev->net_list, &rdev->net->devices); if (rdev->pef & RIO_PEF_SWITCH) list_add_tail(&rdev->rswitch->node, &rdev->net->switches); } spin_unlock(&rio_global_list_lock); return 0; } EXPORT_SYMBOL_GPL(rio_add_device); /* * rio_del_device - removes a RIO device from the device model * @rdev: RIO device * @state: device state to set during removal process * * Removes the RIO device to the kernel device list and subsystem's device list. * Clears sysfs entries for the removed device. */ void rio_del_device(struct rio_dev *rdev, enum rio_device_state state) { pr_debug("RIO: %s: removing %s\n", __func__, rio_name(rdev)); atomic_set(&rdev->state, state); spin_lock(&rio_global_list_lock); list_del(&rdev->global_list); if (rdev->net) { list_del(&rdev->net_list); if (rdev->pef & RIO_PEF_SWITCH) { list_del(&rdev->rswitch->node); kfree(rdev->rswitch->route_table); } } spin_unlock(&rio_global_list_lock); device_unregister(&rdev->dev); } EXPORT_SYMBOL_GPL(rio_del_device); /** * rio_request_inb_mbox - request inbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox number to claim * @entries: Number of entries in inbound mailbox queue * @minb: Callback to execute when inbound message is received * * Requests ownership of an inbound mailbox resource and binds * a callback function to the resource. Returns %0 on success. 
*/ int rio_request_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries, void (*minb) (struct rio_mport * mport, void *dev_id, int mbox, int slot)) { int rc = -ENOSYS; struct resource *res; if (!mport->ops->open_inb_mbox) goto out; res = kzalloc(sizeof(*res), GFP_KERNEL); if (res) { rio_init_mbox_res(res, mbox, mbox); /* Make sure this mailbox isn't in use */ rc = request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE], res); if (rc < 0) { kfree(res); goto out; } mport->inb_msg[mbox].res = res; /* Hook the inbound message callback */ mport->inb_msg[mbox].mcback = minb; rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); if (rc) { mport->inb_msg[mbox].mcback = NULL; mport->inb_msg[mbox].res = NULL; release_resource(res); kfree(res); } } else rc = -ENOMEM; out: return rc; } EXPORT_SYMBOL_GPL(rio_request_inb_mbox); /** * rio_release_inb_mbox - release inbound mailbox message service * @mport: RIO master port from which to release the mailbox resource * @mbox: Mailbox number to release * * Releases ownership of an inbound mailbox resource. Returns 0 * if the request has been satisfied. */ int rio_release_inb_mbox(struct rio_mport *mport, int mbox) { int rc; if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res) return -EINVAL; mport->ops->close_inb_mbox(mport, mbox); mport->inb_msg[mbox].mcback = NULL; rc = release_resource(mport->inb_msg[mbox].res); if (rc) return rc; kfree(mport->inb_msg[mbox].res); mport->inb_msg[mbox].res = NULL; return 0; } EXPORT_SYMBOL_GPL(rio_release_inb_mbox); /** * rio_request_outb_mbox - request outbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox number to claim * @entries: Number of entries in outbound mailbox queue * @moutb: Callback to execute when outbound message is sent * * Requests ownership of an outbound mailbox resource and binds * a callback function to the resource. Returns 0 on success. */ int rio_request_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries, void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot)) { int rc = -ENOSYS; struct resource *res; if (!mport->ops->open_outb_mbox) goto out; res = kzalloc(sizeof(*res), GFP_KERNEL); if (res) { rio_init_mbox_res(res, mbox, mbox); /* Make sure this outbound mailbox isn't in use */ rc = request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE], res); if (rc < 0) { kfree(res); goto out; } mport->outb_msg[mbox].res = res; /* Hook the inbound message callback */ mport->outb_msg[mbox].mcback = moutb; rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); if (rc) { mport->outb_msg[mbox].mcback = NULL; mport->outb_msg[mbox].res = NULL; release_resource(res); kfree(res); } } else rc = -ENOMEM; out: return rc; } EXPORT_SYMBOL_GPL(rio_request_outb_mbox); /** * rio_release_outb_mbox - release outbound mailbox message service * @mport: RIO master port from which to release the mailbox resource * @mbox: Mailbox number to release * * Releases ownership of an inbound mailbox resource. Returns 0 * if the request has been satisfied. 
*/ int rio_release_outb_mbox(struct rio_mport *mport, int mbox) { int rc; if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res) return -EINVAL; mport->ops->close_outb_mbox(mport, mbox); mport->outb_msg[mbox].mcback = NULL; rc = release_resource(mport->outb_msg[mbox].res); if (rc) return rc; kfree(mport->outb_msg[mbox].res); mport->outb_msg[mbox].res = NULL; return 0; } EXPORT_SYMBOL_GPL(rio_release_outb_mbox); /** * rio_setup_inb_dbell - bind inbound doorbell callback * @mport: RIO master port to bind the doorbell callback * @dev_id: Device specific pointer to pass on event * @res: Doorbell message resource * @dinb: Callback to execute when doorbell is received * * Adds a doorbell resource/callback pair into a port's * doorbell event list. Returns 0 if the request has been * satisfied. */ static int rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res, void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst, u16 info)) { struct rio_dbell *dbell = kmalloc(sizeof(*dbell), GFP_KERNEL); if (!dbell) return -ENOMEM; dbell->res = res; dbell->dinb = dinb; dbell->dev_id = dev_id; mutex_lock(&mport->lock); list_add_tail(&dbell->node, &mport->dbells); mutex_unlock(&mport->lock); return 0; } /** * rio_request_inb_dbell - request inbound doorbell message service * @mport: RIO master port from which to allocate the doorbell resource * @dev_id: Device specific pointer to pass on event * @start: Doorbell info range start * @end: Doorbell info range end * @dinb: Callback to execute when doorbell is received * * Requests ownership of an inbound doorbell resource and binds * a callback function to the resource. Returns 0 if the request * has been satisfied. */ int rio_request_inb_dbell(struct rio_mport *mport, void *dev_id, u16 start, u16 end, void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst, u16 info)) { int rc; struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); if (res) { rio_init_dbell_res(res, start, end); /* Make sure these doorbells aren't in use */ rc = request_resource(&mport->riores[RIO_DOORBELL_RESOURCE], res); if (rc < 0) { kfree(res); goto out; } /* Hook the doorbell callback */ rc = rio_setup_inb_dbell(mport, dev_id, res, dinb); } else rc = -ENOMEM; out: return rc; } EXPORT_SYMBOL_GPL(rio_request_inb_dbell); /** * rio_release_inb_dbell - release inbound doorbell message service * @mport: RIO master port from which to release the doorbell resource * @start: Doorbell info range start * @end: Doorbell info range end * * Releases ownership of an inbound doorbell resource and removes * callback from the doorbell event list. Returns 0 if the request * has been satisfied. */ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) { int rc = 0, found = 0; struct rio_dbell *dbell; mutex_lock(&mport->lock); list_for_each_entry(dbell, &mport->dbells, node) { if ((dbell->res->start == start) && (dbell->res->end == end)) { list_del(&dbell->node); found = 1; break; } } mutex_unlock(&mport->lock); /* If we can't find an exact match, fail */ if (!found) { rc = -EINVAL; goto out; } /* Release the doorbell resource */ rc = release_resource(dbell->res); /* Free the doorbell event */ kfree(dbell); out: return rc; } EXPORT_SYMBOL_GPL(rio_release_inb_dbell); /** * rio_request_outb_dbell - request outbound doorbell message range * @rdev: RIO device from which to allocate the doorbell resource * @start: Doorbell message range start * @end: Doorbell message range end * * Requests ownership of a doorbell message range. 
Returns a resource * if the request has been satisfied or %NULL on failure. */ struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start, u16 end) { struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL); if (res) { rio_init_dbell_res(res, start, end); /* Make sure these doorbells aren't in use */ if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res) < 0) { kfree(res); res = NULL; } } return res; } EXPORT_SYMBOL_GPL(rio_request_outb_dbell); /** * rio_release_outb_dbell - release outbound doorbell message range * @rdev: RIO device from which to release the doorbell resource * @res: Doorbell resource to be freed * * Releases ownership of a doorbell message range. Returns 0 if the * request has been satisfied. */ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) { int rc = release_resource(res); kfree(res); return rc; } EXPORT_SYMBOL_GPL(rio_release_outb_dbell); /** * rio_add_mport_pw_handler - add port-write message handler into the list * of mport specific pw handlers * @mport: RIO master port to bind the portwrite callback * @context: Handler specific context to pass on event * @pwcback: Callback to execute when portwrite is received * * Returns 0 if the request has been satisfied. */ int rio_add_mport_pw_handler(struct rio_mport *mport, void *context, int (*pwcback)(struct rio_mport *mport, void *context, union rio_pw_msg *msg, int step)) { struct rio_pwrite *pwrite = kzalloc(sizeof(*pwrite), GFP_KERNEL); if (!pwrite) return -ENOMEM; pwrite->pwcback = pwcback; pwrite->context = context; mutex_lock(&mport->lock); list_add_tail(&pwrite->node, &mport->pwrites); mutex_unlock(&mport->lock); return 0; } EXPORT_SYMBOL_GPL(rio_add_mport_pw_handler); /** * rio_del_mport_pw_handler - remove port-write message handler from the list * of mport specific pw handlers * @mport: RIO master port to bind the portwrite callback * @context: Registered handler specific context to pass on event * @pwcback: Registered callback function * * Returns 0 if the request has been satisfied. */ int rio_del_mport_pw_handler(struct rio_mport *mport, void *context, int (*pwcback)(struct rio_mport *mport, void *context, union rio_pw_msg *msg, int step)) { int rc = -EINVAL; struct rio_pwrite *pwrite; mutex_lock(&mport->lock); list_for_each_entry(pwrite, &mport->pwrites, node) { if (pwrite->pwcback == pwcback && pwrite->context == context) { list_del(&pwrite->node); kfree(pwrite); rc = 0; break; } } mutex_unlock(&mport->lock); return rc; } EXPORT_SYMBOL_GPL(rio_del_mport_pw_handler); /** * rio_request_inb_pwrite - request inbound port-write message service for * specific RapidIO device * @rdev: RIO device to which register inbound port-write callback routine * @pwcback: Callback routine to execute when port-write is received * * Binds a port-write callback function to the RapidIO device. * Returns 0 if the request has been satisfied. */ int rio_request_inb_pwrite(struct rio_dev *rdev, int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step)) { int rc = 0; spin_lock(&rio_global_list_lock); if (rdev->pwcback) rc = -ENOMEM; else rdev->pwcback = pwcback; spin_unlock(&rio_global_list_lock); return rc; } EXPORT_SYMBOL_GPL(rio_request_inb_pwrite); /** * rio_release_inb_pwrite - release inbound port-write message service * associated with specific RapidIO device * @rdev: RIO device which registered for inbound port-write callback * * Removes callback from the rio_dev structure. Returns 0 if the request * has been satisfied. 
*/ int rio_release_inb_pwrite(struct rio_dev *rdev) { int rc = -ENOMEM; spin_lock(&rio_global_list_lock); if (rdev->pwcback) { rdev->pwcback = NULL; rc = 0; } spin_unlock(&rio_global_list_lock); return rc; } EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); /** * rio_pw_enable - Enables/disables port-write handling by a master port * @mport: Master port associated with port-write handling * @enable: 1=enable, 0=disable */ void rio_pw_enable(struct rio_mport *mport, int enable) { if (mport->ops->pwenable) { mutex_lock(&mport->lock); if ((enable && ++mport->pwe_refcnt == 1) || (!enable && mport->pwe_refcnt && --mport->pwe_refcnt == 0)) mport->ops->pwenable(mport, enable); mutex_unlock(&mport->lock); } } EXPORT_SYMBOL_GPL(rio_pw_enable); /** * rio_map_inb_region -- Map inbound memory region. * @mport: Master port. * @local: physical address of memory region to be mapped * @rbase: RIO base address assigned to this window * @size: Size of the memory region * @rflags: Flags for mapping. * * Return: 0 -- Success. * * This function will create the mapping from RIO space to local memory. */ int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, u64 rbase, u32 size, u32 rflags) { int rc; unsigned long flags; if (!mport->ops->map_inb) return -1; spin_lock_irqsave(&rio_mmap_lock, flags); rc = mport->ops->map_inb(mport, local, rbase, size, rflags); spin_unlock_irqrestore(&rio_mmap_lock, flags); return rc; } EXPORT_SYMBOL_GPL(rio_map_inb_region); /** * rio_unmap_inb_region -- Unmap the inbound memory region * @mport: Master port * @lstart: physical address of memory region to be unmapped */ void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) { unsigned long flags; if (!mport->ops->unmap_inb) return; spin_lock_irqsave(&rio_mmap_lock, flags); mport->ops->unmap_inb(mport, lstart); spin_unlock_irqrestore(&rio_mmap_lock, flags); } EXPORT_SYMBOL_GPL(rio_unmap_inb_region); /** * rio_map_outb_region -- Map outbound memory region. * @mport: Master port. * @destid: destination id window points to * @rbase: RIO base address window translates to * @size: Size of the memory region * @rflags: Flags for mapping. * @local: physical address of memory region mapped * * Return: 0 -- Success. * * This function will create the mapping from RIO space to local memory. */ int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase, u32 size, u32 rflags, dma_addr_t *local) { int rc; unsigned long flags; if (!mport->ops->map_outb) return -ENODEV; spin_lock_irqsave(&rio_mmap_lock, flags); rc = mport->ops->map_outb(mport, destid, rbase, size, rflags, local); spin_unlock_irqrestore(&rio_mmap_lock, flags); return rc; } EXPORT_SYMBOL_GPL(rio_map_outb_region); /** * rio_unmap_outb_region -- Unmap the inbound memory region * @mport: Master port * @destid: destination id mapping points to * @rstart: RIO base address window translates to */ void rio_unmap_outb_region(struct rio_mport *mport, u16 destid, u64 rstart) { unsigned long flags; if (!mport->ops->unmap_outb) return; spin_lock_irqsave(&rio_mmap_lock, flags); mport->ops->unmap_outb(mport, destid, rstart); spin_unlock_irqrestore(&rio_mmap_lock, flags); } EXPORT_SYMBOL_GPL(rio_unmap_outb_region); /** * rio_mport_get_physefb - Helper function that returns register offset * for Physical Layer Extended Features Block. 
* @port: Master port to issue transaction * @local: Indicate a local master port or remote device access * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device * @rmap: pointer to location to store register map type info */ u32 rio_mport_get_physefb(struct rio_mport *port, int local, u16 destid, u8 hopcount, u32 *rmap) { u32 ext_ftr_ptr; u32 ftr_header; ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0); while (ext_ftr_ptr) { if (local) rio_local_read_config_32(port, ext_ftr_ptr, &ftr_header); else rio_mport_read_config_32(port, destid, hopcount, ext_ftr_ptr, &ftr_header); ftr_header = RIO_GET_BLOCK_ID(ftr_header); switch (ftr_header) { case RIO_EFB_SER_EP_ID: case RIO_EFB_SER_EP_REC_ID: case RIO_EFB_SER_EP_FREE_ID: case RIO_EFB_SER_EP_M1_ID: case RIO_EFB_SER_EP_SW_M1_ID: case RIO_EFB_SER_EPF_M1_ID: case RIO_EFB_SER_EPF_SW_M1_ID: *rmap = 1; return ext_ftr_ptr; case RIO_EFB_SER_EP_M2_ID: case RIO_EFB_SER_EP_SW_M2_ID: case RIO_EFB_SER_EPF_M2_ID: case RIO_EFB_SER_EPF_SW_M2_ID: *rmap = 2; return ext_ftr_ptr; default: break; } ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, ext_ftr_ptr); } return ext_ftr_ptr; } EXPORT_SYMBOL_GPL(rio_mport_get_physefb); /** * rio_get_comptag - Begin or continue searching for a RIO device by component tag * @comp_tag: RIO component tag to match * @from: Previous RIO device found in search, or %NULL for new search * * Iterates through the list of known RIO devices. If a RIO device is * found with a matching @comp_tag, a pointer to its device * structure is returned. Otherwise, %NULL is returned. A new search * is initiated by passing %NULL to the @from argument. Otherwise, if * @from is not %NULL, searches continue from next device on the global * list. */ struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) { struct list_head *n; struct rio_dev *rdev; spin_lock(&rio_global_list_lock); n = from ? from->global_list.next : rio_devices.next; while (n && (n != &rio_devices)) { rdev = rio_dev_g(n); if (rdev->comp_tag == comp_tag) goto exit; n = n->next; } rdev = NULL; exit: spin_unlock(&rio_global_list_lock); return rdev; } EXPORT_SYMBOL_GPL(rio_get_comptag); /** * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. 
* @rdev: Pointer to RIO device control structure * @pnum: Switch port number to set LOCKOUT bit * @lock: Operation : set (=1) or clear (=0) */ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) { u32 regval; rio_read_config_32(rdev, RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), &regval); if (lock) regval |= RIO_PORT_N_CTL_LOCKOUT; else regval &= ~RIO_PORT_N_CTL_LOCKOUT; rio_write_config_32(rdev, RIO_DEV_PORT_N_CTL_CSR(rdev, pnum), regval); return 0; } EXPORT_SYMBOL_GPL(rio_set_port_lockout); /** * rio_enable_rx_tx_port - enable input receiver and output transmitter of * given port * @port: Master port associated with the RIO network * @local: local=1 select local port otherwise a far device is reached * @destid: Destination ID of the device to check host bit * @hopcount: Number of hops to reach the target * @port_num: Port (-number on switch) to enable on a far end device * * Returns 0 or 1 from on General Control Command and Status Register * (EXT_PTR+0x3C) */ int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, u8 hopcount, u8 port_num) { #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS u32 regval; u32 ext_ftr_ptr; u32 rmap; /* * enable rx input tx output port */ pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " "%d, port_num = %d)\n", local, destid, hopcount, port_num); ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount, &rmap); if (local) { rio_local_read_config_32(port, ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), &regval); } else { if (rio_mport_read_config_32(port, destid, hopcount, ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), &regval) < 0) return -EIO; } regval = regval | RIO_PORT_N_CTL_EN_RX | RIO_PORT_N_CTL_EN_TX; if (local) { rio_local_write_config_32(port, ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval); } else { if (rio_mport_write_config_32(port, destid, hopcount, ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap), regval) < 0) return -EIO; } #endif return 0; } EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port); /** * rio_chk_dev_route - Validate route to the specified device. * @rdev: RIO device failed to respond * @nrdev: Last active device on the route to rdev * @npnum: nrdev's port number on the route to rdev * * Follows a route to the specified RIO device to determine the last available * device (and corresponding RIO port) on the route. */ static int rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum) { u32 result; int p_port, rc = -EIO; struct rio_dev *prev = NULL; /* Find switch with failed RIO link */ while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) { if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) { prev = rdev->prev; break; } rdev = rdev->prev; } if (!prev) goto err_out; p_port = prev->rswitch->route_table[rdev->destid]; if (p_port != RIO_INVALID_ROUTE) { pr_debug("RIO: link failed on [%s]-P%d\n", rio_name(prev), p_port); *nrdev = prev; *npnum = p_port; rc = 0; } else pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev)); err_out: return rc; } /** * rio_mport_chk_dev_access - Validate access to the specified device. 
 * @mport: Master port to send transactions
 * @destid: Device destination ID in network
 * @hopcount: Number of hops into the network
 */
int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
{
	int i = 0;
	u32 tmp;

	while (rio_mport_read_config_32(mport, destid, hopcount,
					RIO_DEV_ID_CAR, &tmp)) {
		i++;
		if (i == RIO_MAX_CHK_RETRY)
			return -EIO;
		mdelay(1);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);

/**
 * rio_chk_dev_access - Validate access to the specified device.
 * @rdev: Pointer to RIO device control structure
 */
static int rio_chk_dev_access(struct rio_dev *rdev)
{
	return rio_mport_chk_dev_access(rdev->net->hport,
					rdev->destid, rdev->hopcount);
}

/**
 * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
 *                        returns link-response (if requested).
 * @rdev: RIO device to issue Input-status command
 * @pnum: Device port number to issue the command
 * @lnkresp: Response from a link partner
 */
static int
rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
{
	u32 regval;
	int checkcount;

	if (lnkresp) {
		/* Read from link maintenance response register
		 * to clear valid bit
		 */
		rio_read_config_32(rdev,
			RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
			&regval);
		udelay(50);
	}

	/* Issue Input-status command */
	rio_write_config_32(rdev,
		RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum),
		RIO_MNT_REQ_CMD_IS);

	/* Exit if the response is not expected */
	if (!lnkresp)
		return 0;

	checkcount = 3;
	while (checkcount--) {
		udelay(50);
		rio_read_config_32(rdev,
			RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
			&regval);
		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
			*lnkresp = regval;
			return 0;
		}
	}

	return -EIO;
}

/**
 * rio_clr_err_stopped - Clears port Error-stopped states.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to clear errors
 * @err_status: port error status (if 0 reads register from device)
 *
 * TODO: Currently this routine is not compatible with recovery process
 * specified for idt_gen3 RapidIO switch devices. It has to be reviewed
 * to implement universal recovery process that is compatible with the
 * full range of available devices.
 * IDT gen3 switch driver now implements HW-specific error handler that
 * issues soft port reset to the port to reset ERR_STOP bits and ackIDs.
 */
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
	u32 regval;
	u32 far_ackid, far_linkstat, near_ackid;

	if (err_status == 0)
		rio_read_config_32(rdev,
			RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
			&err_status);

	if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) {
		pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
		/*
		 * Send a Link-Request/Input-Status control symbol
		 */
		if (rio_get_input_status(rdev, pnum, &regval)) {
			pr_debug("RIO_EM: Input-status response timeout\n");
			goto rd_err;
		}

		pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
			 pnum, regval);
		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
		rio_read_config_32(rdev,
			RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
			&regval);
		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
		near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
		pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
			 " near_ackID=0x%02x\n",
			pnum, far_ackid, far_linkstat, near_ackid);

		/*
		 * If required, synchronize ackIDs of near and
		 * far sides.
*/ if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) || (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) { /* Align near outstanding/outbound ackIDs with * far inbound. */ rio_write_config_32(rdev, RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum), (near_ackid << 24) | (far_ackid << 8) | far_ackid); /* Align far outstanding/outbound ackIDs with * near inbound. */ far_ackid++; if (!nextdev) { pr_debug("RIO_EM: nextdev pointer == NULL\n"); goto rd_err; } rio_write_config_32(nextdev, RIO_DEV_PORT_N_ACK_STS_CSR(nextdev, RIO_GET_PORT_NUM(nextdev->swpinfo)), (far_ackid << 24) | (near_ackid << 8) | near_ackid); } rd_err: rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), &err_status); pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); } if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) { pr_debug("RIO_EM: servicing Input Error-Stopped state\n"); rio_get_input_status(nextdev, RIO_GET_PORT_NUM(nextdev->swpinfo), NULL); udelay(50); rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), &err_status); pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); } return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0; } /** * rio_inb_pwrite_handler - inbound port-write message handler * @mport: mport device associated with port-write * @pw_msg: pointer to inbound port-write message * * Processes an inbound port-write message. Returns 0 if the request * has been satisfied. */ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) { struct rio_dev *rdev; u32 err_status, em_perrdet, em_ltlerrdet; int rc, portnum; struct rio_pwrite *pwrite; #ifdef DEBUG_PW { u32 i; pr_debug("%s: PW to mport_%d:\n", __func__, mport->id); for (i = 0; i < RIO_PW_MSG_SIZE / sizeof(u32); i = i + 4) { pr_debug("0x%02x: %08x %08x %08x %08x\n", i * 4, pw_msg->raw[i], pw_msg->raw[i + 1], pw_msg->raw[i + 2], pw_msg->raw[i + 3]); } } #endif rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL); if (rdev) { pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); } else { pr_debug("RIO: %s No matching device for CTag 0x%08x\n", __func__, pw_msg->em.comptag); } /* Call a device-specific handler (if it is registered for the device). * This may be the service for endpoints that send device-specific * port-write messages. End-point messages expected to be handled * completely by EP specific device driver. * For switches rc==0 signals that no standard processing required. */ if (rdev && rdev->pwcback) { rc = rdev->pwcback(rdev, pw_msg, 0); if (rc == 0) return 0; } mutex_lock(&mport->lock); list_for_each_entry(pwrite, &mport->pwrites, node) pwrite->pwcback(mport, pwrite->context, pw_msg, 0); mutex_unlock(&mport->lock); if (!rdev) return 0; /* * FIXME: The code below stays as it was before for now until we decide * how to do default PW handling in combination with per-mport callbacks */ portnum = pw_msg->em.is_port & 0xFF; /* Check if device and route to it are functional: * Sometimes devices may send PW message(s) just before being * powered down (or link being lost). */ if (rio_chk_dev_access(rdev)) { pr_debug("RIO: device access failed - get link partner\n"); /* Scan route to the device and identify failed link. * This will replace device and port reported in PW message. * PW message should not be used after this point. 
*/ if (rio_chk_dev_route(rdev, &rdev, &portnum)) { pr_err("RIO: Route trace for %s failed\n", rio_name(rdev)); return -EIO; } pw_msg = NULL; } /* For End-point devices processing stops here */ if (!(rdev->pef & RIO_PEF_SWITCH)) return 0; if (rdev->phys_efptr == 0) { pr_err("RIO_PW: Bad switch initialization for %s\n", rio_name(rdev)); return 0; } /* * Process the port-write notification from switch */ if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) rdev->rswitch->ops->em_handle(rdev, portnum); rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), &err_status); pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { if (!(rdev->rswitch->port_ok & (1 << portnum))) { rdev->rswitch->port_ok |= (1 << portnum); rio_set_port_lockout(rdev, portnum, 0); /* Schedule Insertion Service */ pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n", rio_name(rdev), portnum); } /* Clear error-stopped states (if reported). * Depending on the link partner state, two attempts * may be needed for successful recovery. */ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | RIO_PORT_N_ERR_STS_INP_ES)) { if (rio_clr_err_stopped(rdev, portnum, err_status)) rio_clr_err_stopped(rdev, portnum, 0); } } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */ if (rdev->rswitch->port_ok & (1 << portnum)) { rdev->rswitch->port_ok &= ~(1 << portnum); rio_set_port_lockout(rdev, portnum, 1); if (rdev->phys_rmap == 1) { rio_write_config_32(rdev, RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum), RIO_PORT_N_ACK_CLEAR); } else { rio_write_config_32(rdev, RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum), RIO_PORT_N_OB_ACK_CLEAR); rio_write_config_32(rdev, RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum), 0); } /* Schedule Extraction Service */ pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", rio_name(rdev), portnum); } } rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); if (em_perrdet) { pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n", portnum, em_perrdet); /* Clear EM Port N Error Detect CSR */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0); } rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); if (em_ltlerrdet) { pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n", em_ltlerrdet); /* Clear EM L/T Layer Error Detect CSR */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0); } /* Clear remaining error bits and Port-Write Pending bit */ rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum), err_status); return 0; } EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler); /** * rio_mport_get_efb - get pointer to next extended features block * @port: Master port to issue transaction * @local: Indicate a local master port or remote device access * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device * @from: Offset of current Extended Feature block header (if 0 starts * from ExtFeaturePtr) */ u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, u8 hopcount, u32 from) { u32 reg_val; if (from == 0) { if (local) rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &reg_val); else rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR, &reg_val); return reg_val & RIO_EXT_FTR_PTR_MASK; } else { if (local) rio_local_read_config_32(port, from, &reg_val); else rio_mport_read_config_32(port, destid, hopcount, from, &reg_val); return RIO_GET_BLOCK_ID(reg_val); } } EXPORT_SYMBOL_GPL(rio_mport_get_efb); /** * 
rio_mport_get_feature - query for devices' extended features * @port: Master port to issue transaction * @local: Indicate a local master port or remote device access * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device * @ftr: Extended feature code * * Tell if a device supports a given RapidIO capability. * Returns the offset of the requested extended feature * block within the device's RIO configuration space or * 0 in case the device does not support it. */ u32 rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, u8 hopcount, int ftr) { u32 asm_info, ext_ftr_ptr, ftr_header; if (local) rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info); else rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR, &asm_info); ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK; while (ext_ftr_ptr) { if (local) rio_local_read_config_32(port, ext_ftr_ptr, &ftr_header); else rio_mport_read_config_32(port, destid, hopcount, ext_ftr_ptr, &ftr_header); if (RIO_GET_BLOCK_ID(ftr_header) == ftr) return ext_ftr_ptr; ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header); if (!ext_ftr_ptr) break; } return 0; } EXPORT_SYMBOL_GPL(rio_mport_get_feature); /** * rio_std_route_add_entry - Add switch route table entry using standard * registers defined in RIO specification rev.1.3 * @mport: Master port to issue transaction * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device * @table: routing table ID (global or port-specific) * @route_destid: destID entry in the RT * @route_port: destination port for specified destID */ static int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, (u32)route_destid); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (u32)route_port); } udelay(10); return 0; } /** * rio_std_route_get_entry - Read switch route table entry (port number) * associated with specified destID using standard registers defined in RIO * specification rev.1.3 * @mport: Master port to issue transaction * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device * @table: routing table ID (global or port-specific) * @route_destid: destID entry in the RT * @route_port: returned destination port for specified destID */ static int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 result; if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); rio_mport_read_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); *route_port = (u8)result; } return 0; } /** * rio_std_route_clr_table - Clear swotch route table using standard registers * defined in RIO specification rev.1.3. 
* @mport: Master port to issue transaction * @destid: Destination ID of the device * @hopcount: Number of switch hops to the device * @table: routing table ID (global or port-specific) */ static int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 max_destid = 0xff; u32 i, pef, id_inc = 1, ext_cfg = 0; u32 port_sel = RIO_INVALID_ROUTE; if (table == RIO_GLOBAL_TABLE) { rio_mport_read_config_32(mport, destid, hopcount, RIO_PEF_CAR, &pef); if (mport->sys_size) { rio_mport_read_config_32(mport, destid, hopcount, RIO_SWITCH_RT_LIMIT, &max_destid); max_destid &= RIO_RT_MAX_DESTID; } if (pef & RIO_PEF_EXT_RT) { ext_cfg = 0x80000000; id_inc = 4; port_sel = (RIO_INVALID_ROUTE << 24) | (RIO_INVALID_ROUTE << 16) | (RIO_INVALID_ROUTE << 8) | RIO_INVALID_ROUTE; } for (i = 0; i <= max_destid;) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, ext_cfg | i); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, port_sel); i += id_inc; } } udelay(10); return 0; } /** * rio_lock_device - Acquires host device lock for specified device * @port: Master port to send transaction * @destid: Destination ID for device/switch * @hopcount: Hopcount to reach switch * @wait_ms: Max wait time in msec (0 = no timeout) * * Attepts to acquire host device lock for specified device * Returns 0 if device lock acquired or EINVAL if timeout expires. */ int rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms) { u32 result; int tcnt = 0; /* Attempt to acquire device lock */ rio_mport_write_config_32(port, destid, hopcount, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_mport_read_config_32(port, destid, hopcount, RIO_HOST_DID_LOCK_CSR, &result); while (result != port->host_deviceid) { if (wait_ms != 0 && tcnt == wait_ms) { pr_debug("RIO: timeout when locking device %x:%x\n", destid, hopcount); return -EINVAL; } /* Delay a bit */ mdelay(1); tcnt++; /* Try to acquire device lock again */ rio_mport_write_config_32(port, destid, hopcount, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_mport_read_config_32(port, destid, hopcount, RIO_HOST_DID_LOCK_CSR, &result); } return 0; } EXPORT_SYMBOL_GPL(rio_lock_device); /** * rio_unlock_device - Releases host device lock for specified device * @port: Master port to send transaction * @destid: Destination ID for device/switch * @hopcount: Hopcount to reach switch * * Returns 0 if device lock released or EINVAL if fails. */ int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) { u32 result; /* Release device lock */ rio_mport_write_config_32(port, destid, hopcount, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_mport_read_config_32(port, destid, hopcount, RIO_HOST_DID_LOCK_CSR, &result); if ((result & 0xffff) != 0xffff) { pr_debug("RIO: badness when releasing device lock %x:%x\n", destid, hopcount); return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(rio_unlock_device); /** * rio_route_add_entry- Add a route entry to a switch routing table * @rdev: RIO device * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Port number to be routed * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) * * If available calls the switch specific add_entry() method to add a route * entry into a switch routing table. Otherwise uses standard RT update method * as defined by RapidIO specification. 
A specific routing table can be selected * using the @table argument if a switch has per port routing tables or * the standard (or global) table may be used by passing * %RIO_GLOBAL_TABLE in @table. * * Returns %0 on success or %-EINVAL on failure. */ int rio_route_add_entry(struct rio_dev *rdev, u16 table, u16 route_destid, u8 route_port, int lock) { int rc = -EINVAL; struct rio_switch_ops *ops = rdev->rswitch->ops; if (lock) { rc = rio_lock_device(rdev->net->hport, rdev->destid, rdev->hopcount, 1000); if (rc) return rc; } spin_lock(&rdev->rswitch->lock); if (!ops || !ops->add_entry) { rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid, rdev->hopcount, table, route_destid, route_port); } else if (try_module_get(ops->owner)) { rc = ops->add_entry(rdev->net->hport, rdev->destid, rdev->hopcount, table, route_destid, route_port); module_put(ops->owner); } spin_unlock(&rdev->rswitch->lock); if (lock) rio_unlock_device(rdev->net->hport, rdev->destid, rdev->hopcount); return rc; } EXPORT_SYMBOL_GPL(rio_route_add_entry); /** * rio_route_get_entry- Read an entry from a switch routing table * @rdev: RIO device * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Pointer to read port number into * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) * * If available calls the switch specific get_entry() method to fetch a route * entry from a switch routing table. Otherwise uses standard RT read method * as defined by RapidIO specification. A specific routing table can be selected * using the @table argument if a switch has per port routing tables or * the standard (or global) table may be used by passing * %RIO_GLOBAL_TABLE in @table. * * Returns %0 on success or %-EINVAL on failure. */ int rio_route_get_entry(struct rio_dev *rdev, u16 table, u16 route_destid, u8 *route_port, int lock) { int rc = -EINVAL; struct rio_switch_ops *ops = rdev->rswitch->ops; if (lock) { rc = rio_lock_device(rdev->net->hport, rdev->destid, rdev->hopcount, 1000); if (rc) return rc; } spin_lock(&rdev->rswitch->lock); if (!ops || !ops->get_entry) { rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid, rdev->hopcount, table, route_destid, route_port); } else if (try_module_get(ops->owner)) { rc = ops->get_entry(rdev->net->hport, rdev->destid, rdev->hopcount, table, route_destid, route_port); module_put(ops->owner); } spin_unlock(&rdev->rswitch->lock); if (lock) rio_unlock_device(rdev->net->hport, rdev->destid, rdev->hopcount); return rc; } EXPORT_SYMBOL_GPL(rio_route_get_entry); /** * rio_route_clr_table - Clear a switch routing table * @rdev: RIO device * @table: Routing table ID * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) * * If available calls the switch specific clr_table() method to clear a switch * routing table. Otherwise uses standard RT write method as defined by RapidIO * specification. A specific routing table can be selected using the @table * argument if a switch has per port routing tables or the standard (or global) * table may be used by passing %RIO_GLOBAL_TABLE in @table. * * Returns %0 on success or %-EINVAL on failure. 
*/ int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock) { int rc = -EINVAL; struct rio_switch_ops *ops = rdev->rswitch->ops; if (lock) { rc = rio_lock_device(rdev->net->hport, rdev->destid, rdev->hopcount, 1000); if (rc) return rc; } spin_lock(&rdev->rswitch->lock); if (!ops || !ops->clr_table) { rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid, rdev->hopcount, table); } else if (try_module_get(ops->owner)) { rc = ops->clr_table(rdev->net->hport, rdev->destid, rdev->hopcount, table); module_put(ops->owner); } spin_unlock(&rdev->rswitch->lock); if (lock) rio_unlock_device(rdev->net->hport, rdev->destid, rdev->hopcount); return rc; } EXPORT_SYMBOL_GPL(rio_route_clr_table); #ifdef CONFIG_RAPIDIO_DMA_ENGINE static bool rio_chan_filter(struct dma_chan *chan, void *arg) { struct rio_mport *mport = arg; /* Check that DMA device belongs to the right MPORT */ return mport == container_of(chan->device, struct rio_mport, dma); } /** * rio_request_mport_dma - request RapidIO capable DMA channel associated * with specified local RapidIO mport device. * @mport: RIO mport to perform DMA data transfers * * Returns pointer to allocated DMA channel or NULL if failed. */ struct dma_chan *rio_request_mport_dma(struct rio_mport *mport) { dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); return dma_request_channel(mask, rio_chan_filter, mport); } EXPORT_SYMBOL_GPL(rio_request_mport_dma); /** * rio_request_dma - request RapidIO capable DMA channel that supports * specified target RapidIO device. * @rdev: RIO device associated with DMA transfer * * Returns pointer to allocated DMA channel or NULL if failed. */ struct dma_chan *rio_request_dma(struct rio_dev *rdev) { return rio_request_mport_dma(rdev->net->hport); } EXPORT_SYMBOL_GPL(rio_request_dma); /** * rio_release_dma - release specified DMA channel * @dchan: DMA channel to release */ void rio_release_dma(struct dma_chan *dchan) { dma_release_channel(dchan); } EXPORT_SYMBOL_GPL(rio_release_dma); /** * rio_dma_prep_xfer - RapidIO specific wrapper * for device_prep_slave_sg callback defined by DMAENGINE. * @dchan: DMA channel to configure * @destid: target RapidIO device destination ID * @data: RIO specific data descriptor * @direction: DMA data transfer direction (TO or FROM the device) * @flags: dmaengine defined flags * * Initializes RapidIO capable DMA channel for the specified data transfer. * Uses DMA channel private extension to pass information related to remote * target RIO device. * * Returns: pointer to DMA transaction descriptor if successful, * error-valued pointer or NULL if failed. */ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, u16 destid, struct rio_dma_data *data, enum dma_transfer_direction direction, unsigned long flags) { struct rio_dma_ext rio_ext; if (!dchan->device->device_prep_slave_sg) { pr_err("%s: prep_rio_sg == NULL\n", __func__); return NULL; } rio_ext.destid = destid; rio_ext.rio_addr_u = data->rio_addr_u; rio_ext.rio_addr = data->rio_addr; rio_ext.wr_type = data->wr_type; return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, direction, flags, &rio_ext); } EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); /** * rio_dma_prep_slave_sg - RapidIO specific wrapper * for device_prep_slave_sg callback defined by DMAENGINE. 
* @rdev: RIO device control structure * @dchan: DMA channel to configure * @data: RIO specific data descriptor * @direction: DMA data transfer direction (TO or FROM the device) * @flags: dmaengine defined flags * * Initializes RapidIO capable DMA channel for the specified data transfer. * Uses DMA channel private extension to pass information related to remote * target RIO device. * * Returns: pointer to DMA transaction descriptor if successful, * error-valued pointer or NULL if failed. */ struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, struct dma_chan *dchan, struct rio_dma_data *data, enum dma_transfer_direction direction, unsigned long flags) { return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags); } EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ /** * rio_find_mport - find RIO mport by its ID * @mport_id: number (ID) of mport device * * Given a RIO mport number, the desired mport is located * in the global list of mports. If the mport is found, a pointer to its * data structure is returned. If no mport is found, %NULL is returned. */ struct rio_mport *rio_find_mport(int mport_id) { struct rio_mport *port; mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { if (port->id == mport_id) goto found; } port = NULL; found: mutex_unlock(&rio_mport_list_lock); return port; } /** * rio_register_scan - enumeration/discovery method registration interface * @mport_id: mport device ID for which fabric scan routine has to be set * (RIO_MPORT_ANY = set for all available mports) * @scan_ops: enumeration/discovery operations structure * * Registers enumeration/discovery operations with RapidIO subsystem and * attaches it to the specified mport device (or all available mports * if RIO_MPORT_ANY is specified). * * Returns error if the mport already has an enumerator attached to it. * In case of RIO_MPORT_ANY skips mports with valid scan routines (no error). */ int rio_register_scan(int mport_id, struct rio_scan *scan_ops) { struct rio_mport *port; struct rio_scan_node *scan; int rc = 0; pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); if ((mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) || !scan_ops) return -EINVAL; mutex_lock(&rio_mport_list_lock); /* * Check if there is another enumerator already registered for * the same mport ID (including RIO_MPORT_ANY). Multiple enumerators * for the same mport ID are not supported. */ list_for_each_entry(scan, &rio_scans, node) { if (scan->mport_id == mport_id) { rc = -EBUSY; goto err_out; } } /* * Allocate and initialize new scan registration node. */ scan = kzalloc(sizeof(*scan), GFP_KERNEL); if (!scan) { rc = -ENOMEM; goto err_out; } scan->mport_id = mport_id; scan->ops = scan_ops; /* * Traverse the list of registered mports to attach this new scan. * * The new scan with matching mport ID overrides any previously attached * scan assuming that old scan (if any) is the default one (based on the * enumerator registration check above). * If the new scan is the global one, it will be attached only to mports * that do not have their own individual operations already attached. 
*/ list_for_each_entry(port, &rio_mports, node) { if (port->id == mport_id) { port->nscan = scan_ops; break; } else if (mport_id == RIO_MPORT_ANY && !port->nscan) port->nscan = scan_ops; } list_add_tail(&scan->node, &rio_scans); err_out: mutex_unlock(&rio_mport_list_lock); return rc; } EXPORT_SYMBOL_GPL(rio_register_scan); /** * rio_unregister_scan - removes enumeration/discovery method from mport * @mport_id: mport device ID for which fabric scan routine has to be * unregistered (RIO_MPORT_ANY = apply to all mports that use * the specified scan_ops) * @scan_ops: enumeration/discovery operations structure * * Removes enumeration or discovery method assigned to the specified mport * device. If RIO_MPORT_ANY is specified, removes the specified operations from * all mports that have them attached. */ int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) { struct rio_mport *port; struct rio_scan_node *scan; pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) return -EINVAL; mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) if (port->id == mport_id || (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) port->nscan = NULL; list_for_each_entry(scan, &rio_scans, node) { if (scan->mport_id == mport_id) { list_del(&scan->node); kfree(scan); break; } } mutex_unlock(&rio_mport_list_lock); return 0; } EXPORT_SYMBOL_GPL(rio_unregister_scan); /** * rio_mport_scan - execute enumeration/discovery on the specified mport * @mport_id: number (ID) of mport device */ int rio_mport_scan(int mport_id) { struct rio_mport *port = NULL; int rc; mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { if (port->id == mport_id) goto found; } mutex_unlock(&rio_mport_list_lock); return -ENODEV; found: if (!port->nscan) { mutex_unlock(&rio_mport_list_lock); return -EINVAL; } if (!try_module_get(port->nscan->owner)) { mutex_unlock(&rio_mport_list_lock); return -ENODEV; } mutex_unlock(&rio_mport_list_lock); if (port->host_deviceid >= 0) rc = port->nscan->enumerate(port, 0); else rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); module_put(port->nscan->owner); return rc; } static struct workqueue_struct *rio_wq; struct rio_disc_work { struct work_struct work; struct rio_mport *mport; }; static void disc_work_handler(struct work_struct *_work) { struct rio_disc_work *work; work = container_of(_work, struct rio_disc_work, work); pr_debug("RIO: discovery work for mport %d %s\n", work->mport->id, work->mport->name); if (try_module_get(work->mport->nscan->owner)) { work->mport->nscan->discover(work->mport, 0); module_put(work->mport->nscan->owner); } } int rio_init_mports(void) { struct rio_mport *port; struct rio_disc_work *work; int n = 0; if (!next_portid) return -ENODEV; /* * First, run enumerations and check if we need to perform discovery * on any of the registered mports. */ mutex_lock(&rio_mport_list_lock); list_for_each_entry(port, &rio_mports, node) { if (port->host_deviceid >= 0) { if (port->nscan && try_module_get(port->nscan->owner)) { port->nscan->enumerate(port, 0); module_put(port->nscan->owner); } } else n++; } mutex_unlock(&rio_mport_list_lock); if (!n) goto no_disc; /* * If we have mports that require discovery schedule a discovery work * for each of them. If the code below fails to allocate needed * resources, exit without error to keep results of enumeration * process (if any). * TODO: Implement restart of discovery process for all or * individual discovering mports. 
 */
	rio_wq = alloc_workqueue("riodisc", 0, 0);
	if (!rio_wq) {
		pr_err("RIO: unable to allocate rio_wq\n");
		goto no_disc;
	}

	work = kcalloc(n, sizeof *work, GFP_KERNEL);
	if (!work) {
		destroy_workqueue(rio_wq);
		goto no_disc;
	}

	n = 0;
	mutex_lock(&rio_mport_list_lock);
	list_for_each_entry(port, &rio_mports, node) {
		if (port->host_deviceid < 0 && port->nscan) {
			work[n].mport = port;
			INIT_WORK(&work[n].work, disc_work_handler);
			queue_work(rio_wq, &work[n].work);
			n++;
		}
	}

	flush_workqueue(rio_wq);
	mutex_unlock(&rio_mport_list_lock);
	pr_debug("RIO: destroy discovery workqueue\n");
	destroy_workqueue(rio_wq);
	kfree(work);

no_disc:
	return 0;
}
EXPORT_SYMBOL_GPL(rio_init_mports);

static int rio_get_hdid(int index)
{
	if (ids_num == 0 || ids_num <= index || index >= RIO_MAX_MPORTS)
		return -1;

	return hdid[index];
}

int rio_mport_initialize(struct rio_mport *mport)
{
	if (next_portid >= RIO_MAX_MPORTS) {
		pr_err("RIO: reached specified max number of mports\n");
		return -ENODEV;
	}

	atomic_set(&mport->state, RIO_DEVICE_INITIALIZING);
	mport->id = next_portid++;
	mport->host_deviceid = rio_get_hdid(mport->id);
	mport->nscan = NULL;
	mutex_init(&mport->lock);
	mport->pwe_refcnt = 0;
	INIT_LIST_HEAD(&mport->pwrites);

	return 0;
}
EXPORT_SYMBOL_GPL(rio_mport_initialize);

int rio_register_mport(struct rio_mport *port)
{
	struct rio_scan_node *scan = NULL;
	int res = 0;

	mutex_lock(&rio_mport_list_lock);

	/*
	 * Check if there are any registered enumeration/discovery operations
	 * that have to be attached to the added mport.
	 */
	list_for_each_entry(scan, &rio_scans, node) {
		if (port->id == scan->mport_id ||
		    scan->mport_id == RIO_MPORT_ANY) {
			port->nscan = scan->ops;
			if (port->id == scan->mport_id)
				break;
		}
	}

	list_add_tail(&port->node, &rio_mports);
	mutex_unlock(&rio_mport_list_lock);

	dev_set_name(&port->dev, "rapidio%d", port->id);
	port->dev.class = &rio_mport_class;
	atomic_set(&port->state, RIO_DEVICE_RUNNING);

	res = device_register(&port->dev);
	if (res) {
		dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
			port->id, res);
		mutex_lock(&rio_mport_list_lock);
		list_del(&port->node);
		mutex_unlock(&rio_mport_list_lock);
		put_device(&port->dev);
	} else {
		dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id);
	}

	return res;
}
EXPORT_SYMBOL_GPL(rio_register_mport);

static int rio_mport_cleanup_callback(struct device *dev, void *data)
{
	struct rio_dev *rdev = to_rio_dev(dev);

	if (dev->bus == &rio_bus_type)
		rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
	return 0;
}

static int rio_net_remove_children(struct rio_net *net)
{
	/*
	 * Unregister all RapidIO devices residing on this net (this will
	 * invoke notification of registered subsystem interfaces as well).
	 */
	device_for_each_child(&net->dev, NULL, rio_mport_cleanup_callback);
	return 0;
}

int rio_unregister_mport(struct rio_mport *port)
{
	pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id);

	/* Transition mport to the SHUTDOWN state */
	if (atomic_cmpxchg(&port->state,
			   RIO_DEVICE_RUNNING,
			   RIO_DEVICE_SHUTDOWN) != RIO_DEVICE_RUNNING) {
		pr_err("RIO: %s unexpected state transition for mport %s\n",
		       __func__, port->name);
	}

	if (port->net && port->net->hport == port) {
		rio_net_remove_children(port->net);
		rio_free_net(port->net);
	}

	/*
	 * Unregister all RapidIO devices attached to this mport (this will
	 * invoke notification of registered subsystem interfaces as well).
	 */
	mutex_lock(&rio_mport_list_lock);
	list_del(&port->node);
	mutex_unlock(&rio_mport_list_lock);

	device_unregister(&port->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(rio_unregister_mport);
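/*
 * [Editor's note] The sketch below is an illustrative addition and is NOT
 * part of the upstream drivers/rapidio/rio.c. It outlines, under stated
 * assumptions, how an mport driver's setup path might use the
 * rio_mport_initialize()/rio_register_mport() helpers defined above.
 * The names my_mport_setup, hwdev and hw_ops are hypothetical placeholders;
 * only the two rio_* calls are interfaces from this file. The block is
 * guarded by #if 0 so it is never built.
 */
#if 0
static int my_mport_setup(struct device *hwdev, struct rio_ops *hw_ops)
{
	struct rio_mport *mport;
	int rc;

	mport = devm_kzalloc(hwdev, sizeof(*mport), GFP_KERNEL);
	if (!mport)
		return -ENOMEM;

	/* Assigns the mport ID and host destID; inits lock and pwrite list */
	rc = rio_mport_initialize(mport);
	if (rc)
		return rc;

	mport->ops = hw_ops;		/* HW-specific operations (assumed) */
	mport->dev.parent = hwdev;

	/*
	 * Attaches any matching enumeration/discovery operations, adds the
	 * mport to the global list and registers its class device.
	 */
	return rio_register_mport(mport);
}
#endif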
linux-master
drivers/rapidio/rio.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rio_cm - RapidIO Channelized Messaging Driver
 *
 * Copyright 2013-2016 Integrated Device Technology, Inc.
 * Copyright (c) 2015, Prodrive Technologies
 * Copyright (c) 2015, RapidIO Trade Association
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/rio_cm_cdev.h>

#define DRV_NAME	"rio_cm"
#define DRV_VERSION	"1.0.0"
#define DRV_AUTHOR	"Alexandre Bounine <[email protected]>"
#define DRV_DESC	"RapidIO Channelized Messaging Driver"
#define DEV_NAME	"rio_cm"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_CHOP	= BIT(4), /* channel operations */
	DBG_WAIT	= BIT(5), /* waiting for events */
	DBG_TX		= BIT(6), /* message TX */
	DBG_TX_EVENT	= BIT(7), /* message TX event */
	DBG_RX_DATA	= BIT(8), /* inbound data messages */
	DBG_RX_CMD	= BIT(9), /* inbound REQ/ACK/NACK messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define riocm_debug(level, fmt, arg...) \
	do { \
		if (DBG_##level & dbg_level) \
			pr_debug(DRV_NAME ": %s " fmt "\n", \
				__func__, ##arg); \
	} while (0)
#else
#define riocm_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
#endif

#define riocm_warn(fmt, arg...) \
	pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)

#define riocm_error(fmt, arg...)
\ pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) static int cmbox = 1; module_param(cmbox, int, S_IRUGO); MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)"); static int chstart = 256; module_param(chstart, int, S_IRUGO); MODULE_PARM_DESC(chstart, "Start channel number for dynamic allocation (default 256)"); #ifdef DEBUG static u32 dbg_level = DBG_NONE; module_param(dbg_level, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); #define RIOCM_TX_RING_SIZE 128 #define RIOCM_RX_RING_SIZE 128 #define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */ #define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */ #define RIOCM_CHNUM_AUTO 0 #define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */ enum rio_cm_state { RIO_CM_IDLE, RIO_CM_CONNECT, RIO_CM_CONNECTED, RIO_CM_DISCONNECT, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN, RIO_CM_DESTROYING, }; enum rio_cm_pkt_type { RIO_CM_SYS = 0xaa, RIO_CM_CHAN = 0x55, }; enum rio_cm_chop { CM_CONN_REQ, CM_CONN_ACK, CM_CONN_CLOSE, CM_DATA_MSG, }; struct rio_ch_base_bhdr { u32 src_id; u32 dst_id; #define RIO_HDR_LETTER_MASK 0xffff0000 #define RIO_HDR_MBOX_MASK 0x0000ffff u8 src_mbox; u8 dst_mbox; u8 type; } __attribute__((__packed__)); struct rio_ch_chan_hdr { struct rio_ch_base_bhdr bhdr; u8 ch_op; u16 dst_ch; u16 src_ch; u16 msg_len; u16 rsrvd; } __attribute__((__packed__)); struct tx_req { struct list_head node; struct rio_dev *rdev; void *buffer; size_t len; }; struct cm_dev { struct list_head list; struct rio_mport *mport; void *rx_buf[RIOCM_RX_RING_SIZE]; int rx_slots; struct mutex rx_lock; void *tx_buf[RIOCM_TX_RING_SIZE]; int tx_slot; int tx_cnt; int tx_ack_slot; struct list_head tx_reqs; spinlock_t tx_lock; struct list_head peers; u32 npeers; struct workqueue_struct *rx_wq; struct work_struct rx_work; }; struct chan_rx_ring { void *buf[RIOCM_RX_RING_SIZE]; int head; int tail; int count; /* Tracking RX buffers reported to upper level */ void *inuse[RIOCM_RX_RING_SIZE]; int inuse_cnt; }; struct rio_channel { u16 id; /* local channel ID */ struct kref ref; /* channel refcount */ struct file *filp; struct cm_dev *cmdev; /* associated CM device object */ struct rio_dev *rdev; /* remote RapidIO device */ enum rio_cm_state state; int error; spinlock_t lock; void *context; u32 loc_destid; /* local destID */ u32 rem_destid; /* remote destID */ u16 rem_channel; /* remote channel ID */ struct list_head accept_queue; struct list_head ch_node; struct completion comp; struct completion comp_close; struct chan_rx_ring rx_ring; }; struct cm_peer { struct list_head node; struct rio_dev *rdev; }; struct rio_cm_work { struct work_struct work; struct cm_dev *cm; void *data; }; struct conn_req { struct list_head node; u32 destid; /* requester destID */ u16 chan; /* requester channel ID */ struct cm_dev *cmdev; }; /* * A channel_dev structure represents a CM_CDEV * @cdev Character device * @dev Associated device object */ struct channel_dev { struct cdev cdev; struct device *dev; }; static struct rio_channel *riocm_ch_alloc(u16 ch_num); static void riocm_ch_free(struct kref *ref); static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, void *buffer, size_t len); static int riocm_ch_close(struct rio_channel *ch); static DEFINE_SPINLOCK(idr_lock); static DEFINE_IDR(ch_idr); static LIST_HEAD(cm_dev_list); static DECLARE_RWSEM(rdev_sem); static struct class *dev_class; static 
unsigned int dev_major; static unsigned int dev_minor_base; static dev_t dev_number; static struct channel_dev riocm_cdev; #define is_msg_capable(src_ops, dst_ops) \ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ (dst_ops & RIO_DST_OPS_DATA_MSG)) #define dev_cm_capable(dev) \ is_msg_capable(dev->src_ops, dev->dst_ops) static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) { int ret; spin_lock_bh(&ch->lock); ret = (ch->state == cmp); spin_unlock_bh(&ch->lock); return ret; } static int riocm_cmp_exch(struct rio_channel *ch, enum rio_cm_state cmp, enum rio_cm_state exch) { int ret; spin_lock_bh(&ch->lock); ret = (ch->state == cmp); if (ret) ch->state = exch; spin_unlock_bh(&ch->lock); return ret; } static enum rio_cm_state riocm_exch(struct rio_channel *ch, enum rio_cm_state exch) { enum rio_cm_state old; spin_lock_bh(&ch->lock); old = ch->state; ch->state = exch; spin_unlock_bh(&ch->lock); return old; } static struct rio_channel *riocm_get_channel(u16 nr) { struct rio_channel *ch; spin_lock_bh(&idr_lock); ch = idr_find(&ch_idr, nr); if (ch) kref_get(&ch->ref); spin_unlock_bh(&idr_lock); return ch; } static void riocm_put_channel(struct rio_channel *ch) { kref_put(&ch->ref, riocm_ch_free); } static void *riocm_rx_get_msg(struct cm_dev *cm) { void *msg; int i; msg = rio_get_inb_message(cm->mport, cmbox); if (msg) { for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { if (cm->rx_buf[i] == msg) { cm->rx_buf[i] = NULL; cm->rx_slots++; break; } } if (i == RIOCM_RX_RING_SIZE) riocm_warn("no record for buffer 0x%p", msg); } return msg; } /* * riocm_rx_fill - fills a ring of receive buffers for given cm device * @cm: cm_dev object * @nent: max number of entries to fill * * Returns: none */ static void riocm_rx_fill(struct cm_dev *cm, int nent) { int i; if (cm->rx_slots == 0) return; for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { if (cm->rx_buf[i] == NULL) { cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); if (cm->rx_buf[i] == NULL) break; rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); cm->rx_slots--; nent--; } } } /* * riocm_rx_free - frees all receive buffers associated with given cm device * @cm: cm_dev object * * Returns: none */ static void riocm_rx_free(struct cm_dev *cm) { int i; for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { if (cm->rx_buf[i] != NULL) { kfree(cm->rx_buf[i]); cm->rx_buf[i] = NULL; } } } /* * riocm_req_handler - connection request handler * @cm: cm_dev object * @req_data: pointer to the request packet * * Returns: 0 if success, or * -EINVAL if channel is not in correct state, * -ENODEV if cannot find a channel with specified ID, * -ENOMEM if unable to allocate memory to store the request */ static int riocm_req_handler(struct cm_dev *cm, void *req_data) { struct rio_channel *ch; struct conn_req *req; struct rio_ch_chan_hdr *hh = req_data; u16 chnum; chnum = ntohs(hh->dst_ch); ch = riocm_get_channel(chnum); if (!ch) return -ENODEV; if (ch->state != RIO_CM_LISTEN) { riocm_debug(RX_CMD, "channel %d is not in listen state", chnum); riocm_put_channel(ch); return -EINVAL; } req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) { riocm_put_channel(ch); return -ENOMEM; } req->destid = ntohl(hh->bhdr.src_id); req->chan = ntohs(hh->src_ch); req->cmdev = cm; spin_lock_bh(&ch->lock); list_add_tail(&req->node, &ch->accept_queue); spin_unlock_bh(&ch->lock); complete(&ch->comp); riocm_put_channel(ch); return 0; } /* * riocm_resp_handler - response to connection request handler * @resp_data: pointer to the response packet * * Returns: 0 if success, or * -EINVAL if channel is 
not in correct state, * -ENODEV if cannot find a channel with specified ID, */ static int riocm_resp_handler(void *resp_data) { struct rio_channel *ch; struct rio_ch_chan_hdr *hh = resp_data; u16 chnum; chnum = ntohs(hh->dst_ch); ch = riocm_get_channel(chnum); if (!ch) return -ENODEV; if (ch->state != RIO_CM_CONNECT) { riocm_put_channel(ch); return -EINVAL; } riocm_exch(ch, RIO_CM_CONNECTED); ch->rem_channel = ntohs(hh->src_ch); complete(&ch->comp); riocm_put_channel(ch); return 0; } /* * riocm_close_handler - channel close request handler * @req_data: pointer to the request packet * * Returns: 0 if success, or * -ENODEV if cannot find a channel with specified ID, * + error codes returned by riocm_ch_close. */ static int riocm_close_handler(void *data) { struct rio_channel *ch; struct rio_ch_chan_hdr *hh = data; int ret; riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch)); spin_lock_bh(&idr_lock); ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); if (!ch) { spin_unlock_bh(&idr_lock); return -ENODEV; } idr_remove(&ch_idr, ch->id); spin_unlock_bh(&idr_lock); riocm_exch(ch, RIO_CM_DISCONNECT); ret = riocm_ch_close(ch); if (ret) riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret); return 0; } /* * rio_cm_handler - function that services request (non-data) packets * @cm: cm_dev object * @data: pointer to the packet */ static void rio_cm_handler(struct cm_dev *cm, void *data) { struct rio_ch_chan_hdr *hdr; if (!rio_mport_is_running(cm->mport)) goto out; hdr = data; riocm_debug(RX_CMD, "OP=%x for ch=%d from %d", hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch)); switch (hdr->ch_op) { case CM_CONN_REQ: riocm_req_handler(cm, data); break; case CM_CONN_ACK: riocm_resp_handler(data); break; case CM_CONN_CLOSE: riocm_close_handler(data); break; default: riocm_error("Invalid packet header"); break; } out: kfree(data); } /* * rio_rx_data_handler - received data packet handler * @cm: cm_dev object * @buf: data packet * * Returns: 0 if success, or * -ENODEV if cannot find a channel with specified ID, * -EIO if channel is not in CONNECTED state, * -ENOMEM if channel RX queue is full (packet discarded) */ static int rio_rx_data_handler(struct cm_dev *cm, void *buf) { struct rio_ch_chan_hdr *hdr; struct rio_channel *ch; hdr = buf; riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch)); ch = riocm_get_channel(ntohs(hdr->dst_ch)); if (!ch) { /* Discard data message for non-existing channel */ kfree(buf); return -ENODEV; } /* Place pointer to the buffer into channel's RX queue */ spin_lock(&ch->lock); if (ch->state != RIO_CM_CONNECTED) { /* Channel is not ready to receive data, discard a packet */ riocm_debug(RX_DATA, "ch=%d is in wrong state=%d", ch->id, ch->state); spin_unlock(&ch->lock); kfree(buf); riocm_put_channel(ch); return -EIO; } if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { /* If RX ring is full, discard a packet */ riocm_debug(RX_DATA, "ch=%d is full", ch->id); spin_unlock(&ch->lock); kfree(buf); riocm_put_channel(ch); return -ENOMEM; } ch->rx_ring.buf[ch->rx_ring.head] = buf; ch->rx_ring.head++; ch->rx_ring.count++; ch->rx_ring.head %= RIOCM_RX_RING_SIZE; complete(&ch->comp); spin_unlock(&ch->lock); riocm_put_channel(ch); return 0; } /* * rio_ibmsg_handler - inbound message packet handler */ static void rio_ibmsg_handler(struct work_struct *work) { struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); void *data; struct rio_ch_chan_hdr *hdr; if (!rio_mport_is_running(cm->mport)) return; while (1) { mutex_lock(&cm->rx_lock); data = riocm_rx_get_msg(cm); if (data) riocm_rx_fill(cm, 1); 
mutex_unlock(&cm->rx_lock); if (data == NULL) break; hdr = data; if (hdr->bhdr.type != RIO_CM_CHAN) { /* For now simply discard packets other than channel */ riocm_error("Unsupported TYPE code (0x%x). Msg dropped", hdr->bhdr.type); kfree(data); continue; } /* Process a channel message */ if (hdr->ch_op == CM_DATA_MSG) rio_rx_data_handler(cm, data); else rio_cm_handler(cm, data); } } static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) { struct cm_dev *cm = dev_id; if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) queue_work(cm->rx_wq, &cm->rx_work); } /* * rio_txcq_handler - TX completion handler * @cm: cm_dev object * @slot: TX queue slot * * TX completion handler also ensures that pending request packets are placed * into transmit queue as soon as a free slot becomes available. This is done * to give higher priority to request packets during high intensity data flow. */ static void rio_txcq_handler(struct cm_dev *cm, int slot) { int ack_slot; /* ATTN: Add TX completion notification if/when direct buffer * transfer is implemented. At this moment only correct tracking * of tx_count is important. */ riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d", cm->mport->id, slot, cm->tx_cnt); spin_lock(&cm->tx_lock); ack_slot = cm->tx_ack_slot; if (ack_slot == slot) riocm_debug(TX_EVENT, "slot == ack_slot"); while (cm->tx_cnt && ((ack_slot != slot) || (cm->tx_cnt == RIOCM_TX_RING_SIZE))) { cm->tx_buf[ack_slot] = NULL; ++ack_slot; ack_slot &= (RIOCM_TX_RING_SIZE - 1); cm->tx_cnt--; } if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE) riocm_error("tx_cnt %d out of sync", cm->tx_cnt); WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE)); cm->tx_ack_slot = ack_slot; /* * If there are pending requests, insert them into transmit queue */ if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) { struct tx_req *req, *_req; int rc; list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) { list_del(&req->node); cm->tx_buf[cm->tx_slot] = req->buffer; rc = rio_add_outb_message(cm->mport, req->rdev, cmbox, req->buffer, req->len); kfree(req->buffer); kfree(req); ++cm->tx_cnt; ++cm->tx_slot; cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); if (cm->tx_cnt == RIOCM_TX_RING_SIZE) break; } } spin_unlock(&cm->tx_lock); } static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) { struct cm_dev *cm = dev_id; if (cm && rio_mport_is_running(cm->mport)) rio_txcq_handler(cm, slot); } static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, void *buffer, size_t len) { unsigned long flags; struct tx_req *treq; treq = kzalloc(sizeof(*treq), GFP_KERNEL); if (treq == NULL) return -ENOMEM; treq->rdev = rdev; treq->buffer = buffer; treq->len = len; spin_lock_irqsave(&cm->tx_lock, flags); list_add_tail(&treq->node, &cm->tx_reqs); spin_unlock_irqrestore(&cm->tx_lock, flags); return 0; } /* * riocm_post_send - helper function that places packet into msg TX queue * @cm: cm_dev object * @rdev: target RapidIO device object (required by outbound msg interface) * @buffer: pointer to a packet buffer to send * @len: length of data to transfer * @req: request priority flag * * Returns: 0 if success, or error code otherwise. 
*/ static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, void *buffer, size_t len) { int rc; unsigned long flags; spin_lock_irqsave(&cm->tx_lock, flags); if (cm->mport == NULL) { rc = -ENODEV; goto err_out; } if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { riocm_debug(TX, "Tx Queue is full"); rc = -EBUSY; goto err_out; } cm->tx_buf[cm->tx_slot] = buffer; rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len); riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d", buffer, rdev->destid, cm->tx_slot, cm->tx_cnt); ++cm->tx_cnt; ++cm->tx_slot; cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); err_out: spin_unlock_irqrestore(&cm->tx_lock, flags); return rc; } /* * riocm_ch_send - sends a data packet to a remote device * @ch_id: local channel ID * @buf: pointer to a data buffer to send (including CM header) * @len: length of data to transfer (including CM header) * * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET * * Returns: 0 if success, or * -EINVAL if one or more input parameters is/are not valid, * -ENODEV if cannot find a channel with specified ID, * -EAGAIN if a channel is not in CONNECTED state, * + error codes returned by HW send routine. */ static int riocm_ch_send(u16 ch_id, void *buf, int len) { struct rio_channel *ch; struct rio_ch_chan_hdr *hdr; int ret; if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE) return -EINVAL; ch = riocm_get_channel(ch_id); if (!ch) { riocm_error("%s(%d) ch_%d not found", current->comm, task_pid_nr(current), ch_id); return -ENODEV; } if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { ret = -EAGAIN; goto err_out; } /* * Fill buffer header section with corresponding channel data */ hdr = buf; hdr->bhdr.src_id = htonl(ch->loc_destid); hdr->bhdr.dst_id = htonl(ch->rem_destid); hdr->bhdr.src_mbox = cmbox; hdr->bhdr.dst_mbox = cmbox; hdr->bhdr.type = RIO_CM_CHAN; hdr->ch_op = CM_DATA_MSG; hdr->dst_ch = htons(ch->rem_channel); hdr->src_ch = htons(ch->id); hdr->msg_len = htons((u16)len); /* ATTN: the function call below relies on the fact that underlying * HW-specific add_outb_message() routine copies TX data into its own * internal transfer buffer (true for all RIONET compatible mport * drivers). Must be reviewed if mport driver uses the buffer directly. */ ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len); if (ret) riocm_debug(TX, "ch %d send_err=%d", ch->id, ret); err_out: riocm_put_channel(ch); return ret; } static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) { int i, ret = -EINVAL; spin_lock_bh(&ch->lock); for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { if (ch->rx_ring.inuse[i] == buf) { ch->rx_ring.inuse[i] = NULL; ch->rx_ring.inuse_cnt--; ret = 0; break; } } spin_unlock_bh(&ch->lock); if (!ret) kfree(buf); return ret; } /* * riocm_ch_receive - fetch a data packet received for the specified channel * @ch: local channel ID * @buf: pointer to a packet buffer * @timeout: timeout to wait for incoming packet (in jiffies) * * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of: * -EAGAIN if a channel is not in CONNECTED state, * -ENOMEM if in-use tracking queue is full, * -ETIME if wait timeout expired, * -EINTR if wait was interrupted. */ static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout) { void *rxmsg = NULL; int i, ret = 0; long wret; if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { ret = -EAGAIN; goto out; } if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { /* If we do not have entries to track buffers given to upper * layer, reject request. 
*/ ret = -ENOMEM; goto out; } wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret); if (!wret) ret = -ETIME; else if (wret == -ERESTARTSYS) ret = -EINTR; else ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET; if (ret) goto out; spin_lock_bh(&ch->lock); rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; ch->rx_ring.buf[ch->rx_ring.tail] = NULL; ch->rx_ring.count--; ch->rx_ring.tail++; ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; ret = -ENOMEM; for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { if (ch->rx_ring.inuse[i] == NULL) { ch->rx_ring.inuse[i] = rxmsg; ch->rx_ring.inuse_cnt++; ret = 0; break; } } if (ret) { /* We have no entry to store pending message: drop it */ kfree(rxmsg); rxmsg = NULL; } spin_unlock_bh(&ch->lock); out: *buf = rxmsg; return ret; } /* * riocm_ch_connect - sends a connect request to a remote device * @loc_ch: local channel ID * @cm: CM device to send connect request * @peer: target RapidIO device * @rem_ch: remote channel ID * * Returns: 0 if success, or * -EINVAL if the channel is not in IDLE state, * -EAGAIN if no connection request available immediately, * -ETIME if ACK response timeout expired, * -EINTR if wait for response was interrupted. */ static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm, struct cm_peer *peer, u16 rem_ch) { struct rio_channel *ch = NULL; struct rio_ch_chan_hdr *hdr; int ret; long wret; ch = riocm_get_channel(loc_ch); if (!ch) return -ENODEV; if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) { ret = -EINVAL; goto conn_done; } ch->cmdev = cm; ch->rdev = peer->rdev; ch->context = NULL; ch->loc_destid = cm->mport->host_deviceid; ch->rem_channel = rem_ch; /* * Send connect request to the remote RapidIO device */ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); if (hdr == NULL) { ret = -ENOMEM; goto conn_done; } hdr->bhdr.src_id = htonl(ch->loc_destid); hdr->bhdr.dst_id = htonl(peer->rdev->destid); hdr->bhdr.src_mbox = cmbox; hdr->bhdr.dst_mbox = cmbox; hdr->bhdr.type = RIO_CM_CHAN; hdr->ch_op = CM_CONN_REQ; hdr->dst_ch = htons(rem_ch); hdr->src_ch = htons(loc_ch); /* ATTN: the function call below relies on the fact that underlying * HW-specific add_outb_message() routine copies TX data into its * internal transfer buffer. Must be reviewed if mport driver uses * this buffer directly. */ ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr)); if (ret != -EBUSY) { kfree(hdr); } else { ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr)); if (ret) kfree(hdr); } if (ret) { riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE); goto conn_done; } /* Wait for connect response from the remote device */ wret = wait_for_completion_interruptible_timeout(&ch->comp, RIOCM_CONNECT_TO * HZ); riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); if (!wret) ret = -ETIME; else if (wret == -ERESTARTSYS) ret = -EINTR; else ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1; conn_done: riocm_put_channel(ch); return ret; } static int riocm_send_ack(struct rio_channel *ch) { struct rio_ch_chan_hdr *hdr; int ret; hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); if (hdr == NULL) return -ENOMEM; hdr->bhdr.src_id = htonl(ch->loc_destid); hdr->bhdr.dst_id = htonl(ch->rem_destid); hdr->dst_ch = htons(ch->rem_channel); hdr->src_ch = htons(ch->id); hdr->bhdr.src_mbox = cmbox; hdr->bhdr.dst_mbox = cmbox; hdr->bhdr.type = RIO_CM_CHAN; hdr->ch_op = CM_CONN_ACK; /* ATTN: the function call below relies on the fact that underlying * add_outb_message() routine copies TX data into its internal transfer * buffer. 
Review if switching to direct buffer version. */ ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev, hdr, sizeof(*hdr))) return 0; kfree(hdr); if (ret) riocm_error("send ACK to ch_%d on %s failed (ret=%d)", ch->id, rio_name(ch->rdev), ret); return ret; } /* * riocm_ch_accept - accept incoming connection request * @ch_id: channel ID * @new_ch_id: local mport device * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection * request is not available). * * Returns: pointer to new channel struct if success, or error-valued pointer: * -ENODEV - cannot find specified channel or mport, * -EINVAL - the channel is not in IDLE state, * -EAGAIN - no connection request available immediately (timeout=0), * -ENOMEM - unable to allocate new channel, * -ETIME - wait timeout expired, * -EINTR - wait was interrupted. */ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, long timeout) { struct rio_channel *ch; struct rio_channel *new_ch; struct conn_req *req; struct cm_peer *peer; int found = 0; int err = 0; long wret; ch = riocm_get_channel(ch_id); if (!ch) return ERR_PTR(-EINVAL); if (!riocm_cmp(ch, RIO_CM_LISTEN)) { err = -EINVAL; goto err_put; } /* Don't sleep if this is a non blocking call */ if (!timeout) { if (!try_wait_for_completion(&ch->comp)) { err = -EAGAIN; goto err_put; } } else { riocm_debug(WAIT, "on %d", ch->id); wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); if (!wret) { err = -ETIME; goto err_put; } else if (wret == -ERESTARTSYS) { err = -EINTR; goto err_put; } } spin_lock_bh(&ch->lock); if (ch->state != RIO_CM_LISTEN) { err = -ECANCELED; } else if (list_empty(&ch->accept_queue)) { riocm_debug(WAIT, "on %d accept_queue is empty on completion", ch->id); err = -EIO; } spin_unlock_bh(&ch->lock); if (err) { riocm_debug(WAIT, "on %d returns %d", ch->id, err); goto err_put; } /* Create new channel for this connection */ new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO); if (IS_ERR(new_ch)) { riocm_error("failed to get channel for new req (%ld)", PTR_ERR(new_ch)); err = -ENOMEM; goto err_put; } spin_lock_bh(&ch->lock); req = list_first_entry(&ch->accept_queue, struct conn_req, node); list_del(&req->node); new_ch->cmdev = ch->cmdev; new_ch->loc_destid = ch->loc_destid; new_ch->rem_destid = req->destid; new_ch->rem_channel = req->chan; spin_unlock_bh(&ch->lock); riocm_put_channel(ch); ch = NULL; kfree(req); down_read(&rdev_sem); /* Find requester's device object */ list_for_each_entry(peer, &new_ch->cmdev->peers, node) { if (peer->rdev->destid == new_ch->rem_destid) { riocm_debug(RX_CMD, "found matching device(%s)", rio_name(peer->rdev)); found = 1; break; } } up_read(&rdev_sem); if (!found) { /* If peer device object not found, simply ignore the request */ err = -ENODEV; goto err_put_new_ch; } new_ch->rdev = peer->rdev; new_ch->state = RIO_CM_CONNECTED; spin_lock_init(&new_ch->lock); /* Acknowledge the connection request. */ riocm_send_ack(new_ch); *new_ch_id = new_ch->id; return new_ch; err_put_new_ch: spin_lock_bh(&idr_lock); idr_remove(&ch_idr, new_ch->id); spin_unlock_bh(&idr_lock); riocm_put_channel(new_ch); err_put: if (ch) riocm_put_channel(ch); *new_ch_id = 0; return ERR_PTR(err); } /* * riocm_ch_listen - puts a channel into LISTEN state * @ch_id: channel ID * * Returns: 0 if success, or * -EINVAL if the specified channel does not exists or * is not in CHAN_BOUND state. 
*/ static int riocm_ch_listen(u16 ch_id) { struct rio_channel *ch = NULL; int ret = 0; riocm_debug(CHOP, "(ch_%d)", ch_id); ch = riocm_get_channel(ch_id); if (!ch) return -EINVAL; if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) ret = -EINVAL; riocm_put_channel(ch); return ret; } /* * riocm_ch_bind - associate a channel object and an mport device * @ch_id: channel ID * @mport_id: local mport device ID * @context: pointer to the additional caller's context * * Returns: 0 if success, or * -ENODEV if cannot find specified mport, * -EINVAL if the specified channel does not exist or * is not in IDLE state. */ static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context) { struct rio_channel *ch = NULL; struct cm_dev *cm; int rc = -ENODEV; riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id); /* Find matching cm_dev object */ down_read(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) { if ((cm->mport->id == mport_id) && rio_mport_is_running(cm->mport)) { rc = 0; break; } } if (rc) goto exit; ch = riocm_get_channel(ch_id); if (!ch) { rc = -EINVAL; goto exit; } spin_lock_bh(&ch->lock); if (ch->state != RIO_CM_IDLE) { spin_unlock_bh(&ch->lock); rc = -EINVAL; goto err_put; } ch->cmdev = cm; ch->loc_destid = cm->mport->host_deviceid; ch->context = context; ch->state = RIO_CM_CHAN_BOUND; spin_unlock_bh(&ch->lock); err_put: riocm_put_channel(ch); exit: up_read(&rdev_sem); return rc; } /* * riocm_ch_alloc - channel object allocation helper routine * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) * * Return value: pointer to newly created channel object, * or error-valued pointer */ static struct rio_channel *riocm_ch_alloc(u16 ch_num) { int id; int start, end; struct rio_channel *ch; ch = kzalloc(sizeof(*ch), GFP_KERNEL); if (!ch) return ERR_PTR(-ENOMEM); if (ch_num) { /* If requested, try to obtain the specified channel ID */ start = ch_num; end = ch_num + 1; } else { /* Obtain channel ID from the dynamic allocation range */ start = chstart; end = RIOCM_MAX_CHNUM + 1; } idr_preload(GFP_KERNEL); spin_lock_bh(&idr_lock); id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT); spin_unlock_bh(&idr_lock); idr_preload_end(); if (id < 0) { kfree(ch); return ERR_PTR(id == -ENOSPC ? -EBUSY : id); } ch->id = (u16)id; ch->state = RIO_CM_IDLE; spin_lock_init(&ch->lock); INIT_LIST_HEAD(&ch->accept_queue); INIT_LIST_HEAD(&ch->ch_node); init_completion(&ch->comp); init_completion(&ch->comp_close); kref_init(&ch->ref); ch->rx_ring.head = 0; ch->rx_ring.tail = 0; ch->rx_ring.count = 0; ch->rx_ring.inuse_cnt = 0; return ch; } /* * riocm_ch_create - creates a new channel object and allocates ID for it * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) * * Allocates and initializes a new channel object. If the parameter ch_num > 0 * and is within the valid range, riocm_ch_create tries to allocate the * specified ID for the new channel. If ch_num = 0, channel ID will be assigned * automatically from the range (chstart ... RIOCM_MAX_CHNUM). * Module parameter 'chstart' defines start of an ID range available for dynamic * allocation. Range below 'chstart' is reserved for pre-defined ID numbers. * Available channel numbers are limited by 16-bit size of channel numbers used * in the packet header. * * Return value: PTR to rio_channel structure if successful (with channel number * updated via pointer) or error-valued pointer if error. 
*/ static struct rio_channel *riocm_ch_create(u16 *ch_num) { struct rio_channel *ch = NULL; ch = riocm_ch_alloc(*ch_num); if (IS_ERR(ch)) riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)", *ch_num, PTR_ERR(ch)); else *ch_num = ch->id; return ch; } /* * riocm_ch_free - channel object release routine * @ref: pointer to a channel's kref structure */ static void riocm_ch_free(struct kref *ref) { struct rio_channel *ch = container_of(ref, struct rio_channel, ref); int i; riocm_debug(CHOP, "(ch_%d)", ch->id); if (ch->rx_ring.inuse_cnt) { for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) { if (ch->rx_ring.inuse[i] != NULL) { kfree(ch->rx_ring.inuse[i]); ch->rx_ring.inuse_cnt--; } } } if (ch->rx_ring.count) for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) { if (ch->rx_ring.buf[i] != NULL) { kfree(ch->rx_ring.buf[i]); ch->rx_ring.count--; } } complete(&ch->comp_close); } static int riocm_send_close(struct rio_channel *ch) { struct rio_ch_chan_hdr *hdr; int ret; /* * Send CH_CLOSE notification to the remote RapidIO device */ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); if (hdr == NULL) return -ENOMEM; hdr->bhdr.src_id = htonl(ch->loc_destid); hdr->bhdr.dst_id = htonl(ch->rem_destid); hdr->bhdr.src_mbox = cmbox; hdr->bhdr.dst_mbox = cmbox; hdr->bhdr.type = RIO_CM_CHAN; hdr->ch_op = CM_CONN_CLOSE; hdr->dst_ch = htons(ch->rem_channel); hdr->src_ch = htons(ch->id); /* ATTN: the function call below relies on the fact that underlying * add_outb_message() routine copies TX data into its internal transfer * buffer. Needs to be reviewed if switched to direct buffer mode. */ ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev, hdr, sizeof(*hdr))) return 0; kfree(hdr); if (ret) riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret); return ret; } /* * riocm_ch_close - closes a channel object with specified ID (by local request) * @ch: channel to be closed */ static int riocm_ch_close(struct rio_channel *ch) { unsigned long tmo = msecs_to_jiffies(3000); enum rio_cm_state state; long wret; int ret = 0; riocm_debug(CHOP, "ch_%d by %s(%d)", ch->id, current->comm, task_pid_nr(current)); state = riocm_exch(ch, RIO_CM_DESTROYING); if (state == RIO_CM_CONNECTED) riocm_send_close(ch); complete_all(&ch->comp); riocm_put_channel(ch); wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo); riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); if (wret == 0) { /* Timeout on wait occurred */ riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d", current->comm, task_pid_nr(current), ch->id); ret = -ETIMEDOUT; } else if (wret == -ERESTARTSYS) { /* Wait_for_completion was interrupted by a signal */ riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted", current->comm, task_pid_nr(current), ch->id); ret = -EINTR; } if (!ret) { riocm_debug(CHOP, "ch_%d resources released", ch->id); kfree(ch); } else { riocm_debug(CHOP, "failed to release ch_%d resources", ch->id); } return ret; } /* * riocm_cdev_open() - Open character device */ static int riocm_cdev_open(struct inode *inode, struct file *filp) { riocm_debug(INIT, "by %s(%d) filp=%p ", current->comm, task_pid_nr(current), filp); if (list_empty(&cm_dev_list)) return -ENODEV; return 0; } /* * riocm_cdev_release() - Release character device */ static int riocm_cdev_release(struct inode *inode, struct file *filp) { struct rio_channel *ch, *_c; unsigned int i; LIST_HEAD(list); riocm_debug(EXIT, "by %s(%d) filp=%p", current->comm, 
task_pid_nr(current), filp); /* Check if there are channels associated with this file descriptor */ spin_lock_bh(&idr_lock); idr_for_each_entry(&ch_idr, ch, i) { if (ch && ch->filp == filp) { riocm_debug(EXIT, "ch_%d not released by %s(%d)", ch->id, current->comm, task_pid_nr(current)); idr_remove(&ch_idr, ch->id); list_add(&ch->ch_node, &list); } } spin_unlock_bh(&idr_lock); if (!list_empty(&list)) { list_for_each_entry_safe(ch, _c, &list, ch_node) { list_del(&ch->ch_node); riocm_ch_close(ch); } } return 0; } /* * cm_ep_get_list_size() - Reports number of endpoints in the network */ static int cm_ep_get_list_size(void __user *arg) { u32 __user *p = arg; u32 mport_id; u32 count = 0; struct cm_dev *cm; if (get_user(mport_id, p)) return -EFAULT; if (mport_id >= RIO_MAX_MPORTS) return -EINVAL; /* Find a matching cm_dev object */ down_read(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) { if (cm->mport->id == mport_id) { count = cm->npeers; up_read(&rdev_sem); if (copy_to_user(arg, &count, sizeof(u32))) return -EFAULT; return 0; } } up_read(&rdev_sem); return -ENODEV; } /* * cm_ep_get_list() - Returns list of attached endpoints */ static int cm_ep_get_list(void __user *arg) { struct cm_dev *cm; struct cm_peer *peer; u32 info[2]; void *buf; u32 nent; u32 *entry_ptr; u32 i = 0; int ret = 0; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT) return -EINVAL; /* Find a matching cm_dev object */ down_read(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) if (cm->mport->id == (u8)info[1]) goto found; up_read(&rdev_sem); return -ENODEV; found: nent = min(info[0], cm->npeers); buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); if (!buf) { up_read(&rdev_sem); return -ENOMEM; } entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32)); list_for_each_entry(peer, &cm->peers, node) { *entry_ptr = (u32)peer->rdev->destid; entry_ptr++; if (++i == nent) break; } up_read(&rdev_sem); ((u32 *)buf)[0] = i; /* report an updated number of entries */ ((u32 *)buf)[1] = info[1]; /* put back an mport ID */ if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2))) ret = -EFAULT; kfree(buf); return ret; } /* * cm_mport_get_list() - Returns list of available local mport devices */ static int cm_mport_get_list(void __user *arg) { int ret = 0; u32 entries; void *buf; struct cm_dev *cm; u32 *entry_ptr; int count = 0; if (copy_from_user(&entries, arg, sizeof(entries))) return -EFAULT; if (entries == 0 || entries > RIO_MAX_MPORTS) return -EINVAL; buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; /* Scan all registered cm_dev objects */ entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32)); down_read(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) { if (count++ < entries) { *entry_ptr = (cm->mport->id << 16) | cm->mport->host_deviceid; entry_ptr++; } } up_read(&rdev_sem); *((u32 *)buf) = count; /* report a real number of entries */ if (copy_to_user(arg, buf, sizeof(u32) * (count + 1))) ret = -EFAULT; kfree(buf); return ret; } /* * cm_chan_create() - Create a message exchange channel */ static int cm_chan_create(struct file *filp, void __user *arg) { u16 __user *p = arg; u16 ch_num; struct rio_channel *ch; if (get_user(ch_num, p)) return -EFAULT; riocm_debug(CHOP, "ch_%d requested by %s(%d)", ch_num, current->comm, task_pid_nr(current)); ch = riocm_ch_create(&ch_num); if (IS_ERR(ch)) return PTR_ERR(ch); ch->filp = filp; riocm_debug(CHOP, "ch_%d created by %s(%d)", ch_num, current->comm, task_pid_nr(current)); return 
put_user(ch_num, p); } /* * cm_chan_close() - Close channel * @filp: Pointer to file object * @arg: Channel to close */ static int cm_chan_close(struct file *filp, void __user *arg) { u16 __user *p = arg; u16 ch_num; struct rio_channel *ch; if (get_user(ch_num, p)) return -EFAULT; riocm_debug(CHOP, "ch_%d by %s(%d)", ch_num, current->comm, task_pid_nr(current)); spin_lock_bh(&idr_lock); ch = idr_find(&ch_idr, ch_num); if (!ch) { spin_unlock_bh(&idr_lock); return 0; } if (ch->filp != filp) { spin_unlock_bh(&idr_lock); return -EINVAL; } idr_remove(&ch_idr, ch->id); spin_unlock_bh(&idr_lock); return riocm_ch_close(ch); } /* * cm_chan_bind() - Bind channel * @arg: Channel number */ static int cm_chan_bind(void __user *arg) { struct rio_cm_channel chan; if (copy_from_user(&chan, arg, sizeof(chan))) return -EFAULT; if (chan.mport_id >= RIO_MAX_MPORTS) return -EINVAL; return riocm_ch_bind(chan.id, chan.mport_id, NULL); } /* * cm_chan_listen() - Listen on channel * @arg: Channel number */ static int cm_chan_listen(void __user *arg) { u16 __user *p = arg; u16 ch_num; if (get_user(ch_num, p)) return -EFAULT; return riocm_ch_listen(ch_num); } /* * cm_chan_accept() - Accept incoming connection * @filp: Pointer to file object * @arg: Channel number */ static int cm_chan_accept(struct file *filp, void __user *arg) { struct rio_cm_accept param; long accept_to; struct rio_channel *ch; if (copy_from_user(&param, arg, sizeof(param))) return -EFAULT; riocm_debug(CHOP, "on ch_%d by %s(%d)", param.ch_num, current->comm, task_pid_nr(current)); accept_to = param.wait_to ? msecs_to_jiffies(param.wait_to) : 0; ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to); if (IS_ERR(ch)) return PTR_ERR(ch); ch->filp = filp; riocm_debug(CHOP, "new ch_%d for %s(%d)", ch->id, current->comm, task_pid_nr(current)); if (copy_to_user(arg, &param, sizeof(param))) return -EFAULT; return 0; } /* * cm_chan_connect() - Connect on channel * @arg: Channel information */ static int cm_chan_connect(void __user *arg) { struct rio_cm_channel chan; struct cm_dev *cm; struct cm_peer *peer; int ret = -ENODEV; if (copy_from_user(&chan, arg, sizeof(chan))) return -EFAULT; if (chan.mport_id >= RIO_MAX_MPORTS) return -EINVAL; down_read(&rdev_sem); /* Find matching cm_dev object */ list_for_each_entry(cm, &cm_dev_list, list) { if (cm->mport->id == chan.mport_id) { ret = 0; break; } } if (ret) goto err_out; if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) { ret = -EINVAL; goto err_out; } /* Find corresponding RapidIO endpoint device object */ ret = -ENODEV; list_for_each_entry(peer, &cm->peers, node) { if (peer->rdev->destid == chan.remote_destid) { ret = 0; break; } } if (ret) goto err_out; up_read(&rdev_sem); return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel); err_out: up_read(&rdev_sem); return ret; } /* * cm_chan_msg_send() - Send a message through channel * @arg: Outbound message information */ static int cm_chan_msg_send(void __user *arg) { struct rio_cm_msg msg; void *buf; int ret; if (copy_from_user(&msg, arg, sizeof(msg))) return -EFAULT; if (msg.size > RIO_MAX_MSG_SIZE) return -EINVAL; buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size); if (IS_ERR(buf)) return PTR_ERR(buf); ret = riocm_ch_send(msg.ch_num, buf, msg.size); kfree(buf); return ret; } /* * cm_chan_msg_rcv() - Receive a message through channel * @arg: Inbound message information */ static int cm_chan_msg_rcv(void __user *arg) { struct rio_cm_msg msg; struct rio_channel *ch; void *buf; long rxto; int ret = 0, msg_size; if 
(copy_from_user(&msg, arg, sizeof(msg))) return -EFAULT; if (msg.ch_num == 0 || msg.size == 0) return -EINVAL; ch = riocm_get_channel(msg.ch_num); if (!ch) return -ENODEV; rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT; ret = riocm_ch_receive(ch, &buf, rxto); if (ret) goto out; msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE)); if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size)) ret = -EFAULT; riocm_ch_free_rxbuf(ch, buf); out: riocm_put_channel(ch); return ret; } /* * riocm_cdev_ioctl() - IOCTL requests handler */ static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case RIO_CM_EP_GET_LIST_SIZE: return cm_ep_get_list_size((void __user *)arg); case RIO_CM_EP_GET_LIST: return cm_ep_get_list((void __user *)arg); case RIO_CM_CHAN_CREATE: return cm_chan_create(filp, (void __user *)arg); case RIO_CM_CHAN_CLOSE: return cm_chan_close(filp, (void __user *)arg); case RIO_CM_CHAN_BIND: return cm_chan_bind((void __user *)arg); case RIO_CM_CHAN_LISTEN: return cm_chan_listen((void __user *)arg); case RIO_CM_CHAN_ACCEPT: return cm_chan_accept(filp, (void __user *)arg); case RIO_CM_CHAN_CONNECT: return cm_chan_connect((void __user *)arg); case RIO_CM_CHAN_SEND: return cm_chan_msg_send((void __user *)arg); case RIO_CM_CHAN_RECEIVE: return cm_chan_msg_rcv((void __user *)arg); case RIO_CM_MPORT_GET_LIST: return cm_mport_get_list((void __user *)arg); default: break; } return -EINVAL; } static const struct file_operations riocm_cdev_fops = { .owner = THIS_MODULE, .open = riocm_cdev_open, .release = riocm_cdev_release, .unlocked_ioctl = riocm_cdev_ioctl, }; /* * riocm_add_dev - add new remote RapidIO device into channel management core * @dev: device object associated with RapidIO device * @sif: subsystem interface * * Adds the specified RapidIO device (if applicable) into peers list of * the corresponding channel management device (cm_dev). */ static int riocm_add_dev(struct device *dev, struct subsys_interface *sif) { struct cm_peer *peer; struct rio_dev *rdev = to_rio_dev(dev); struct cm_dev *cm; /* Check if the remote device has capabilities required to support CM */ if (!dev_cm_capable(rdev)) return 0; riocm_debug(RDEV, "(%s)", rio_name(rdev)); peer = kmalloc(sizeof(*peer), GFP_KERNEL); if (!peer) return -ENOMEM; /* Find a corresponding cm_dev object */ down_write(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) { if (cm->mport == rdev->net->hport) goto found; } up_write(&rdev_sem); kfree(peer); return -ENODEV; found: peer->rdev = rdev; list_add_tail(&peer->node, &cm->peers); cm->npeers++; up_write(&rdev_sem); return 0; } /* * riocm_remove_dev - remove remote RapidIO device from channel management core * @dev: device object associated with RapidIO device * @sif: subsystem interface * * Removes the specified RapidIO device (if applicable) from peers list of * the corresponding channel management device (cm_dev). 
*/ static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif) { struct rio_dev *rdev = to_rio_dev(dev); struct cm_dev *cm; struct cm_peer *peer; struct rio_channel *ch, *_c; unsigned int i; bool found = false; LIST_HEAD(list); /* Check if the remote device has capabilities required to support CM */ if (!dev_cm_capable(rdev)) return; riocm_debug(RDEV, "(%s)", rio_name(rdev)); /* Find matching cm_dev object */ down_write(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) { if (cm->mport == rdev->net->hport) { found = true; break; } } if (!found) { up_write(&rdev_sem); return; } /* Remove remote device from the list of peers */ found = false; list_for_each_entry(peer, &cm->peers, node) { if (peer->rdev == rdev) { riocm_debug(RDEV, "removing peer %s", rio_name(rdev)); found = true; list_del(&peer->node); cm->npeers--; kfree(peer); break; } } up_write(&rdev_sem); if (!found) return; /* * Release channels associated with this peer */ spin_lock_bh(&idr_lock); idr_for_each_entry(&ch_idr, ch, i) { if (ch && ch->rdev == rdev) { if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN) riocm_exch(ch, RIO_CM_DISCONNECT); idr_remove(&ch_idr, ch->id); list_add(&ch->ch_node, &list); } } spin_unlock_bh(&idr_lock); if (!list_empty(&list)) { list_for_each_entry_safe(ch, _c, &list, ch_node) { list_del(&ch->ch_node); riocm_ch_close(ch); } } } /* * riocm_cdev_add() - Create rio_cm char device * @devno: device number assigned to device (MAJ + MIN) */ static int riocm_cdev_add(dev_t devno) { int ret; cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops); riocm_cdev.cdev.owner = THIS_MODULE; ret = cdev_add(&riocm_cdev.cdev, devno, 1); if (ret < 0) { riocm_error("Cannot register a device with error %d", ret); return ret; } riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME); if (IS_ERR(riocm_cdev.dev)) { cdev_del(&riocm_cdev.cdev); return PTR_ERR(riocm_cdev.dev); } riocm_debug(MPORT, "Added %s cdev(%d:%d)", DEV_NAME, MAJOR(devno), MINOR(devno)); return 0; } /* * riocm_add_mport - add new local mport device into channel management core * @dev: device object associated with mport * * When a new mport device is added, CM immediately reserves inbound and * outbound RapidIO mailboxes that will be used. 
*/ static int riocm_add_mport(struct device *dev) { int rc; int i; struct cm_dev *cm; struct rio_mport *mport = to_rio_mport(dev); riocm_debug(MPORT, "add mport %s", mport->name); cm = kzalloc(sizeof(*cm), GFP_KERNEL); if (!cm) return -ENOMEM; cm->mport = mport; rc = rio_request_outb_mbox(mport, cm, cmbox, RIOCM_TX_RING_SIZE, riocm_outb_msg_event); if (rc) { riocm_error("failed to allocate OBMBOX_%d on %s", cmbox, mport->name); kfree(cm); return -ENODEV; } rc = rio_request_inb_mbox(mport, cm, cmbox, RIOCM_RX_RING_SIZE, riocm_inb_msg_event); if (rc) { riocm_error("failed to allocate IBMBOX_%d on %s", cmbox, mport->name); rio_release_outb_mbox(mport, cmbox); kfree(cm); return -ENODEV; } cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); if (!cm->rx_wq) { rio_release_inb_mbox(mport, cmbox); rio_release_outb_mbox(mport, cmbox); kfree(cm); return -ENOMEM; } /* * Allocate and register inbound messaging buffers to be ready * to receive channel and system management requests */ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) cm->rx_buf[i] = NULL; cm->rx_slots = RIOCM_RX_RING_SIZE; mutex_init(&cm->rx_lock); riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); INIT_WORK(&cm->rx_work, rio_ibmsg_handler); cm->tx_slot = 0; cm->tx_cnt = 0; cm->tx_ack_slot = 0; spin_lock_init(&cm->tx_lock); INIT_LIST_HEAD(&cm->peers); cm->npeers = 0; INIT_LIST_HEAD(&cm->tx_reqs); down_write(&rdev_sem); list_add_tail(&cm->list, &cm_dev_list); up_write(&rdev_sem); return 0; } /* * riocm_remove_mport - remove local mport device from channel management core * @dev: device object associated with mport * * Removes a local mport device from the list of registered devices that provide * channel management services. Returns an error if the specified mport is not * registered with the CM core. */ static void riocm_remove_mport(struct device *dev) { struct rio_mport *mport = to_rio_mport(dev); struct cm_dev *cm; struct cm_peer *peer, *temp; struct rio_channel *ch, *_c; unsigned int i; bool found = false; LIST_HEAD(list); riocm_debug(MPORT, "%s", mport->name); /* Find a matching cm_dev object */ down_write(&rdev_sem); list_for_each_entry(cm, &cm_dev_list, list) { if (cm->mport == mport) { list_del(&cm->list); found = true; break; } } up_write(&rdev_sem); if (!found) return; flush_workqueue(cm->rx_wq); destroy_workqueue(cm->rx_wq); /* Release channels bound to this mport */ spin_lock_bh(&idr_lock); idr_for_each_entry(&ch_idr, ch, i) { if (ch->cmdev == cm) { riocm_debug(RDEV, "%s drop ch_%d", mport->name, ch->id); idr_remove(&ch_idr, ch->id); list_add(&ch->ch_node, &list); } } spin_unlock_bh(&idr_lock); if (!list_empty(&list)) { list_for_each_entry_safe(ch, _c, &list, ch_node) { list_del(&ch->ch_node); riocm_ch_close(ch); } } rio_release_inb_mbox(mport, cmbox); rio_release_outb_mbox(mport, cmbox); /* Remove and free peer entries */ if (!list_empty(&cm->peers)) riocm_debug(RDEV, "ATTN: peer list not empty"); list_for_each_entry_safe(peer, temp, &cm->peers, node) { riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev)); list_del(&peer->node); kfree(peer); } riocm_rx_free(cm); kfree(cm); riocm_debug(MPORT, "%s done", mport->name); } static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused) { struct rio_channel *ch; unsigned int i; LIST_HEAD(list); riocm_debug(EXIT, "."); /* * If there are any channels left in connected state send * close notification to the connection partner. 
* First build a list of channels that require a closing * notification because function riocm_send_close() should * be called outside of spinlock protected code. */ spin_lock_bh(&idr_lock); idr_for_each_entry(&ch_idr, ch, i) { if (ch->state == RIO_CM_CONNECTED) { riocm_debug(EXIT, "close ch %d", ch->id); idr_remove(&ch_idr, ch->id); list_add(&ch->ch_node, &list); } } spin_unlock_bh(&idr_lock); list_for_each_entry(ch, &list, ch_node) riocm_send_close(ch); return NOTIFY_DONE; } /* * riocm_interface handles addition/removal of remote RapidIO devices */ static struct subsys_interface riocm_interface = { .name = "rio_cm", .subsys = &rio_bus_type, .add_dev = riocm_add_dev, .remove_dev = riocm_remove_dev, }; /* * rio_mport_interface handles addition/removal local mport devices */ static struct class_interface rio_mport_interface __refdata = { .class = &rio_mport_class, .add_dev = riocm_add_mport, .remove_dev = riocm_remove_mport, }; static struct notifier_block rio_cm_notifier = { .notifier_call = rio_cm_shutdown, }; static int __init riocm_init(void) { int ret; /* Create device class needed by udev */ dev_class = class_create(DRV_NAME); if (IS_ERR(dev_class)) { riocm_error("Cannot create " DRV_NAME " class"); return PTR_ERR(dev_class); } ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME); if (ret) { class_destroy(dev_class); return ret; } dev_major = MAJOR(dev_number); dev_minor_base = MINOR(dev_number); riocm_debug(INIT, "Registered class with %d major", dev_major); /* * Register as rapidio_port class interface to get notifications about * mport additions and removals. */ ret = class_interface_register(&rio_mport_interface); if (ret) { riocm_error("class_interface_register error: %d", ret); goto err_reg; } /* * Register as RapidIO bus interface to get notifications about * addition/removal of remote RapidIO devices. */ ret = subsys_interface_register(&riocm_interface); if (ret) { riocm_error("subsys_interface_register error: %d", ret); goto err_cl; } ret = register_reboot_notifier(&rio_cm_notifier); if (ret) { riocm_error("failed to register reboot notifier (err=%d)", ret); goto err_sif; } ret = riocm_cdev_add(dev_number); if (ret) { unregister_reboot_notifier(&rio_cm_notifier); ret = -ENODEV; goto err_sif; } return 0; err_sif: subsys_interface_unregister(&riocm_interface); err_cl: class_interface_unregister(&rio_mport_interface); err_reg: unregister_chrdev_region(dev_number, 1); class_destroy(dev_class); return ret; } static void __exit riocm_exit(void) { riocm_debug(EXIT, "enter"); unregister_reboot_notifier(&rio_cm_notifier); subsys_interface_unregister(&riocm_interface); class_interface_unregister(&rio_mport_interface); idr_destroy(&ch_idr); device_unregister(riocm_cdev.dev); cdev_del(&(riocm_cdev.cdev)); class_destroy(dev_class); unregister_chrdev_region(dev_number, 1); } late_initcall(riocm_init); module_exit(riocm_exit);
linux-master
drivers/rapidio/rio_cm.c
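/*
 * Editor's illustration, not part of the driver: a minimal user-space sketch
 * of the ioctl interface served by riocm_cdev_ioctl() above. It sets up a
 * listening channel using the CREATE -> BIND -> LISTEN sequence handled by
 * cm_chan_create(), cm_chan_bind() and cm_chan_listen(). Assumptions: the
 * ioctl codes and struct rio_cm_channel are exported to user space through
 * <linux/rio_cm_cdev.h>, the character device created by riocm_cdev_add()
 * appears as /dev/rio_cm, and local mport 0 exists; adjust these names to
 * match the actual system.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_cm_cdev.h>

static int rio_cm_listen_example(void)
{
	struct rio_cm_channel chan;
	uint16_t ch_num = 0;	/* 0 = ask the driver to pick a channel ID */
	int fd, ret;

	fd = open("/dev/rio_cm", O_RDWR);
	if (fd < 0) {
		perror("open /dev/rio_cm");
		return -1;
	}

	/* cm_chan_create() writes the allocated channel ID back into ch_num */
	ret = ioctl(fd, RIO_CM_CHAN_CREATE, &ch_num);
	if (ret)
		goto out;

	/* Bind the channel to local mport 0 (channel state IDLE -> CHAN_BOUND) */
	memset(&chan, 0, sizeof(chan));
	chan.id = ch_num;
	chan.mport_id = 0;
	ret = ioctl(fd, RIO_CM_CHAN_BIND, &chan);
	if (ret)
		goto out_close;

	/* CHAN_BOUND -> LISTEN; RIO_CM_CHAN_ACCEPT may now be issued on ch_num */
	ret = ioctl(fd, RIO_CM_CHAN_LISTEN, &ch_num);
	if (!ret)
		printf("channel %u is listening\n", ch_num);

	/*
	 * A real listener would keep fd open and call RIO_CM_CHAN_ACCEPT here;
	 * closing the file releases the channel via riocm_cdev_release().
	 */
out_close:
	if (ret)
		ioctl(fd, RIO_CM_CHAN_CLOSE, &ch_num);
out:
	close(fd);
	return ret;
}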
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO enumeration and discovery support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <[email protected]> * * Copyright 2009 Integrated Device Technology, Inc. * Alex Bounine <[email protected]> * - Added Port-Write/Error Management initialization and handling * * Copyright 2009 Sysgo AG * Thomas Moll <[email protected]> * - Added Input- Output- enable functionality, to allow full communication */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/rio_regs.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/jiffies.h> #include <linux/slab.h> #include "rio.h" static void rio_init_em(struct rio_dev *rdev); struct rio_id_table { u16 start; /* logical minimal id */ u32 max; /* max number of IDs in table */ spinlock_t lock; unsigned long table[]; }; static int next_destid = 0; static int next_comptag = 1; /** * rio_destid_alloc - Allocate next available destID for given network * @net: RIO network * * Returns next available device destination ID for the specified RIO network. * Marks allocated ID as one in use. * Returns RIO_INVALID_DESTID if new destID is not available. */ static u16 rio_destid_alloc(struct rio_net *net) { int destid; struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; spin_lock(&idtab->lock); destid = find_first_zero_bit(idtab->table, idtab->max); if (destid < idtab->max) { set_bit(destid, idtab->table); destid += idtab->start; } else destid = RIO_INVALID_DESTID; spin_unlock(&idtab->lock); return (u16)destid; } /** * rio_destid_reserve - Reserve the specified destID * @net: RIO network * @destid: destID to reserve * * Tries to reserve the specified destID. * Returns 0 if successful. */ static int rio_destid_reserve(struct rio_net *net, u16 destid) { int oldbit; struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; destid -= idtab->start; spin_lock(&idtab->lock); oldbit = test_and_set_bit(destid, idtab->table); spin_unlock(&idtab->lock); return oldbit; } /** * rio_destid_free - free a previously allocated destID * @net: RIO network * @destid: destID to free * * Makes the specified destID available for use. 
*/ static void rio_destid_free(struct rio_net *net, u16 destid) { struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; destid -= idtab->start; spin_lock(&idtab->lock); clear_bit(destid, idtab->table); spin_unlock(&idtab->lock); } /** * rio_destid_first - return first destID in use * @net: RIO network */ static u16 rio_destid_first(struct rio_net *net) { int destid; struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; spin_lock(&idtab->lock); destid = find_first_bit(idtab->table, idtab->max); if (destid >= idtab->max) destid = RIO_INVALID_DESTID; else destid += idtab->start; spin_unlock(&idtab->lock); return (u16)destid; } /** * rio_destid_next - return next destID in use * @net: RIO network * @from: destination ID from which search shall continue */ static u16 rio_destid_next(struct rio_net *net, u16 from) { int destid; struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; spin_lock(&idtab->lock); destid = find_next_bit(idtab->table, idtab->max, from); if (destid >= idtab->max) destid = RIO_INVALID_DESTID; else destid += idtab->start; spin_unlock(&idtab->lock); return (u16)destid; } /** * rio_get_device_id - Get the base/extended device id for a device * @port: RIO master port * @destid: Destination ID of device * @hopcount: Hopcount to device * * Reads the base/extended device id from a device. Returns the * 8/16-bit device ID. */ static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount) { u32 result; rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result); return RIO_GET_DID(port->sys_size, result); } /** * rio_set_device_id - Set the base/extended device id for a device * @port: RIO master port * @destid: Destination ID of device * @hopcount: Hopcount to device * @did: Device ID value to be written * * Writes the base/extended device id from a device. */ static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did) { rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR, RIO_SET_DID(port->sys_size, did)); } /** * rio_clear_locks- Release all host locks and signal enumeration complete * @net: RIO network to run on * * Marks the component tag CSR on each device with the enumeration * complete flag. When complete, it then release the host locks on * each device. Returns 0 on success or %-EINVAL on failure. 
*/ static int rio_clear_locks(struct rio_net *net) { struct rio_mport *port = net->hport; struct rio_dev *rdev; u32 result; int ret = 0; /* Release host device id locks */ rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result); if ((result & 0xffff) != 0xffff) { printk(KERN_INFO "RIO: badness when releasing host lock on master port, result %8.8x\n", result); ret = -EINVAL; } list_for_each_entry(rdev, &net->devices, net_list) { rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result); if ((result & 0xffff) != 0xffff) { printk(KERN_INFO "RIO: badness when releasing host lock on vid %4.4x did %4.4x\n", rdev->vid, rdev->did); ret = -EINVAL; } /* Mark device as discovered and enable master */ rio_read_config_32(rdev, rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, &result); result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER; rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, result); } return ret; } /** * rio_enum_host- Set host lock and initialize host destination ID * @port: Master port to issue transaction * * Sets the local host master port lock and destination ID register * with the host device ID value. The host device ID value is provided * by the platform. Returns %0 on success or %-1 on failure. */ static int rio_enum_host(struct rio_mport *port) { u32 result; /* Set master port host device id lock */ rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result); if ((result & 0xffff) != port->host_deviceid) return -1; /* Set master port destid and init destid ctr */ rio_local_set_device_id(port, port->host_deviceid); return 0; } /** * rio_device_has_destid- Test if a device contains a destination ID register * @port: Master port to issue transaction * @src_ops: RIO device source operations * @dst_ops: RIO device destination operations * * Checks the provided @src_ops and @dst_ops for the necessary transaction * capabilities that indicate whether or not a device will implement a * destination ID register. Returns 1 if true or 0 if false. */ static int rio_device_has_destid(struct rio_mport *port, int src_ops, int dst_ops) { u32 mask = RIO_OPS_READ | RIO_OPS_WRITE | RIO_OPS_ATOMIC_TST_SWP | RIO_OPS_ATOMIC_INC | RIO_OPS_ATOMIC_DEC | RIO_OPS_ATOMIC_SET | RIO_OPS_ATOMIC_CLR; return !!((src_ops | dst_ops) & mask); } /** * rio_release_dev- Frees a RIO device struct * @dev: LDM device associated with a RIO device struct * * Gets the RIO device struct associated a RIO device struct. * The RIO device struct is freed. */ static void rio_release_dev(struct device *dev) { struct rio_dev *rdev; rdev = to_rio_dev(dev); kfree(rdev); } /** * rio_is_switch- Tests if a RIO device has switch capabilities * @rdev: RIO device * * Gets the RIO device Processing Element Features register * contents and tests for switch capabilities. Returns 1 if * the device is a switch or 0 if it is not a switch. * The RIO device struct is freed. */ static int rio_is_switch(struct rio_dev *rdev) { if (rdev->pef & RIO_PEF_SWITCH) return 1; return 0; } /** * rio_setup_device- Allocates and sets up a RIO device * @net: RIO network * @port: Master port to send transactions * @destid: Current destination ID * @hopcount: Current hopcount * @do_enum: Enumeration/Discovery mode flag * * Allocates a RIO device and configures fields based on configuration * space contents. 
If device has a destination ID register, a destination * ID is either assigned in enumeration mode or read from configuration * space in discovery mode. If the device has switch capabilities, then * a switch is allocated and configured appropriately. Returns a pointer * to a RIO device on success or NULL on failure. * */ static struct rio_dev *rio_setup_device(struct rio_net *net, struct rio_mport *port, u16 destid, u8 hopcount, int do_enum) { int ret = 0; struct rio_dev *rdev; struct rio_switch *rswitch = NULL; int result, rdid; size_t size; u32 swpinfo = 0; size = sizeof(*rdev); if (rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, &result)) return NULL; if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { rio_mport_read_config_32(port, destid, hopcount, RIO_SWP_INFO_CAR, &swpinfo); if (result & RIO_PEF_SWITCH) size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); } rdev = kzalloc(size, GFP_KERNEL); if (!rdev) return NULL; rdev->net = net; rdev->pef = result; rdev->swpinfo = swpinfo; rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR, &result); rdev->did = result >> 16; rdev->vid = result & 0xffff; rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_INFO_CAR, &rdev->device_rev); rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_ID_CAR, &result); rdev->asm_did = result >> 16; rdev->asm_vid = result & 0xffff; rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR, &result); rdev->asm_rev = result >> 16; if (rdev->pef & RIO_PEF_EXT_FEATURES) { rdev->efptr = result & 0xffff; rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, hopcount, &rdev->phys_rmap); pr_debug("RIO: %s Register Map %d device\n", __func__, rdev->phys_rmap); rdev->em_efptr = rio_mport_get_feature(port, 0, destid, hopcount, RIO_EFB_ERR_MGMNT); if (!rdev->em_efptr) rdev->em_efptr = rio_mport_get_feature(port, 0, destid, hopcount, RIO_EFB_ERR_MGMNT_HS); } rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, &rdev->src_ops); rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR, &rdev->dst_ops); if (do_enum) { /* Assign component tag to device */ if (next_comptag >= 0x10000) { pr_err("RIO: Component Tag Counter Overflow\n"); goto cleanup; } rio_mport_write_config_32(port, destid, hopcount, RIO_COMPONENT_TAG_CSR, next_comptag); rdev->comp_tag = next_comptag++; rdev->do_enum = true; } else { rio_mport_read_config_32(port, destid, hopcount, RIO_COMPONENT_TAG_CSR, &rdev->comp_tag); } if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) { if (do_enum) { rio_set_device_id(port, destid, hopcount, next_destid); rdev->destid = next_destid; next_destid = rio_destid_alloc(net); } else rdev->destid = rio_get_device_id(port, destid, hopcount); rdev->hopcount = 0xff; } else { /* Switch device has an associated destID which * will be adjusted later */ rdev->destid = destid; rdev->hopcount = hopcount; } /* If a PE has both switch and other functions, show it as a switch */ if (rio_is_switch(rdev)) { rswitch = rdev->rswitch; rswitch->port_ok = 0; spin_lock_init(&rswitch->lock); rswitch->route_table = kzalloc(RIO_MAX_ROUTE_ENTRIES(port->sys_size), GFP_KERNEL); if (!rswitch->route_table) goto cleanup; /* Initialize switch route table */ for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size); rdid++) rswitch->route_table[rdid] = RIO_INVALID_ROUTE; dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, rdev->comp_tag & RIO_CTAG_UDEVID); if (do_enum) rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); } else { if (do_enum) /*Enable Input Output 
Port (transmitter receiver)*/ rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, rdev->comp_tag & RIO_CTAG_UDEVID); } rdev->dev.parent = &net->dev; rio_attach_device(rdev); rdev->dev.release = rio_release_dev; rdev->dma_mask = DMA_BIT_MASK(32); rdev->dev.dma_mask = &rdev->dma_mask; rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); ret = rio_add_device(rdev); if (ret) { if (rswitch) kfree(rswitch->route_table); put_device(&rdev->dev); return NULL; } rio_dev_get(rdev); return rdev; cleanup: if (rswitch) kfree(rswitch->route_table); kfree(rdev); return NULL; } /** * rio_sport_is_active- Tests if a switch port has an active connection. * @rdev: RapidIO device object * @sp: Switch port number * * Reads the port error status CSR for a particular switch port to * determine if the port has an active link. Returns * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is * inactive. */ static int rio_sport_is_active(struct rio_dev *rdev, int sp) { u32 result = 0; rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp), &result); return result & RIO_PORT_N_ERR_STS_PORT_OK; } /** * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device * @port: Master port to send transaction * @hopcount: Number of hops to the device * * Used during enumeration to read the Host Device ID Lock CSR on a * RIO device. Returns the value of the lock register. */ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount) { u32 result; rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount, RIO_HOST_DID_LOCK_CSR, &result); return (u16) (result & 0xffff); } /** * rio_enum_peer- Recursively enumerate a RIO network through a master port * @net: RIO network being enumerated * @port: Master port to send transactions * @hopcount: Number of hops into the network * @prev: Previous RIO device connected to the enumerated one * @prev_port: Port on previous RIO device * * Recursively enumerates a RIO network. Transactions are sent via the * master port passed in @port. */ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, u8 hopcount, struct rio_dev *prev, int prev_port) { struct rio_dev *rdev; u32 regval; int tmp; if (rio_mport_chk_dev_access(port, RIO_ANY_DESTID(port->sys_size), hopcount)) { pr_debug("RIO: device access check failed\n"); return -1; } if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) { pr_debug("RIO: PE already discovered by this host\n"); /* * Already discovered by this host. Add it as another * link to the existing device. 
*/ rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount, RIO_COMPONENT_TAG_CSR, &regval); if (regval) { rdev = rio_get_comptag((regval & 0xffff), NULL); if (rdev && prev && rio_is_switch(prev)) { pr_debug("RIO: redundant path to %s\n", rio_name(rdev)); prev->rswitch->nextdev[prev_port] = rdev; } } return 0; } /* Attempt to acquire device lock */ rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); while ((tmp = rio_get_host_deviceid_lock(port, hopcount)) < port->host_deviceid) { /* Delay a bit */ mdelay(1); /* Attempt to acquire device lock again */ rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount, RIO_HOST_DID_LOCK_CSR, port->host_deviceid); } if (rio_get_host_deviceid_lock(port, hopcount) > port->host_deviceid) { pr_debug( "RIO: PE locked by a higher priority host...retreating\n"); return -1; } /* Setup new RIO device */ rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size), hopcount, 1); if (rdev) { rdev->prev = prev; if (prev && rio_is_switch(prev)) prev->rswitch->nextdev[prev_port] = rdev; } else return -1; if (rio_is_switch(rdev)) { int sw_destid; int cur_destid; int sw_inport; u16 destid; int port_num; sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, port->host_deviceid, sw_inport, 0); rdev->rswitch->route_table[port->host_deviceid] = sw_inport; destid = rio_destid_first(net); while (destid != RIO_INVALID_DESTID && destid < next_destid) { if (destid != port->host_deviceid) { rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, destid, sw_inport, 0); rdev->rswitch->route_table[destid] = sw_inport; } destid = rio_destid_next(net, destid + 1); } pr_debug( "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", rio_name(rdev), rdev->vid, rdev->did, RIO_GET_TOTAL_PORTS(rdev->swpinfo)); sw_destid = next_destid; for (port_num = 0; port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); port_num++) { if (sw_inport == port_num) { rio_enable_rx_tx_port(port, 0, RIO_ANY_DESTID(port->sys_size), hopcount, port_num); rdev->rswitch->port_ok |= (1 << port_num); continue; } cur_destid = next_destid; if (rio_sport_is_active(rdev, port_num)) { pr_debug( "RIO: scanning device on port %d\n", port_num); rio_enable_rx_tx_port(port, 0, RIO_ANY_DESTID(port->sys_size), hopcount, port_num); rdev->rswitch->port_ok |= (1 << port_num); rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, RIO_ANY_DESTID(port->sys_size), port_num, 0); if (rio_enum_peer(net, port, hopcount + 1, rdev, port_num) < 0) return -1; /* Update routing tables */ destid = rio_destid_next(net, cur_destid + 1); if (destid != RIO_INVALID_DESTID) { for (destid = cur_destid; destid < next_destid;) { if (destid != port->host_deviceid) { rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, destid, port_num, 0); rdev->rswitch-> route_table[destid] = port_num; } destid = rio_destid_next(net, destid + 1); } } } else { /* If switch supports Error Management, * set PORT_LOCKOUT bit for unused port */ if (rdev->em_efptr) rio_set_port_lockout(rdev, port_num, 1); rdev->rswitch->port_ok &= ~(1 << port_num); } } /* Direct Port-write messages to the enumeratiing host */ if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) && (rdev->em_efptr)) { rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, (port->host_deviceid << 16) | (port->sys_size << 15)); } rio_init_em(rdev); /* Check for empty switch */ if (next_destid == sw_destid) next_destid = rio_destid_alloc(net); rdev->destid = sw_destid; } else pr_debug("RIO: found %s (vid %4.4x 
did %4.4x)\n", rio_name(rdev), rdev->vid, rdev->did); return 0; } /** * rio_enum_complete- Tests if enumeration of a network is complete * @port: Master port to send transaction * * Tests the PGCCSR discovered bit for non-zero value (enumeration * complete flag). Return %1 if enumeration is complete or %0 if * enumeration is incomplete. */ static int rio_enum_complete(struct rio_mport *port) { u32 regval; rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR, &regval); return (regval & RIO_PORT_GEN_DISCOVERED) ? 1 : 0; } /** * rio_disc_peer- Recursively discovers a RIO network through a master port * @net: RIO network being discovered * @port: Master port to send transactions * @destid: Current destination ID in network * @hopcount: Number of hops into the network * @prev: previous rio_dev * @prev_port: previous port number * * Recursively discovers a RIO network. Transactions are sent via the * master port passed in @port. */ static int rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, u8 hopcount, struct rio_dev *prev, int prev_port) { u8 port_num, route_port; struct rio_dev *rdev; u16 ndestid; /* Setup new RIO device */ if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) { rdev->prev = prev; if (prev && rio_is_switch(prev)) prev->rswitch->nextdev[prev_port] = rdev; } else return -1; if (rio_is_switch(rdev)) { /* Associated destid is how we accessed this switch */ rdev->destid = destid; pr_debug( "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", rio_name(rdev), rdev->vid, rdev->did, RIO_GET_TOTAL_PORTS(rdev->swpinfo)); for (port_num = 0; port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); port_num++) { if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num) continue; if (rio_sport_is_active(rdev, port_num)) { pr_debug( "RIO: scanning device on port %d\n", port_num); rio_lock_device(port, destid, hopcount, 1000); for (ndestid = 0; ndestid < RIO_ANY_DESTID(port->sys_size); ndestid++) { rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, ndestid, &route_port, 0); if (route_port == port_num) break; } if (ndestid == RIO_ANY_DESTID(port->sys_size)) continue; rio_unlock_device(port, destid, hopcount); if (rio_disc_peer(net, port, ndestid, hopcount + 1, rdev, port_num) < 0) return -1; } } } else pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n", rio_name(rdev), rdev->vid, rdev->did); return 0; } /** * rio_mport_is_active- Tests if master port link is active * @port: Master port to test * * Reads the port error status CSR for the master port to * determine if the port has an active link. Returns * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active * or %0 if it is inactive. */ static int rio_mport_is_active(struct rio_mport *port) { u32 result = 0; rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap), &result); return result & RIO_PORT_N_ERR_STS_PORT_OK; } static void rio_scan_release_net(struct rio_net *net) { pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id); kfree(net->enum_data); } static void rio_scan_release_dev(struct device *dev) { struct rio_net *net; net = to_rio_net(dev); pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id); kfree(net); } /* * rio_scan_alloc_net - Allocate and configure a new RIO network * @mport: Master port associated with the RIO network * @do_enum: Enumeration/Discovery mode flag * @start: logical minimal start id for new net * * Allocates a new RIO network structure and initializes enumerator-specific * part of it (if required). 
* Returns a RIO network pointer on success or %NULL on failure. */ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport, int do_enum, u16 start) { struct rio_net *net; net = rio_alloc_net(mport); if (net && do_enum) { struct rio_id_table *idtab; size_t size; size = sizeof(struct rio_id_table) + BITS_TO_LONGS( RIO_MAX_ROUTE_ENTRIES(mport->sys_size) ) * sizeof(long); idtab = kzalloc(size, GFP_KERNEL); if (idtab == NULL) { pr_err("RIO: failed to allocate destID table\n"); rio_free_net(net); net = NULL; } else { net->enum_data = idtab; net->release = rio_scan_release_net; idtab->start = start; idtab->max = RIO_MAX_ROUTE_ENTRIES(mport->sys_size); spin_lock_init(&idtab->lock); } } if (net) { net->id = mport->id; net->hport = mport; dev_set_name(&net->dev, "rnet_%d", net->id); net->dev.parent = &mport->dev; net->dev.release = rio_scan_release_dev; rio_add_net(net); } return net; } /** * rio_update_route_tables- Updates route tables in switches * @net: RIO network to run update on * * For each enumerated device, ensure that each switch in a system * has correct routing entries. Add routes for devices that where * unknown during the first enumeration pass through the switch. */ static void rio_update_route_tables(struct rio_net *net) { struct rio_dev *rdev, *swrdev; struct rio_switch *rswitch; u8 sport; u16 destid; list_for_each_entry(rdev, &net->devices, net_list) { destid = rdev->destid; list_for_each_entry(rswitch, &net->switches, node) { if (rio_is_switch(rdev) && (rdev->rswitch == rswitch)) continue; if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { swrdev = sw_to_rio_dev(rswitch); /* Skip if destid ends in empty switch*/ if (swrdev->destid == destid) continue; sport = RIO_GET_PORT_NUM(swrdev->swpinfo); rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE, destid, sport, 0); rswitch->route_table[destid] = sport; } } } } /** * rio_init_em - Initializes RIO Error Management (for switches) * @rdev: RIO device * * For each enumerated switch, call device-specific error management * initialization routine (if supplied by the switch driver). */ static void rio_init_em(struct rio_dev *rdev) { if (rio_is_switch(rdev) && (rdev->em_efptr) && rdev->rswitch->ops && rdev->rswitch->ops->em_init) { rdev->rswitch->ops->em_init(rdev); } } /** * rio_enum_mport- Start enumeration through a master port * @mport: Master port to send transactions * @flags: Enumeration control flags * * Starts the enumeration process. If somebody has enumerated our * master port device, then give up. If not and we have an active * link, then start recursive peer enumeration. Returns %0 if * enumeration succeeds or %-EBUSY if enumeration fails. */ static int rio_enum_mport(struct rio_mport *mport, u32 flags) { struct rio_net *net = NULL; int rc = 0; printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, mport->name); /* * To avoid multiple start requests (repeat enumeration is not supported * by this method) check if enumeration/discovery was performed for this * mport: if mport was added into the list of mports for a net exit * with error. */ if (mport->nnode.next || mport->nnode.prev) return -EBUSY; /* If somebody else enumerated our master port device, bail. 
*/ if (rio_enum_host(mport) < 0) { printk(KERN_INFO "RIO: master port %d device has been enumerated by a remote host\n", mport->id); rc = -EBUSY; goto out; } /* If master port has an active link, allocate net and enum peers */ if (rio_mport_is_active(mport)) { net = rio_scan_alloc_net(mport, 1, 0); if (!net) { printk(KERN_ERR "RIO: failed to allocate new net\n"); rc = -ENOMEM; goto out; } /* reserve mport destID in new net */ rio_destid_reserve(net, mport->host_deviceid); /* Enable Input Output Port (transmitter receiver) */ rio_enable_rx_tx_port(mport, 1, 0, 0, 0); /* Set component tag for host */ rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, next_comptag++); next_destid = rio_destid_alloc(net); if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) { /* A higher priority host won enumeration, bail. */ printk(KERN_INFO "RIO: master port %d device has lost enumeration to a remote host\n", mport->id); rio_clear_locks(net); rc = -EBUSY; goto out; } /* free the last allocated destID (unused) */ rio_destid_free(net, next_destid); rio_update_route_tables(net); rio_clear_locks(net); rio_pw_enable(mport, 1); } else { printk(KERN_INFO "RIO: master port %d link inactive\n", mport->id); rc = -EINVAL; } out: return rc; } /** * rio_build_route_tables- Generate route tables from switch route entries * @net: RIO network to run route tables scan on * * For each switch device, generate a route table by copying existing * route entries from the switch. */ static void rio_build_route_tables(struct rio_net *net) { struct rio_switch *rswitch; struct rio_dev *rdev; int i; u8 sport; list_for_each_entry(rswitch, &net->switches, node) { rdev = sw_to_rio_dev(rswitch); rio_lock_device(net->hport, rdev->destid, rdev->hopcount, 1000); for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size); i++) { if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, i, &sport, 0) < 0) continue; rswitch->route_table[i] = sport; } rio_unlock_device(net->hport, rdev->destid, rdev->hopcount); } } /** * rio_disc_mport- Start discovery through a master port * @mport: Master port to send transactions * @flags: discovery control flags * * Starts the discovery process. If we have an active link, * then wait for the signal that enumeration is complete (if wait * is allowed). * When enumeration completion is signaled, start recursive * peer discovery. Returns %0 if discovery succeeds or %-EBUSY * on failure. */ static int rio_disc_mport(struct rio_mport *mport, u32 flags) { struct rio_net *net = NULL; unsigned long to_end; printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id, mport->name); /* If master port has an active link, allocate net and discover peers */ if (rio_mport_is_active(mport)) { if (rio_enum_complete(mport)) goto enum_done; else if (flags & RIO_SCAN_ENUM_NO_WAIT) return -EAGAIN; pr_debug("RIO: wait for enumeration to complete...\n"); to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; while (time_before(jiffies, to_end)) { if (rio_enum_complete(mport)) goto enum_done; msleep(10); } pr_debug("RIO: discovery timeout on mport %d %s\n", mport->id, mport->name); goto bail; enum_done: pr_debug("RIO: ... 
enumeration done\n"); net = rio_scan_alloc_net(mport, 0, 0); if (!net) { printk(KERN_ERR "RIO: Failed to allocate new net\n"); goto bail; } /* Read DestID assigned by enumerator */ rio_local_read_config_32(mport, RIO_DID_CSR, &mport->host_deviceid); mport->host_deviceid = RIO_GET_DID(mport->sys_size, mport->host_deviceid); if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 0, NULL, 0) < 0) { printk(KERN_INFO "RIO: master port %d device has failed discovery\n", mport->id); goto bail; } rio_build_route_tables(net); } return 0; bail: return -EBUSY; } static struct rio_scan rio_scan_ops = { .owner = THIS_MODULE, .enumerate = rio_enum_mport, .discover = rio_disc_mport, }; static bool scan; module_param(scan, bool, 0); MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery " "(default = 0)"); /** * rio_basic_attach: * * When this enumeration/discovery method is loaded as a module this function * registers its specific enumeration and discovery routines for all available * RapidIO mport devices. The "scan" command line parameter controls the ability * of the module to start RapidIO enumeration/discovery automatically. * * Returns 0 for success or -EIO if unable to register itself. * * This enumeration/discovery method cannot be unloaded and therefore does not * provide a matching cleanup_module routine. */ static int __init rio_basic_attach(void) { if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops)) return -EIO; if (scan) rio_init_mports(); return 0; } late_initcall(rio_basic_attach); MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery"); MODULE_LICENSE("GPL");
linux-master
drivers/rapidio/rio-scan.c
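A minimal, hedged sketch (not part of the kernel tree) of how another enumeration/discovery method could attach through the same struct rio_scan interface that rio-scan.c registers above; only struct rio_scan, RIO_MPORT_ANY and rio_register_scan() are taken from the file, and everything named "demo_*" is hypothetical with placeholder callback bodies.

/*
 * Hypothetical example only: the struct rio_scan layout, RIO_MPORT_ANY and
 * rio_register_scan() are taken from rio-scan.c above; the "demo_*" names
 * are made up and the callbacks are placeholders.
 */
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

static int demo_enumerate(struct rio_mport *mport, u32 flags)
{
	/* A real method would walk the fabric here (cf. rio_enum_mport()). */
	pr_info("demo-scan: enumerate mport %d (%s)\n", mport->id, mport->name);
	return 0;
}

static int demo_discover(struct rio_mport *mport, u32 flags)
{
	/* A real method would wait for the enumerator (cf. rio_disc_mport()). */
	pr_info("demo-scan: discover mport %d (%s)\n", mport->id, mport->name);
	return 0;
}

static struct rio_scan demo_scan_ops = {
	.owner		= THIS_MODULE,
	.enumerate	= demo_enumerate,
	.discover	= demo_discover,
};

static int __init demo_scan_init(void)
{
	/* Attach to every master port, as rio_basic_attach() does above. */
	return rio_register_scan(RIO_MPORT_ANY, &demo_scan_ops) ? -EIO : 0;
}
module_init(demo_scan_init);
MODULE_DESCRIPTION("Illustrative RapidIO scan-method skeleton");
MODULE_LICENSE("GPL");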
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO configuration space access support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <[email protected]> */ #include <linux/rio.h> #include <linux/module.h> #include <linux/rio_drv.h> /* * Wrappers for all RIO configuration access functions. They just check * alignment and call the low-level functions pointed to by rio_mport->ops. */ #define RIO_8_BAD 0 #define RIO_16_BAD (offset & 1) #define RIO_32_BAD (offset & 3) /** * RIO_LOP_READ - Generate rio_local_read_config_* functions * @size: Size of configuration space read (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space read (1, 2, 4 bytes) * * Generates rio_local_read_config_* functions used to access * configuration space registers on the local device. */ #define RIO_LOP_READ(size,type,len) \ int __rio_local_read_config_##size \ (struct rio_mport *mport, u32 offset, type *value) \ { \ int res; \ u32 data = 0; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ *value = (type)data; \ return res; \ } /** * RIO_LOP_WRITE - Generate rio_local_write_config_* functions * @size: Size of configuration space write (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space write (1, 2, 4 bytes) * * Generates rio_local_write_config_* functions used to access * configuration space registers on the local device. */ #define RIO_LOP_WRITE(size,type,len) \ int __rio_local_write_config_##size \ (struct rio_mport *mport, u32 offset, type value) \ { \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ return mport->ops->lcwrite(mport, mport->id, offset, len, value);\ } RIO_LOP_READ(8, u8, 1) RIO_LOP_READ(16, u16, 2) RIO_LOP_READ(32, u32, 4) RIO_LOP_WRITE(8, u8, 1) RIO_LOP_WRITE(16, u16, 2) RIO_LOP_WRITE(32, u32, 4) EXPORT_SYMBOL_GPL(__rio_local_read_config_8); EXPORT_SYMBOL_GPL(__rio_local_read_config_16); EXPORT_SYMBOL_GPL(__rio_local_read_config_32); EXPORT_SYMBOL_GPL(__rio_local_write_config_8); EXPORT_SYMBOL_GPL(__rio_local_write_config_16); EXPORT_SYMBOL_GPL(__rio_local_write_config_32); /** * RIO_OP_READ - Generate rio_mport_read_config_* functions * @size: Size of configuration space read (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space read (1, 2, 4 bytes) * * Generates rio_mport_read_config_* functions used to access * configuration space registers on the local device. */ #define RIO_OP_READ(size,type,len) \ int rio_mport_read_config_##size \ (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ { \ int res; \ u32 data = 0; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ *value = (type)data; \ return res; \ } /** * RIO_OP_WRITE - Generate rio_mport_write_config_* functions * @size: Size of configuration space write (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space write (1, 2, 4 bytes) * * Generates rio_mport_write_config_* functions used to access * configuration space registers on the local device. 
*/ #define RIO_OP_WRITE(size,type,len) \ int rio_mport_write_config_##size \ (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ { \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ return mport->ops->cwrite(mport, mport->id, destid, hopcount, \ offset, len, value); \ } RIO_OP_READ(8, u8, 1) RIO_OP_READ(16, u16, 2) RIO_OP_READ(32, u32, 4) RIO_OP_WRITE(8, u8, 1) RIO_OP_WRITE(16, u16, 2) RIO_OP_WRITE(32, u32, 4) EXPORT_SYMBOL_GPL(rio_mport_read_config_8); EXPORT_SYMBOL_GPL(rio_mport_read_config_16); EXPORT_SYMBOL_GPL(rio_mport_read_config_32); EXPORT_SYMBOL_GPL(rio_mport_write_config_8); EXPORT_SYMBOL_GPL(rio_mport_write_config_16); EXPORT_SYMBOL_GPL(rio_mport_write_config_32); /** * rio_mport_send_doorbell - Send a doorbell message * * @mport: RIO master port * @destid: RIO device destination ID * @data: Doorbell message data * * Send a doorbell message to a RIO device. The doorbell message * has a 16-bit info field provided by the data argument. */ int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) { return mport->ops->dsend(mport, mport->id, destid, data); } EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
linux-master
drivers/rapidio/rio-access.c
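A usage sketch for the accessors generated in rio-access.c above: rio_mport_read_config_32() and rio_mport_send_doorbell() are taken from the file, while RIO_DEV_ID_CAR is assumed to be the standard Device Identity CAR offset from <linux/rio_regs.h> and demo_probe_remote() itself is a hypothetical helper.

/*
 * Usage sketch only: rio_mport_read_config_32() and rio_mport_send_doorbell()
 * come from rio-access.c above; RIO_DEV_ID_CAR is assumed to be the standard
 * Device Identity CAR offset from <linux/rio_regs.h>; demo_probe_remote()
 * is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_regs.h>

static int demo_probe_remote(struct rio_mport *mport, u16 destid, u8 hopcount)
{
	u32 id_car;
	int rc;

	/* 32-bit maintenance read; RIO_32_BAD rejects misaligned offsets. */
	rc = rio_mport_read_config_32(mport, destid, hopcount,
				      RIO_DEV_ID_CAR, &id_car);
	if (rc)
		return rc;

	pr_info("demo: destid 0x%04x -> did 0x%04x vid 0x%04x\n",
		destid, id_car >> 16, id_car & 0xffff);

	/* 16-bit doorbell info field, delivered via mport->ops->dsend(). */
	return rio_mport_send_doorbell(mport, destid, 0x1234);
}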
// SPDX-License-Identifier: GPL-2.0-or-later /* * IDT CPS Gen.2 Serial RapidIO switch family support * * Copyright 2010 Integrated Device Technology, Inc. * Alexandre Bounine <[email protected]> */ #include <linux/stat.h> #include <linux/module.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/delay.h> #include <asm/page.h> #include "../rio.h" #define LOCAL_RTE_CONF_DESTID_SEL 0x010070 #define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f #define IDT_LT_ERR_REPORT_EN 0x03100c #define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40) #define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04 #define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40) #define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c #define IDT_PORT_INIT_TX_ACQUIRED 0x00000020 #define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100) #define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10 #define IDT_DEV_CTRL_1 0xf2000c #define IDT_DEV_CTRL_1_GENPW 0x02000000 #define IDT_DEV_CTRL_1_PRSTBEH 0x00000001 #define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008 #define IDT_CFGBLK_ERR_REPORT 0xf20014 #define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002 #define IDT_AUX_PORT_ERR_CAP_EN 0x020000 #define IDT_AUX_ERR_REPORT_EN 0xf20018 #define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002 #define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001 #define IDT_ISLTL_ADDRESS_CAP 0x021014 #define IDT_RIO_DOMAIN 0xf20020 #define IDT_RIO_DOMAIN_MASK 0x000000ff #define IDT_PW_INFO_CSR 0xf20024 #define IDT_SOFT_RESET 0xf20040 #define IDT_SOFT_RESET_REQ 0x00030097 #define IDT_I2C_MCTRL 0xf20050 #define IDT_I2C_MCTRL_GENPW 0x04000000 #define IDT_JTAG_CTRL 0xf2005c #define IDT_JTAG_CTRL_GENPW 0x00000002 #define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100) #define IDT_LANE_CTRL_BC 0xffff00 #define IDT_LANE_CTRL_GENPW 0x00200000 #define IDT_LANE_DFE_1_BC 0xffff18 #define IDT_LANE_DFE_2_BC 0xffff1c #define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100) #define IDT_PORT_OPS_GENPW 0x08000000 #define IDT_PORT_OPS_PL_ELOG 0x00000040 #define IDT_PORT_OPS_LL_ELOG 0x00000020 #define IDT_PORT_OPS_LT_ELOG 0x00000010 #define IDT_PORT_OPS_BC 0xf4ff04 #define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100) #define IDT_ERR_CAP 0xfd0000 #define IDT_ERR_CAP_LOG_OVERWR 0x00000004 #define IDT_ERR_RD 0xfd0004 #define IDT_DEFAULT_ROUTE 0xde #define IDT_NO_ROUTE 0xdf static int idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { /* * Select routing table to update */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; if (route_port == RIO_INVALID_ROUTE) route_port = IDT_DEFAULT_ROUTE; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); /* * Program destination port for the specified destID */ rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, (u32)route_destid); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (u32)route_port); udelay(10); return 0; } static int idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 result; /* * Select routing table to read */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); rio_mport_read_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result) *route_port = RIO_INVALID_ROUTE; else 
*route_port = (u8)result; return 0; } static int idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 i; /* * Select routing table to read */ if (table == RIO_GLOBAL_TABLE) table = 0; else table++; rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); for (i = RIO_STD_RTE_CONF_EXTCFGEN; i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) | (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE); i += 4; } return 0; } static int idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 sw_domain) { /* * Switch domain configuration operates only at global level */ rio_mport_write_config_32(mport, destid, hopcount, IDT_RIO_DOMAIN, (u32)sw_domain); return 0; } static int idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 *sw_domain) { u32 regval; /* * Switch domain configuration operates only at global level */ rio_mport_read_config_32(mport, destid, hopcount, IDT_RIO_DOMAIN, &regval); *sw_domain = (u8)(regval & 0xff); return 0; } static int idtg2_em_init(struct rio_dev *rdev) { u32 regval; int i, tmp; /* * This routine performs device-specific initialization only. * All standard EM configuration should be performed at upper level. */ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Set Port-Write info CSR: PRIO=3 and CRF=1 */ rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000); /* * Configure LT LAYER error reporting. */ /* Enable standard (RIO.p8) error reporting */ rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN, REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR | REM_LTL_ERR_UNSUPTR); /* Use Port-Writes for LT layer error reporting. * Enable per-port reset */ rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval); rio_write_config_32(rdev, IDT_DEV_CTRL_1, regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH); /* * Configure PORT error reporting. */ /* Report all RIO.p8 errors supported by device */ rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037); /* Configure reporting of implementation specific errors/events */ rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED); /* Use Port-Writes for port error reporting and enable error logging */ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval); rio_write_config_32(rdev, IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW | IDT_PORT_OPS_PL_ELOG | IDT_PORT_OPS_LL_ELOG | IDT_PORT_OPS_LT_ELOG); } /* Overwrite error log if full */ rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR); /* * Configure LANE error reporting. */ /* Disable line error reporting */ rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0); /* Use Port-Writes for lane error reporting (when enabled) * (do per-lane update because lanes may have different configuration) */ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16; for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval); rio_write_config_32(rdev, IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW); } /* * Configure AUX error reporting. 
*/ /* Disable JTAG and I2C Error capture */ rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0); /* Disable JTAG and I2C Error reporting/logging */ rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0); /* Disable Port-Write notification from JTAG */ rio_write_config_32(rdev, IDT_JTAG_CTRL, 0); /* Disable Port-Write notification from I2C */ rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval); rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW); /* * Configure CFG_BLK error reporting. */ /* Disable Configuration Block error capture */ rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0); /* Disable Port-Writes for Configuration Block error reporting */ rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval); rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT, regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); return 0; } static int idtg2_em_handler(struct rio_dev *rdev, u8 portnum) { u32 regval, em_perrdet, em_ltlerrdet; rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); if (em_ltlerrdet) { /* Service Logical/Transport Layer Error(s) */ if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) { /* Implementation specific error reported */ rio_read_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, &regval); pr_debug("RIO: %s Implementation Specific LTL errors" \ " 0x%x @(0x%x)\n", rio_name(rdev), em_ltlerrdet, regval); /* Clear implementation specific address capture CSR */ rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0); } } rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); if (em_perrdet) { /* Service Port-Level Error(s) */ if (em_perrdet & REM_PED_IMPL_SPEC) { /* Implementation Specific port error reported */ /* Get IS errors reported */ rio_read_config_32(rdev, IDT_PORT_ISERR_DET(portnum), &regval); pr_debug("RIO: %s Implementation Specific Port" \ " errors 0x%x\n", rio_name(rdev), regval); /* Clear all implementation specific events */ rio_write_config_32(rdev, IDT_PORT_ISERR_DET(portnum), 0); } } return 0; } static ssize_t idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); ssize_t len = 0; u32 regval; while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) { if (!regval) /* 0 = end of log */ break; len += snprintf(buf + len, PAGE_SIZE - len, "%08x\n", regval); if (len >= (PAGE_SIZE - 10)) break; } return len; } static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); static int idtg2_sysfs(struct rio_dev *rdev, bool create) { struct device *dev = &rdev->dev; int err = 0; if (create) { /* Initialize sysfs entries */ err = device_create_file(dev, &dev_attr_errlog); if (err) dev_err(dev, "Unable create sysfs errlog file\n"); } else device_remove_file(dev, &dev_attr_errlog); return err; } static struct rio_switch_ops idtg2_switch_ops = { .owner = THIS_MODULE, .add_entry = idtg2_route_add_entry, .get_entry = idtg2_route_get_entry, .clr_table = idtg2_route_clr_table, .set_domain = idtg2_set_domain, .get_domain = idtg2_get_domain, .em_init = idtg2_em_init, .em_handle = idtg2_em_handler, }; static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops) { spin_unlock(&rdev->rswitch->lock); return -EINVAL; } rdev->rswitch->ops = &idtg2_switch_ops; if (rdev->do_enum) { /* Ensure that default routing is disabled on startup */ 
rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); } spin_unlock(&rdev->rswitch->lock); /* Create device-specific sysfs attributes */ idtg2_sysfs(rdev, true); return 0; } static void idtg2_remove(struct rio_dev *rdev) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops != &idtg2_switch_ops) { spin_unlock(&rdev->rswitch->lock); return; } rdev->rswitch->ops = NULL; spin_unlock(&rdev->rswitch->lock); /* Remove device-specific sysfs attributes */ idtg2_sysfs(rdev, false); } static const struct rio_device_id idtg2_id_table[] = { {RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTSPS1616, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTCPS1432, RIO_VID_IDT)}, { 0, } /* terminate list */ }; static struct rio_driver idtg2_driver = { .name = "idt_gen2", .id_table = idtg2_id_table, .probe = idtg2_probe, .remove = idtg2_remove, }; static int __init idtg2_init(void) { return rio_register_driver(&idtg2_driver); } static void __exit idtg2_exit(void) { pr_debug("RIO: %s\n", __func__); rio_unregister_driver(&idtg2_driver); pr_debug("RIO: %s done\n", __func__); } device_initcall(idtg2_init); module_exit(idtg2_exit); MODULE_DESCRIPTION("IDT CPS Gen.2 Serial RapidIO switch family driver"); MODULE_AUTHOR("Integrated Device Technology, Inc."); MODULE_LICENSE("GPL");
linux-master
drivers/rapidio/switches/idt_gen2.c
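An illustrative fragment showing how the rio_switch_ops registered by idt_gen2.c get exercised: rio_route_add_entry(), already used by rio-scan.c earlier in this document, dispatches to the driver's .add_entry (idtg2_route_add_entry() on a CPS Gen.2 switch). The helper name below is hypothetical; only rio_route_add_entry() and RIO_GLOBAL_TABLE come from the surrounding code.

/*
 * Illustrative helper (hypothetical name): rio_route_add_entry(), used by
 * rio-scan.c earlier in this document, dispatches to the .add_entry method
 * registered above, i.e. idtg2_route_add_entry() on a CPS Gen.2 switch.
 */
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include "../rio.h"	/* subsystem-private header, as the switch drivers above use */

static int demo_route_to(struct rio_dev *sw_rdev, u16 destid, u8 out_port)
{
	/*
	 * Program the broadcast (global) routing table so packets addressed
	 * to @destid leave the switch through @out_port.  The final argument
	 * asks the core to take the host device lock around the update.
	 */
	return rio_route_add_entry(sw_rdev, RIO_GLOBAL_TABLE,
				   destid, out_port, 1);
}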
// SPDX-License-Identifier: GPL-2.0-or-later /* * IDT CPS RapidIO switches support * * Copyright 2009-2010 Integrated Device Technology, Inc. * Alexandre Bounine <[email protected]> */ #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/module.h> #include "../rio.h" #define CPS_DEFAULT_ROUTE 0xde #define CPS_NO_ROUTE 0xdf #define IDTCPS_RIO_DOMAIN 0xf20020 static int idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { u32 result; if (route_port == RIO_INVALID_ROUTE) route_port = CPS_DEFAULT_ROUTE; if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); rio_mport_read_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); result = (0xffffff00 & result) | (u32)route_port; rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, result); } return 0; } static int idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 result; if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); rio_mport_read_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); if (CPS_DEFAULT_ROUTE == (u8)result || CPS_NO_ROUTE == (u8)result) *route_port = RIO_INVALID_ROUTE; else *route_port = (u8)result; } return 0; } static int idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 i; if (table == RIO_GLOBAL_TABLE) { for (i = 0x80000000; i <= 0x800000ff;) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_PORT_SEL_CSR, (CPS_DEFAULT_ROUTE << 24) | (CPS_DEFAULT_ROUTE << 16) | (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE); i += 4; } } return 0; } static int idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 sw_domain) { /* * Switch domain configuration operates only at global level */ rio_mport_write_config_32(mport, destid, hopcount, IDTCPS_RIO_DOMAIN, (u32)sw_domain); return 0; } static int idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, u8 *sw_domain) { u32 regval; /* * Switch domain configuration operates only at global level */ rio_mport_read_config_32(mport, destid, hopcount, IDTCPS_RIO_DOMAIN, &regval); *sw_domain = (u8)(regval & 0xff); return 0; } static struct rio_switch_ops idtcps_switch_ops = { .owner = THIS_MODULE, .add_entry = idtcps_route_add_entry, .get_entry = idtcps_route_get_entry, .clr_table = idtcps_route_clr_table, .set_domain = idtcps_set_domain, .get_domain = idtcps_get_domain, .em_init = NULL, .em_handle = NULL, }; static int idtcps_probe(struct rio_dev *rdev, const struct rio_device_id *id) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops) { spin_unlock(&rdev->rswitch->lock); return -EINVAL; } rdev->rswitch->ops = &idtcps_switch_ops; if (rdev->do_enum) { /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); /* Ensure that default routing is disabled on startup */ rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); } spin_unlock(&rdev->rswitch->lock); return 0; } static void idtcps_remove(struct rio_dev *rdev) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if 
(rdev->rswitch->ops != &idtcps_switch_ops) { spin_unlock(&rdev->rswitch->lock); return; } rdev->rswitch->ops = NULL; spin_unlock(&rdev->rswitch->lock); } static const struct rio_device_id idtcps_id_table[] = { {RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTCPS12, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTCPS16, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDT70K200, RIO_VID_IDT)}, { 0, } /* terminate list */ }; static struct rio_driver idtcps_driver = { .name = "idtcps", .id_table = idtcps_id_table, .probe = idtcps_probe, .remove = idtcps_remove, }; static int __init idtcps_init(void) { return rio_register_driver(&idtcps_driver); } static void __exit idtcps_exit(void) { rio_unregister_driver(&idtcps_driver); } device_initcall(idtcps_init); module_exit(idtcps_exit); MODULE_DESCRIPTION("IDT CPS Gen.1 Serial RapidIO switch family driver"); MODULE_AUTHOR("Integrated Device Technology, Inc."); MODULE_LICENSE("GPL");
linux-master
drivers/rapidio/switches/idtcps.c
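A skeleton sketch of the probe/remove pattern shared by idtcps.c and idt_gen2.c above: the driver claims rdev->rswitch->ops under rswitch->lock in probe and releases it in remove. The "demo_*" names and the 0x1234 device ID are placeholders, not real hardware support; rio_register_driver(), RIO_DEVICE() and RIO_VID_IDT are taken from the files above.

/*
 * Skeleton sketch of the probe/remove locking pattern shared by the switch
 * drivers above.  The "demo_*" names and the 0x1234 device ID are
 * placeholders, not real hardware support.
 */
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>

static struct rio_switch_ops demo_switch_ops = {
	.owner = THIS_MODULE,
	/* .add_entry, .get_entry, ... would be filled in for real hardware */
};

static int demo_sw_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops) {	/* already claimed by another driver */
		spin_unlock(&rdev->rswitch->lock);
		return -EINVAL;
	}
	rdev->rswitch->ops = &demo_switch_ops;
	spin_unlock(&rdev->rswitch->lock);
	return 0;
}

static void demo_sw_remove(struct rio_dev *rdev)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops == &demo_switch_ops)
		rdev->rswitch->ops = NULL;
	spin_unlock(&rdev->rswitch->lock);
}

static const struct rio_device_id demo_sw_id_table[] = {
	{RIO_DEVICE(0x1234 /* placeholder did */, RIO_VID_IDT)},
	{ 0, }	/* terminate list */
};

static struct rio_driver demo_sw_driver = {
	.name		= "demo_switch",
	.id_table	= demo_sw_id_table,
	.probe		= demo_sw_probe,
	.remove		= demo_sw_remove,
};

static int __init demo_sw_init(void)
{
	return rio_register_driver(&demo_sw_driver);
}

static void __exit demo_sw_exit(void)
{
	rio_unregister_driver(&demo_sw_driver);
}

module_init(demo_sw_init);
module_exit(demo_sw_exit);
MODULE_LICENSE("GPL");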
// SPDX-License-Identifier: GPL-2.0-or-later /* * IDT RXS Gen.3 Serial RapidIO switch family support * * Copyright 2016 Integrated Device Technology, Inc. */ #include <linux/stat.h> #include <linux/module.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/delay.h> #include <asm/page.h> #include "../rio.h" #define RIO_EM_PW_STAT 0x40020 #define RIO_PW_CTL 0x40204 #define RIO_PW_CTL_PW_TMR 0xffffff00 #define RIO_PW_ROUTE 0x40208 #define RIO_EM_DEV_INT_EN 0x40030 #define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100) #define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000 #define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100) #define RIO_PLM_SPx_PW_EN_OK2U 0x40000000 #define RIO_PLM_SPx_PW_EN_LINIT 0x10000000 #define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4) #define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \ (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4) static int idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { u32 rval; u32 entry = route_port; int err = 0; pr_debug("RIO: %s t=0x%x did_%x to p_%x\n", __func__, table, route_destid, entry); if (route_destid > 0xFF) return -EINVAL; if (route_port == RIO_INVALID_ROUTE) entry = RIO_RT_ENTRY_DROP_PKT; if (table == RIO_GLOBAL_TABLE) { /* Use broadcast register to update all per-port tables */ err = rio_mport_write_config_32(mport, destid, hopcount, RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid), entry); return err; } /* * Verify that specified port/table number is valid */ err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &rval); if (err) return err; if (table >= RIO_GET_TOTAL_PORTS(rval)) return -EINVAL; err = rio_mport_write_config_32(mport, destid, hopcount, RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), entry); return err; } static int idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 rval; int err; if (route_destid > 0xFF) return -EINVAL; err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &rval); if (err) return err; /* * This switch device does not have the dedicated global routing table. * It is substituted by reading routing table of the ingress port of * maintenance read requests. */ if (table == RIO_GLOBAL_TABLE) table = RIO_GET_PORT_NUM(rval); else if (table >= RIO_GET_TOTAL_PORTS(rval)) return -EINVAL; err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), &rval); if (err) return err; if (rval == RIO_RT_ENTRY_DROP_PKT) *route_port = RIO_INVALID_ROUTE; else *route_port = (u8)rval; return 0; } static int idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 i; u32 rval; int err; if (table == RIO_GLOBAL_TABLE) { for (i = 0; i <= 0xff; i++) { err = rio_mport_write_config_32(mport, destid, hopcount, RIO_BC_L2_Gn_ENTRYx_CSR(0, i), RIO_RT_ENTRY_DROP_PKT); if (err) break; } return err; } err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &rval); if (err) return err; if (table >= RIO_GET_TOTAL_PORTS(rval)) return -EINVAL; for (i = 0; i <= 0xff; i++) { err = rio_mport_write_config_32(mport, destid, hopcount, RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i), RIO_RT_ENTRY_DROP_PKT); if (err) break; } return err; } /* * This routine performs device-specific initialization only. * All standard EM configuration should be performed at upper level. 
*/ static int idtg3_em_init(struct rio_dev *rdev) { int i, tmp; u32 rval; pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Disable assertion of interrupt signal */ rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0); /* Disable port-write event notifications during initialization */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, RIO_EM_PW_TX_CTRL_PW_DIS); /* Configure Port-Write notifications for hot-swap events */ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i), &rval); if (rval & RIO_PORT_N_ERR_STS_PORT_UA) continue; /* Clear events signaled before enabling notification */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0); /* Enable event notifications */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i), RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK); /* Enable port-write generation on events */ rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i), RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT); } /* Set Port-Write destination port */ tmp = RIO_GET_PORT_NUM(rdev->swpinfo); rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp); /* Enable sending port-write event notifications */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); return 0; } /* * idtg3_em_handler - device-specific error handler * * If the link is down (PORT_UNINIT) does nothing - this is considered * as link partner removal from the port. * * If the link is up (PORT_OK) - situation is handled as *new* device insertion. * In this case ERR_STOP bits are cleared by issuing soft reset command to the * reporting port. Inbound and outbound ackIDs are cleared by the reset as well. * This way the port is synchronized with freshly inserted device (assuming it * was reset/powered-up on insertion). * * TODO: This is not sufficient in a situation when a link between two devices * was down and up again (e.g. cable disconnect). For that situation full ackID * realignment process has to be implemented. */ static int idtg3_em_handler(struct rio_dev *rdev, u8 pnum) { u32 err_status; u32 rval; rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), &err_status); /* Do nothing for device/link removal */ if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) return 0; /* When link is OK we have a device insertion. * Request port soft reset to clear errors if they present. * Inbound and outbound ackIDs will be 0 after reset. 
*/ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | RIO_PORT_N_ERR_STS_INP_ES)) { rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval); rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST); udelay(10); rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval); msleep(500); } return 0; } static struct rio_switch_ops idtg3_switch_ops = { .owner = THIS_MODULE, .add_entry = idtg3_route_add_entry, .get_entry = idtg3_route_get_entry, .clr_table = idtg3_route_clr_table, .em_init = idtg3_em_init, .em_handle = idtg3_em_handler, }; static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops) { spin_unlock(&rdev->rswitch->lock); return -EINVAL; } rdev->rswitch->ops = &idtg3_switch_ops; if (rdev->do_enum) { /* Disable hierarchical routing support: Existing fabric * enumeration/discovery process (see rio-scan.c) uses 8-bit * flat destination ID routing only. */ rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0); } spin_unlock(&rdev->rswitch->lock); return 0; } static void idtg3_remove(struct rio_dev *rdev) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops == &idtg3_switch_ops) rdev->rswitch->ops = NULL; spin_unlock(&rdev->rswitch->lock); } /* * Gen3 switches repeat sending PW messages until a corresponding event flag * is cleared. Use shutdown notification to disable generation of port-write * messages if their destination node is shut down. */ static void idtg3_shutdown(struct rio_dev *rdev) { int i; u32 rval; u16 destid; /* Currently the enumerator node acts also as PW handler */ if (!rdev->do_enum) return; pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev)); rio_read_config_32(rdev, RIO_PW_ROUTE, &rval); i = RIO_GET_PORT_NUM(rdev->swpinfo); /* Check port-write destination port */ if (!((1 << i) & rval)) return; /* Disable sending port-write event notifications if PW destID * matches to one of the enumerator node */ rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval); if (rval & RIO_EM_PW_TGT_DEVID_DEV16) destid = rval >> 16; else destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16); if (rdev->net->hport->host_deviceid == destid) { rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); pr_debug("RIO: %s(%s) PW transmission disabled\n", __func__, rio_name(rdev)); } } static const struct rio_device_id idtg3_id_table[] = { {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)}, { 0, } /* terminate list */ }; static struct rio_driver idtg3_driver = { .name = "idt_gen3", .id_table = idtg3_id_table, .probe = idtg3_probe, .remove = idtg3_remove, .shutdown = idtg3_shutdown, }; static int __init idtg3_init(void) { return rio_register_driver(&idtg3_driver); } static void __exit idtg3_exit(void) { pr_debug("RIO: %s\n", __func__); rio_unregister_driver(&idtg3_driver); pr_debug("RIO: %s done\n", __func__); } device_initcall(idtg3_init); module_exit(idtg3_exit); MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver"); MODULE_AUTHOR("Integrated Device Technology, Inc."); MODULE_LICENSE("GPL");
linux-master
drivers/rapidio/switches/idt_gen3.c
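A complementary read-side sketch: rio_route_get_entry(), used by rio_build_route_tables() earlier in this document, dispatches to the .get_entry method of the switch driver above (for RXS Gen.3 parts the "global" table is emulated by reading the ingress port's table, as idtg3_route_get_entry() notes). The dump helper itself is hypothetical; RIO_GLOBAL_TABLE, RIO_INVALID_ROUTE and rio_route_get_entry() come from the surrounding code.

/*
 * Illustrative helper (hypothetical name): dump a switch's effective global
 * routing table through rio_route_get_entry(), which dispatches to the
 * .get_entry implementation registered above.
 */
#include <linux/kernel.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include "../rio.h"	/* subsystem-private header, as the switch drivers above use */

static void demo_dump_routes(struct rio_dev *sw_rdev, u32 max_destid)
{
	u32 destid;
	u8 port;

	for (destid = 0; destid <= max_destid; destid++) {
		/* Final argument asks the core to lock the switch device. */
		if (rio_route_get_entry(sw_rdev, RIO_GLOBAL_TABLE,
					(u16)destid, &port, 1) < 0)
			continue;
		if (port != RIO_INVALID_ROUTE)
			pr_info("destid 0x%04x -> port %u\n", destid, port);
	}
}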
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge * * Copyright 2011 Integrated Device Technology, Inc. * Alexandre Bounine <[email protected]> * Chul Kim <[email protected]> */ #include <linux/io.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/kfifo.h> #include <linux/delay.h> #include "tsi721.h" #ifdef DEBUG u32 tsi_dbg_level; module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif static int pcie_mrrs = -1; module_param(pcie_mrrs, int, S_IRUGO); MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)"); static u8 mbox_sel = 0x0f; module_param(mbox_sel, byte, S_IRUGO); MODULE_PARM_DESC(mbox_sel, "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); static DEFINE_SPINLOCK(tsi721_maint_lock); static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); /** * tsi721_lcread - read from local SREP config space * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be read into * * Generates a local SREP space read. Returns %0 on * success or %-EINVAL on failure. */ static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset, int len, u32 *data) { struct tsi721_device *priv = mport->priv; if (len != sizeof(u32)) return -EINVAL; /* only 32-bit access is supported */ *data = ioread32(priv->regs + offset); return 0; } /** * tsi721_lcwrite - write into local SREP config space * @mport: RapidIO master port info * @index: ID of RapdiIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be written * * Generates a local write into SREP configuration space. Returns %0 on * success or %-EINVAL on failure. */ static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset, int len, u32 data) { struct tsi721_device *priv = mport->priv; if (len != sizeof(u32)) return -EINVAL; /* only 32-bit access is supported */ iowrite32(data, priv->regs + offset); return 0; } /** * tsi721_maint_dma - Helper function to generate RapidIO maintenance * transactions using designated Tsi721 DMA channel. * @priv: pointer to tsi721 private data * @sys_size: RapdiIO transport system size * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Location to be read from or write into * @do_wr: Operation flag (1 == MAINT_WR) * * Generates a RapidIO maintenance transaction (Read or Write). * Returns %0 on success and %-EINVAL or %-EFAULT on failure. */ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, u16 destid, u8 hopcount, u32 offset, int len, u32 *data, int do_wr) { void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); struct tsi721_dma_desc *bd_ptr; u32 rd_count, swr_ptr, ch_stat; unsigned long flags; int i, err = 0; u32 op = do_wr ? 
MAINT_WR : MAINT_RD; if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) return -EINVAL; spin_lock_irqsave(&tsi721_maint_lock, flags); bd_ptr = priv->mdma.bd_base; rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); /* Initialize DMA descriptor */ bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04); bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset); bd_ptr[0].raddr_hi = 0; if (do_wr) bd_ptr[0].data[0] = cpu_to_be32p(data); else bd_ptr[0].data[0] = 0xffffffff; mb(); /* Start DMA operation */ iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT); ioread32(regs + TSI721_DMAC_DWRCNT); i = 0; /* Wait until DMA transfer is finished */ while ((ch_stat = ioread32(regs + TSI721_DMAC_STS)) & TSI721_DMAC_STS_RUN) { udelay(1); if (++i >= 5000000) { tsi_debug(MAINT, &priv->pdev->dev, "DMA[%d] read timeout ch_status=%x", priv->mdma.ch_id, ch_stat); if (!do_wr) *data = 0xffffffff; err = -EIO; goto err_out; } } if (ch_stat & TSI721_DMAC_STS_ABORT) { /* If DMA operation aborted due to error, * reinitialize DMA channel */ tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x", ch_stat); tsi_debug(MAINT, &priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x", do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); udelay(10); iowrite32(0, regs + TSI721_DMAC_DWRCNT); udelay(1); if (!do_wr) *data = 0xffffffff; err = -EIO; goto err_out; } if (!do_wr) *data = be32_to_cpu(bd_ptr[0].data[0]); /* * Update descriptor status FIFO RD pointer. * NOTE: Skipping check and clear FIFO entries because we are waiting * for transfer to be completed. */ swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); err_out: spin_unlock_irqrestore(&tsi721_maint_lock, flags); return err; } /** * tsi721_cread_dma - Generate a RapidIO maintenance read transaction * using Tsi721 BDMA engine. * @mport: RapidIO master port control structure * @index: ID of RapdiIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Location to be read into * * Generates a RapidIO maintenance read transaction. * Returns %0 on success and %-EINVAL or %-EFAULT on failure. */ static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *data) { struct tsi721_device *priv = mport->priv; return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, offset, len, data, 0); } /** * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction * using Tsi721 BDMA engine * @mport: RapidIO master port control structure * @index: ID of RapdiIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Value to be written * * Generates a RapidIO maintenance write transaction. * Returns %0 on success and %-EINVAL or %-EFAULT on failure. 
*/ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 data) { struct tsi721_device *priv = mport->priv; u32 temp = data; return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, offset, len, &temp, 1); } /** * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler * @priv: tsi721 device private structure * * Handles inbound port-write interrupts. Copies PW message from an internal * buffer into PW message FIFO and schedules deferred routine to process * queued messages. */ static int tsi721_pw_handler(struct tsi721_device *priv) { u32 pw_stat; u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)]; pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT); if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) { pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0)); pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1)); pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2)); pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3)); /* Queue PW message (if there is room in FIFO), * otherwise discard it. */ spin_lock(&priv->pw_fifo_lock); if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE) kfifo_in(&priv->pw_fifo, pw_buf, TSI721_RIO_PW_MSG_SIZE); else priv->pw_discard_count++; spin_unlock(&priv->pw_fifo_lock); } /* Clear pending PW interrupts */ iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, priv->regs + TSI721_RIO_PW_RX_STAT); schedule_work(&priv->pw_work); return 0; } static void tsi721_pw_dpc(struct work_struct *work) { struct tsi721_device *priv = container_of(work, struct tsi721_device, pw_work); union rio_pw_msg pwmsg; /* * Process port-write messages */ while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg, TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) { /* Pass the port-write message to RIO core for processing */ rio_inb_pwrite_handler(&priv->mport, &pwmsg); } } /** * tsi721_pw_enable - enable/disable port-write interface init * @mport: Master port implementing the port write unit * @enable: 1=enable; 0=disable port-write message handling */ static int tsi721_pw_enable(struct rio_mport *mport, int enable) { struct tsi721_device *priv = mport->priv; u32 rval; rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE); if (enable) rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX; else rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX; /* Clear pending PW interrupts */ iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, priv->regs + TSI721_RIO_PW_RX_STAT); /* Update enable bits */ iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE); return 0; } /** * tsi721_dsend - Send a RapidIO doorbell * @mport: RapidIO master port info * @index: ID of RapidIO interface * @destid: Destination ID of target device * @data: 16-bit info field of RapidIO doorbell * * Sends a RapidIO doorbell message. Always returns %0. */ static int tsi721_dsend(struct rio_mport *mport, int index, u16 destid, u16 data) { struct tsi721_device *priv = mport->priv; u32 offset; offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) | (destid << 2); tsi_debug(DBELL, &priv->pdev->dev, "Send Doorbell 0x%04x to destID 0x%x", data, destid); iowrite16be(data, priv->odb_base + offset); return 0; } /** * tsi721_dbell_handler - Tsi721 doorbell interrupt handler * @priv: tsi721 device-specific data structure * * Handles inbound doorbell interrupts. Copies doorbell entry from an internal * buffer into DB message FIFO and schedules deferred routine to process * queued DBs. 
*/ static int tsi721_dbell_handler(struct tsi721_device *priv) { u32 regval; /* Disable IDB interrupts */ regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); regval &= ~TSI721_SR_CHINT_IDBQRCV; iowrite32(regval, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); schedule_work(&priv->idb_work); return 0; } static void tsi721_db_dpc(struct work_struct *work) { struct tsi721_device *priv = container_of(work, struct tsi721_device, idb_work); struct rio_mport *mport; struct rio_dbell *dbell; int found = 0; u32 wr_ptr, rd_ptr; u64 *idb_entry; u32 regval; union { u64 msg; u8 bytes[8]; } idb; /* * Process queued inbound doorbells */ mport = &priv->mport; wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE; while (wr_ptr != rd_ptr) { idb_entry = (u64 *)(priv->idb_base + (TSI721_IDB_ENTRY_SIZE * rd_ptr)); rd_ptr++; rd_ptr %= IDB_QSIZE; idb.msg = *idb_entry; *idb_entry = 0; /* Process one doorbell */ list_for_each_entry(dbell, &mport->dbells, node) { if ((dbell->res->start <= DBELL_INF(idb.bytes)) && (dbell->res->end >= DBELL_INF(idb.bytes))) { found = 1; break; } } if (found) { dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } else { tsi_debug(DBELL, &priv->pdev->dev, "spurious IDB sid %2.2x tid %2.2x info %4.4x", DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; } iowrite32(rd_ptr & (IDB_QSIZE - 1), priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); /* Re-enable IDB interrupts */ regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); regval |= TSI721_SR_CHINT_IDBQRCV; iowrite32(regval, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; if (wr_ptr != rd_ptr) schedule_work(&priv->idb_work); } /** * tsi721_irqhandler - Tsi721 interrupt handler * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported * interrupt events and calls an event-specific handler(s). 
*/ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) { struct tsi721_device *priv = (struct tsi721_device *)ptr; u32 dev_int; u32 dev_ch_int; u32 intval; u32 ch_inte; /* For MSI mode disable all device-level interrupts */ if (priv->flags & TSI721_USING_MSI) iowrite32(0, priv->regs + TSI721_DEV_INTE); dev_int = ioread32(priv->regs + TSI721_DEV_INT); if (!dev_int) return IRQ_NONE; dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT); if (dev_int & TSI721_DEV_INT_SR2PC_CH) { /* Service SR2PC Channel interrupts */ if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) { /* Service Inbound Doorbell interrupt */ intval = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); if (intval & TSI721_SR_CHINT_IDBQRCV) tsi721_dbell_handler(priv); else tsi_info(&priv->pdev->dev, "Unsupported SR_CH_INT %x", intval); /* Clear interrupts */ iowrite32(intval, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); } } if (dev_int & TSI721_DEV_INT_SMSG_CH) { int ch; /* * Service channel interrupts from Messaging Engine */ if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */ /* Disable signaled OB MSG Channel interrupts */ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M); iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); /* * Process Inbound Message interrupt for each MBOX */ for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) { if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch))) continue; tsi721_imsg_handler(priv, ch); } } if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */ /* Disable signaled OB MSG Channel interrupts */ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M); iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); /* * Process Outbound Message interrupts for each MBOX */ for (ch = 0; ch < RIO_MAX_MBOX; ch++) { if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch))) continue; tsi721_omsg_handler(priv, ch); } } } if (dev_int & TSI721_DEV_INT_SRIO) { /* Service SRIO MAC interrupts */ intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); if (intval & TSI721_RIO_EM_INT_STAT_PW_RX) tsi721_pw_handler(priv); } #ifdef CONFIG_RAPIDIO_DMA_ENGINE if (dev_int & TSI721_DEV_INT_BDMA_CH) { int ch; if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { tsi_debug(DMA, &priv->pdev->dev, "IRQ from DMA channel 0x%08x", dev_ch_int); for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) continue; tsi721_bdma_handler(&priv->bdma[ch]); } } } #endif /* For MSI mode re-enable device-level interrupts */ if (priv->flags & TSI721_USING_MSI) { dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); } return IRQ_HANDLED; } static void tsi721_interrupts_init(struct tsi721_device *priv) { u32 intr; /* Enable IDB interrupts */ iowrite32(TSI721_SR_CHINT_ALL, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); iowrite32(TSI721_SR_CHINT_IDBQRCV, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); /* Enable SRIO MAC interrupts */ iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, priv->regs + TSI721_RIO_EM_DEV_INT_EN); /* Enable interrupts from channels in use */ #ifdef CONFIG_RAPIDIO_DMA_ENGINE intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | (TSI721_INT_BDMA_CHAN_M & ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); #else intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); #endif iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE); if (priv->flags & TSI721_USING_MSIX) intr = TSI721_DEV_INT_SRIO; else intr = 
TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; iowrite32(intr, priv->regs + TSI721_DEV_INTE); ioread32(priv->regs + TSI721_DEV_INTE); } #ifdef CONFIG_PCI_MSI /** * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles outbound messaging interrupts signaled using MSI-X. */ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) { struct tsi721_device *priv = (struct tsi721_device *)ptr; int mbox; mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; tsi721_omsg_handler(priv, mbox); return IRQ_HANDLED; } /** * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles inbound messaging interrupts signaled using MSI-X. */ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) { struct tsi721_device *priv = (struct tsi721_device *)ptr; int mbox; mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; tsi721_imsg_handler(priv, mbox + 4); return IRQ_HANDLED; } /** * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts from SRIO MAC. */ static irqreturn_t tsi721_srio_msix(int irq, void *ptr) { struct tsi721_device *priv = (struct tsi721_device *)ptr; u32 srio_int; /* Service SRIO MAC interrupts */ srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX) tsi721_pw_handler(priv); return IRQ_HANDLED; } /** * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts from SR2PC Channel. * NOTE: At this moment services only one SR2PC channel associated with inbound * doorbells. */ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) { struct tsi721_device *priv = (struct tsi721_device *)ptr; u32 sr_ch_int; /* Service Inbound DB interrupt from SR2PC channel */ sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV) tsi721_dbell_handler(priv); /* Clear interrupts */ iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); /* Read back to ensure that interrupt was cleared */ sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); return IRQ_HANDLED; } /** * tsi721_request_msix - register interrupt service for MSI-X mode. * @priv: tsi721 device-specific data structure * * Registers MSI-X interrupt service routines for interrupts that are active * immediately after mport initialization. Messaging interrupt service routines * should be registered during corresponding open requests. */ static int tsi721_request_msix(struct tsi721_device *priv) { int err = 0; err = request_irq(priv->msix[TSI721_VECT_IDB].vector, tsi721_sr2pc_ch_msix, 0, priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv); if (err) return err; err = request_irq(priv->msix[TSI721_VECT_PWRX].vector, tsi721_srio_msix, 0, priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv); if (err) { free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); return err; } return 0; } /** * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721. * @priv: pointer to tsi721 private data * * Configures MSI-X support for Tsi721. 
Supports only an exact number * of requested vectors. */ static int tsi721_enable_msix(struct tsi721_device *priv) { struct msix_entry entries[TSI721_VECT_MAX]; int err; int i; entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE); entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT; /* * Initialize MSI-X entries for Messaging Engine: * this driver supports four RIO mailboxes (inbound and outbound) * NOTE: Inbound message MBOX 0...4 use IB channels 4...7. Therefore * offset +4 is added to IB MBOX number. */ for (i = 0; i < RIO_MAX_MBOX; i++) { entries[TSI721_VECT_IMB0_RCV + i].entry = TSI721_MSIX_IMSG_DQ_RCV(i + 4); entries[TSI721_VECT_IMB0_INT + i].entry = TSI721_MSIX_IMSG_INT(i + 4); entries[TSI721_VECT_OMB0_DONE + i].entry = TSI721_MSIX_OMSG_DONE(i); entries[TSI721_VECT_OMB0_INT + i].entry = TSI721_MSIX_OMSG_INT(i); } #ifdef CONFIG_RAPIDIO_DMA_ENGINE /* * Initialize MSI-X entries for Block DMA Engine: * this driver supports XXX DMA channels * (one is reserved for SRIO maintenance transactions) */ for (i = 0; i < TSI721_DMA_CHNUM; i++) { entries[TSI721_VECT_DMA0_DONE + i].entry = TSI721_MSIX_DMACH_DONE(i); entries[TSI721_VECT_DMA0_INT + i].entry = TSI721_MSIX_DMACH_INT(i); } #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries)); if (err) { tsi_err(&priv->pdev->dev, "Failed to enable MSI-X (err=%d)", err); return err; } /* * Copy MSI-X vector information into tsi721 private structure */ priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector; snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-idb@pci:%s", pci_name(priv->pdev)); priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector; snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev)); for (i = 0; i < RIO_MAX_MBOX; i++) { priv->msix[TSI721_VECT_IMB0_RCV + i].vector = entries[TSI721_VECT_IMB0_RCV + i].vector; snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s", i, pci_name(priv->pdev)); priv->msix[TSI721_VECT_IMB0_INT + i].vector = entries[TSI721_VECT_IMB0_INT + i].vector; snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s", i, pci_name(priv->pdev)); priv->msix[TSI721_VECT_OMB0_DONE + i].vector = entries[TSI721_VECT_OMB0_DONE + i].vector; snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s", i, pci_name(priv->pdev)); priv->msix[TSI721_VECT_OMB0_INT + i].vector = entries[TSI721_VECT_OMB0_INT + i].vector; snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s", i, pci_name(priv->pdev)); } #ifdef CONFIG_RAPIDIO_DMA_ENGINE for (i = 0; i < TSI721_DMA_CHNUM; i++) { priv->msix[TSI721_VECT_DMA0_DONE + i].vector = entries[TSI721_VECT_DMA0_DONE + i].vector; snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s", i, pci_name(priv->pdev)); priv->msix[TSI721_VECT_DMA0_INT + i].vector = entries[TSI721_VECT_DMA0_INT + i].vector; snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name, IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s", i, pci_name(priv->pdev)); } #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ return 0; } #endif /* CONFIG_PCI_MSI */ static int tsi721_request_irq(struct tsi721_device *priv) { int err; #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) err = tsi721_request_msix(priv); else #endif err = 
request_irq(priv->pdev->irq, tsi721_irqhandler, (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED, DRV_NAME, (void *)priv); if (err) tsi_err(&priv->pdev->dev, "Unable to allocate interrupt, err=%d", err); return err; } static void tsi721_free_irq(struct tsi721_device *priv) { #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv); } else #endif free_irq(priv->pdev->irq, (void *)priv); } static int tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar, u32 size, int *win_id) { u64 win_base; u64 bar_base; u64 bar_end; u32 align; struct tsi721_ob_win *win; struct tsi721_ob_win *new_win = NULL; int new_win_idx = -1; int i = 0; bar_base = pbar->base; bar_end = bar_base + pbar->size; win_base = bar_base; align = size/TSI721_PC2SR_ZONES; while (i < TSI721_IBWIN_NUM) { for (i = 0; i < TSI721_IBWIN_NUM; i++) { if (!priv->ob_win[i].active) { if (new_win == NULL) { new_win = &priv->ob_win[i]; new_win_idx = i; } continue; } /* * If this window belongs to the current BAR check it * for overlap */ win = &priv->ob_win[i]; if (win->base >= bar_base && win->base < bar_end) { if (win_base < (win->base + win->size) && (win_base + size) > win->base) { /* Overlap detected */ win_base = win->base + win->size; win_base = ALIGN(win_base, align); break; } } } } if (win_base + size > bar_end) return -ENOMEM; if (!new_win) { tsi_err(&priv->pdev->dev, "OBW count tracking failed"); return -EIO; } new_win->active = true; new_win->base = win_base; new_win->size = size; new_win->pbar = pbar; priv->obwin_cnt--; pbar->free -= size; *win_id = new_win_idx; return 0; } static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart, u32 size, u32 flags, dma_addr_t *laddr) { struct tsi721_device *priv = mport->priv; int i; struct tsi721_obw_bar *pbar; struct tsi721_ob_win *ob_win; int obw = -1; u32 rval; u64 rio_addr; u32 zsize; int ret = -ENOMEM; tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx sz=0x%x", destid, rstart, size); if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1))) return -EINVAL; if (priv->obwin_cnt == 0) return -EBUSY; for (i = 0; i < 2; i++) { if (priv->p2r_bar[i].free >= size) { pbar = &priv->p2r_bar[i]; ret = tsi721_obw_alloc(priv, pbar, size, &obw); if (!ret) break; } } if (ret) return ret; WARN_ON(obw == -1); ob_win = &priv->ob_win[obw]; ob_win->destid = destid; ob_win->rstart = rstart; tsi_debug(OBW, &priv->pdev->dev, "allocated OBW%d @%llx", obw, ob_win->base); /* * Configure Outbound Window */ zsize = size/TSI721_PC2SR_ZONES; rio_addr = rstart; /* * Program Address Translation Zones: * This implementation uses all 8 zones associated wit window. 
*/ for (i = 0; i < TSI721_PC2SR_ZONES; i++) { while (ioread32(priv->regs + TSI721_ZONE_SEL) & TSI721_ZONE_SEL_GO) { udelay(1); } rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) | TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR; iowrite32(rval, priv->regs + TSI721_LUT_DATA0); rval = (u32)(rio_addr >> 32); iowrite32(rval, priv->regs + TSI721_LUT_DATA1); rval = destid; iowrite32(rval, priv->regs + TSI721_LUT_DATA2); rval = TSI721_ZONE_SEL_GO | (obw << 3) | i; iowrite32(rval, priv->regs + TSI721_ZONE_SEL); rio_addr += zsize; } iowrite32(TSI721_OBWIN_SIZE(size) << 8, priv->regs + TSI721_OBWINSZ(obw)); iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw)); iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN, priv->regs + TSI721_OBWINLB(obw)); *laddr = ob_win->base; return 0; } static void tsi721_unmap_outb_win(struct rio_mport *mport, u16 destid, u64 rstart) { struct tsi721_device *priv = mport->priv; struct tsi721_ob_win *ob_win; int i; tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart); for (i = 0; i < TSI721_OBWIN_NUM; i++) { ob_win = &priv->ob_win[i]; if (ob_win->active && ob_win->destid == destid && ob_win->rstart == rstart) { tsi_debug(OBW, &priv->pdev->dev, "free OBW%d @%llx", i, ob_win->base); ob_win->active = false; iowrite32(0, priv->regs + TSI721_OBWINLB(i)); ob_win->pbar->free += ob_win->size; priv->obwin_cnt++; break; } } } /** * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO) * translation regions. * @priv: pointer to tsi721 private data * * Disables SREP translation regions. */ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) { int i, z; u32 rval; /* Disable all PC2SR translation windows */ for (i = 0; i < TSI721_OBWIN_NUM; i++) iowrite32(0, priv->regs + TSI721_OBWINLB(i)); /* Initialize zone lookup tables to avoid ECC errors on reads */ iowrite32(0, priv->regs + TSI721_LUT_DATA0); iowrite32(0, priv->regs + TSI721_LUT_DATA1); iowrite32(0, priv->regs + TSI721_LUT_DATA2); for (i = 0; i < TSI721_OBWIN_NUM; i++) { for (z = 0; z < TSI721_PC2SR_ZONES; z++) { while (ioread32(priv->regs + TSI721_ZONE_SEL) & TSI721_ZONE_SEL_GO) { udelay(1); } rval = TSI721_ZONE_SEL_GO | (i << 3) | z; iowrite32(rval, priv->regs + TSI721_ZONE_SEL); } } if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) { priv->obwin_cnt = 0; return; } priv->p2r_bar[0].free = priv->p2r_bar[0].size; priv->p2r_bar[1].free = priv->p2r_bar[1].size; for (i = 0; i < TSI721_OBWIN_NUM; i++) priv->ob_win[i].active = false; priv->obwin_cnt = TSI721_OBWIN_NUM; } /** * tsi721_rio_map_inb_mem -- Mapping inbound memory region. * @mport: RapidIO master port * @lstart: Local memory space start address. * @rstart: RapidIO space start address. * @size: The mapping region size. * @flags: Flags for mapping. 0 for using default flags. * * Return: 0 -- Success. * * This function will create the inbound mapping * from rstart to lstart. 
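 *
 * If @lstart == @rstart a direct (non-translating) window is used: its size
 * is rounded up to a power of two and its base aligned down so the whole
 * request is covered. For example, lstart = 0x2800000 with size = 0x1000000
 * first gives a 16 MB window at 0x2000000; because that does not reach
 * lstart + size, the window is doubled to 32 MB at the same base. Otherwise
 * an address-translating window is used and @size must be a power of two
 * (at least 4 KB) with both addresses aligned to it.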
*/ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, u64 rstart, u64 size, u32 flags) { struct tsi721_device *priv = mport->priv; int i, avail = -1; u32 regval; struct tsi721_ib_win *ib_win; bool direct = (lstart == rstart); u64 ibw_size; dma_addr_t loc_start; u64 ibw_start; struct tsi721_ib_win_mapping *map = NULL; int ret = -EBUSY; /* Max IBW size supported by HW is 16GB */ if (size > 0x400000000UL) return -EINVAL; if (direct) { /* Calculate minimal acceptable window size and base address */ ibw_size = roundup_pow_of_two(size); ibw_start = lstart & ~(ibw_size - 1); tsi_debug(IBW, &priv->pdev->dev, "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx", rstart, &lstart, size, ibw_start); while ((lstart + size) > (ibw_start + ibw_size)) { ibw_size *= 2; ibw_start = lstart & ~(ibw_size - 1); /* Check for crossing IBW max size 16GB */ if (ibw_size > 0x400000000UL) return -EBUSY; } loc_start = ibw_start; map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC); if (map == NULL) return -ENOMEM; } else { tsi_debug(IBW, &priv->pdev->dev, "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx", rstart, &lstart, size); if (!is_power_of_2(size) || size < 0x1000 || ((u64)lstart & (size - 1)) || (rstart & (size - 1))) return -EINVAL; if (priv->ibwin_cnt == 0) return -EBUSY; ibw_start = rstart; ibw_size = size; loc_start = lstart; } /* * Scan for overlapping with active regions and mark the first available * IB window at the same time. */ for (i = 0; i < TSI721_IBWIN_NUM; i++) { ib_win = &priv->ib_win[i]; if (!ib_win->active) { if (avail == -1) { avail = i; ret = 0; } } else if (ibw_start < (ib_win->rstart + ib_win->size) && (ibw_start + ibw_size) > ib_win->rstart) { /* Return error if address translation involved */ if (!direct || ib_win->xlat) { ret = -EFAULT; break; } /* * Direct mappings usually are larger than originally * requested fragments - check if this new request fits * into it. */ if (rstart >= ib_win->rstart && (rstart + size) <= (ib_win->rstart + ib_win->size)) { /* We are in - no further mapping required */ map->lstart = lstart; list_add_tail(&map->node, &ib_win->mappings); return 0; } ret = -EFAULT; break; } } if (ret) goto out; i = avail; /* Sanity check: available IB window must be disabled at this point */ regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) { ret = -EIO; goto out; } ib_win = &priv->ib_win[i]; ib_win->active = true; ib_win->rstart = ibw_start; ib_win->lstart = loc_start; ib_win->size = ibw_size; ib_win->xlat = (lstart != rstart); INIT_LIST_HEAD(&ib_win->mappings); /* * When using direct IBW mapping and have larger than requested IBW size * we can have multiple local memory blocks mapped through the same IBW * To handle this situation we maintain list of "clients" for such IBWs. 
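 * Each mapping request is recorded on ib_win->mappings; the window itself
 * is disabled only when tsi721_rio_unmap_inb_mem() removes the last entry
 * from that list.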
*/ if (direct) { map->lstart = lstart; list_add_tail(&map->node, &ib_win->mappings); } iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8, priv->regs + TSI721_IBWIN_SZ(i)); iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i)); iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD), priv->regs + TSI721_IBWIN_TLA(i)); iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i)); iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN, priv->regs + TSI721_IBWIN_LB(i)); priv->ibwin_cnt--; tsi_debug(IBW, &priv->pdev->dev, "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx", i, ibw_start, &loc_start, ibw_size); return 0; out: kfree(map); return ret; } /** * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region. * @mport: RapidIO master port * @lstart: Local memory space start address. */ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart) { struct tsi721_device *priv = mport->priv; struct tsi721_ib_win *ib_win; int i; tsi_debug(IBW, &priv->pdev->dev, "Unmap IBW mapped to PCIe_%pad", &lstart); /* Search for matching active inbound translation window */ for (i = 0; i < TSI721_IBWIN_NUM; i++) { ib_win = &priv->ib_win[i]; /* Address translating IBWs must to be an exact march */ if (!ib_win->active || (ib_win->xlat && lstart != ib_win->lstart)) continue; if (lstart >= ib_win->lstart && lstart < (ib_win->lstart + ib_win->size)) { if (!ib_win->xlat) { struct tsi721_ib_win_mapping *map; int found = 0; list_for_each_entry(map, &ib_win->mappings, node) { if (map->lstart == lstart) { list_del(&map->node); kfree(map); found = 1; break; } } if (!found) continue; if (!list_empty(&ib_win->mappings)) break; } tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i); iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); ib_win->active = false; priv->ibwin_cnt++; break; } } if (i == TSI721_IBWIN_NUM) tsi_debug(IBW, &priv->pdev->dev, "IB window mapped to %pad not found", &lstart); } /** * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe) * translation regions. * @priv: pointer to tsi721 private data * * Disables inbound windows. */ static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) { int i; /* Disable all SR2PC inbound windows */ for (i = 0; i < TSI721_IBWIN_NUM; i++) iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); priv->ibwin_cnt = TSI721_IBWIN_NUM; } /* * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe) * translation regions. * @priv: pointer to tsi721 device private data */ static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv) { struct tsi721_ib_win *ib_win; int i; /* Disable all active SR2PC inbound windows */ for (i = 0; i < TSI721_IBWIN_NUM; i++) { ib_win = &priv->ib_win[i]; if (ib_win->active) { iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); ib_win->active = false; } } } /** * tsi721_port_write_init - Inbound port write interface init * @priv: pointer to tsi721 private data * * Initializes inbound port write handler. * Returns %0 on success or %-ENOMEM on failure. 
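 *
 * Allocates a FIFO able to hold 32 inbound port-write messages and selects
 * the reliable port-write capture mode.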
*/ static int tsi721_port_write_init(struct tsi721_device *priv) { priv->pw_discard_count = 0; INIT_WORK(&priv->pw_work, tsi721_pw_dpc); spin_lock_init(&priv->pw_fifo_lock); if (kfifo_alloc(&priv->pw_fifo, TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { tsi_err(&priv->pdev->dev, "PW FIFO allocation failed"); return -ENOMEM; } /* Use reliable port-write capture mode */ iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL); return 0; } static void tsi721_port_write_free(struct tsi721_device *priv) { kfifo_free(&priv->pw_fifo); } static int tsi721_doorbell_init(struct tsi721_device *priv) { /* Outbound Doorbells do not require any setup. * Tsi721 uses dedicated PCI BAR1 to generate doorbells. * That BAR1 was mapped during the probe routine. */ /* Initialize Inbound Doorbell processing DPC and queue */ priv->db_discard_count = 0; INIT_WORK(&priv->idb_work, tsi721_db_dpc); /* Allocate buffer for inbound doorbells queue */ priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, &priv->idb_dma, GFP_KERNEL); if (!priv->idb_base) return -ENOMEM; tsi_debug(DBELL, &priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %pad)", priv->idb_base, &priv->idb_dma); iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE), priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE)); iowrite32(((u64)priv->idb_dma >> 32), priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE)); iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR), priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE)); /* Enable accepting all inbound doorbells */ iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE)); iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE)); iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); return 0; } static void tsi721_doorbell_free(struct tsi721_device *priv) { if (priv->idb_base == NULL) return; /* Free buffer allocated for inbound doorbell queue */ dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, priv->idb_base, priv->idb_dma); priv->idb_base = NULL; } /** * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel. * @priv: pointer to tsi721 private data * * Initialize BDMA channel allocated for RapidIO maintenance read/write * request generation * Returns %0 on success or %-ENOMEM on failure. */ static int tsi721_bdma_maint_init(struct tsi721_device *priv) { struct tsi721_dma_desc *bd_ptr; u64 *sts_ptr; dma_addr_t bd_phys, sts_phys; int sts_size; int bd_num = 2; void __iomem *regs; tsi_debug(MAINT, &priv->pdev->dev, "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT); /* * Initialize DMA channel for maintenance requests */ priv->mdma.ch_id = TSI721_DMACH_MAINT; regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); /* Allocate space for DMA descriptors */ bd_ptr = dma_alloc_coherent(&priv->pdev->dev, bd_num * sizeof(struct tsi721_dma_desc), &bd_phys, GFP_KERNEL); if (!bd_ptr) return -ENOMEM; priv->mdma.bd_num = bd_num; priv->mdma.bd_phys = bd_phys; priv->mdma.bd_base = bd_ptr; tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)", bd_ptr, &bd_phys); /* Allocate space for descriptor status FIFO */ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 
bd_num : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); sts_ptr = dma_alloc_coherent(&priv->pdev->dev, sts_size * sizeof(struct tsi721_dma_sts), &sts_phys, GFP_KERNEL); if (!sts_ptr) { /* Free space allocated for DMA descriptors */ dma_free_coherent(&priv->pdev->dev, bd_num * sizeof(struct tsi721_dma_desc), bd_ptr, bd_phys); priv->mdma.bd_base = NULL; return -ENOMEM; } priv->mdma.sts_phys = sts_phys; priv->mdma.sts_base = sts_ptr; priv->mdma.sts_size = sts_size; tsi_debug(MAINT, &priv->pdev->dev, "desc status FIFO @ %p (phys = %pad) size=0x%x", sts_ptr, &sts_phys, sts_size); /* Initialize DMA descriptors ring */ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & TSI721_DMAC_DPTRL_MASK); bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); /* Setup DMA descriptor pointers */ iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH); iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), regs + TSI721_DMAC_DPTRL); /* Setup descriptor status FIFO */ iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH); iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), regs + TSI721_DMAC_DSBL); iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), regs + TSI721_DMAC_DSSZ); /* Clear interrupt bits */ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); ioread32(regs + TSI721_DMAC_INT); /* Toggle DMA channel initialization */ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); ioread32(regs + TSI721_DMAC_CTL); udelay(10); return 0; } static int tsi721_bdma_maint_free(struct tsi721_device *priv) { u32 ch_stat; struct tsi721_bdma_maint *mdma = &priv->mdma; void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id); if (mdma->bd_base == NULL) return 0; /* Check if DMA channel still running */ ch_stat = ioread32(regs + TSI721_DMAC_STS); if (ch_stat & TSI721_DMAC_STS_RUN) return -EFAULT; /* Put DMA channel into init state */ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); /* Free space allocated for DMA descriptors */ dma_free_coherent(&priv->pdev->dev, mdma->bd_num * sizeof(struct tsi721_dma_desc), mdma->bd_base, mdma->bd_phys); mdma->bd_base = NULL; /* Free space allocated for status FIFO */ dma_free_coherent(&priv->pdev->dev, mdma->sts_size * sizeof(struct tsi721_dma_sts), mdma->sts_base, mdma->sts_phys); mdma->sts_base = NULL; return 0; } /* Enable Inbound Messaging Interrupts */ static void tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, u32 inte_mask) { u32 rval; if (!inte_mask) return; /* Clear pending Inbound Messaging interrupts */ iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); /* Enable Inbound Messaging interrupts */ rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch)); if (priv->flags & TSI721_USING_MSIX) return; /* Finished if we are in MSI-X mode */ /* * For MSI and INTA interrupt signalling we need to enable next levels */ /* Enable Device Channel Interrupt */ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); iowrite32(rval | TSI721_INT_IMSG_CHAN(ch), priv->regs + TSI721_DEV_CHAN_INTE); } /* Disable Inbound Messaging Interrupts */ static void tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch, u32 inte_mask) { u32 rval; if (!inte_mask) return; /* Clear pending Inbound Messaging interrupts */ iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); /* Disable Inbound Messaging interrupts */ rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); rval &= ~inte_mask; iowrite32(rval, priv->regs + 
TSI721_IBDMAC_INTE(ch)); if (priv->flags & TSI721_USING_MSIX) return; /* Finished if we are in MSI-X mode */ /* * For MSI and INTA interrupt signalling we need to disable next levels */ /* Disable Device Channel Interrupt */ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); rval &= ~TSI721_INT_IMSG_CHAN(ch); iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); } /* Enable Outbound Messaging interrupts */ static void tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch, u32 inte_mask) { u32 rval; if (!inte_mask) return; /* Clear pending Outbound Messaging interrupts */ iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); /* Enable Outbound Messaging channel interrupts */ rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch)); if (priv->flags & TSI721_USING_MSIX) return; /* Finished if we are in MSI-X mode */ /* * For MSI and INTA interrupt signalling we need to enable next levels */ /* Enable Device Channel Interrupt */ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); iowrite32(rval | TSI721_INT_OMSG_CHAN(ch), priv->regs + TSI721_DEV_CHAN_INTE); } /* Disable Outbound Messaging interrupts */ static void tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch, u32 inte_mask) { u32 rval; if (!inte_mask) return; /* Clear pending Outbound Messaging interrupts */ iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); /* Disable Outbound Messaging interrupts */ rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); rval &= ~inte_mask; iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch)); if (priv->flags & TSI721_USING_MSIX) return; /* Finished if we are in MSI-X mode */ /* * For MSI and INTA interrupt signalling we need to disable next levels */ /* Disable Device Channel Interrupt */ rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); rval &= ~TSI721_INT_OMSG_CHAN(ch); iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); } /** * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue * @mport: Master port with outbound message queue * @rdev: Target of outbound message * @mbox: Outbound mailbox * @buffer: Message to add to outbound queue * @len: Length of message */ static int tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, void *buffer, size_t len) { struct tsi721_device *priv = mport->priv; struct tsi721_omsg_desc *desc; u32 tx_slot; unsigned long flags; if (!priv->omsg_init[mbox] || len > TSI721_MSG_MAX_SIZE || len < 8) return -EINVAL; spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags); tx_slot = priv->omsg_ring[mbox].tx_slot; /* Copy copy message into transfer buffer */ memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len); if (len & 0x7) len += 8; /* Build descriptor associated with buffer */ desc = priv->omsg_ring[mbox].omd_base; desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid); #ifdef TSI721_OMSG_DESC_INT /* Request IOF_DONE interrupt generation for each N-th frame in queue */ if (tx_slot % 4 == 0) desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF); #endif desc[tx_slot].msg_info = cpu_to_le32((mport->sys_size << 26) | (mbox << 22) | (0xe << 12) | (len & 0xff8)); desc[tx_slot].bufptr_lo = cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] & 0xffffffff); desc[tx_slot].bufptr_hi = cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32); priv->omsg_ring[mbox].wr_count++; /* Go to next descriptor */ if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) { priv->omsg_ring[mbox].tx_slot = 0; /* Move 
through the ring link descriptor at the end */ priv->omsg_ring[mbox].wr_count++; } mb(); /* Set new write count value */ iowrite32(priv->omsg_ring[mbox].wr_count, priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags); return 0; } /** * tsi721_omsg_handler - Outbound Message Interrupt Handler * @priv: pointer to tsi721 private data * @ch: number of OB MSG channel to service * * Services channel interrupts from outbound messaging engine. */ static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) { u32 omsg_int; struct rio_mport *mport = &priv->mport; void *dev_id = NULL; u32 tx_slot = 0xffffffff; int do_callback = 0; spin_lock(&priv->omsg_ring[ch].lock); omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch)); if (omsg_int & TSI721_OBDMAC_INT_ST_FULL) tsi_info(&priv->pdev->dev, "OB MBOX%d: Status FIFO is full", ch); if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) { u32 srd_ptr; u64 *sts_ptr, last_ptr = 0, prev_ptr = 0; int i, j; /* * Find last successfully processed descriptor */ /* Check and clear descriptor status FIFO entries */ srd_ptr = priv->omsg_ring[ch].sts_rdptr; sts_ptr = priv->omsg_ring[ch].sts_base; j = srd_ptr * 8; while (sts_ptr[j]) { for (i = 0; i < 8 && sts_ptr[j]; i++, j++) { prev_ptr = last_ptr; last_ptr = le64_to_cpu(sts_ptr[j]); sts_ptr[j] = 0; } ++srd_ptr; srd_ptr %= priv->omsg_ring[ch].sts_size; j = srd_ptr * 8; } if (last_ptr == 0) goto no_sts_update; priv->omsg_ring[ch].sts_rdptr = srd_ptr; iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch)); if (!mport->outb_msg[ch].mcback) goto no_sts_update; /* Inform upper layer about transfer completion */ tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/ sizeof(struct tsi721_omsg_desc); /* * Check if this is a Link Descriptor (LD). * If yes, ignore LD and use descriptor processed * before LD. 
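 * The LD occupies the extra ring slot at index 'size' and only points back
 * to the start of the descriptor ring, so it never carries a message of
 * its own.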
*/ if (tx_slot == priv->omsg_ring[ch].size) { if (prev_ptr) tx_slot = (prev_ptr - (u64)priv->omsg_ring[ch].omd_phys)/ sizeof(struct tsi721_omsg_desc); else goto no_sts_update; } if (tx_slot >= priv->omsg_ring[ch].size) tsi_debug(OMSG, &priv->pdev->dev, "OB_MSG tx_slot=%x > size=%x", tx_slot, priv->omsg_ring[ch].size); WARN_ON(tx_slot >= priv->omsg_ring[ch].size); /* Move slot index to the next message to be sent */ ++tx_slot; if (tx_slot == priv->omsg_ring[ch].size) tx_slot = 0; dev_id = priv->omsg_ring[ch].dev_id; do_callback = 1; } no_sts_update: if (omsg_int & TSI721_OBDMAC_INT_ERROR) { /* * Outbound message operation aborted due to error, * reinitialize OB MSG channel */ tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x", ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); iowrite32(TSI721_OBDMAC_INT_ERROR, priv->regs + TSI721_OBDMAC_INT(ch)); iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(ch)); ioread32(priv->regs + TSI721_OBDMAC_CTL(ch)); /* Inform upper level to clear all pending tx slots */ dev_id = priv->omsg_ring[ch].dev_id; tx_slot = priv->omsg_ring[ch].tx_slot; do_callback = 1; /* Synch tx_slot tracking */ iowrite32(priv->omsg_ring[ch].tx_slot, priv->regs + TSI721_OBDMAC_DRDCNT(ch)); ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch)); priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot; priv->omsg_ring[ch].sts_rdptr = 0; } /* Clear channel interrupts */ iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch)); if (!(priv->flags & TSI721_USING_MSIX)) { u32 ch_inte; /* Re-enable channel interrupts */ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); ch_inte |= TSI721_INT_OMSG_CHAN(ch); iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); } spin_unlock(&priv->omsg_ring[ch].lock); if (mport->outb_msg[ch].mcback && do_callback) mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot); } /** * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox * @mport: Master port implementing Outbound Messaging Engine * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the outbound mailbox ring */ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) { struct tsi721_device *priv = mport->priv; struct tsi721_omsg_desc *bd_ptr; int i, rc = 0; if ((entries < TSI721_OMSGD_MIN_RING_SIZE) || (entries > (TSI721_OMSGD_RING_SIZE)) || (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { rc = -EINVAL; goto out; } if ((mbox_sel & (1 << mbox)) == 0) { rc = -ENODEV; goto out; } priv->omsg_ring[mbox].dev_id = dev_id; priv->omsg_ring[mbox].size = entries; priv->omsg_ring[mbox].sts_rdptr = 0; spin_lock_init(&priv->omsg_ring[mbox].lock); /* Outbound Msg Buffer allocation based on the number of maximum descriptor entries */ for (i = 0; i < entries; i++) { priv->omsg_ring[mbox].omq_base[i] = dma_alloc_coherent( &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, &priv->omsg_ring[mbox].omq_phys[i], GFP_KERNEL); if (priv->omsg_ring[mbox].omq_base[i] == NULL) { tsi_debug(OMSG, &priv->pdev->dev, "ENOMEM for OB_MSG_%d data buffer", mbox); rc = -ENOMEM; goto out_buf; } } /* Outbound message descriptor allocation */ priv->omsg_ring[mbox].omd_base = dma_alloc_coherent( &priv->pdev->dev, (entries + 1) * sizeof(struct tsi721_omsg_desc), &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL); if (priv->omsg_ring[mbox].omd_base == NULL) { tsi_debug(OMSG, &priv->pdev->dev, "ENOMEM for OB_MSG_%d descriptor memory", mbox); rc = -ENOMEM; goto out_buf; } priv->omsg_ring[mbox].tx_slot = 0; 
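	/*
	 * The descriptor ring allocated above uses 'entries + 1' slots: the
	 * extra slot holds the DTYPE5 link descriptor that wraps the ring
	 * back to its base (it is initialized further down in this function).
	 */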
/* Outbound message descriptor status FIFO allocation */ priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); if (priv->omsg_ring[mbox].sts_base == NULL) { tsi_debug(OMSG, &priv->pdev->dev, "ENOMEM for OB_MSG_%d status FIFO", mbox); rc = -ENOMEM; goto out_desc; } /* * Configure Outbound Messaging Engine */ /* Setup Outbound Message descriptor pointer */ iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32), priv->regs + TSI721_OBDMAC_DPTRH(mbox)); iowrite32(((u64)priv->omsg_ring[mbox].omd_phys & TSI721_OBDMAC_DPTRL_MASK), priv->regs + TSI721_OBDMAC_DPTRL(mbox)); /* Setup Outbound Message descriptor status FIFO */ iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32), priv->regs + TSI721_OBDMAC_DSBH(mbox)); iowrite32(((u64)priv->omsg_ring[mbox].sts_phys & TSI721_OBDMAC_DSBL_MASK), priv->regs + TSI721_OBDMAC_DSBL(mbox)); iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size), priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox)); /* Enable interrupts */ #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { int idx = TSI721_VECT_OMB0_DONE + mbox; /* Request interrupt service if we are in MSI-X mode */ rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0, priv->msix[idx].irq_name, (void *)priv); if (rc) { tsi_debug(OMSG, &priv->pdev->dev, "Unable to get MSI-X IRQ for OBOX%d-DONE", mbox); goto out_stat; } idx = TSI721_VECT_OMB0_INT + mbox; rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0, priv->msix[idx].irq_name, (void *)priv); if (rc) { tsi_debug(OMSG, &priv->pdev->dev, "Unable to get MSI-X IRQ for MBOX%d-INT", mbox); idx = TSI721_VECT_OMB0_DONE + mbox; free_irq(priv->msix[idx].vector, (void *)priv); goto out_stat; } } #endif /* CONFIG_PCI_MSI */ tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL); /* Initialize Outbound Message descriptors ring */ bd_ptr = priv->omsg_ring[mbox].omd_base; bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29); bd_ptr[entries].msg_info = 0; bd_ptr[entries].next_lo = cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys & TSI721_OBDMAC_DPTRL_MASK); bd_ptr[entries].next_hi = cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32); priv->omsg_ring[mbox].wr_count = 0; mb(); /* Initialize Outbound Message engine */ iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox)); ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); udelay(10); priv->omsg_init[mbox] = 1; return 0; #ifdef CONFIG_PCI_MSI out_stat: dma_free_coherent(&priv->pdev->dev, priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), priv->omsg_ring[mbox].sts_base, priv->omsg_ring[mbox].sts_phys); priv->omsg_ring[mbox].sts_base = NULL; #endif /* CONFIG_PCI_MSI */ out_desc: dma_free_coherent(&priv->pdev->dev, (entries + 1) * sizeof(struct tsi721_omsg_desc), priv->omsg_ring[mbox].omd_base, priv->omsg_ring[mbox].omd_phys); priv->omsg_ring[mbox].omd_base = NULL; out_buf: for (i = 0; i < priv->omsg_ring[mbox].size; i++) { if (priv->omsg_ring[mbox].omq_base[i]) { dma_free_coherent(&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, priv->omsg_ring[mbox].omq_base[i], priv->omsg_ring[mbox].omq_phys[i]); priv->omsg_ring[mbox].omq_base[i] = NULL; } } out: return rc; } /** * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox * @mport: Master port implementing the outbound message unit * @mbox: Mailbox to close */ static void tsi721_close_outb_mbox(struct 
rio_mport *mport, int mbox) { struct tsi721_device *priv = mport->priv; u32 i; if (!priv->omsg_init[mbox]) return; priv->omsg_init[mbox] = 0; /* Disable Interrupts */ tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL); #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, (void *)priv); free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, (void *)priv); } #endif /* CONFIG_PCI_MSI */ /* Free OMSG Descriptor Status FIFO */ dma_free_coherent(&priv->pdev->dev, priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), priv->omsg_ring[mbox].sts_base, priv->omsg_ring[mbox].sts_phys); priv->omsg_ring[mbox].sts_base = NULL; /* Free OMSG descriptors */ dma_free_coherent(&priv->pdev->dev, (priv->omsg_ring[mbox].size + 1) * sizeof(struct tsi721_omsg_desc), priv->omsg_ring[mbox].omd_base, priv->omsg_ring[mbox].omd_phys); priv->omsg_ring[mbox].omd_base = NULL; /* Free message buffers */ for (i = 0; i < priv->omsg_ring[mbox].size; i++) { if (priv->omsg_ring[mbox].omq_base[i]) { dma_free_coherent(&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, priv->omsg_ring[mbox].omq_base[i], priv->omsg_ring[mbox].omq_phys[i]); priv->omsg_ring[mbox].omq_base[i] = NULL; } } } /** * tsi721_imsg_handler - Inbound Message Interrupt Handler * @priv: pointer to tsi721 private data * @ch: inbound message channel number to service * * Services channel interrupts from inbound messaging engine. */ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) { u32 mbox = ch - 4; u32 imsg_int; struct rio_mport *mport = &priv->mport; spin_lock(&priv->imsg_ring[mbox].lock); imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch)); if (imsg_int & TSI721_IBDMAC_INT_SRTO) tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox); if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR) tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox); if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW) tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox); /* Clear IB channel interrupts */ iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch)); /* If an IB Msg is received notify the upper layer */ if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV && mport->inb_msg[mbox].mcback) mport->inb_msg[mbox].mcback(mport, priv->imsg_ring[mbox].dev_id, mbox, -1); if (!(priv->flags & TSI721_USING_MSIX)) { u32 ch_inte; /* Re-enable channel interrupts */ ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); ch_inte |= TSI721_INT_IMSG_CHAN(ch); iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); } spin_unlock(&priv->imsg_ring[mbox].lock); } /** * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox * @mport: Master port implementing the Inbound Messaging Engine * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the inbound mailbox ring */ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) { struct tsi721_device *priv = mport->priv; int ch = mbox + 4; int i; u64 *free_ptr; int rc = 0; if ((entries < TSI721_IMSGD_MIN_RING_SIZE) || (entries > TSI721_IMSGD_RING_SIZE) || (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { rc = -EINVAL; goto out; } if ((mbox_sel & (1 << mbox)) == 0) { rc = -ENODEV; goto out; } /* Initialize IB Messaging Ring */ priv->imsg_ring[mbox].dev_id = dev_id; priv->imsg_ring[mbox].size = entries; priv->imsg_ring[mbox].rx_slot = 0; priv->imsg_ring[mbox].desc_rdptr = 0; priv->imsg_ring[mbox].fq_wrptr = 0; for (i = 0; i < priv->imsg_ring[mbox].size; i++) 
priv->imsg_ring[mbox].imq_base[i] = NULL; spin_lock_init(&priv->imsg_ring[mbox].lock); /* Allocate buffers for incoming messages */ priv->imsg_ring[mbox].buf_base = dma_alloc_coherent(&priv->pdev->dev, entries * TSI721_MSG_BUFFER_SIZE, &priv->imsg_ring[mbox].buf_phys, GFP_KERNEL); if (priv->imsg_ring[mbox].buf_base == NULL) { tsi_err(&priv->pdev->dev, "Failed to allocate buffers for IB MBOX%d", mbox); rc = -ENOMEM; goto out; } /* Allocate memory for circular free list */ priv->imsg_ring[mbox].imfq_base = dma_alloc_coherent(&priv->pdev->dev, entries * 8, &priv->imsg_ring[mbox].imfq_phys, GFP_KERNEL); if (priv->imsg_ring[mbox].imfq_base == NULL) { tsi_err(&priv->pdev->dev, "Failed to allocate free queue for IB MBOX%d", mbox); rc = -ENOMEM; goto out_buf; } /* Allocate memory for Inbound message descriptors */ priv->imsg_ring[mbox].imd_base = dma_alloc_coherent(&priv->pdev->dev, entries * sizeof(struct tsi721_imsg_desc), &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL); if (priv->imsg_ring[mbox].imd_base == NULL) { tsi_err(&priv->pdev->dev, "Failed to allocate descriptor memory for IB MBOX%d", mbox); rc = -ENOMEM; goto out_dma; } /* Fill free buffer pointer list */ free_ptr = priv->imsg_ring[mbox].imfq_base; for (i = 0; i < entries; i++) free_ptr[i] = cpu_to_le64( (u64)(priv->imsg_ring[mbox].buf_phys) + i * 0x1000); mb(); /* * For mapping of inbound SRIO Messages into appropriate queues we need * to set Inbound Device ID register in the messaging engine. We do it * once when first inbound mailbox is requested. */ if (!(priv->flags & TSI721_IMSGID_SET)) { iowrite32((u32)priv->mport.host_deviceid, priv->regs + TSI721_IB_DEVID); priv->flags |= TSI721_IMSGID_SET; } /* * Configure Inbound Messaging channel (ch = mbox + 4) */ /* Setup Inbound Message free queue */ iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32), priv->regs + TSI721_IBDMAC_FQBH(ch)); iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys & TSI721_IBDMAC_FQBL_MASK), priv->regs+TSI721_IBDMAC_FQBL(ch)); iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), priv->regs + TSI721_IBDMAC_FQSZ(ch)); /* Setup Inbound Message descriptor queue */ iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32), priv->regs + TSI721_IBDMAC_DQBH(ch)); iowrite32(((u32)priv->imsg_ring[mbox].imd_phys & (u32)TSI721_IBDMAC_DQBL_MASK), priv->regs+TSI721_IBDMAC_DQBL(ch)); iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), priv->regs + TSI721_IBDMAC_DQSZ(ch)); /* Enable interrupts */ #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { int idx = TSI721_VECT_IMB0_RCV + mbox; /* Request interrupt service if we are in MSI-X mode */ rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0, priv->msix[idx].irq_name, (void *)priv); if (rc) { tsi_debug(IMSG, &priv->pdev->dev, "Unable to get MSI-X IRQ for IBOX%d-DONE", mbox); goto out_desc; } idx = TSI721_VECT_IMB0_INT + mbox; rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0, priv->msix[idx].irq_name, (void *)priv); if (rc) { tsi_debug(IMSG, &priv->pdev->dev, "Unable to get MSI-X IRQ for IBOX%d-INT", mbox); free_irq( priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, (void *)priv); goto out_desc; } } #endif /* CONFIG_PCI_MSI */ tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL); /* Initialize Inbound Message Engine */ iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch)); ioread32(priv->regs + TSI721_IBDMAC_CTL(ch)); udelay(10); priv->imsg_ring[mbox].fq_wrptr = entries - 1; iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch)); priv->imsg_init[mbox] = 1; return 0; #ifdef CONFIG_PCI_MSI out_desc: 
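	/*
	 * Error unwind: free the descriptor memory, then fall through to
	 * release the free-buffer pointer list and the message buffers.
	 */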
dma_free_coherent(&priv->pdev->dev, priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc), priv->imsg_ring[mbox].imd_base, priv->imsg_ring[mbox].imd_phys); priv->imsg_ring[mbox].imd_base = NULL; #endif /* CONFIG_PCI_MSI */ out_dma: dma_free_coherent(&priv->pdev->dev, priv->imsg_ring[mbox].size * 8, priv->imsg_ring[mbox].imfq_base, priv->imsg_ring[mbox].imfq_phys); priv->imsg_ring[mbox].imfq_base = NULL; out_buf: dma_free_coherent(&priv->pdev->dev, priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE, priv->imsg_ring[mbox].buf_base, priv->imsg_ring[mbox].buf_phys); priv->imsg_ring[mbox].buf_base = NULL; out: return rc; } /** * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox * @mport: Master port implementing the Inbound Messaging Engine * @mbox: Mailbox to close */ static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox) { struct tsi721_device *priv = mport->priv; u32 rx_slot; int ch = mbox + 4; if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */ return; priv->imsg_init[mbox] = 0; /* Disable Inbound Messaging Engine */ /* Disable Interrupts */ tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK); #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, (void *)priv); free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector, (void *)priv); } #endif /* CONFIG_PCI_MSI */ /* Clear Inbound Buffer Queue */ for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++) priv->imsg_ring[mbox].imq_base[rx_slot] = NULL; /* Free memory allocated for message buffers */ dma_free_coherent(&priv->pdev->dev, priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE, priv->imsg_ring[mbox].buf_base, priv->imsg_ring[mbox].buf_phys); priv->imsg_ring[mbox].buf_base = NULL; /* Free memory allocated for free pointr list */ dma_free_coherent(&priv->pdev->dev, priv->imsg_ring[mbox].size * 8, priv->imsg_ring[mbox].imfq_base, priv->imsg_ring[mbox].imfq_phys); priv->imsg_ring[mbox].imfq_base = NULL; /* Free memory allocated for RX descriptors */ dma_free_coherent(&priv->pdev->dev, priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc), priv->imsg_ring[mbox].imd_base, priv->imsg_ring[mbox].imd_phys); priv->imsg_ring[mbox].imd_base = NULL; } /** * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue * @mport: Master port implementing the Inbound Messaging Engine * @mbox: Inbound mailbox number * @buf: Buffer to add to inbound queue */ static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) { struct tsi721_device *priv = mport->priv; u32 rx_slot; int rc = 0; rx_slot = priv->imsg_ring[mbox].rx_slot; if (priv->imsg_ring[mbox].imq_base[rx_slot]) { tsi_err(&priv->pdev->dev, "Error adding inbound buffer %d, buffer exists", rx_slot); rc = -EINVAL; goto out; } priv->imsg_ring[mbox].imq_base[rx_slot] = buf; if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size) priv->imsg_ring[mbox].rx_slot = 0; out: return rc; } /** * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue * @mport: Master port implementing the Inbound Messaging Engine * @mbox: Inbound mailbox number * * Returns pointer to the message on success or NULL on failure. 
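 *
 * Copies the received data out of the hardware free-queue buffer into the
 * next buffer registered with tsi721_add_inb_buffer(), then returns the
 * consumed descriptor and hardware buffer to their respective queues.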
*/ static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox) { struct tsi721_device *priv = mport->priv; struct tsi721_imsg_desc *desc; u32 rx_slot; void *rx_virt = NULL; u64 rx_phys; void *buf = NULL; u64 *free_ptr; int ch = mbox + 4; int msg_size; if (!priv->imsg_init[mbox]) return NULL; desc = priv->imsg_ring[mbox].imd_base; desc += priv->imsg_ring[mbox].desc_rdptr; if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO)) goto out; rx_slot = priv->imsg_ring[mbox].rx_slot; while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) { if (++rx_slot == priv->imsg_ring[mbox].size) rx_slot = 0; } rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) | le32_to_cpu(desc->bufptr_lo); rx_virt = priv->imsg_ring[mbox].buf_base + (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys); buf = priv->imsg_ring[mbox].imq_base[rx_slot]; msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT; if (msg_size == 0) msg_size = RIO_MAX_MSG_SIZE; memcpy(buf, rx_virt, msg_size); priv->imsg_ring[mbox].imq_base[rx_slot] = NULL; desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO); if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size) priv->imsg_ring[mbox].desc_rdptr = 0; iowrite32(priv->imsg_ring[mbox].desc_rdptr, priv->regs + TSI721_IBDMAC_DQRP(ch)); /* Return free buffer into the pointer list */ free_ptr = priv->imsg_ring[mbox].imfq_base; free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys); if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size) priv->imsg_ring[mbox].fq_wrptr = 0; iowrite32(priv->imsg_ring[mbox].fq_wrptr, priv->regs + TSI721_IBDMAC_FQWP(ch)); out: return buf; } /** * tsi721_messages_init - Initialization of Messaging Engine * @priv: pointer to tsi721 private data * * Configures Tsi721 messaging engine. */ static int tsi721_messages_init(struct tsi721_device *priv) { int ch; iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG); iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT); iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT); /* Set SRIO Message Request/Response Timeout */ iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO); /* Initialize Inbound Messaging Engine Registers */ for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) { /* Clear interrupt bits */ iowrite32(TSI721_IBDMAC_INT_MASK, priv->regs + TSI721_IBDMAC_INT(ch)); /* Clear Status */ iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch)); iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK, priv->regs + TSI721_SMSG_ECC_COR_LOG(ch)); iowrite32(TSI721_SMSG_ECC_NCOR_MASK, priv->regs + TSI721_SMSG_ECC_NCOR(ch)); } return 0; } /** * tsi721_query_mport - Fetch inbound message from the Tsi721 MSG Queue * @mport: Master port implementing the Inbound Messaging Engine * @mbox: Inbound mailbox number * * Returns pointer to the message on success or NULL on failure. 
*/ static int tsi721_query_mport(struct rio_mport *mport, struct rio_mport_attr *attr) { struct tsi721_device *priv = mport->priv; u32 rval; rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0)); if (rval & RIO_PORT_N_ERR_STS_PORT_OK) { rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0)); attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28; rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0)); attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27; } else attr->link_speed = RIO_LINK_DOWN; #ifdef CONFIG_RAPIDIO_DMA_ENGINE attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG; attr->dma_max_sge = 0; attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT; attr->dma_align = 0; #else attr->flags = 0; #endif return 0; } /** * tsi721_disable_ints - disables all device interrupts * @priv: pointer to tsi721 private data */ static void tsi721_disable_ints(struct tsi721_device *priv) { int ch; /* Disable all device level interrupts */ iowrite32(0, priv->regs + TSI721_DEV_INTE); /* Disable all Device Channel interrupts */ iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE); /* Disable all Inbound Msg Channel interrupts */ for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch)); /* Disable all Outbound Msg Channel interrupts */ for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++) iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch)); /* Disable all general messaging interrupts */ iowrite32(0, priv->regs + TSI721_SMSG_INTE); /* Disable all BDMA Channel interrupts */ for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) iowrite32(0, priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); /* Disable all general BDMA interrupts */ iowrite32(0, priv->regs + TSI721_BDMA_INTE); /* Disable all SRIO Channel interrupts */ for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++) iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch)); /* Disable all general SR2PC interrupts */ iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE); /* Disable all PC2SR interrupts */ iowrite32(0, priv->regs + TSI721_PC2SR_INTE); /* Disable all I2C interrupts */ iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE); /* Disable SRIO MAC interrupts */ iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE); iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN); } static struct rio_ops tsi721_rio_ops = { .lcread = tsi721_lcread, .lcwrite = tsi721_lcwrite, .cread = tsi721_cread_dma, .cwrite = tsi721_cwrite_dma, .dsend = tsi721_dsend, .open_inb_mbox = tsi721_open_inb_mbox, .close_inb_mbox = tsi721_close_inb_mbox, .open_outb_mbox = tsi721_open_outb_mbox, .close_outb_mbox = tsi721_close_outb_mbox, .add_outb_message = tsi721_add_outb_message, .add_inb_buffer = tsi721_add_inb_buffer, .get_inb_message = tsi721_get_inb_message, .map_inb = tsi721_rio_map_inb_mem, .unmap_inb = tsi721_rio_unmap_inb_mem, .pwenable = tsi721_pw_enable, .query_mport = tsi721_query_mport, .map_outb = tsi721_map_outb_win, .unmap_outb = tsi721_unmap_outb_win, }; static void tsi721_mport_release(struct device *dev) { struct rio_mport *mport = to_rio_mport(dev); tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id); } /** * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port * @priv: pointer to tsi721 private data * * Configures Tsi721 as RapidIO master port. 
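 *
 * Registers the RapidIO operations and doorbell/mailbox resources, sets up
 * MSI-X, MSI or legacy INTx interrupt delivery, optionally registers the
 * DMA engine, enables the SRIO link and finally registers the master port
 * with the RapidIO core.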
*/ static int tsi721_setup_mport(struct tsi721_device *priv) { struct pci_dev *pdev = priv->pdev; int err = 0; struct rio_mport *mport = &priv->mport; err = rio_mport_initialize(mport); if (err) return err; mport->ops = &tsi721_rio_ops; mport->index = 0; mport->sys_size = 0; /* small system */ mport->priv = (void *)priv; mport->phys_efptr = 0x100; mport->phys_rmap = 1; mport->dev.parent = &pdev->dev; mport->dev.release = tsi721_mport_release; INIT_LIST_HEAD(&mport->dbells); rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)", dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); /* Hook up interrupt handler */ #ifdef CONFIG_PCI_MSI if (!tsi721_enable_msix(priv)) priv->flags |= TSI721_USING_MSIX; else if (!pci_enable_msi(pdev)) priv->flags |= TSI721_USING_MSI; else tsi_debug(MPORT, &pdev->dev, "MSI/MSI-X is not available. Using legacy INTx."); #endif /* CONFIG_PCI_MSI */ err = tsi721_request_irq(priv); if (err) { tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)", pdev->irq, err); return err; } #ifdef CONFIG_RAPIDIO_DMA_ENGINE err = tsi721_register_dma(priv); if (err) goto err_exit; #endif /* Enable SRIO link */ iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | TSI721_DEVCTL_SRBOOT_CMPL, priv->regs + TSI721_DEVCTL); if (mport->host_deviceid >= 0) iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); else iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); err = rio_register_mport(mport); if (err) { tsi721_unregister_dma(priv); goto err_exit; } return 0; err_exit: tsi721_free_irq(priv); return err; } static int tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; int err; priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto err_exit; } err = pci_enable_device(pdev); if (err) { tsi_err(&pdev->dev, "Failed to enable PCI device"); goto err_clean; } priv->pdev = pdev; #ifdef DEBUG { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { tsi_debug(INIT, &pdev->dev, "res%d %pR", i, &pdev->resource[i]); } } #endif /* * Verify BAR configuration */ /* BAR_0 (registers) must be 512KB+ in 32-bit address space */ if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) || pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 || pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) { tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0"); err = -ENODEV; goto err_disable_pdev; } /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */ if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) || pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 || pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) { tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1"); err = -ENODEV; goto err_disable_pdev; } /* * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address * space. * NOTE: BAR_2 and BAR_4 are not used by this version of driver. * It may be a good idea to keep them disabled using HW configuration * to save PCI memory space. 
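 * The probe code below does, however, record non-prefetchable 64-bit
 * BAR_2/BAR_4 apertures for use as outbound windows; prefetchable BARs
 * are skipped.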
*/ priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0; if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) { if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH) tsi_debug(INIT, &pdev->dev, "Prefetchable OBW BAR2 will not be used"); else { priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2); priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2); } } if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) { if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH) tsi_debug(INIT, &pdev->dev, "Prefetchable OBW BAR4 will not be used"); else { priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4); priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4); } } err = pci_request_regions(pdev, DRV_NAME); if (err) { tsi_err(&pdev->dev, "Unable to obtain PCI resources"); goto err_disable_pdev; } pci_set_master(pdev); priv->regs = pci_ioremap_bar(pdev, BAR_0); if (!priv->regs) { tsi_err(&pdev->dev, "Unable to map device registers space"); err = -ENOMEM; goto err_free_res; } priv->odb_base = pci_ioremap_bar(pdev, BAR_1); if (!priv->odb_base) { tsi_err(&pdev->dev, "Unable to map outbound doorbells space"); err = -ENOMEM; goto err_unmap_bars; } /* Configure DMA attributes. */ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { tsi_err(&pdev->dev, "Unable to set DMA mask"); goto err_unmap_bars; } if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); } else { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (err) tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); } BUG_ON(!pci_is_pcie(pdev)); /* Clear "no snoop" and "relaxed ordering" bits. */ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); /* Override PCIe Maximum Read Request Size setting if requested */ if (pcie_mrrs >= 0) { if (pcie_mrrs <= 5) pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12); else tsi_info(&pdev->dev, "Invalid MRRS override value %d", pcie_mrrs); } /* Set PCIe completion timeout to 1-10ms */ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2); /* * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block */ pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01); pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL, TSI721_MSIXTBL_OFFSET); pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA, TSI721_MSIXPBA_OFFSET); pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0); /* End of FIXUP */ tsi721_disable_ints(priv); tsi721_init_pc2sr_mapping(priv); tsi721_init_sr2pc_mapping(priv); if (tsi721_bdma_maint_init(priv)) { tsi_err(&pdev->dev, "BDMA initialization failed"); err = -ENOMEM; goto err_unmap_bars; } err = tsi721_doorbell_init(priv); if (err) goto err_free_bdma; tsi721_port_write_init(priv); err = tsi721_messages_init(priv); if (err) goto err_free_consistent; err = tsi721_setup_mport(priv); if (err) goto err_free_consistent; pci_set_drvdata(pdev, priv); tsi721_interrupts_init(priv); return 0; err_free_consistent: tsi721_port_write_free(priv); tsi721_doorbell_free(priv); err_free_bdma: tsi721_bdma_maint_free(priv); err_unmap_bars: if (priv->regs) iounmap(priv->regs); if (priv->odb_base) iounmap(priv->odb_base); err_free_res: pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); err_clean: kfree(priv); err_exit: return err; } static void tsi721_remove(struct pci_dev *pdev) { struct tsi721_device *priv 
= pci_get_drvdata(pdev); tsi_debug(EXIT, &pdev->dev, "enter"); tsi721_disable_ints(priv); tsi721_free_irq(priv); flush_work(&priv->idb_work); flush_work(&priv->pw_work); rio_unregister_mport(&priv->mport); tsi721_unregister_dma(priv); tsi721_bdma_maint_free(priv); tsi721_doorbell_free(priv); tsi721_port_write_free(priv); tsi721_close_sr2pc_mapping(priv); if (priv->regs) iounmap(priv->regs); if (priv->odb_base) iounmap(priv->odb_base); #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) pci_disable_msix(priv->pdev); else if (priv->flags & TSI721_USING_MSI) pci_disable_msi(priv->pdev); #endif pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); kfree(priv); tsi_debug(EXIT, &pdev->dev, "exit"); } static void tsi721_shutdown(struct pci_dev *pdev) { struct tsi721_device *priv = pci_get_drvdata(pdev); tsi_debug(EXIT, &pdev->dev, "enter"); tsi721_disable_ints(priv); tsi721_dma_stop_all(priv); pci_disable_device(pdev); } static const struct pci_device_id tsi721_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl); static struct pci_driver tsi721_driver = { .name = "tsi721", .id_table = tsi721_pci_tbl, .probe = tsi721_probe, .remove = tsi721_remove, .shutdown = tsi721_shutdown, }; module_pci_driver(tsi721_driver); MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver"); MODULE_AUTHOR("Integrated Device Technology, Inc."); MODULE_LICENSE("GPL");
linux-master
drivers/rapidio/devices/tsi721.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * RapidIO mport character device * * Copyright 2014-2015 Integrated Device Technology, Inc. * Alexandre Bounine <[email protected]> * Copyright 2014-2015 Prodrive Technologies * Andre van Herk <[email protected]> * Jerry Jacobs <[email protected]> * Copyright (C) 2014 Texas Instruments Incorporated * Aurelien Jacquiot <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/cdev.h> #include <linux/ioctl.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/fs.h> #include <linux/err.h> #include <linux/net.h> #include <linux/poll.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/kfifo.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/dma-mapping.h> #ifdef CONFIG_RAPIDIO_DMA_ENGINE #include <linux/dmaengine.h> #endif #include <linux/rio.h> #include <linux/rio_ids.h> #include <linux/rio_drv.h> #include <linux/rio_mport_cdev.h> #include "../rio.h" #define DRV_NAME "rio_mport" #define DRV_PREFIX DRV_NAME ": " #define DEV_NAME "rio_mport" #define DRV_VERSION "1.0.0" /* Debug output filtering masks */ enum { DBG_NONE = 0, DBG_INIT = BIT(0), /* driver init */ DBG_EXIT = BIT(1), /* driver exit */ DBG_MPORT = BIT(2), /* mport add/remove */ DBG_RDEV = BIT(3), /* RapidIO device add/remove */ DBG_DMA = BIT(4), /* DMA transfer messages */ DBG_MMAP = BIT(5), /* mapping messages */ DBG_IBW = BIT(6), /* inbound window */ DBG_EVENT = BIT(7), /* event handling messages */ DBG_OBW = BIT(8), /* outbound window messages */ DBG_DBELL = BIT(9), /* doorbell messages */ DBG_ALL = ~0, }; #ifdef DEBUG #define rmcd_debug(level, fmt, arg...) \ do { \ if (DBG_##level & dbg_level) \ pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \ } while (0) #else #define rmcd_debug(level, fmt, arg...) \ no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg) #endif #define rmcd_warn(fmt, arg...) \ pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg) #define rmcd_error(fmt, arg...) 
\ pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg) MODULE_AUTHOR("Jerry Jacobs <[email protected]>"); MODULE_AUTHOR("Aurelien Jacquiot <[email protected]>"); MODULE_AUTHOR("Alexandre Bounine <[email protected]>"); MODULE_AUTHOR("Andre van Herk <[email protected]>"); MODULE_DESCRIPTION("RapidIO mport character device driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static int dma_timeout = 3000; /* DMA transfer timeout in msec */ module_param(dma_timeout, int, S_IRUGO); MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)"); #ifdef DEBUG static u32 dbg_level = DBG_NONE; module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO); MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif /* * An internal DMA coherent buffer */ struct mport_dma_buf { void *ib_base; dma_addr_t ib_phys; u32 ib_size; u64 ib_rio_base; bool ib_map; struct file *filp; }; /* * Internal memory mapping structure */ enum rio_mport_map_dir { MAP_INBOUND, MAP_OUTBOUND, MAP_DMA, }; struct rio_mport_mapping { struct list_head node; struct mport_dev *md; enum rio_mport_map_dir dir; u16 rioid; u64 rio_addr; dma_addr_t phys_addr; /* for mmap */ void *virt_addr; /* kernel address, for dma_free_coherent */ u64 size; struct kref ref; /* refcount of vmas sharing the mapping */ struct file *filp; }; struct rio_mport_dma_map { int valid; u64 length; void *vaddr; dma_addr_t paddr; }; #define MPORT_MAX_DMA_BUFS 16 #define MPORT_EVENT_DEPTH 10 /* * mport_dev driver-specific structure that represents mport device * @active mport device status flag * @node list node to maintain list of registered mports * @cdev character device * @dev associated device object * @mport associated subsystem's master port device object * @buf_mutex lock for buffer handling * @file_mutex - lock for open files list * @file_list - list of open files on given mport * @properties properties of this mport * @portwrites queue of inbound portwrites * @pw_lock lock for port write queue * @mappings queue for memory mappings * @dma_chan DMA channels associated with this device * @dma_ref: * @comp: */ struct mport_dev { atomic_t active; struct list_head node; struct cdev cdev; struct device dev; struct rio_mport *mport; struct mutex buf_mutex; struct mutex file_mutex; struct list_head file_list; struct rio_mport_properties properties; struct list_head doorbells; spinlock_t db_lock; struct list_head portwrites; spinlock_t pw_lock; struct list_head mappings; #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_chan *dma_chan; struct kref dma_ref; struct completion comp; #endif }; /* * mport_cdev_priv - data structure specific to individual file object * associated with an open device * @md master port character device object * @async_queue - asynchronous notification queue * @list - file objects tracking list * @db_filters inbound doorbell filters for this descriptor * @pw_filters portwrite filters for this descriptor * @event_fifo event fifo for this descriptor * @event_rx_wait wait queue for this descriptor * @fifo_lock lock for event_fifo * @event_mask event mask for this descriptor * @dmach DMA engine channel allocated for specific file object */ struct mport_cdev_priv { struct mport_dev *md; struct fasync_struct *async_queue; struct list_head list; struct list_head db_filters; struct list_head pw_filters; struct kfifo event_fifo; wait_queue_head_t event_rx_wait; spinlock_t fifo_lock; u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_chan *dmach; struct list_head 
async_list; spinlock_t req_lock; struct mutex dma_lock; struct kref dma_ref; struct completion comp; #endif }; /* * rio_mport_pw_filter - structure to describe a portwrite filter * md_node node in mport device's list * priv_node node in private file object's list * priv reference to private data * filter actual portwrite filter */ struct rio_mport_pw_filter { struct list_head md_node; struct list_head priv_node; struct mport_cdev_priv *priv; struct rio_pw_filter filter; }; /* * rio_mport_db_filter - structure to describe a doorbell filter * @data_node reference to device node * @priv_node node in private data * @priv reference to private data * @filter actual doorbell filter */ struct rio_mport_db_filter { struct list_head data_node; struct list_head priv_node; struct mport_cdev_priv *priv; struct rio_doorbell_filter filter; }; static LIST_HEAD(mport_devs); static DEFINE_MUTEX(mport_devs_lock); #if (0) /* used by commented out portion of poll function : FIXME */ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait); #endif static struct class *dev_class; static dev_t dev_number; static void mport_release_mapping(struct kref *ref); static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, int local) { struct rio_mport *mport = priv->md->mport; struct rio_mport_maint_io maint_io; u32 *buffer; u32 offset; size_t length; int ret, i; if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) return -EFAULT; if ((maint_io.offset % 4) || (maint_io.length == 0) || (maint_io.length % 4) || (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) return -EINVAL; buffer = vmalloc(maint_io.length); if (buffer == NULL) return -ENOMEM; length = maint_io.length/sizeof(u32); offset = maint_io.offset; for (i = 0; i < length; i++) { if (local) ret = __rio_local_read_config_32(mport, offset, &buffer[i]); else ret = rio_mport_read_config_32(mport, maint_io.rioid, maint_io.hopcount, offset, &buffer[i]); if (ret) goto out; offset += 4; } if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, buffer, maint_io.length))) ret = -EFAULT; out: vfree(buffer); return ret; } static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, int local) { struct rio_mport *mport = priv->md->mport; struct rio_mport_maint_io maint_io; u32 *buffer; u32 offset; size_t length; int ret = -EINVAL, i; if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) return -EFAULT; if ((maint_io.offset % 4) || (maint_io.length == 0) || (maint_io.length % 4) || (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) return -EINVAL; buffer = vmalloc(maint_io.length); if (buffer == NULL) return -ENOMEM; length = maint_io.length; if (unlikely(copy_from_user(buffer, (void __user *)(uintptr_t)maint_io.buffer, length))) { ret = -EFAULT; goto out; } offset = maint_io.offset; length /= sizeof(u32); for (i = 0; i < length; i++) { if (local) ret = __rio_local_write_config_32(mport, offset, buffer[i]); else ret = rio_mport_write_config_32(mport, maint_io.rioid, maint_io.hopcount, offset, buffer[i]); if (ret) goto out; offset += 4; } out: vfree(buffer); return ret; } /* * Inbound/outbound memory mapping functions */ static int rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, u16 rioid, u64 raddr, u32 size, dma_addr_t *paddr) { struct rio_mport *mport = md->mport; struct rio_mport_mapping *map; int ret; rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; ret = rio_map_outb_region(mport, 
rioid, raddr, size, 0, paddr); if (ret < 0) goto err_map_outb; map->dir = MAP_OUTBOUND; map->rioid = rioid; map->rio_addr = raddr; map->size = size; map->phys_addr = *paddr; map->filp = filp; map->md = md; kref_init(&map->ref); list_add_tail(&map->node, &md->mappings); return 0; err_map_outb: kfree(map); return ret; } static int rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, u16 rioid, u64 raddr, u32 size, dma_addr_t *paddr) { struct rio_mport_mapping *map; int err = -ENOMEM; mutex_lock(&md->buf_mutex); list_for_each_entry(map, &md->mappings, node) { if (map->dir != MAP_OUTBOUND) continue; if (rioid == map->rioid && raddr == map->rio_addr && size == map->size) { *paddr = map->phys_addr; err = 0; break; } else if (rioid == map->rioid && raddr < (map->rio_addr + map->size - 1) && (raddr + size) > map->rio_addr) { err = -EBUSY; break; } } /* If not found, create new */ if (err == -ENOMEM) err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, size, paddr); mutex_unlock(&md->buf_mutex); return err; } static int rio_mport_obw_map(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *data = priv->md; struct rio_mmap map; dma_addr_t paddr; int ret; if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", map.rioid, map.rio_addr, map.length); ret = rio_mport_get_outbound_mapping(data, filp, map.rioid, map.rio_addr, map.length, &paddr); if (ret < 0) { rmcd_error("Failed to set OBW err= %d", ret); return ret; } map.handle = paddr; if (unlikely(copy_to_user(arg, &map, sizeof(map)))) return -EFAULT; return 0; } /* * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space * * @priv: driver private data * @arg: buffer handle returned by allocation routine */ static int rio_mport_obw_free(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *md = priv->md; u64 handle; struct rio_mport_mapping *map, *_map; if (!md->mport->ops->unmap_outb) return -EPROTONOSUPPORT; if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; rmcd_debug(OBW, "h=0x%llx", handle); mutex_lock(&md->buf_mutex); list_for_each_entry_safe(map, _map, &md->mappings, node) { if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) { if (map->filp == filp) { rmcd_debug(OBW, "kref_put h=0x%llx", handle); map->filp = NULL; kref_put(&map->ref, mport_release_mapping); } break; } } mutex_unlock(&md->buf_mutex); return 0; } /* * maint_hdid_set() - Set the host Device ID * @priv: driver private data * @arg: Device Id */ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; u16 hdid; if (copy_from_user(&hdid, arg, sizeof(hdid))) return -EFAULT; md->mport->host_deviceid = hdid; md->properties.hdid = hdid; rio_local_set_device_id(md->mport, hdid); rmcd_debug(MPORT, "Set host device Id to %d", hdid); return 0; } /* * maint_comptag_set() - Set the host Component Tag * @priv: driver private data * @arg: Component Tag */ static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; u32 comptag; if (copy_from_user(&comptag, arg, sizeof(comptag))) return -EFAULT; rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); rmcd_debug(MPORT, "Set host Component Tag to %d", comptag); return 0; } #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct mport_dma_req { struct kref refcount; struct list_head node; struct file *filp; struct 
mport_cdev_priv *priv; enum rio_transfer_sync sync; struct sg_table sgt; struct page **page_list; unsigned int nr_pages; struct rio_mport_mapping *map; struct dma_chan *dmach; enum dma_data_direction dir; dma_cookie_t cookie; enum dma_status status; struct completion req_comp; }; static void mport_release_def_dma(struct kref *dma_ref) { struct mport_dev *md = container_of(dma_ref, struct mport_dev, dma_ref); rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id); rio_release_dma(md->dma_chan); md->dma_chan = NULL; } static void mport_release_dma(struct kref *dma_ref) { struct mport_cdev_priv *priv = container_of(dma_ref, struct mport_cdev_priv, dma_ref); rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id); complete(&priv->comp); } static void dma_req_free(struct kref *ref) { struct mport_dma_req *req = container_of(ref, struct mport_dma_req, refcount); struct mport_cdev_priv *priv = req->priv; dma_unmap_sg(req->dmach->device->dev, req->sgt.sgl, req->sgt.nents, req->dir); sg_free_table(&req->sgt); if (req->page_list) { unpin_user_pages(req->page_list, req->nr_pages); kfree(req->page_list); } if (req->map) { mutex_lock(&req->map->md->buf_mutex); kref_put(&req->map->ref, mport_release_mapping); mutex_unlock(&req->map->md->buf_mutex); } kref_put(&priv->dma_ref, mport_release_dma); kfree(req); } static void dma_xfer_callback(void *param) { struct mport_dma_req *req = (struct mport_dma_req *)param; struct mport_cdev_priv *priv = req->priv; req->status = dma_async_is_tx_complete(priv->dmach, req->cookie, NULL, NULL); complete(&req->req_comp); kref_put(&req->refcount, dma_req_free); } /* * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA * transfer object. * Returns pointer to DMA transaction descriptor allocated by DMA driver on * success or ERR_PTR (and/or NULL) if failed. Caller must check returned * non-NULL pointer using IS_ERR macro. */ static struct dma_async_tx_descriptor *prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer, struct sg_table *sgt, int nents, enum dma_transfer_direction dir, enum dma_ctrl_flags flags) { struct rio_dma_data tx_data; tx_data.sg = sgt->sgl; tx_data.sg_len = nents; tx_data.rio_addr_u = 0; tx_data.rio_addr = transfer->rio_addr; if (dir == DMA_MEM_TO_DEV) { switch (transfer->method) { case RIO_EXCHANGE_NWRITE: tx_data.wr_type = RDW_ALL_NWRITE; break; case RIO_EXCHANGE_NWRITE_R_ALL: tx_data.wr_type = RDW_ALL_NWRITE_R; break; case RIO_EXCHANGE_NWRITE_R: tx_data.wr_type = RDW_LAST_NWRITE_R; break; case RIO_EXCHANGE_DEFAULT: tx_data.wr_type = RDW_DEFAULT; break; default: return ERR_PTR(-EINVAL); } } return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags); } /* Request DMA channel associated with this mport device. * Try to request DMA channel for every new process that opened given * mport. If a new DMA channel is not available use default channel * which is the first DMA channel opened on mport device. 
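 * Each caller takes a reference on priv->dma_ref that is dropped by
 * put_dma_channel(); the shared default channel is additionally tracked
 * through md->dma_ref and released by mport_release_def_dma().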
*/ static int get_dma_channel(struct mport_cdev_priv *priv) { mutex_lock(&priv->dma_lock); if (!priv->dmach) { priv->dmach = rio_request_mport_dma(priv->md->mport); if (!priv->dmach) { /* Use default DMA channel if available */ if (priv->md->dma_chan) { priv->dmach = priv->md->dma_chan; kref_get(&priv->md->dma_ref); } else { rmcd_error("Failed to get DMA channel"); mutex_unlock(&priv->dma_lock); return -ENODEV; } } else if (!priv->md->dma_chan) { /* Register default DMA channel if we do not have one */ priv->md->dma_chan = priv->dmach; kref_init(&priv->md->dma_ref); rmcd_debug(DMA, "Register DMA_chan %d as default", priv->dmach->chan_id); } kref_init(&priv->dma_ref); init_completion(&priv->comp); } kref_get(&priv->dma_ref); mutex_unlock(&priv->dma_lock); return 0; } static void put_dma_channel(struct mport_cdev_priv *priv) { kref_put(&priv->dma_ref, mport_release_dma); } /* * DMA transfer functions */ static int do_dma_request(struct mport_dma_req *req, struct rio_transfer_io *xfer, enum rio_transfer_sync sync, int nents) { struct mport_cdev_priv *priv; struct sg_table *sgt; struct dma_chan *chan; struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; unsigned long tmo = msecs_to_jiffies(dma_timeout); enum dma_transfer_direction dir; long wret; int ret = 0; priv = req->priv; sgt = &req->sgt; chan = priv->dmach; dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s", current->comm, task_pid_nr(current), dev_name(&chan->dev->device), (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); /* Initialize DMA transaction request */ tx = prep_dma_xfer(chan, xfer, sgt, nents, dir, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!tx) { rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx", (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", xfer->rio_addr, xfer->length); ret = -EIO; goto err_out; } else if (IS_ERR(tx)) { ret = PTR_ERR(tx); rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret, (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", xfer->rio_addr, xfer->length); goto err_out; } tx->callback = dma_xfer_callback; tx->callback_param = req; req->status = DMA_IN_PROGRESS; kref_get(&req->refcount); cookie = dmaengine_submit(tx); req->cookie = cookie; rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current), (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); if (dma_submit_error(cookie)) { rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)", cookie, xfer->rio_addr, xfer->length); kref_put(&req->refcount, dma_req_free); ret = -EIO; goto err_out; } dma_async_issue_pending(chan); if (sync == RIO_TRANSFER_ASYNC) { spin_lock(&priv->req_lock); list_add_tail(&req->node, &priv->async_list); spin_unlock(&priv->req_lock); return cookie; } else if (sync == RIO_TRANSFER_FAF) return 0; wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo); if (wret == 0) { /* Timeout on wait occurred */ rmcd_error("%s(%d) timed out waiting for DMA_%s %d", current->comm, task_pid_nr(current), (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); return -ETIMEDOUT; } else if (wret == -ERESTARTSYS) { /* Wait_for_completion was interrupted by a signal but DMA may * be in progress */ rmcd_error("%s(%d) wait for DMA_%s %d was interrupted", current->comm, task_pid_nr(current), (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); return -EINTR; } if (req->status != DMA_COMPLETE) { /* DMA transaction completion was signaled with error */ rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)", current->comm, task_pid_nr(current), (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie, 
req->status, ret); ret = -EIO; } err_out: return ret; } /* * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from * the remote RapidIO device * @filp: file pointer associated with the call * @transfer_mode: DMA transfer mode * @sync: synchronization mode * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR * DMA_DEV_TO_MEM = read) * @xfer: data transfer descriptor structure */ static int rio_dma_transfer(struct file *filp, u32 transfer_mode, enum rio_transfer_sync sync, enum dma_data_direction dir, struct rio_transfer_io *xfer) { struct mport_cdev_priv *priv = filp->private_data; unsigned long nr_pages = 0; struct page **page_list = NULL; struct mport_dma_req *req; struct mport_dev *md = priv->md; struct dma_chan *chan; int ret; int nents; if (xfer->length == 0) return -EINVAL; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; ret = get_dma_channel(priv); if (ret) { kfree(req); return ret; } chan = priv->dmach; kref_init(&req->refcount); init_completion(&req->req_comp); req->dir = dir; req->filp = filp; req->priv = priv; req->dmach = chan; req->sync = sync; /* * If parameter loc_addr != NULL, we are transferring data from/to * data buffer allocated in user-space: lock in memory user-space * buffer pages and build an SG table for DMA transfer request * * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is * used for DMA data transfers: build single entry SG table using * offset within the internal buffer specified by handle parameter. */ if (xfer->loc_addr) { unsigned int offset; long pinned; offset = lower_32_bits(offset_in_page(xfer->loc_addr)); nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL); if (page_list == NULL) { ret = -ENOMEM; goto err_req; } pinned = pin_user_pages_fast( (unsigned long)xfer->loc_addr & PAGE_MASK, nr_pages, dir == DMA_FROM_DEVICE ? 
FOLL_WRITE : 0, page_list); if (pinned != nr_pages) { if (pinned < 0) { rmcd_error("pin_user_pages_fast err=%ld", pinned); nr_pages = 0; } else { rmcd_error("pinned %ld out of %ld pages", pinned, nr_pages); /* * Set nr_pages up to mean "how many pages to unpin, in * the error handler: */ nr_pages = pinned; } ret = -EFAULT; goto err_pg; } ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages, offset, xfer->length, GFP_KERNEL); if (ret) { rmcd_error("sg_alloc_table failed with err=%d", ret); goto err_pg; } req->page_list = page_list; req->nr_pages = nr_pages; } else { dma_addr_t baddr; struct rio_mport_mapping *map; baddr = (dma_addr_t)xfer->handle; mutex_lock(&md->buf_mutex); list_for_each_entry(map, &md->mappings, node) { if (baddr >= map->phys_addr && baddr < (map->phys_addr + map->size)) { kref_get(&map->ref); req->map = map; break; } } mutex_unlock(&md->buf_mutex); if (req->map == NULL) { ret = -ENOMEM; goto err_req; } if (xfer->length + xfer->offset > req->map->size) { ret = -EINVAL; goto err_req; } ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL); if (unlikely(ret)) { rmcd_error("sg_alloc_table failed for internal buf"); goto err_req; } sg_set_buf(req->sgt.sgl, req->map->virt_addr + (baddr - req->map->phys_addr) + xfer->offset, xfer->length); } nents = dma_map_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir); if (nents == 0) { rmcd_error("Failed to map SG list"); ret = -EFAULT; goto err_pg; } ret = do_dma_request(req, xfer, sync, nents); if (ret >= 0) { if (sync == RIO_TRANSFER_ASYNC) return ret; /* return ASYNC cookie */ } else { rmcd_debug(DMA, "do_dma_request failed with err=%d", ret); } err_pg: if (!req->page_list) { unpin_user_pages(page_list, nr_pages); kfree(page_list); } err_req: kref_put(&req->refcount, dma_req_free); return ret; } static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = filp->private_data; struct rio_transaction transaction; struct rio_transfer_io *transfer; enum dma_data_direction dir; int i, ret = 0; size_t size; if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) return -EFAULT; if (transaction.count != 1) /* only single transfer for now */ return -EINVAL; if ((transaction.transfer_mode & priv->md->properties.transfer_mode) == 0) return -ENODEV; size = array_size(sizeof(*transfer), transaction.count); transfer = vmalloc(size); if (!transfer) return -ENOMEM; if (unlikely(copy_from_user(transfer, (void __user *)(uintptr_t)transaction.block, size))) { ret = -EFAULT; goto out_free; } dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; for (i = 0; i < transaction.count && ret == 0; i++) ret = rio_dma_transfer(filp, transaction.transfer_mode, transaction.sync, dir, &transfer[i]); if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, transfer, size))) ret = -EFAULT; out_free: vfree(transfer); return ret; } static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv; struct rio_async_tx_wait w_param; struct mport_dma_req *req; dma_cookie_t cookie; unsigned long tmo; long wret; int found = 0; int ret; priv = (struct mport_cdev_priv *)filp->private_data; if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param)))) return -EFAULT; cookie = w_param.token; if (w_param.timeout) tmo = msecs_to_jiffies(w_param.timeout); else /* Use default DMA timeout */ tmo = msecs_to_jiffies(dma_timeout); spin_lock(&priv->req_lock); list_for_each_entry(req, &priv->async_list, node) { if (req->cookie == cookie) { list_del(&req->node); found = 1; break; } } spin_unlock(&priv->req_lock); if (!found) return -EAGAIN; wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo); if (wret == 0) { /* Timeout on wait occurred */ rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s", current->comm, task_pid_nr(current), (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE"); ret = -ETIMEDOUT; goto err_tmo; } else if (wret == -ERESTARTSYS) { /* Wait_for_completion was interrupted by a signal but DMA may * be still in progress */ rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted", current->comm, task_pid_nr(current), (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE"); ret = -EINTR; goto err_tmo; } if (req->status != DMA_COMPLETE) { /* DMA transaction completion signaled with transfer error */ rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d", current->comm, task_pid_nr(current), (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE", req->status); ret = -EIO; } else ret = 0; if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED) kref_put(&req->refcount, dma_req_free); return ret; err_tmo: /* Return request back into async queue */ spin_lock(&priv->req_lock); list_add_tail(&req->node, &priv->async_list); spin_unlock(&priv->req_lock); return ret; } static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport_mapping *map; map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, &map->phys_addr, GFP_KERNEL); if (map->virt_addr == NULL) { kfree(map); return -ENOMEM; } map->dir = MAP_DMA; map->size = size; map->filp = filp; map->md = md; kref_init(&map->ref); mutex_lock(&md->buf_mutex); list_add_tail(&map->node, &md->mappings); mutex_unlock(&md->buf_mutex); *mapping = map; return 0; } static int rio_mport_alloc_dma(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *md = priv->md; struct rio_dma_mem map; struct rio_mport_mapping *mapping = NULL; int ret; if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); if (ret) return ret; map.dma_handle = mapping->phys_addr; if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { mutex_lock(&md->buf_mutex); kref_put(&mapping->ref, mport_release_mapping); mutex_unlock(&md->buf_mutex); return -EFAULT; } return 0; } static int rio_mport_free_dma(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = 
filp->private_data; struct mport_dev *md = priv->md; u64 handle; int ret = -EFAULT; struct rio_mport_mapping *map, *_map; if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; rmcd_debug(EXIT, "filp=%p", filp); mutex_lock(&md->buf_mutex); list_for_each_entry_safe(map, _map, &md->mappings, node) { if (map->dir == MAP_DMA && map->phys_addr == handle && map->filp == filp) { kref_put(&map->ref, mport_release_mapping); ret = 0; break; } } mutex_unlock(&md->buf_mutex); if (ret == -EFAULT) { rmcd_debug(DMA, "ERR no matching mapping"); return ret; } return 0; } #else static int rio_mport_transfer_ioctl(struct file *filp, void *arg) { return -ENODEV; } static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) { return -ENODEV; } static int rio_mport_alloc_dma(struct file *filp, void __user *arg) { return -ENODEV; } static int rio_mport_free_dma(struct file *filp, void __user *arg) { return -ENODEV; } #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ /* * Inbound/outbound memory mapping functions */ static int rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, u64 raddr, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport *mport = md->mport; struct rio_mport_mapping *map; int ret; /* rio_map_inb_region() accepts u32 size */ if (size > 0xffffffff) return -EINVAL; map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; map->virt_addr = dma_alloc_coherent(mport->dev.parent, size, &map->phys_addr, GFP_KERNEL); if (map->virt_addr == NULL) { ret = -ENOMEM; goto err_dma_alloc; } if (raddr == RIO_MAP_ANY_ADDR) raddr = map->phys_addr; ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); if (ret < 0) goto err_map_inb; map->dir = MAP_INBOUND; map->rio_addr = raddr; map->size = size; map->filp = filp; map->md = md; kref_init(&map->ref); mutex_lock(&md->buf_mutex); list_add_tail(&map->node, &md->mappings); mutex_unlock(&md->buf_mutex); *mapping = map; return 0; err_map_inb: dma_free_coherent(mport->dev.parent, size, map->virt_addr, map->phys_addr); err_dma_alloc: kfree(map); return ret; } static int rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, u64 raddr, u64 size, struct rio_mport_mapping **mapping) { struct rio_mport_mapping *map; int err = -ENOMEM; if (raddr == RIO_MAP_ANY_ADDR) goto get_new; mutex_lock(&md->buf_mutex); list_for_each_entry(map, &md->mappings, node) { if (map->dir != MAP_INBOUND) continue; if (raddr == map->rio_addr && size == map->size) { /* allow exact match only */ *mapping = map; err = 0; break; } else if (raddr < (map->rio_addr + map->size - 1) && (raddr + size) > map->rio_addr) { err = -EBUSY; break; } } mutex_unlock(&md->buf_mutex); if (err != -ENOMEM) return err; get_new: /* not found, create new */ return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); } static int rio_mport_map_inbound(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *md = priv->md; struct rio_mmap map; struct rio_mport_mapping *mapping = NULL; int ret; if (!md->mport->ops->map_inb) return -EPROTONOSUPPORT; if (unlikely(copy_from_user(&map, arg, sizeof(map)))) return -EFAULT; rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, map.length, &mapping); if (ret) return ret; map.handle = mapping->phys_addr; map.rio_addr = mapping->rio_addr; if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { /* Delete mapping if it was created by this request */ if (ret == 0 && 
mapping->filp == filp) { mutex_lock(&md->buf_mutex); kref_put(&mapping->ref, mport_release_mapping); mutex_unlock(&md->buf_mutex); } return -EFAULT; } return 0; } /* * rio_mport_inbound_free() - unmap from RapidIO address space and free * previously allocated inbound DMA coherent buffer * @priv: driver private data * @arg: buffer handle returned by allocation routine */ static int rio_mport_inbound_free(struct file *filp, void __user *arg) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *md = priv->md; u64 handle; struct rio_mport_mapping *map, *_map; rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); if (!md->mport->ops->unmap_inb) return -EPROTONOSUPPORT; if (copy_from_user(&handle, arg, sizeof(handle))) return -EFAULT; mutex_lock(&md->buf_mutex); list_for_each_entry_safe(map, _map, &md->mappings, node) { if (map->dir == MAP_INBOUND && map->phys_addr == handle) { if (map->filp == filp) { map->filp = NULL; kref_put(&map->ref, mport_release_mapping); } break; } } mutex_unlock(&md->buf_mutex); return 0; } /* * maint_port_idx_get() - Get the port index of the mport instance * @priv: driver private data * @arg: port index */ static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; u32 port_idx = md->mport->index; rmcd_debug(MPORT, "port_index=%d", port_idx); if (copy_to_user(arg, &port_idx, sizeof(port_idx))) return -EFAULT; return 0; } static int rio_mport_add_event(struct mport_cdev_priv *priv, struct rio_event *event) { int overflow; if (!(priv->event_mask & event->header)) return -EACCES; spin_lock(&priv->fifo_lock); overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event) || kfifo_in(&priv->event_fifo, (unsigned char *)event, sizeof(*event)) != sizeof(*event); spin_unlock(&priv->fifo_lock); wake_up_interruptible(&priv->event_rx_wait); if (overflow) { dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n"); return -EBUSY; } return 0; } static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, u16 src, u16 dst, u16 info) { struct mport_dev *data = dev_id; struct mport_cdev_priv *priv; struct rio_mport_db_filter *db_filter; struct rio_event event; int handled; event.header = RIO_DOORBELL; event.u.doorbell.rioid = src; event.u.doorbell.payload = info; handled = 0; spin_lock(&data->db_lock); list_for_each_entry(db_filter, &data->doorbells, data_node) { if (((db_filter->filter.rioid == RIO_INVALID_DESTID || db_filter->filter.rioid == src)) && info >= db_filter->filter.low && info <= db_filter->filter.high) { priv = db_filter->priv; rio_mport_add_event(priv, &event); handled = 1; } } spin_unlock(&data->db_lock); if (!handled) dev_warn(&data->dev, "%s: spurious DB received from 0x%x, info=0x%04x\n", __func__, src, info); } static int rio_mport_add_db_filter(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; struct rio_mport_db_filter *db_filter; struct rio_doorbell_filter filter; unsigned long flags; int ret; if (copy_from_user(&filter, arg, sizeof(filter))) return -EFAULT; if (filter.low > filter.high) return -EINVAL; ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, rio_mport_doorbell_handler); if (ret) { rmcd_error("%s failed to register IBDB, err=%d", dev_name(&md->dev), ret); return ret; } db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL); if (db_filter == NULL) { rio_release_inb_dbell(md->mport, filter.low, filter.high); return -ENOMEM; } db_filter->filter = filter; db_filter->priv = priv; spin_lock_irqsave(&md->db_lock, 
flags); list_add_tail(&db_filter->priv_node, &priv->db_filters); list_add_tail(&db_filter->data_node, &md->doorbells); spin_unlock_irqrestore(&md->db_lock, flags); return 0; } static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter) { list_del(&db_filter->data_node); list_del(&db_filter->priv_node); kfree(db_filter); } static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, void __user *arg) { struct rio_mport_db_filter *db_filter; struct rio_doorbell_filter filter; unsigned long flags; int ret = -EINVAL; if (copy_from_user(&filter, arg, sizeof(filter))) return -EFAULT; if (filter.low > filter.high) return -EINVAL; spin_lock_irqsave(&priv->md->db_lock, flags); list_for_each_entry(db_filter, &priv->db_filters, priv_node) { if (db_filter->filter.rioid == filter.rioid && db_filter->filter.low == filter.low && db_filter->filter.high == filter.high) { rio_mport_delete_db_filter(db_filter); ret = 0; break; } } spin_unlock_irqrestore(&priv->md->db_lock, flags); if (!ret) rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); return ret; } static int rio_mport_match_pw(union rio_pw_msg *msg, struct rio_pw_filter *filter) { if ((msg->em.comptag & filter->mask) < filter->low || (msg->em.comptag & filter->mask) > filter->high) return 0; return 1; } static int rio_mport_pw_handler(struct rio_mport *mport, void *context, union rio_pw_msg *msg, int step) { struct mport_dev *md = context; struct mport_cdev_priv *priv; struct rio_mport_pw_filter *pw_filter; struct rio_event event; int handled; event.header = RIO_PORTWRITE; memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE); handled = 0; spin_lock(&md->pw_lock); list_for_each_entry(pw_filter, &md->portwrites, md_node) { if (rio_mport_match_pw(msg, &pw_filter->filter)) { priv = pw_filter->priv; rio_mport_add_event(priv, &event); handled = 1; } } spin_unlock(&md->pw_lock); if (!handled) { printk_ratelimited(KERN_WARNING DRV_NAME ": mport%d received spurious PW from 0x%08x\n", mport->id, msg->em.comptag); } return 0; } static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; struct rio_mport_pw_filter *pw_filter; struct rio_pw_filter filter; unsigned long flags; int hadd = 0; if (copy_from_user(&filter, arg, sizeof(filter))) return -EFAULT; pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL); if (pw_filter == NULL) return -ENOMEM; pw_filter->filter = filter; pw_filter->priv = priv; spin_lock_irqsave(&md->pw_lock, flags); if (list_empty(&md->portwrites)) hadd = 1; list_add_tail(&pw_filter->priv_node, &priv->pw_filters); list_add_tail(&pw_filter->md_node, &md->portwrites); spin_unlock_irqrestore(&md->pw_lock, flags); if (hadd) { int ret; ret = rio_add_mport_pw_handler(md->mport, md, rio_mport_pw_handler); if (ret) { dev_err(&md->dev, "%s: failed to add IB_PW handler, err=%d\n", __func__, ret); return ret; } rio_pw_enable(md->mport, 1); } return 0; } static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter) { list_del(&pw_filter->md_node); list_del(&pw_filter->priv_node); kfree(pw_filter); } static int rio_mport_match_pw_filter(struct rio_pw_filter *a, struct rio_pw_filter *b) { if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high)) return 1; return 0; } static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; struct rio_mport_pw_filter *pw_filter; struct rio_pw_filter filter; unsigned long flags; int ret = -EINVAL; int hdel = 0; if 
(copy_from_user(&filter, arg, sizeof(filter))) return -EFAULT; spin_lock_irqsave(&md->pw_lock, flags); list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) { if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) { rio_mport_delete_pw_filter(pw_filter); ret = 0; break; } } if (list_empty(&md->portwrites)) hdel = 1; spin_unlock_irqrestore(&md->pw_lock, flags); if (hdel) { rio_del_mport_pw_handler(md->mport, priv->md, rio_mport_pw_handler); rio_pw_enable(md->mport, 0); } return ret; } /* * rio_release_dev - release routine for kernel RIO device object * @dev: kernel device object associated with a RIO device structure * * Frees a RIO device struct associated a RIO device struct. * The RIO device struct is freed. */ static void rio_release_dev(struct device *dev) { struct rio_dev *rdev; rdev = to_rio_dev(dev); pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev)); kfree(rdev); } static void rio_release_net(struct device *dev) { struct rio_net *net; net = to_rio_net(dev); rmcd_debug(RDEV, "net_%d", net->id); kfree(net); } /* * rio_mport_add_riodev - creates a kernel RIO device object * * Allocates a RIO device data structure and initializes required fields based * on device's configuration space contents. * If the device has switch capabilities, then a switch specific portion is * allocated and configured. */ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, void __user *arg) { struct mport_dev *md = priv->md; struct rio_rdev_info dev_info; struct rio_dev *rdev; struct rio_switch *rswitch = NULL; struct rio_mport *mport; struct device *dev; size_t size; u32 rval; u32 swpinfo = 0; u16 destid; u8 hopcount; int err; if (copy_from_user(&dev_info, arg, sizeof(dev_info))) return -EFAULT; dev_info.name[sizeof(dev_info.name) - 1] = '\0'; rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, dev_info.comptag, dev_info.destid, dev_info.hopcount); dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name); if (dev) { rmcd_debug(RDEV, "device %s already exists", dev_info.name); put_device(dev); return -EEXIST; } size = sizeof(*rdev); mport = md->mport; destid = dev_info.destid; hopcount = dev_info.hopcount; if (rio_mport_read_config_32(mport, destid, hopcount, RIO_PEF_CAR, &rval)) return -EIO; if (rval & RIO_PEF_SWITCH) { rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &swpinfo); size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); } rdev = kzalloc(size, GFP_KERNEL); if (rdev == NULL) return -ENOMEM; if (mport->net == NULL) { struct rio_net *net; net = rio_alloc_net(mport); if (!net) { err = -ENOMEM; rmcd_debug(RDEV, "failed to allocate net object"); goto cleanup; } net->id = mport->id; net->hport = mport; dev_set_name(&net->dev, "rnet_%d", net->id); net->dev.parent = &mport->dev; net->dev.release = rio_release_net; err = rio_add_net(net); if (err) { rmcd_debug(RDEV, "failed to register net, err=%d", err); kfree(net); goto cleanup; } } rdev->net = mport->net; rdev->pef = rval; rdev->swpinfo = swpinfo; rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_ID_CAR, &rval); rdev->did = rval >> 16; rdev->vid = rval & 0xffff; rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR, &rdev->device_rev); rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR, &rval); rdev->asm_did = rval >> 16; rdev->asm_vid = rval & 0xffff; rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR, &rval); rdev->asm_rev = rval >> 16; if (rdev->pef & RIO_PEF_EXT_FEATURES) { rdev->efptr = rval & 0xffff; 
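/* The low 16 bits of the Assembly Information CAR hold the extended
 * features pointer; the physical layer and error management extended
 * feature blocks are located next. */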
rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, hopcount, &rdev->phys_rmap); rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, hopcount, RIO_EFB_ERR_MGMNT); } rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR, &rdev->src_ops); rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR, &rdev->dst_ops); rdev->comp_tag = dev_info.comptag; rdev->destid = destid; /* hopcount is stored as specified by a caller, regardles of EP or SW */ rdev->hopcount = hopcount; if (rdev->pef & RIO_PEF_SWITCH) { rswitch = rdev->rswitch; rswitch->route_table = NULL; } if (strlen(dev_info.name)) dev_set_name(&rdev->dev, "%s", dev_info.name); else if (rdev->pef & RIO_PEF_SWITCH) dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id, rdev->comp_tag & RIO_CTAG_UDEVID); else dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id, rdev->comp_tag & RIO_CTAG_UDEVID); INIT_LIST_HEAD(&rdev->net_list); rdev->dev.parent = &mport->net->dev; rio_attach_device(rdev); rdev->dev.release = rio_release_dev; if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); err = rio_add_device(rdev); if (err) { put_device(&rdev->dev); return err; } rio_dev_get(rdev); return 0; cleanup: kfree(rdev); return err; } static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) { struct rio_rdev_info dev_info; struct rio_dev *rdev = NULL; struct device *dev; struct rio_mport *mport; struct rio_net *net; if (copy_from_user(&dev_info, arg, sizeof(dev_info))) return -EFAULT; dev_info.name[sizeof(dev_info.name) - 1] = '\0'; mport = priv->md->mport; /* If device name is specified, removal by name has priority */ if (strlen(dev_info.name)) { dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name); if (dev) rdev = to_rio_dev(dev); } else { do { rdev = rio_get_comptag(dev_info.comptag, rdev); if (rdev && rdev->dev.parent == &mport->net->dev && rdev->destid == dev_info.destid && rdev->hopcount == dev_info.hopcount) break; } while (rdev); } if (!rdev) { rmcd_debug(RDEV, "device name:%s ct:0x%x did:0x%x hc:0x%x not found", dev_info.name, dev_info.comptag, dev_info.destid, dev_info.hopcount); return -ENODEV; } net = rdev->net; rio_dev_put(rdev); rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); if (list_empty(&net->devices)) { rio_free_net(net); mport->net = NULL; } return 0; } /* * Mport cdev management */ /* * mport_cdev_open() - Open character device (mport) */ static int mport_cdev_open(struct inode *inode, struct file *filp) { int ret; int minor = iminor(inode); struct mport_dev *chdev; struct mport_cdev_priv *priv; /* Test for valid device */ if (minor >= RIO_MAX_MPORTS) { rmcd_error("Invalid minor device number"); return -EINVAL; } chdev = container_of(inode->i_cdev, struct mport_dev, cdev); rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp); if (atomic_read(&chdev->active) == 0) return -ENODEV; get_device(&chdev->dev); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { put_device(&chdev->dev); return -ENOMEM; } priv->md = chdev; INIT_LIST_HEAD(&priv->db_filters); INIT_LIST_HEAD(&priv->pw_filters); spin_lock_init(&priv->fifo_lock); init_waitqueue_head(&priv->event_rx_wait); ret = kfifo_alloc(&priv->event_fifo, sizeof(struct rio_event) * MPORT_EVENT_DEPTH, GFP_KERNEL); if (ret < 0) { put_device(&chdev->dev); dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); ret = -ENOMEM; goto err_fifo; } #ifdef CONFIG_RAPIDIO_DMA_ENGINE INIT_LIST_HEAD(&priv->async_list); spin_lock_init(&priv->req_lock); mutex_init(&priv->dma_lock); #endif 
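/* Only per-file DMA bookkeeping is set up here; a DMA channel is
 * requested lazily by get_dma_channel() when this file descriptor
 * issues its first transfer. */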
mutex_lock(&chdev->file_mutex); list_add_tail(&priv->list, &chdev->file_list); mutex_unlock(&chdev->file_mutex); filp->private_data = priv; goto out; err_fifo: kfree(priv); out: return ret; } static int mport_cdev_fasync(int fd, struct file *filp, int mode) { struct mport_cdev_priv *priv = filp->private_data; return fasync_helper(fd, filp, mode, &priv->async_queue); } #ifdef CONFIG_RAPIDIO_DMA_ENGINE static void mport_cdev_release_dma(struct file *filp) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *md; struct mport_dma_req *req, *req_next; unsigned long tmo = msecs_to_jiffies(dma_timeout); long wret; LIST_HEAD(list); rmcd_debug(EXIT, "from filp=%p %s(%d)", filp, current->comm, task_pid_nr(current)); if (!priv->dmach) { rmcd_debug(EXIT, "No DMA channel for filp=%p", filp); return; } md = priv->md; spin_lock(&priv->req_lock); if (!list_empty(&priv->async_list)) { rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)", filp, current->comm, task_pid_nr(current)); list_splice_init(&priv->async_list, &list); } spin_unlock(&priv->req_lock); if (!list_empty(&list)) { rmcd_debug(EXIT, "temp list not empty"); list_for_each_entry_safe(req, req_next, &list, node) { rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", req->filp, req->cookie, completion_done(&req->req_comp)?"yes":"no"); list_del(&req->node); kref_put(&req->refcount, dma_req_free); } } put_dma_channel(priv); wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo); if (wret <= 0) { rmcd_error("%s(%d) failed waiting for DMA release err=%ld", current->comm, task_pid_nr(current), wret); } if (priv->dmach != priv->md->dma_chan) { rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)", filp, current->comm, task_pid_nr(current)); rio_release_dma(priv->dmach); } else { rmcd_debug(EXIT, "Adjust default DMA channel refcount"); kref_put(&md->dma_ref, mport_release_def_dma); } priv->dmach = NULL; } #else #define mport_cdev_release_dma(priv) do {} while (0) #endif /* * mport_cdev_release() - Release character device */ static int mport_cdev_release(struct inode *inode, struct file *filp) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *chdev; struct rio_mport_pw_filter *pw_filter, *pw_filter_next; struct rio_mport_db_filter *db_filter, *db_filter_next; struct rio_mport_mapping *map, *_map; unsigned long flags; rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); chdev = priv->md; mport_cdev_release_dma(filp); priv->event_mask = 0; spin_lock_irqsave(&chdev->pw_lock, flags); if (!list_empty(&priv->pw_filters)) { list_for_each_entry_safe(pw_filter, pw_filter_next, &priv->pw_filters, priv_node) rio_mport_delete_pw_filter(pw_filter); } spin_unlock_irqrestore(&chdev->pw_lock, flags); spin_lock_irqsave(&chdev->db_lock, flags); list_for_each_entry_safe(db_filter, db_filter_next, &priv->db_filters, priv_node) { rio_mport_delete_db_filter(db_filter); } spin_unlock_irqrestore(&chdev->db_lock, flags); kfifo_free(&priv->event_fifo); mutex_lock(&chdev->buf_mutex); list_for_each_entry_safe(map, _map, &chdev->mappings, node) { if (map->filp == filp) { rmcd_debug(EXIT, "release mapping %p filp=%p", map->virt_addr, filp); kref_put(&map->ref, mport_release_mapping); } } mutex_unlock(&chdev->buf_mutex); mport_cdev_fasync(-1, filp, 0); filp->private_data = NULL; mutex_lock(&chdev->file_mutex); list_del(&priv->list); mutex_unlock(&chdev->file_mutex); put_device(&chdev->dev); kfree(priv); return 0; } /* * mport_cdev_ioctl() - IOCTLs for character device */ static long mport_cdev_ioctl(struct file 
*filp, unsigned int cmd, unsigned long arg) { int err = -EINVAL; struct mport_cdev_priv *data = filp->private_data; struct mport_dev *md = data->md; if (atomic_read(&md->active) == 0) return -ENODEV; switch (cmd) { case RIO_MPORT_MAINT_READ_LOCAL: return rio_mport_maint_rd(data, (void __user *)arg, 1); case RIO_MPORT_MAINT_WRITE_LOCAL: return rio_mport_maint_wr(data, (void __user *)arg, 1); case RIO_MPORT_MAINT_READ_REMOTE: return rio_mport_maint_rd(data, (void __user *)arg, 0); case RIO_MPORT_MAINT_WRITE_REMOTE: return rio_mport_maint_wr(data, (void __user *)arg, 0); case RIO_MPORT_MAINT_HDID_SET: return maint_hdid_set(data, (void __user *)arg); case RIO_MPORT_MAINT_COMPTAG_SET: return maint_comptag_set(data, (void __user *)arg); case RIO_MPORT_MAINT_PORT_IDX_GET: return maint_port_idx_get(data, (void __user *)arg); case RIO_MPORT_GET_PROPERTIES: md->properties.hdid = md->mport->host_deviceid; if (copy_to_user((void __user *)arg, &(md->properties), sizeof(md->properties))) return -EFAULT; return 0; case RIO_ENABLE_DOORBELL_RANGE: return rio_mport_add_db_filter(data, (void __user *)arg); case RIO_DISABLE_DOORBELL_RANGE: return rio_mport_remove_db_filter(data, (void __user *)arg); case RIO_ENABLE_PORTWRITE_RANGE: return rio_mport_add_pw_filter(data, (void __user *)arg); case RIO_DISABLE_PORTWRITE_RANGE: return rio_mport_remove_pw_filter(data, (void __user *)arg); case RIO_SET_EVENT_MASK: data->event_mask = (u32)arg; return 0; case RIO_GET_EVENT_MASK: if (copy_to_user((void __user *)arg, &data->event_mask, sizeof(u32))) return -EFAULT; return 0; case RIO_MAP_OUTBOUND: return rio_mport_obw_map(filp, (void __user *)arg); case RIO_MAP_INBOUND: return rio_mport_map_inbound(filp, (void __user *)arg); case RIO_UNMAP_OUTBOUND: return rio_mport_obw_free(filp, (void __user *)arg); case RIO_UNMAP_INBOUND: return rio_mport_inbound_free(filp, (void __user *)arg); case RIO_ALLOC_DMA: return rio_mport_alloc_dma(filp, (void __user *)arg); case RIO_FREE_DMA: return rio_mport_free_dma(filp, (void __user *)arg); case RIO_WAIT_FOR_ASYNC: return rio_mport_wait_for_async_dma(filp, (void __user *)arg); case RIO_TRANSFER: return rio_mport_transfer_ioctl(filp, (void __user *)arg); case RIO_DEV_ADD: return rio_mport_add_riodev(data, (void __user *)arg); case RIO_DEV_DEL: return rio_mport_del_riodev(data, (void __user *)arg); default: break; } return err; } /* * mport_release_mapping - free mapping resources and info structure * @ref: a pointer to the kref within struct rio_mport_mapping * * NOTE: Shall be called while holding buf_mutex. 
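 * Depending on map->dir this unmaps the inbound or outbound window and,
 * for inbound/DMA mappings, frees the DMA coherent buffer before the
 * mapping structure itself is freed.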
*/ static void mport_release_mapping(struct kref *ref) { struct rio_mport_mapping *map = container_of(ref, struct rio_mport_mapping, ref); struct rio_mport *mport = map->md->mport; rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s", map->dir, map->virt_addr, &map->phys_addr, mport->name); list_del(&map->node); switch (map->dir) { case MAP_INBOUND: rio_unmap_inb_region(mport, map->phys_addr); fallthrough; case MAP_DMA: dma_free_coherent(mport->dev.parent, map->size, map->virt_addr, map->phys_addr); break; case MAP_OUTBOUND: rio_unmap_outb_region(mport, map->rioid, map->rio_addr); break; } kfree(map); } static void mport_mm_open(struct vm_area_struct *vma) { struct rio_mport_mapping *map = vma->vm_private_data; rmcd_debug(MMAP, "%pad", &map->phys_addr); kref_get(&map->ref); } static void mport_mm_close(struct vm_area_struct *vma) { struct rio_mport_mapping *map = vma->vm_private_data; rmcd_debug(MMAP, "%pad", &map->phys_addr); mutex_lock(&map->md->buf_mutex); kref_put(&map->ref, mport_release_mapping); mutex_unlock(&map->md->buf_mutex); } static const struct vm_operations_struct vm_ops = { .open = mport_mm_open, .close = mport_mm_close, }; static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma) { struct mport_cdev_priv *priv = filp->private_data; struct mport_dev *md; size_t size = vma->vm_end - vma->vm_start; dma_addr_t baddr; unsigned long offset; int found = 0, ret; struct rio_mport_mapping *map; rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx", (unsigned int)size, vma->vm_pgoff); md = priv->md; baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT); mutex_lock(&md->buf_mutex); list_for_each_entry(map, &md->mappings, node) { if (baddr >= map->phys_addr && baddr < (map->phys_addr + map->size)) { found = 1; break; } } mutex_unlock(&md->buf_mutex); if (!found) return -ENOMEM; offset = baddr - map->phys_addr; if (size + offset > map->size) return -EINVAL; vma->vm_pgoff = offset >> PAGE_SHIFT; rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff); if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) ret = dma_mmap_coherent(md->mport->dev.parent, vma, map->virt_addr, map->phys_addr, map->size); else if (map->dir == MAP_OUTBOUND) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ret = vm_iomap_memory(vma, map->phys_addr, map->size); } else { rmcd_error("Attempt to mmap unsupported mapping type"); ret = -EIO; } if (!ret) { vma->vm_private_data = map; vma->vm_ops = &vm_ops; mport_mm_open(vma); } else { rmcd_error("MMAP exit with err=%d", ret); } return ret; } static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait) { struct mport_cdev_priv *priv = filp->private_data; poll_wait(filp, &priv->event_rx_wait, wait); if (kfifo_len(&priv->event_fifo)) return EPOLLIN | EPOLLRDNORM; return 0; } static ssize_t mport_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct mport_cdev_priv *priv = filp->private_data; int copied; ssize_t ret; if (!count) return 0; if (kfifo_is_empty(&priv->event_fifo) && (filp->f_flags & O_NONBLOCK)) return -EAGAIN; if (count % sizeof(struct rio_event)) return -EINVAL; ret = wait_event_interruptible(priv->event_rx_wait, kfifo_len(&priv->event_fifo) != 0); if (ret) return ret; while (ret < count) { if (kfifo_to_user(&priv->event_fifo, buf, sizeof(struct rio_event), &copied)) return -EFAULT; ret += copied; buf += copied; } return ret; } static ssize_t mport_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct mport_cdev_priv *priv = filp->private_data; struct rio_mport *mport 
= priv->md->mport; struct rio_event event; int len, ret; if (!count) return 0; if (count % sizeof(event)) return -EINVAL; len = 0; while ((count - len) >= (int)sizeof(event)) { if (copy_from_user(&event, buf, sizeof(event))) return -EFAULT; if (event.header != RIO_DOORBELL) return -EINVAL; ret = rio_mport_send_doorbell(mport, event.u.doorbell.rioid, event.u.doorbell.payload); if (ret < 0) return ret; len += sizeof(event); buf += sizeof(event); } return len; } static const struct file_operations mport_fops = { .owner = THIS_MODULE, .open = mport_cdev_open, .release = mport_cdev_release, .poll = mport_cdev_poll, .read = mport_read, .write = mport_write, .mmap = mport_cdev_mmap, .fasync = mport_cdev_fasync, .unlocked_ioctl = mport_cdev_ioctl }; /* * Character device management */ static void mport_device_release(struct device *dev) { struct mport_dev *md; rmcd_debug(EXIT, "%s", dev_name(dev)); md = container_of(dev, struct mport_dev, dev); kfree(md); } /* * mport_cdev_add() - Create mport_dev from rio_mport * @mport: RapidIO master port */ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) { int ret = 0; struct mport_dev *md; struct rio_mport_attr attr; md = kzalloc(sizeof(*md), GFP_KERNEL); if (!md) { rmcd_error("Unable allocate a device object"); return NULL; } md->mport = mport; mutex_init(&md->buf_mutex); mutex_init(&md->file_mutex); INIT_LIST_HEAD(&md->file_list); device_initialize(&md->dev); md->dev.devt = MKDEV(MAJOR(dev_number), mport->id); md->dev.class = dev_class; md->dev.parent = &mport->dev; md->dev.release = mport_device_release; dev_set_name(&md->dev, DEV_NAME "%d", mport->id); atomic_set(&md->active, 1); cdev_init(&md->cdev, &mport_fops); md->cdev.owner = THIS_MODULE; INIT_LIST_HEAD(&md->doorbells); spin_lock_init(&md->db_lock); INIT_LIST_HEAD(&md->portwrites); spin_lock_init(&md->pw_lock); INIT_LIST_HEAD(&md->mappings); md->properties.id = mport->id; md->properties.sys_size = mport->sys_size; md->properties.hdid = mport->host_deviceid; md->properties.index = mport->index; /* The transfer_mode property will be returned through mport query * interface */ #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; #endif ret = cdev_device_add(&md->cdev, &md->dev); if (ret) { rmcd_error("Failed to register mport %d (err=%d)", mport->id, ret); goto err_cdev; } ret = rio_query_mport(mport, &attr); if (!ret) { md->properties.flags = attr.flags; md->properties.link_speed = attr.link_speed; md->properties.link_width = attr.link_width; md->properties.dma_max_sge = attr.dma_max_sge; md->properties.dma_max_size = attr.dma_max_size; md->properties.dma_align = attr.dma_align; md->properties.cap_sys_size = 0; md->properties.cap_transfer_mode = 0; md->properties.cap_addr_size = 0; } else pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n", mport->name, MAJOR(dev_number), mport->id); mutex_lock(&mport_devs_lock); list_add_tail(&md->node, &mport_devs); mutex_unlock(&mport_devs_lock); pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n", mport->name, MAJOR(dev_number), mport->id); return md; err_cdev: put_device(&md->dev); return NULL; } /* * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release * associated DMA channels. 
*/ static void mport_cdev_terminate_dma(struct mport_dev *md) { #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct mport_cdev_priv *client; rmcd_debug(DMA, "%s", dev_name(&md->dev)); mutex_lock(&md->file_mutex); list_for_each_entry(client, &md->file_list, list) { if (client->dmach) { dmaengine_terminate_all(client->dmach); rio_release_dma(client->dmach); } } mutex_unlock(&md->file_mutex); if (md->dma_chan) { dmaengine_terminate_all(md->dma_chan); rio_release_dma(md->dma_chan); md->dma_chan = NULL; } #endif } /* * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open * mport_cdev files. */ static int mport_cdev_kill_fasync(struct mport_dev *md) { unsigned int files = 0; struct mport_cdev_priv *client; mutex_lock(&md->file_mutex); list_for_each_entry(client, &md->file_list, list) { if (client->async_queue) kill_fasync(&client->async_queue, SIGIO, POLL_HUP); files++; } mutex_unlock(&md->file_mutex); return files; } /* * mport_cdev_remove() - Remove mport character device * @dev: Mport device to remove */ static void mport_cdev_remove(struct mport_dev *md) { struct rio_mport_mapping *map, *_map; rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); atomic_set(&md->active, 0); mport_cdev_terminate_dma(md); rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); cdev_device_del(&md->cdev, &md->dev); mport_cdev_kill_fasync(md); /* TODO: do we need to give clients some time to close file * descriptors? Simple wait for XX, or kref? */ /* * Release DMA buffers allocated for the mport device. * Disable associated inbound Rapidio requests mapping if applicable. */ mutex_lock(&md->buf_mutex); list_for_each_entry_safe(map, _map, &md->mappings, node) { kref_put(&map->ref, mport_release_mapping); } mutex_unlock(&md->buf_mutex); if (!list_empty(&md->mappings)) rmcd_warn("WARNING: %s pending mappings on removal", md->mport->name); rio_release_inb_dbell(md->mport, 0, 0x0fff); put_device(&md->dev); } /* * RIO rio_mport_interface driver */ /* * mport_add_mport() - Add rio_mport from LDM device struct * @dev: Linux device model struct */ static int mport_add_mport(struct device *dev) { struct rio_mport *mport = NULL; struct mport_dev *chdev = NULL; mport = to_rio_mport(dev); if (!mport) return -ENODEV; chdev = mport_cdev_add(mport); if (!chdev) return -ENODEV; return 0; } /* * mport_remove_mport() - Remove rio_mport from global list * TODO remove device from global mport_dev list */ static void mport_remove_mport(struct device *dev) { struct rio_mport *mport = NULL; struct mport_dev *chdev; int found = 0; mport = to_rio_mport(dev); rmcd_debug(EXIT, "Remove %s", mport->name); mutex_lock(&mport_devs_lock); list_for_each_entry(chdev, &mport_devs, node) { if (chdev->mport->id == mport->id) { atomic_set(&chdev->active, 0); list_del(&chdev->node); found = 1; break; } } mutex_unlock(&mport_devs_lock); if (found) mport_cdev_remove(chdev); } /* the rio_mport_interface is used to handle local mport devices */ static struct class_interface rio_mport_interface __refdata = { .class = &rio_mport_class, .add_dev = mport_add_mport, .remove_dev = mport_remove_mport, }; /* * Linux kernel module */ /* * mport_init - Driver module loading */ static int __init mport_init(void) { int ret; /* Create device class needed by udev */ dev_class = class_create(DRV_NAME); if (IS_ERR(dev_class)) { rmcd_error("Unable to create " DRV_NAME " class"); return PTR_ERR(dev_class); } ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); if (ret < 0) goto err_chr; rmcd_debug(INIT, "Registered class with major=%d", 
MAJOR(dev_number)); /* Register to rio_mport_interface */ ret = class_interface_register(&rio_mport_interface); if (ret) { rmcd_error("class_interface_register() failed, err=%d", ret); goto err_cli; } return 0; err_cli: unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); err_chr: class_destroy(dev_class); return ret; } /** * mport_exit - Driver module unloading */ static void __exit mport_exit(void) { class_interface_unregister(&rio_mport_interface); class_destroy(dev_class); unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); } module_init(mport_init); module_exit(mport_exit);
linux-master
drivers/rapidio/devices/rio_mport_cdev.c
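As a usage illustration only, and not part of the kernel sources reproduced here, the sketch below shows how a user-space program might exercise the ioctl interface implemented by rio_mport_cdev.c above. It assumes the uapi definitions from <linux/rio_mport_cdev.h>, a device node /dev/rio_mport0 created by udev for mport 0, and an arbitrary example destination ID of 1 for the remote maintenance read; device availability is system dependent.

/* Hedged sketch: query mport properties and perform a remote maintenance
 * read through the ioctls handled in mport_cdev_ioctl(); error handling is
 * intentionally minimal. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>

int main(void)
{
	struct rio_mport_properties props;
	struct rio_mport_maint_io io = { 0 };
	uint32_t cars[4];                       /* first 16 bytes of config space */
	int fd = open("/dev/rio_mport0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Handled by the RIO_MPORT_GET_PROPERTIES case in mport_cdev_ioctl() */
	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
		printf("mport %u: host destid %u\n",
		       (unsigned)props.id, (unsigned)props.hdid);

	/* Remote maintenance read: offset and length must be 4-byte multiples */
	io.rioid = 1;                           /* example destination ID */
	io.hopcount = 0;
	io.offset = 0;
	io.length = sizeof(cars);
	io.buffer = (uintptr_t)cars;
	if (ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) == 0)
		printf("config[0] (Device Identity CAR) = 0x%08x\n", cars[0]);

	close(fd);
	return 0;
}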
// SPDX-License-Identifier: GPL-2.0-or-later /* * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge * * Copyright (c) 2011-2014 Integrated Device Technology, Inc. * Alexandre Bounine <[email protected]> */ #include <linux/io.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/kfifo.h> #include <linux/sched.h> #include <linux/delay.h> #include "../../dma/dmaengine.h" #include "tsi721.h" #ifdef CONFIG_PCI_MSI static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); #endif static int tsi721_submit_sg(struct tsi721_tx_desc *desc); static unsigned int dma_desc_per_channel = 128; module_param(dma_desc_per_channel, uint, S_IRUGO); MODULE_PARM_DESC(dma_desc_per_channel, "Number of DMA descriptors per channel (default: 128)"); static unsigned int dma_txqueue_sz = 16; module_param(dma_txqueue_sz, uint, S_IRUGO); MODULE_PARM_DESC(dma_txqueue_sz, "DMA Transactions Queue Size (default: 16)"); static u8 dma_sel = 0x7f; module_param(dma_sel, byte, S_IRUGO); MODULE_PARM_DESC(dma_sel, "DMA Channel Selection Mask (default: 0x7f = all)"); static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) { return container_of(chan, struct tsi721_bdma_chan, dchan); } static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) { return container_of(ddev, struct rio_mport, dma)->priv; } static inline struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) { return container_of(txd, struct tsi721_tx_desc, txd); } static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) { struct tsi721_dma_desc *bd_ptr; struct device *dev = bdma_chan->dchan.device->dev; u64 *sts_ptr; dma_addr_t bd_phys; dma_addr_t sts_phys; int sts_size; #ifdef CONFIG_PCI_MSI struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); #endif tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id); /* * Allocate space for DMA descriptors * (add an extra element for link descriptor) */ bd_ptr = dma_alloc_coherent(dev, (bd_num + 1) * sizeof(struct tsi721_dma_desc), &bd_phys, GFP_ATOMIC); if (!bd_ptr) return -ENOMEM; bdma_chan->bd_num = bd_num; bdma_chan->bd_phys = bd_phys; bdma_chan->bd_base = bd_ptr; tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d descriptors @ %p (phys = %pad)", bdma_chan->id, bd_ptr, &bd_phys); /* Allocate space for descriptor status FIFO */ sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 
(bd_num + 1) : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); sts_ptr = dma_alloc_coherent(dev, sts_size * sizeof(struct tsi721_dma_sts), &sts_phys, GFP_ATOMIC); if (!sts_ptr) { /* Free space allocated for DMA descriptors */ dma_free_coherent(dev, (bd_num + 1) * sizeof(struct tsi721_dma_desc), bd_ptr, bd_phys); bdma_chan->bd_base = NULL; return -ENOMEM; } bdma_chan->sts_phys = sts_phys; bdma_chan->sts_base = sts_ptr; bdma_chan->sts_size = sts_size; tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x", bdma_chan->id, sts_ptr, &sts_phys, sts_size); /* Initialize DMA descriptors ring using added link descriptor */ bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29); bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys & TSI721_DMAC_DPTRL_MASK); bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32); /* Setup DMA descriptor pointers */ iowrite32(((u64)bd_phys >> 32), bdma_chan->regs + TSI721_DMAC_DPTRH); iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), bdma_chan->regs + TSI721_DMAC_DPTRL); /* Setup descriptor status FIFO */ iowrite32(((u64)sts_phys >> 32), bdma_chan->regs + TSI721_DMAC_DSBH); iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), bdma_chan->regs + TSI721_DMAC_DSBL); iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), bdma_chan->regs + TSI721_DMAC_DSSZ); /* Clear interrupt bits */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INT); ioread32(bdma_chan->regs + TSI721_DMAC_INT); #ifdef CONFIG_PCI_MSI /* Request interrupt service if we are in MSI-X mode */ if (priv->flags & TSI721_USING_MSIX) { int rc, idx; idx = TSI721_VECT_DMA0_DONE + bdma_chan->id; rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, priv->msix[idx].irq_name, (void *)bdma_chan); if (rc) { tsi_debug(DMA, &bdma_chan->dchan.dev->device, "Unable to get MSI-X for DMAC%d-DONE", bdma_chan->id); goto err_out; } idx = TSI721_VECT_DMA0_INT + bdma_chan->id; rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, priv->msix[idx].irq_name, (void *)bdma_chan); if (rc) { tsi_debug(DMA, &bdma_chan->dchan.dev->device, "Unable to get MSI-X for DMAC%d-INT", bdma_chan->id); free_irq( priv->msix[TSI721_VECT_DMA0_DONE + bdma_chan->id].vector, (void *)bdma_chan); } err_out: if (rc) { /* Free space allocated for DMA descriptors */ dma_free_coherent(dev, (bd_num + 1) * sizeof(struct tsi721_dma_desc), bd_ptr, bd_phys); bdma_chan->bd_base = NULL; /* Free space allocated for status descriptors */ dma_free_coherent(dev, sts_size * sizeof(struct tsi721_dma_sts), sts_ptr, sts_phys); bdma_chan->sts_base = NULL; return -EIO; } } #endif /* CONFIG_PCI_MSI */ /* Toggle DMA channel initialization */ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); ioread32(bdma_chan->regs + TSI721_DMAC_CTL); bdma_chan->wr_count = bdma_chan->wr_count_next = 0; bdma_chan->sts_rdptr = 0; udelay(10); return 0; } static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) { u32 ch_stat; #ifdef CONFIG_PCI_MSI struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); #endif if (!bdma_chan->bd_base) return 0; /* Check if DMA channel still running */ ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS); if (ch_stat & TSI721_DMAC_STS_RUN) return -EFAULT; /* Put DMA channel into init state */ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_DMA0_DONE + bdma_chan->id].vector, (void *)bdma_chan); free_irq(priv->msix[TSI721_VECT_DMA0_INT + 
bdma_chan->id].vector, (void *)bdma_chan); } #endif /* CONFIG_PCI_MSI */ /* Free space allocated for DMA descriptors */ dma_free_coherent(bdma_chan->dchan.device->dev, (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc), bdma_chan->bd_base, bdma_chan->bd_phys); bdma_chan->bd_base = NULL; /* Free space allocated for status FIFO */ dma_free_coherent(bdma_chan->dchan.device->dev, bdma_chan->sts_size * sizeof(struct tsi721_dma_sts), bdma_chan->sts_base, bdma_chan->sts_phys); bdma_chan->sts_base = NULL; return 0; } static void tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) { if (enable) { /* Clear pending BDMA channel interrupts */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INT); ioread32(bdma_chan->regs + TSI721_DMAC_INT); /* Enable BDMA channel interrupts */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); } else { /* Disable BDMA channel interrupts */ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); /* Clear pending BDMA channel interrupts */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INT); } } static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) { u32 sts; sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); return ((sts & TSI721_DMAC_STS_RUN) == 0); } void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) { /* Disable BDMA channel interrupts */ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); if (bdma_chan->active) tasklet_hi_schedule(&bdma_chan->tasklet); } #ifdef CONFIG_PCI_MSI /** * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (BDMA channel structure) * * Handles BDMA channel interrupts signaled using MSI-X. */ static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) { struct tsi721_bdma_chan *bdma_chan = ptr; if (bdma_chan->active) tasklet_hi_schedule(&bdma_chan->tasklet); return IRQ_HANDLED; } #endif /* CONFIG_PCI_MSI */ /* Must be called with the spinlock held */ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) { if (!tsi721_dma_is_idle(bdma_chan)) { tsi_err(&bdma_chan->dchan.dev->device, "DMAC%d Attempt to start non-idle channel", bdma_chan->id); return; } if (bdma_chan->wr_count == bdma_chan->wr_count_next) { tsi_err(&bdma_chan->dchan.dev->device, "DMAC%d Attempt to start DMA with no BDs ready %d", bdma_chan->id, task_pid_nr(current)); return; } tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d", bdma_chan->id, bdma_chan->wr_count_next, task_pid_nr(current)); iowrite32(bdma_chan->wr_count_next, bdma_chan->regs + TSI721_DMAC_DWRCNT); ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT); bdma_chan->wr_count = bdma_chan->wr_count_next; } static int tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct tsi721_dma_desc *bd_ptr, struct scatterlist *sg, u32 sys_size) { u64 rio_addr; if (!bd_ptr) return -EINVAL; /* Initialize DMA descriptor */ bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) | (desc->rtype << 19) | desc->destid); bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | (sys_size << 26)); rio_addr = (desc->rio_addr >> 2) | ((u64)(desc->rio_addr_u & 0x3) << 62); bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff); bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32); bd_ptr->t1.bufptr_lo = cpu_to_le32( (u64)sg_dma_address(sg) & 0xffffffff); bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); bd_ptr->t1.s_dist = 0; bd_ptr->t1.s_size = 0; return 0; } static int tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt) 
{ if (!bd_ptr) return -EINVAL; /* Update DMA descriptor */ if (interrupt) bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1); return 0; } static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan, struct tsi721_tx_desc *desc) { struct dma_async_tx_descriptor *txd = &desc->txd; dma_async_tx_callback callback = txd->callback; void *param = txd->callback_param; list_move(&desc->desc_node, &bdma_chan->free_list); if (callback) callback(param); } static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) { u32 srd_ptr; u64 *sts_ptr; int i, j; /* Check and clear descriptor status FIFO entries */ srd_ptr = bdma_chan->sts_rdptr; sts_ptr = bdma_chan->sts_base; j = srd_ptr * 8; while (sts_ptr[j]) { for (i = 0; i < 8 && sts_ptr[j]; i++, j++) sts_ptr[j] = 0; ++srd_ptr; srd_ptr %= bdma_chan->sts_size; j = srd_ptr * 8; } iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP); bdma_chan->sts_rdptr = srd_ptr; } /* Must be called with the channel spinlock held */ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) { struct dma_chan *dchan = desc->txd.chan; struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); u32 sys_size; u64 rio_addr; dma_addr_t next_addr; u32 bcount; struct scatterlist *sg; unsigned int i; int err = 0; struct tsi721_dma_desc *bd_ptr = NULL; u32 idx, rd_idx; u32 add_count = 0; struct device *ch_dev = &dchan->dev->device; if (!tsi721_dma_is_idle(bdma_chan)) { tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel", bdma_chan->id); return -EIO; } /* * Fill DMA channel's hardware buffer descriptors. * (NOTE: RapidIO destination address is limited to 64 bits for now) */ rio_addr = desc->rio_addr; next_addr = -1; bcount = 0; sys_size = dma_to_mport(dchan->device)->sys_size; rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT); rd_idx %= (bdma_chan->bd_num + 1); idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1); if (idx == bdma_chan->bd_num) { /* wrap around link descriptor */ idx = 0; add_count++; } tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d", bdma_chan->id, rd_idx, idx); for_each_sg(desc->sg, sg, desc->sg_len, i) { tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d", bdma_chan->id, i, desc->sg_len, (unsigned long long)sg_dma_address(sg), sg_dma_len(sg)); if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { tsi_err(ch_dev, "DMAC%d SG entry %d is too large", bdma_chan->id, i); err = -EINVAL; break; } /* * If this sg entry forms contiguous block with previous one, * try to merge it into existing DMA descriptor */ if (next_addr == sg_dma_address(sg) && bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) { /* Adjust byte count of the descriptor */ bcount += sg_dma_len(sg); goto entry_done; } else if (next_addr != -1) { /* Finalize descriptor using total byte count value */ tsi721_desc_fill_end(bd_ptr, bcount, 0); tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d", bdma_chan->id, bcount); } desc->rio_addr = rio_addr; if (i && idx == rd_idx) { tsi_debug(DMAV, ch_dev, "DMAC%d HW descriptor ring is full @ %d", bdma_chan->id, i); desc->sg = sg; desc->sg_len -= i; break; } bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx]; err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); if (err) { tsi_err(ch_dev, "Failed to build desc: err=%d", err); break; } tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx", bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr); next_addr = sg_dma_address(sg); bcount = sg_dma_len(sg); add_count++; if (++idx == bdma_chan->bd_num) 
{ /* wrap around link descriptor */ idx = 0; add_count++; } entry_done: if (sg_is_last(sg)) { tsi721_desc_fill_end(bd_ptr, bcount, 0); tsi_debug(DMAV, ch_dev, "DMAC%d last desc final len: %d", bdma_chan->id, bcount); desc->sg_len = 0; } else { rio_addr += sg_dma_len(sg); next_addr += sg_dma_len(sg); } } if (!err) bdma_chan->wr_count_next += add_count; return err; } static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan, struct tsi721_tx_desc *desc) { int err; tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id); if (!tsi721_dma_is_idle(bdma_chan)) return; /* * If there is no data transfer in progress, fetch new descriptor from * the pending queue. */ if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) { desc = list_first_entry(&bdma_chan->queue, struct tsi721_tx_desc, desc_node); list_del_init((&desc->desc_node)); bdma_chan->active_tx = desc; } if (desc) { err = tsi721_submit_sg(desc); if (!err) tsi721_start_dma(bdma_chan); else { tsi721_dma_tx_err(bdma_chan, desc); tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d ERR: tsi721_submit_sg failed with err=%d", bdma_chan->id, err); } } tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit", bdma_chan->id); } static void tsi721_dma_tasklet(unsigned long data) { struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; u32 dmac_int, dmac_sts; dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x", bdma_chan->id, dmac_int); /* Clear channel interrupts */ iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); if (dmac_int & TSI721_DMAC_INT_ERR) { int i = 10000; struct tsi721_tx_desc *desc; desc = bdma_chan->active_tx; dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); tsi_err(&bdma_chan->dchan.dev->device, "DMAC%d_STS = 0x%x did=%d raddr=0x%llx", bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr); /* Re-initialize DMA channel if possible */ if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0) goto err_out; tsi721_clr_stat(bdma_chan); spin_lock(&bdma_chan->lock); /* Put DMA channel into init state */ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); do { udelay(1); dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); i--; } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i); if (dmac_sts & TSI721_DMAC_STS_ABORT) { tsi_err(&bdma_chan->dchan.dev->device, "Failed to re-initiate DMAC%d", bdma_chan->id); spin_unlock(&bdma_chan->lock); goto err_out; } /* Setup DMA descriptor pointers */ iowrite32(((u64)bdma_chan->bd_phys >> 32), bdma_chan->regs + TSI721_DMAC_DPTRH); iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK), bdma_chan->regs + TSI721_DMAC_DPTRL); /* Setup descriptor status FIFO */ iowrite32(((u64)bdma_chan->sts_phys >> 32), bdma_chan->regs + TSI721_DMAC_DSBH); iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK), bdma_chan->regs + TSI721_DMAC_DSBL); iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size), bdma_chan->regs + TSI721_DMAC_DSSZ); /* Clear interrupt bits */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INT); ioread32(bdma_chan->regs + TSI721_DMAC_INT); bdma_chan->wr_count = bdma_chan->wr_count_next = 0; bdma_chan->sts_rdptr = 0; udelay(10); desc = bdma_chan->active_tx; desc->status = DMA_ERROR; dma_cookie_complete(&desc->txd); list_add(&desc->desc_node, &bdma_chan->free_list); bdma_chan->active_tx = NULL; if (bdma_chan->active) tsi721_advance_work(bdma_chan, NULL); spin_unlock(&bdma_chan->lock); } if (dmac_int & TSI721_DMAC_INT_STFULL) { 
tsi_err(&bdma_chan->dchan.dev->device, "DMAC%d descriptor status FIFO is full", bdma_chan->id); } if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { struct tsi721_tx_desc *desc; tsi721_clr_stat(bdma_chan); spin_lock(&bdma_chan->lock); desc = bdma_chan->active_tx; if (desc->sg_len == 0) { dma_async_tx_callback callback = NULL; void *param = NULL; desc->status = DMA_COMPLETE; dma_cookie_complete(&desc->txd); if (desc->txd.flags & DMA_PREP_INTERRUPT) { callback = desc->txd.callback; param = desc->txd.callback_param; } list_add(&desc->desc_node, &bdma_chan->free_list); bdma_chan->active_tx = NULL; if (bdma_chan->active) tsi721_advance_work(bdma_chan, NULL); spin_unlock(&bdma_chan->lock); if (callback) callback(param); } else { if (bdma_chan->active) tsi721_advance_work(bdma_chan, bdma_chan->active_tx); spin_unlock(&bdma_chan->lock); } } err_out: /* Re-Enable BDMA channel interrupts */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); } static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) { struct tsi721_tx_desc *desc = to_tsi721_desc(txd); struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); dma_cookie_t cookie; /* Check if the descriptor is detached from any lists */ if (!list_empty(&desc->desc_node)) { tsi_err(&bdma_chan->dchan.dev->device, "DMAC%d wrong state of descriptor %p", bdma_chan->id, txd); return -EIO; } spin_lock_bh(&bdma_chan->lock); if (!bdma_chan->active) { spin_unlock_bh(&bdma_chan->lock); return -ENODEV; } cookie = dma_cookie_assign(txd); desc->status = DMA_IN_PROGRESS; list_add_tail(&desc->desc_node, &bdma_chan->queue); tsi721_advance_work(bdma_chan, NULL); spin_unlock_bh(&bdma_chan->lock); return cookie; } static int tsi721_alloc_chan_resources(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_tx_desc *desc; int i; tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); if (bdma_chan->bd_base) return dma_txqueue_sz; /* Initialize BDMA channel */ if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d", bdma_chan->id); return -ENODEV; } /* Allocate queue of transaction descriptors */ desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc), GFP_ATOMIC); if (!desc) { tsi721_bdma_ch_free(bdma_chan); return -ENOMEM; } bdma_chan->tx_desc = desc; for (i = 0; i < dma_txqueue_sz; i++) { dma_async_tx_descriptor_init(&desc[i].txd, dchan); desc[i].txd.tx_submit = tsi721_tx_submit; desc[i].txd.flags = DMA_CTRL_ACK; list_add(&desc[i].desc_node, &bdma_chan->free_list); } dma_cookie_init(dchan); bdma_chan->active = true; tsi721_bdma_interrupt_enable(bdma_chan, 1); return dma_txqueue_sz; } static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) { struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + bdma_chan->id].vector); synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + bdma_chan->id].vector); } else #endif synchronize_irq(priv->pdev->irq); } static void tsi721_free_chan_resources(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); if (!bdma_chan->bd_base) return; tsi721_bdma_interrupt_enable(bdma_chan, 0); bdma_chan->active = false; tsi721_sync_dma_irq(bdma_chan); tasklet_kill(&bdma_chan->tasklet); INIT_LIST_HEAD(&bdma_chan->free_list); kfree(bdma_chan->tx_desc); 
tsi721_bdma_ch_free(bdma_chan); } static enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); enum dma_status status; spin_lock_bh(&bdma_chan->lock); status = dma_cookie_status(dchan, cookie, txstate); spin_unlock_bh(&bdma_chan->lock); return status; } static void tsi721_issue_pending(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); spin_lock_bh(&bdma_chan->lock); if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) { tsi721_advance_work(bdma_chan, NULL); } spin_unlock_bh(&bdma_chan->lock); } static struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags, void *tinfo) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_tx_desc *desc; struct rio_dma_ext *rext = tinfo; enum dma_rtype rtype; struct dma_async_tx_descriptor *txd = NULL; if (!sgl || !sg_len) { tsi_err(&dchan->dev->device, "DMAC%d No SG list", bdma_chan->id); return ERR_PTR(-EINVAL); } tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); if (dir == DMA_DEV_TO_MEM) rtype = NREAD; else if (dir == DMA_MEM_TO_DEV) { switch (rext->wr_type) { case RDW_ALL_NWRITE: rtype = ALL_NWRITE; break; case RDW_ALL_NWRITE_R: rtype = ALL_NWRITE_R; break; case RDW_LAST_NWRITE_R: default: rtype = LAST_NWRITE_R; break; } } else { tsi_err(&dchan->dev->device, "DMAC%d Unsupported DMA direction option", bdma_chan->id); return ERR_PTR(-EINVAL); } spin_lock_bh(&bdma_chan->lock); if (!list_empty(&bdma_chan->free_list)) { desc = list_first_entry(&bdma_chan->free_list, struct tsi721_tx_desc, desc_node); list_del_init(&desc->desc_node); desc->destid = rext->destid; desc->rio_addr = rext->rio_addr; desc->rio_addr_u = 0; desc->rtype = rtype; desc->sg_len = sg_len; desc->sg = sgl; txd = &desc->txd; txd->flags = flags; } spin_unlock_bh(&bdma_chan->lock); if (!txd) { tsi_debug(DMA, &dchan->dev->device, "DMAC%d free TXD is not available", bdma_chan->id); return ERR_PTR(-EBUSY); } return txd; } static int tsi721_terminate_all(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_tx_desc *desc, *_d; LIST_HEAD(list); tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); spin_lock_bh(&bdma_chan->lock); bdma_chan->active = false; while (!tsi721_dma_is_idle(bdma_chan)) { udelay(5); #if (0) /* make sure to stop the transfer */ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); /* Wait until DMA channel stops */ do { dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); #endif } if (bdma_chan->active_tx) list_add(&bdma_chan->active_tx->desc_node, &list); list_splice_init(&bdma_chan->queue, &list); list_for_each_entry_safe(desc, _d, &list, desc_node) tsi721_dma_tx_err(bdma_chan, desc); spin_unlock_bh(&bdma_chan->lock); return 0; } static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan) { if (!bdma_chan->active) return; spin_lock_bh(&bdma_chan->lock); if (!tsi721_dma_is_idle(bdma_chan)) { int timeout = 100000; /* stop the transfer in progress */ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); /* Wait until DMA channel stops */ while (!tsi721_dma_is_idle(bdma_chan) && --timeout) udelay(1); } spin_unlock_bh(&bdma_chan->lock); 
} void tsi721_dma_stop_all(struct tsi721_device *priv) { int i; for (i = 0; i < TSI721_DMA_MAXCH; i++) { if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i))) tsi721_dma_stop(&priv->bdma[i]); } } int tsi721_register_dma(struct tsi721_device *priv) { int i; int nr_channels = 0; int err; struct rio_mport *mport = &priv->mport; INIT_LIST_HEAD(&mport->dma.channels); for (i = 0; i < TSI721_DMA_MAXCH; i++) { struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0) continue; bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); bdma_chan->dchan.device = &mport->dma; bdma_chan->dchan.cookie = 1; bdma_chan->dchan.chan_id = i; bdma_chan->id = i; bdma_chan->active = false; spin_lock_init(&bdma_chan->lock); bdma_chan->active_tx = NULL; INIT_LIST_HEAD(&bdma_chan->queue); INIT_LIST_HEAD(&bdma_chan->free_list); tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, (unsigned long)bdma_chan); list_add_tail(&bdma_chan->dchan.device_node, &mport->dma.channels); nr_channels++; } mport->dma.chancnt = nr_channels; dma_cap_zero(mport->dma.cap_mask); dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); mport->dma.dev = &priv->pdev->dev; mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; mport->dma.device_free_chan_resources = tsi721_free_chan_resources; mport->dma.device_tx_status = tsi721_tx_status; mport->dma.device_issue_pending = tsi721_issue_pending; mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; mport->dma.device_terminate_all = tsi721_terminate_all; err = dma_async_device_register(&mport->dma); if (err) tsi_err(&priv->pdev->dev, "Failed to register DMA device"); return err; } void tsi721_unregister_dma(struct tsi721_device *priv) { struct rio_mport *mport = &priv->mport; struct dma_chan *chan, *_c; struct tsi721_bdma_chan *bdma_chan; tsi721_dma_stop_all(priv); dma_async_device_unregister(&mport->dma); list_for_each_entry_safe(chan, _c, &mport->dma.channels, device_node) { bdma_chan = to_tsi721_chan(chan); if (bdma_chan->active) { tsi721_bdma_interrupt_enable(bdma_chan, 0); bdma_chan->active = false; tsi721_sync_dma_irq(bdma_chan); tasklet_kill(&bdma_chan->tasklet); INIT_LIST_HEAD(&bdma_chan->free_list); kfree(bdma_chan->tx_desc); tsi721_bdma_ch_free(bdma_chan); } list_del(&chan->device_node); } }
linux-master
drivers/rapidio/devices/tsi721_dma.c
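/*
 * Editorial note (not part of the file above): a standalone sketch of the
 * coalescing rule in tsi721_submit_sg(): a scatterlist entry is folded into
 * the previous hardware descriptor only if it starts exactly where the
 * previous data ended and the combined length still fits the per-descriptor
 * byte-count limit. The segment list is invented for illustration, and
 * MAX_BCOUNT is an arbitrary stand-in for TSI721_BDMA_MAX_BCOUNT.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BCOUNT (64 * 1024)	/* illustrative limit, not the real value */

struct seg {
	uint64_t addr;
	uint32_t len;
};

int main(void)
{
	/* Hypothetical DMA-mapped segments: the first two are contiguous. */
	struct seg sgl[] = {
		{ 0x10000, 0x2000 },
		{ 0x12000, 0x1000 },	/* starts at 0x10000 + 0x2000 -> merge */
		{ 0x40000, 0x3000 },	/* not contiguous -> new descriptor   */
	};
	uint64_t next_addr = UINT64_MAX;	/* "-1" sentinel, as in the driver */
	uint32_t bcount = 0;
	int descriptors = 0;
	size_t i;

	for (i = 0; i < sizeof(sgl) / sizeof(sgl[0]); i++) {
		if (next_addr == sgl[i].addr &&
		    bcount + sgl[i].len <= MAX_BCOUNT) {
			bcount += sgl[i].len;	/* extend current descriptor */
		} else {
			descriptors++;		/* start a new descriptor */
			bcount = sgl[i].len;
		}
		next_addr = sgl[i].addr + sgl[i].len;
		printf("seg %zu -> descriptor %d, bcount now 0x%x\n",
		       i, descriptors, bcount);
	}
	return 0;
}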
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 Birger Koblitz <[email protected]> * Copyright (C) 2020 Bert Vermeulen <[email protected]> * Copyright (C) 2020 John Crispin <[email protected]> */ #include <linux/of_irq.h> #include <linux/irqchip.h> #include <linux/spinlock.h> #include <linux/of_address.h> #include <linux/irqchip/chained_irq.h> /* Global Interrupt Mask Register */ #define RTL_ICTL_GIMR 0x00 /* Global Interrupt Status Register */ #define RTL_ICTL_GISR 0x04 /* Interrupt Routing Registers */ #define RTL_ICTL_IRR0 0x08 #define RTL_ICTL_IRR1 0x0c #define RTL_ICTL_IRR2 0x10 #define RTL_ICTL_IRR3 0x14 #define RTL_ICTL_NUM_INPUTS 32 #define REG(x) (realtek_ictl_base + x) static DEFINE_RAW_SPINLOCK(irq_lock); static void __iomem *realtek_ictl_base; /* * IRR0-IRR3 store 4 bits per interrupt, but Realtek uses inverted numbering, * placing IRQ 31 in the first four bits. A routing value of '0' means the * interrupt is left disconnected. Routing values {1..15} connect to output * lines {0..14}. */ #define IRR_OFFSET(idx) (4 * (3 - (idx * 4) / 32)) #define IRR_SHIFT(idx) ((idx * 4) % 32) static void write_irr(void __iomem *irr0, int idx, u32 value) { unsigned int offset = IRR_OFFSET(idx); unsigned int shift = IRR_SHIFT(idx); u32 irr; irr = readl(irr0 + offset) & ~(0xf << shift); irr |= (value & 0xf) << shift; writel(irr, irr0 + offset); } static void realtek_ictl_unmask_irq(struct irq_data *i) { unsigned long flags; u32 value; raw_spin_lock_irqsave(&irq_lock, flags); value = readl(REG(RTL_ICTL_GIMR)); value |= BIT(i->hwirq); writel(value, REG(RTL_ICTL_GIMR)); raw_spin_unlock_irqrestore(&irq_lock, flags); } static void realtek_ictl_mask_irq(struct irq_data *i) { unsigned long flags; u32 value; raw_spin_lock_irqsave(&irq_lock, flags); value = readl(REG(RTL_ICTL_GIMR)); value &= ~BIT(i->hwirq); writel(value, REG(RTL_ICTL_GIMR)); raw_spin_unlock_irqrestore(&irq_lock, flags); } static struct irq_chip realtek_ictl_irq = { .name = "realtek-rtl-intc", .irq_mask = realtek_ictl_mask_irq, .irq_unmask = realtek_ictl_unmask_irq, }; static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { unsigned long flags; irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq); raw_spin_lock_irqsave(&irq_lock, flags); write_irr(REG(RTL_ICTL_IRR0), hw, 1); raw_spin_unlock_irqrestore(&irq_lock, flags); return 0; } static const struct irq_domain_ops irq_domain_ops = { .map = intc_map, .xlate = irq_domain_xlate_onecell, }; static void realtek_irq_dispatch(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_domain *domain; unsigned long pending; unsigned int soc_int; chained_irq_enter(chip, desc); pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR)); if (unlikely(!pending)) { spurious_interrupt(); goto out; } domain = irq_desc_get_handler_data(desc); for_each_set_bit(soc_int, &pending, 32) generic_handle_domain_irq(domain, soc_int); out: chained_irq_exit(chip, desc); } static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent) { struct of_phandle_args oirq; struct irq_domain *domain; unsigned int soc_irq; int parent_irq; realtek_ictl_base = of_iomap(node, 0); if (!realtek_ictl_base) return -ENXIO; /* Disable all cascaded interrupts and clear routing */ writel(0, REG(RTL_ICTL_GIMR)); for (soc_irq = 0; soc_irq < RTL_ICTL_NUM_INPUTS; soc_irq++) write_irr(REG(RTL_ICTL_IRR0), soc_irq, 0); if (WARN_ON(!of_irq_count(node))) { /* * If DT contains no parent interrupts, assume MIPS CPU IRQ 2 * (HW0) is 
connected to the first output. This is the case for * all known hardware anyway. "interrupt-map" is deprecated, so * don't bother trying to parse that. */ oirq.np = of_find_compatible_node(NULL, NULL, "mti,cpu-interrupt-controller"); oirq.args_count = 1; oirq.args[0] = 2; parent_irq = irq_create_of_mapping(&oirq); of_node_put(oirq.np); } else { parent_irq = of_irq_get(node, 0); } if (parent_irq < 0) return parent_irq; else if (!parent_irq) return -ENODEV; domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL); if (!domain) return -ENOMEM; irq_set_chained_handler_and_data(parent_irq, realtek_irq_dispatch, domain); return 0; } IRQCHIP_DECLARE(realtek_rtl_intc, "realtek,rtl-intc", realtek_rtl_of_init);
linux-master
drivers/irqchip/irq-realtek-rtl.c
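/*
 * Editorial note (not part of the file above): IRR_OFFSET()/IRR_SHIFT() pack
 * one 4-bit routing value per input into IRR0..IRR3, with input 31 landing in
 * IRR0 and input 0 in IRR3. The standalone sketch below just re-computes the
 * register and bit positions for a few inputs so the "inverted" layout can be
 * checked by eye; it touches no hardware and reuses the macro arithmetic from
 * the driver (0x08 being the IRR0 offset).
 */
#include <stdio.h>

#define IRR_OFFSET(idx) (4 * (3 - ((idx) * 4) / 32))
#define IRR_SHIFT(idx)  (((idx) * 4) % 32)

int main(void)
{
	int inputs[] = { 0, 7, 8, 31 };
	size_t i;

	for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		int idx = inputs[i];

		printf("input %2d -> IRR%d (offset 0x%02x), bits %2d..%2d\n",
		       idx, IRR_OFFSET(idx) / 4, 0x08 + IRR_OFFSET(idx),
		       IRR_SHIFT(idx), IRR_SHIFT(idx) + 3);
	}
	return 0;
}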
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-vt8500/irq.c * * Copyright (C) 2012 Tony Prisk <[email protected]> * Copyright (C) 2010 Alexey Charkov <[email protected]> */ /* * This file is copied and modified from the original irq.c provided by * Alexey Charkov. Minor changes have been made for Device Tree Support. */ #include <linux/slab.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <asm/irq.h> #include <asm/exception.h> #include <asm/mach/irq.h> #define VT8500_ICPC_IRQ 0x20 #define VT8500_ICPC_FIQ 0x24 #define VT8500_ICDC 0x40 /* Destination Control 64*u32 */ #define VT8500_ICIS 0x80 /* Interrupt status, 16*u32 */ /* ICPC */ #define ICPC_MASK 0x3F #define ICPC_ROTATE BIT(6) /* IC_DCTR */ #define ICDC_IRQ 0x00 #define ICDC_FIQ 0x01 #define ICDC_DSS0 0x02 #define ICDC_DSS1 0x03 #define ICDC_DSS2 0x04 #define ICDC_DSS3 0x05 #define ICDC_DSS4 0x06 #define ICDC_DSS5 0x07 #define VT8500_INT_DISABLE 0 #define VT8500_INT_ENABLE BIT(3) #define VT8500_TRIGGER_HIGH 0 #define VT8500_TRIGGER_RISING BIT(5) #define VT8500_TRIGGER_FALLING BIT(6) #define VT8500_EDGE ( VT8500_TRIGGER_RISING \ | VT8500_TRIGGER_FALLING) /* vt8500 has 1 intc, wm8505 and wm8650 have 2 */ #define VT8500_INTC_MAX 2 struct vt8500_irq_data { void __iomem *base; /* IO Memory base address */ struct irq_domain *domain; /* Domain for this controller */ }; /* Global variable for accessing io-mem addresses */ static struct vt8500_irq_data intc[VT8500_INTC_MAX]; static u32 active_cnt = 0; static void vt8500_irq_mask(struct irq_data *d) { struct vt8500_irq_data *priv = d->domain->host_data; void __iomem *base = priv->base; void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 
0 : 4); u8 edge, dctr; u32 status; edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE; if (edge) { status = readl(stat_reg); status |= (1 << (d->hwirq & 0x1f)); writel(status, stat_reg); } else { dctr = readb(base + VT8500_ICDC + d->hwirq); dctr &= ~VT8500_INT_ENABLE; writeb(dctr, base + VT8500_ICDC + d->hwirq); } } static void vt8500_irq_unmask(struct irq_data *d) { struct vt8500_irq_data *priv = d->domain->host_data; void __iomem *base = priv->base; u8 dctr; dctr = readb(base + VT8500_ICDC + d->hwirq); dctr |= VT8500_INT_ENABLE; writeb(dctr, base + VT8500_ICDC + d->hwirq); } static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct vt8500_irq_data *priv = d->domain->host_data; void __iomem *base = priv->base; u8 dctr; dctr = readb(base + VT8500_ICDC + d->hwirq); dctr &= ~VT8500_EDGE; switch (flow_type) { case IRQF_TRIGGER_LOW: return -EINVAL; case IRQF_TRIGGER_HIGH: dctr |= VT8500_TRIGGER_HIGH; irq_set_handler_locked(d, handle_level_irq); break; case IRQF_TRIGGER_FALLING: dctr |= VT8500_TRIGGER_FALLING; irq_set_handler_locked(d, handle_edge_irq); break; case IRQF_TRIGGER_RISING: dctr |= VT8500_TRIGGER_RISING; irq_set_handler_locked(d, handle_edge_irq); break; } writeb(dctr, base + VT8500_ICDC + d->hwirq); return 0; } static struct irq_chip vt8500_irq_chip = { .name = "vt8500", .irq_ack = vt8500_irq_mask, .irq_mask = vt8500_irq_mask, .irq_unmask = vt8500_irq_unmask, .irq_set_type = vt8500_irq_set_type, }; static void __init vt8500_init_irq_hw(void __iomem *base) { u32 i; /* Enable rotating priority for IRQ */ writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ); writel(0x00, base + VT8500_ICPC_FIQ); /* Disable all interrupts and route them to IRQ */ for (i = 0; i < 64; i++) writeb(VT8500_INT_DISABLE | ICDC_IRQ, base + VT8500_ICDC + i); } static int vt8500_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq); return 0; } static const struct irq_domain_ops vt8500_irq_domain_ops = { .map = vt8500_irq_map, .xlate = irq_domain_xlate_onecell, }; static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) { u32 stat, i; int irqnr; void __iomem *base; /* Loop through each active controller */ for (i=0; i<active_cnt; i++) { base = intc[i].base; irqnr = readl_relaxed(base) & 0x3F; /* Highest Priority register default = 63, so check that this is a real interrupt by checking the status register */ if (irqnr == 63) { stat = readl_relaxed(base + VT8500_ICIS + 4); if (!(stat & BIT(31))) continue; } generic_handle_domain_irq(intc[i].domain, irqnr); } } static int __init vt8500_irq_init(struct device_node *node, struct device_node *parent) { int irq, i; struct device_node *np = node; if (active_cnt == VT8500_INTC_MAX) { pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n", __func__); goto out; } intc[active_cnt].base = of_iomap(np, 0); intc[active_cnt].domain = irq_domain_add_linear(node, 64, &vt8500_irq_domain_ops, &intc[active_cnt]); if (!intc[active_cnt].base) { pr_err("%s: Unable to map IO memory\n", __func__); goto out; } if (!intc[active_cnt].domain) { pr_err("%s: Unable to add irq domain!\n", __func__); goto out; } set_handle_irq(vt8500_handle_irq); vt8500_init_irq_hw(intc[active_cnt].base); pr_info("vt8500-irq: Added interrupt controller\n"); active_cnt++; /* check if this is a slaved controller */ if (of_irq_count(np) != 0) { /* check that we have the correct number of interrupts */ if (of_irq_count(np) != 8) { pr_err("%s: Incorrect IRQ map for slaved controller\n", 
__func__); return -EINVAL; } for (i = 0; i < 8; i++) { irq = irq_of_parse_and_map(np, i); enable_irq(irq); } pr_info("vt8500-irq: Enabled slave->parent interrupts\n"); } out: return 0; } IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
linux-master
drivers/irqchip/irq-vt8500.c
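/*
 * Editorial note (not part of the file above): each vt8500 interrupt has one
 * 8-bit "destination control" register at VT8500_ICDC + hwirq, holding the
 * enable bit, trigger-mode bits and routing value in a single byte; edge
 * interrupts are acked through the status registers instead of toggling the
 * enable bit. The sketch below only assembles and decodes that byte for a
 * hypothetical rising-edge interrupt; it does not touch any I/O memory.
 */
#include <stdio.h>
#include <stdint.h>

#define ICDC_IRQ		0x00	/* route to IRQ output */
#define VT8500_INT_ENABLE	(1u << 3)
#define VT8500_TRIGGER_RISING	(1u << 5)
#define VT8500_TRIGGER_FALLING	(1u << 6)
#define VT8500_EDGE		(VT8500_TRIGGER_RISING | VT8500_TRIGGER_FALLING)

int main(void)
{
	unsigned int hwirq = 27;	/* arbitrary example interrupt */
	uint8_t dctr = ICDC_IRQ | VT8500_INT_ENABLE | VT8500_TRIGGER_RISING;

	printf("ICDC byte for hwirq %u lives at base + 0x40 + %u\n",
	       hwirq, hwirq);
	printf("dctr = 0x%02x: %s, %s triggered\n", dctr,
	       (dctr & VT8500_INT_ENABLE) ? "enabled" : "disabled",
	       (dctr & VT8500_EDGE) ? "edge" : "level");
	return 0;
}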
// SPDX-License-Identifier: GPL-2.0-only /* * Driver code for Tegra's Legacy Interrupt Controller * * Author: Marc Zyngier <[email protected]> * * Heavily based on the original arch/arm/mach-tegra/irq.c code: * Copyright (C) 2011 Google, Inc. * * Author: * Colin Cross <[email protected]> * * Copyright (C) 2010,2013, NVIDIA Corporation */ #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #define ICTLR_CPU_IEP_VFIQ 0x08 #define ICTLR_CPU_IEP_FIR 0x14 #define ICTLR_CPU_IEP_FIR_SET 0x18 #define ICTLR_CPU_IEP_FIR_CLR 0x1c #define ICTLR_CPU_IER 0x20 #define ICTLR_CPU_IER_SET 0x24 #define ICTLR_CPU_IER_CLR 0x28 #define ICTLR_CPU_IEP_CLASS 0x2C #define ICTLR_COP_IER 0x30 #define ICTLR_COP_IER_SET 0x34 #define ICTLR_COP_IER_CLR 0x38 #define ICTLR_COP_IEP_CLASS 0x3c #define TEGRA_MAX_NUM_ICTLRS 6 static unsigned int num_ictlrs; struct tegra_ictlr_soc { unsigned int num_ictlrs; }; static const struct tegra_ictlr_soc tegra20_ictlr_soc = { .num_ictlrs = 4, }; static const struct tegra_ictlr_soc tegra30_ictlr_soc = { .num_ictlrs = 5, }; static const struct tegra_ictlr_soc tegra210_ictlr_soc = { .num_ictlrs = 6, }; static const struct of_device_id ictlr_matches[] = { { .compatible = "nvidia,tegra210-ictlr", .data = &tegra210_ictlr_soc }, { .compatible = "nvidia,tegra30-ictlr", .data = &tegra30_ictlr_soc }, { .compatible = "nvidia,tegra20-ictlr", .data = &tegra20_ictlr_soc }, { } }; struct tegra_ictlr_info { void __iomem *base[TEGRA_MAX_NUM_ICTLRS]; #ifdef CONFIG_PM_SLEEP u32 cop_ier[TEGRA_MAX_NUM_ICTLRS]; u32 cop_iep[TEGRA_MAX_NUM_ICTLRS]; u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS]; u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS]; u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS]; #endif }; static struct tegra_ictlr_info *lic; static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg) { void __iomem *base = (void __iomem __force *)d->chip_data; u32 mask; mask = BIT(d->hwirq % 32); writel_relaxed(mask, base + reg); } static void tegra_mask(struct irq_data *d) { tegra_ictlr_write_mask(d, ICTLR_CPU_IER_CLR); irq_chip_mask_parent(d); } static void tegra_unmask(struct irq_data *d) { tegra_ictlr_write_mask(d, ICTLR_CPU_IER_SET); irq_chip_unmask_parent(d); } static void tegra_eoi(struct irq_data *d) { tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_CLR); irq_chip_eoi_parent(d); } static int tegra_retrigger(struct irq_data *d) { tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_SET); return irq_chip_retrigger_hierarchy(d); } #ifdef CONFIG_PM_SLEEP static int tegra_set_wake(struct irq_data *d, unsigned int enable) { u32 irq = d->hwirq; u32 index, mask; index = (irq / 32); mask = BIT(irq % 32); if (enable) lic->ictlr_wake_mask[index] |= mask; else lic->ictlr_wake_mask[index] &= ~mask; /* * Do *not* call into the parent, as the GIC doesn't have any * wake-up facility... 
*/ return 0; } static int tegra_ictlr_suspend(void) { unsigned long flags; unsigned int i; local_irq_save(flags); for (i = 0; i < num_ictlrs; i++) { void __iomem *ictlr = lic->base[i]; /* Save interrupt state */ lic->cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER); lic->cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS); lic->cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER); lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS); /* Disable COP interrupts */ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR); /* Disable CPU interrupts */ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR); /* Enable the wakeup sources of ictlr */ writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET); } local_irq_restore(flags); return 0; } static void tegra_ictlr_resume(void) { unsigned long flags; unsigned int i; local_irq_save(flags); for (i = 0; i < num_ictlrs; i++) { void __iomem *ictlr = lic->base[i]; writel_relaxed(lic->cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR); writel_relaxed(lic->cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); writel_relaxed(lic->cop_iep[i], ictlr + ICTLR_COP_IEP_CLASS); writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR); writel_relaxed(lic->cop_ier[i], ictlr + ICTLR_COP_IER_SET); } local_irq_restore(flags); } static struct syscore_ops tegra_ictlr_syscore_ops = { .suspend = tegra_ictlr_suspend, .resume = tegra_ictlr_resume, }; static void tegra_ictlr_syscore_init(void) { register_syscore_ops(&tegra_ictlr_syscore_ops); } #else #define tegra_set_wake NULL static inline void tegra_ictlr_syscore_init(void) {} #endif static struct irq_chip tegra_ictlr_chip = { .name = "LIC", .irq_eoi = tegra_eoi, .irq_mask = tegra_mask, .irq_unmask = tegra_unmask, .irq_retrigger = tegra_retrigger, .irq_set_wake = tegra_set_wake, .irq_set_type = irq_chip_set_type_parent, .flags = IRQCHIP_MASK_ON_SUSPEND, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif }; static int tegra_ictlr_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { if (is_of_node(fwspec->fwnode)) { if (fwspec->param_count != 3) return -EINVAL; /* No PPI should point to this domain */ if (fwspec->param[0] != 0) return -EINVAL; *hwirq = fwspec->param[1]; *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; return 0; } return -EINVAL; } static int tegra_ictlr_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *data) { struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; struct tegra_ictlr_info *info = domain->host_data; irq_hw_number_t hwirq; unsigned int i; if (fwspec->param_count != 3) return -EINVAL; /* Not GIC compliant */ if (fwspec->param[0] != GIC_SPI) return -EINVAL; /* No PPI should point to this domain */ hwirq = fwspec->param[1]; if (hwirq >= (num_ictlrs * 32)) return -EINVAL; for (i = 0; i < nr_irqs; i++) { int ictlr = (hwirq + i) / 32; irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &tegra_ictlr_chip, (void __force *)info->base[ictlr]); } parent_fwspec = *fwspec; parent_fwspec.fwnode = domain->parent->fwnode; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec); } static const struct irq_domain_ops tegra_ictlr_domain_ops = { .translate = tegra_ictlr_domain_translate, .alloc = tegra_ictlr_domain_alloc, .free = irq_domain_free_irqs_common, }; static int __init tegra_ictlr_init(struct device_node *node, struct device_node *parent) { struct irq_domain *parent_domain, *domain; const 
struct of_device_id *match; const struct tegra_ictlr_soc *soc; unsigned int i; int err; if (!parent) { pr_err("%pOF: no parent, giving up\n", node); return -ENODEV; } parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("%pOF: unable to obtain parent domain\n", node); return -ENXIO; } match = of_match_node(ictlr_matches, node); if (!match) /* Should never happen... */ return -ENODEV; soc = match->data; lic = kzalloc(sizeof(*lic), GFP_KERNEL); if (!lic) return -ENOMEM; for (i = 0; i < TEGRA_MAX_NUM_ICTLRS; i++) { void __iomem *base; base = of_iomap(node, i); if (!base) break; lic->base[i] = base; /* Disable all interrupts */ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR); /* All interrupts target IRQ */ writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS); num_ictlrs++; } if (!num_ictlrs) { pr_err("%pOF: no valid regions, giving up\n", node); err = -ENOMEM; goto out_free; } WARN(num_ictlrs != soc->num_ictlrs, "%pOF: Found %u interrupt controllers in DT; expected %u.\n", node, num_ictlrs, soc->num_ictlrs); domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, node, &tegra_ictlr_domain_ops, lic); if (!domain) { pr_err("%pOF: failed to allocated domain\n", node); err = -ENOMEM; goto out_unmap; } tegra_ictlr_syscore_init(); pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, num_ictlrs * 32, parent); return 0; out_unmap: for (i = 0; i < num_ictlrs; i++) iounmap(lic->base[i]); out_free: kfree(lic); return err; } IRQCHIP_DECLARE(tegra20_ictlr, "nvidia,tegra20-ictlr", tegra_ictlr_init); IRQCHIP_DECLARE(tegra30_ictlr, "nvidia,tegra30-ictlr", tegra_ictlr_init); IRQCHIP_DECLARE(tegra210_ictlr, "nvidia,tegra210-ictlr", tegra_ictlr_init);
linux-master
drivers/irqchip/irq-tegra.c
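/*
 * Editorial note (not part of the file above): both the wake-mask bookkeeping
 * in tegra_set_wake() and the per-IRQ chip_data selection in the domain alloc
 * path boil down to "controller = hwirq / 32, bit = hwirq % 32". The sketch
 * below performs that split for a few sample hwirq numbers; the values are
 * arithmetic only and do not come from real hardware.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hwirqs[] = { 0, 31, 32, 97, 191 };
	size_t i;

	for (i = 0; i < sizeof(hwirqs) / sizeof(hwirqs[0]); i++) {
		unsigned int hwirq = hwirqs[i];
		unsigned int ictlr = hwirq / 32;	/* which controller */
		unsigned int mask = 1u << (hwirq % 32);	/* bit inside IER/FIR */

		printf("hwirq %3u -> ictlr %u, mask 0x%08x\n",
		       hwirq, ictlr, mask);
	}
	return 0;
}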
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. */ /* * Driver for interrupt combiners in the Top-level Control and Status * Registers (TCSR) hardware block in Qualcomm Technologies chips. * An interrupt combiner in this block combines a set of interrupts by * OR'ing the individual interrupt signals into a summary interrupt * signal routed to a parent interrupt controller, and provides read- * only, 32-bit registers to query the status of individual interrupts. * The status bit for IRQ n is bit (n % 32) within register (n / 32) * of the given combiner. Thus, each combiner can be described as a set * of register offsets and the number of IRQs managed. */ #define pr_fmt(fmt) "QCOM80B1:" fmt #include <linux/acpi.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/platform_device.h> #define REG_SIZE 32 struct combiner_reg { void __iomem *addr; unsigned long enabled; }; struct combiner { struct irq_domain *domain; int parent_irq; u32 nirqs; u32 nregs; struct combiner_reg regs[]; }; static inline int irq_nr(u32 reg, u32 bit) { return reg * REG_SIZE + bit; } /* * Handler for the cascaded IRQ. */ static void combiner_handle_irq(struct irq_desc *desc) { struct combiner *combiner = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 reg; chained_irq_enter(chip, desc); for (reg = 0; reg < combiner->nregs; reg++) { int hwirq; u32 bit; u32 status; bit = readl_relaxed(combiner->regs[reg].addr); status = bit & combiner->regs[reg].enabled; if (bit && !status) pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n", smp_processor_id(), bit, combiner->regs[reg].enabled, combiner->regs[reg].addr); while (status) { bit = __ffs(status); status &= ~(1 << bit); hwirq = irq_nr(reg, bit); generic_handle_domain_irq(combiner->domain, hwirq); } } chained_irq_exit(chip, desc); } static void combiner_irq_chip_mask_irq(struct irq_data *data) { struct combiner *combiner = irq_data_get_irq_chip_data(data); struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE; clear_bit(data->hwirq % REG_SIZE, &reg->enabled); } static void combiner_irq_chip_unmask_irq(struct irq_data *data) { struct combiner *combiner = irq_data_get_irq_chip_data(data); struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE; set_bit(data->hwirq % REG_SIZE, &reg->enabled); } static struct irq_chip irq_chip = { .irq_mask = combiner_irq_chip_mask_irq, .irq_unmask = combiner_irq_chip_unmask_irq, .name = "qcom-irq-combiner" }; static int combiner_irq_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); irq_set_noprobe(irq); return 0; } static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq) { irq_domain_reset_irq_data(irq_get_irq_data(irq)); } static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws, unsigned long *hwirq, unsigned int *type) { struct combiner *combiner = d->host_data; if (is_acpi_node(fws->fwnode)) { if (WARN_ON((fws->param_count != 2) || (fws->param[0] >= combiner->nirqs) || (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) || (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE))) return -EINVAL; *hwirq = fws->param[0]; *type = fws->param[1]; return 0; } return -EINVAL; } static const struct irq_domain_ops domain_ops = { .map = combiner_irq_map, .unmap = combiner_irq_unmap, .translate = combiner_irq_translate }; static acpi_status 
count_registers_cb(struct acpi_resource *ares, void *context) { int *count = context; if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER) ++(*count); return AE_OK; } static int count_registers(struct platform_device *pdev) { acpi_handle ahandle = ACPI_HANDLE(&pdev->dev); acpi_status status; int count = 0; if (!acpi_has_method(ahandle, METHOD_NAME__CRS)) return -EINVAL; status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, count_registers_cb, &count); if (ACPI_FAILURE(status)) return -EINVAL; return count; } struct get_registers_context { struct device *dev; struct combiner *combiner; int err; }; static acpi_status get_registers_cb(struct acpi_resource *ares, void *context) { struct get_registers_context *ctx = context; struct acpi_resource_generic_register *reg; phys_addr_t paddr; void __iomem *vaddr; if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER) return AE_OK; reg = &ares->data.generic_reg; paddr = reg->address; if ((reg->space_id != ACPI_SPACE_MEM) || (reg->bit_offset != 0) || (reg->bit_width > REG_SIZE)) { dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr); ctx->err = -EINVAL; return AE_ERROR; } vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE); if (!vaddr) { dev_err(ctx->dev, "Can't map register @%pa\n", &paddr); ctx->err = -ENOMEM; return AE_ERROR; } ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr; ctx->combiner->nirqs += reg->bit_width; ctx->combiner->nregs++; return AE_OK; } static int get_registers(struct platform_device *pdev, struct combiner *comb) { acpi_handle ahandle = ACPI_HANDLE(&pdev->dev); acpi_status status; struct get_registers_context ctx; if (!acpi_has_method(ahandle, METHOD_NAME__CRS)) return -EINVAL; ctx.dev = &pdev->dev; ctx.combiner = comb; ctx.err = 0; status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, get_registers_cb, &ctx); if (ACPI_FAILURE(status)) return ctx.err; return 0; } static int __init combiner_probe(struct platform_device *pdev) { struct combiner *combiner; int nregs; int err; nregs = count_registers(pdev); if (nregs <= 0) { dev_err(&pdev->dev, "Error reading register resources\n"); return -EINVAL; } combiner = devm_kzalloc(&pdev->dev, struct_size(combiner, regs, nregs), GFP_KERNEL); if (!combiner) return -ENOMEM; err = get_registers(pdev, combiner); if (err < 0) return err; combiner->parent_irq = platform_get_irq(pdev, 0); if (combiner->parent_irq <= 0) return -EPROBE_DEFER; combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs, &domain_ops, combiner); if (!combiner->domain) /* Errors printed by irq_domain_create_linear */ return -ENODEV; irq_set_chained_handler_and_data(combiner->parent_irq, combiner_handle_irq, combiner); dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n", combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr); return 0; } static const struct acpi_device_id qcom_irq_combiner_ids[] = { { "QCOM80B1", }, { } }; static struct platform_driver qcom_irq_combiner_probe = { .driver = { .name = "qcom-irq-combiner", .acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids), }, .probe = combiner_probe, }; builtin_platform_driver(qcom_irq_combiner_probe);
linux-master
drivers/irqchip/qcom-irq-combiner.c
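/*
 * Editorial note (not part of the file above): combiner_handle_irq() masks the
 * raw status with the software "enabled" bitmap, warns when only unexpected
 * bits are set, and walks the remaining set bits translating (register, bit)
 * into a combiner hwirq. The sketch below repeats that decode on made-up
 * register values, using the compiler builtin __builtin_ctzl() in place of
 * the kernel's __ffs(); no ACPI or interrupt plumbing is involved.
 */
#include <stdio.h>

#define REG_SIZE 32

int main(void)
{
	/* Hypothetical per-register raw status and enabled masks. */
	unsigned long status[]  = { 0x00000001, 0x80000010 };
	unsigned long enabled[] = { 0x00000004, 0xffffffff };
	unsigned int reg;

	for (reg = 0; reg < 2; reg++) {
		unsigned long pending = status[reg] & enabled[reg];

		if (status[reg] && !pending)
			printf("reg %u: unexpected bits 0x%08lx\n",
			       reg, status[reg]);

		while (pending) {
			unsigned int bit = __builtin_ctzl(pending);

			pending &= ~(1ul << bit);
			printf("reg %u bit %u -> hwirq %u\n",
			       reg, bit, reg * REG_SIZE + bit);
		}
	}
	return 0;
}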
/* * Copyright (C) 2007-2013 Michal Simek <[email protected]> * Copyright (C) 2012-2013 Xilinx, Inc. * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/irqdomain.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/jump_label.h> #include <linux/bug.h> #include <linux/of_irq.h> /* No one else should require these constants, so define them locally here. */ #define ISR 0x00 /* Interrupt Status Register */ #define IPR 0x04 /* Interrupt Pending Register */ #define IER 0x08 /* Interrupt Enable Register */ #define IAR 0x0c /* Interrupt Acknowledge Register */ #define SIE 0x10 /* Set Interrupt Enable bits */ #define CIE 0x14 /* Clear Interrupt Enable bits */ #define IVR 0x18 /* Interrupt Vector Register */ #define MER 0x1c /* Master Enable Register */ #define MER_ME (1<<0) #define MER_HIE (1<<1) #define SPURIOUS_IRQ (-1U) static DEFINE_STATIC_KEY_FALSE(xintc_is_be); struct xintc_irq_chip { void __iomem *base; struct irq_domain *root_domain; u32 intr_mask; u32 nr_irq; }; static struct xintc_irq_chip *primary_intc; static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data) { if (static_branch_unlikely(&xintc_is_be)) iowrite32be(data, irqc->base + reg); else iowrite32(data, irqc->base + reg); } static u32 xintc_read(struct xintc_irq_chip *irqc, int reg) { if (static_branch_unlikely(&xintc_is_be)) return ioread32be(irqc->base + reg); else return ioread32(irqc->base + reg); } static void intc_enable_or_unmask(struct irq_data *d) { struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); unsigned long mask = BIT(d->hwirq); pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq); /* ack level irqs because they can't be acked during * ack function since the handle_level_irq function * acks the irq before calling the interrupt handler */ if (irqd_is_level_type(d)) xintc_write(irqc, IAR, mask); xintc_write(irqc, SIE, mask); } static void intc_disable_or_mask(struct irq_data *d) { struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); pr_debug("irq-xilinx: disable: %ld\n", d->hwirq); xintc_write(irqc, CIE, BIT(d->hwirq)); } static void intc_ack(struct irq_data *d) { struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); pr_debug("irq-xilinx: ack: %ld\n", d->hwirq); xintc_write(irqc, IAR, BIT(d->hwirq)); } static void intc_mask_ack(struct irq_data *d) { struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); unsigned long mask = BIT(d->hwirq); pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq); xintc_write(irqc, CIE, mask); xintc_write(irqc, IAR, mask); } static struct irq_chip intc_dev = { .name = "Xilinx INTC", .irq_unmask = intc_enable_or_unmask, .irq_mask = intc_disable_or_mask, .irq_ack = intc_ack, .irq_mask_ack = intc_mask_ack, }; static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct xintc_irq_chip *irqc = d->host_data; if (irqc->intr_mask & BIT(hw)) { irq_set_chip_and_handler_name(irq, &intc_dev, handle_edge_irq, "edge"); irq_clear_status_flags(irq, IRQ_LEVEL); } else { irq_set_chip_and_handler_name(irq, &intc_dev, handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } irq_set_chip_data(irq, irqc); return 0; } static const struct irq_domain_ops xintc_irq_domain_ops = { .xlate = 
irq_domain_xlate_onetwocell, .map = xintc_map, }; static void xil_intc_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct xintc_irq_chip *irqc; irqc = irq_data_get_irq_handler_data(&desc->irq_data); chained_irq_enter(chip, desc); do { u32 hwirq = xintc_read(irqc, IVR); if (hwirq == -1U) break; generic_handle_domain_irq(irqc->root_domain, hwirq); } while (true); chained_irq_exit(chip, desc); } static void xil_intc_handle_irq(struct pt_regs *regs) { u32 hwirq; do { hwirq = xintc_read(primary_intc, IVR); if (unlikely(hwirq == SPURIOUS_IRQ)) break; generic_handle_domain_irq(primary_intc->root_domain, hwirq); } while (true); } static int __init xilinx_intc_of_init(struct device_node *intc, struct device_node *parent) { struct xintc_irq_chip *irqc; int ret, irq; irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); if (!irqc) return -ENOMEM; irqc->base = of_iomap(intc, 0); BUG_ON(!irqc->base); ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq); if (ret < 0) { pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n"); goto error; } ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask); if (ret < 0) { pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n"); irqc->intr_mask = 0; } if (irqc->intr_mask >> irqc->nr_irq) pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", intc, irqc->nr_irq, irqc->intr_mask); /* * Disable all external interrupts until they are * explicitly requested. */ xintc_write(irqc, IER, 0); /* Acknowledge any pending interrupts just in case. */ xintc_write(irqc, IAR, 0xffffffff); /* Turn on the Master Enable. */ xintc_write(irqc, MER, MER_HIE | MER_ME); if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) { static_branch_enable(&xintc_is_be); xintc_write(irqc, MER, MER_HIE | MER_ME); } irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq, &xintc_irq_domain_ops, irqc); if (!irqc->root_domain) { pr_err("irq-xilinx: Unable to create IRQ domain\n"); ret = -EINVAL; goto error; } if (parent) { irq = irq_of_parse_and_map(intc, 0); if (irq) { irq_set_chained_handler_and_data(irq, xil_intc_irq_handler, irqc); } else { pr_err("irq-xilinx: interrupts property not in DT\n"); ret = -EINVAL; goto error; } } else { primary_intc = irqc; irq_set_default_host(primary_intc->root_domain); set_handle_irq(xil_intc_handle_irq); } return 0; error: iounmap(irqc->base); kfree(irqc); return ret; } IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init); IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
linux-master
drivers/irqchip/irq-xilinx-intc.c
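/*
 * Editorial note (not part of the file above): xintc_map() picks an edge or a
 * level flow handler per input by testing the corresponding bit of the
 * "xlnx,kind-of-intr" mask, and the probe warns if the mask has bits beyond
 * the declared input count. The sketch below performs only that per-bit
 * decision for an invented mask; no irq_domain calls are made.
 */
#include <stdio.h>

int main(void)
{
	unsigned int nr_irq = 8;		/* hypothetical xlnx,num-intr-inputs */
	unsigned int intr_mask = 0x0000000a;	/* bits 1 and 3 are edge */
	unsigned int hw;

	if (intr_mask >> nr_irq)
		printf("warning: kind-of-intr has bits beyond num-intr-inputs\n");

	for (hw = 0; hw < nr_irq; hw++)
		printf("hwirq %u -> %s handler\n", hw,
		       (intr_mask & (1u << hw)) ? "edge" : "level");
	return 0;
}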
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021, Linaro Limited * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/mailbox_client.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/slab.h> #include <linux/soc/qcom/irq.h> #include <linux/spinlock.h> /* * This is the driver for Qualcomm MPM (MSM Power Manager) interrupt controller, * which is commonly found on Qualcomm SoCs built on the RPM architecture. * Sitting in always-on domain, MPM monitors the wakeup interrupts when SoC is * asleep, and wakes up the AP when one of those interrupts occurs. This driver * doesn't directly access physical MPM registers though. Instead, the access * is bridged via a piece of internal memory (SRAM) that is accessible to both * AP and RPM. This piece of memory is called 'vMPM' in the driver. * * When SoC is awake, the vMPM is owned by AP and the register setup by this * driver all happens on vMPM. When AP is about to get power collapsed, the * driver sends a mailbox notification to RPM, which will take over the vMPM * ownership and dump vMPM into physical MPM registers. On wakeup, AP is woken * up by a MPM pin/interrupt, and RPM will copy STATUS registers into vMPM. * Then AP start owning vMPM again. * * vMPM register map: * * 31 0 * +--------------------------------+ * | TIMER0 | 0x00 * +--------------------------------+ * | TIMER1 | 0x04 * +--------------------------------+ * | ENABLE0 | 0x08 * +--------------------------------+ * | ... | ... * +--------------------------------+ * | ENABLEn | * +--------------------------------+ * | FALLING_EDGE0 | * +--------------------------------+ * | ... 
| * +--------------------------------+ * | STATUSn | * +--------------------------------+ * * n = DIV_ROUND_UP(pin_cnt, 32) * */ #define MPM_REG_ENABLE 0 #define MPM_REG_FALLING_EDGE 1 #define MPM_REG_RISING_EDGE 2 #define MPM_REG_POLARITY 3 #define MPM_REG_STATUS 4 /* MPM pin map to GIC hwirq */ struct mpm_gic_map { int pin; irq_hw_number_t hwirq; }; struct qcom_mpm_priv { void __iomem *base; raw_spinlock_t lock; struct mbox_client mbox_client; struct mbox_chan *mbox_chan; struct mpm_gic_map *maps; unsigned int map_cnt; unsigned int reg_stride; struct irq_domain *domain; struct generic_pm_domain genpd; }; static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg, unsigned int index) { unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; return readl_relaxed(priv->base + offset); } static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg, unsigned int index, u32 val) { unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; writel_relaxed(val, priv->base + offset); /* Ensure the write is completed */ wmb(); } static void qcom_mpm_enable_irq(struct irq_data *d, bool en) { struct qcom_mpm_priv *priv = d->chip_data; int pin = d->hwirq; unsigned int index = pin / 32; unsigned int shift = pin % 32; unsigned long flags, val; raw_spin_lock_irqsave(&priv->lock, flags); val = qcom_mpm_read(priv, MPM_REG_ENABLE, index); __assign_bit(shift, &val, en); qcom_mpm_write(priv, MPM_REG_ENABLE, index, val); raw_spin_unlock_irqrestore(&priv->lock, flags); } static void qcom_mpm_mask(struct irq_data *d) { qcom_mpm_enable_irq(d, false); if (d->parent_data) irq_chip_mask_parent(d); } static void qcom_mpm_unmask(struct irq_data *d) { qcom_mpm_enable_irq(d, true); if (d->parent_data) irq_chip_unmask_parent(d); } static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg, unsigned int index, unsigned int shift) { unsigned long flags, val; raw_spin_lock_irqsave(&priv->lock, flags); val = qcom_mpm_read(priv, reg, index); __assign_bit(shift, &val, set); qcom_mpm_write(priv, reg, index, val); raw_spin_unlock_irqrestore(&priv->lock, flags); } static int qcom_mpm_set_type(struct irq_data *d, unsigned int type) { struct qcom_mpm_priv *priv = d->chip_data; int pin = d->hwirq; unsigned int index = pin / 32; unsigned int shift = pin % 32; if (type & IRQ_TYPE_EDGE_RISING) mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift); else mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift); if (type & IRQ_TYPE_EDGE_FALLING) mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift); else mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift); if (type & IRQ_TYPE_LEVEL_HIGH) mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift); else mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift); if (!d->parent_data) return 0; if (type & IRQ_TYPE_EDGE_BOTH) type = IRQ_TYPE_EDGE_RISING; if (type & IRQ_TYPE_LEVEL_MASK) type = IRQ_TYPE_LEVEL_HIGH; return irq_chip_set_type_parent(d, type); } static struct irq_chip qcom_mpm_chip = { .name = "mpm", .irq_eoi = irq_chip_eoi_parent, .irq_mask = qcom_mpm_mask, .irq_unmask = qcom_mpm_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = qcom_mpm_set_type, .irq_set_affinity = irq_chip_set_affinity_parent, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin) { struct mpm_gic_map *maps = priv->maps; int i; for (i = 0; i < priv->map_cnt; i++) { if (maps[i].pin == pin) return &maps[i]; } return NULL; } static 
int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *data) { struct qcom_mpm_priv *priv = domain->host_data; struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; struct mpm_gic_map *map; irq_hw_number_t pin; unsigned int type; int ret; ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type); if (ret) return ret; ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, &qcom_mpm_chip, priv); if (ret) return ret; map = get_mpm_gic_map(priv, pin); if (map == NULL) return irq_domain_disconnect_hierarchy(domain->parent, virq); if (type & IRQ_TYPE_EDGE_BOTH) type = IRQ_TYPE_EDGE_RISING; if (type & IRQ_TYPE_LEVEL_MASK) type = IRQ_TYPE_LEVEL_HIGH; parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 3; parent_fwspec.param[0] = 0; parent_fwspec.param[1] = map->hwirq; parent_fwspec.param[2] = type; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec); } static const struct irq_domain_ops qcom_mpm_ops = { .alloc = qcom_mpm_alloc, .free = irq_domain_free_irqs_common, .translate = irq_domain_translate_twocell, }; /* Triggered by RPM when system resumes from deep sleep */ static irqreturn_t qcom_mpm_handler(int irq, void *dev_id) { struct qcom_mpm_priv *priv = dev_id; unsigned long enable, pending; irqreturn_t ret = IRQ_NONE; unsigned long flags; int i, j; for (i = 0; i < priv->reg_stride; i++) { raw_spin_lock_irqsave(&priv->lock, flags); enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i); pending = qcom_mpm_read(priv, MPM_REG_STATUS, i); pending &= enable; raw_spin_unlock_irqrestore(&priv->lock, flags); for_each_set_bit(j, &pending, 32) { unsigned int pin = 32 * i + j; struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin); struct irq_data *d = &desc->irq_data; if (!irqd_is_level_type(d)) irq_set_irqchip_state(d->irq, IRQCHIP_STATE_PENDING, true); ret = IRQ_HANDLED; } } return ret; } static int mpm_pd_power_off(struct generic_pm_domain *genpd) { struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv, genpd); int i, ret; for (i = 0; i < priv->reg_stride; i++) qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); /* Notify RPM to write vMPM into HW */ ret = mbox_send_message(priv->mbox_chan, NULL); if (ret < 0) return ret; return 0; } static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq) { int i; for (i = 0; i < cnt; i++) if (maps[i].hwirq == hwirq) return true; return false; } static int qcom_mpm_init(struct device_node *np, struct device_node *parent) { struct platform_device *pdev = of_find_device_by_node(np); struct device *dev = &pdev->dev; struct irq_domain *parent_domain; struct generic_pm_domain *genpd; struct qcom_mpm_priv *priv; unsigned int pin_cnt; int i, irq; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt); if (ret) { dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret); return ret; } priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32); ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map"); if (ret < 0) { dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret); return ret; } if (ret % 2) { dev_err(dev, "invalid qcom,mpm-pin-map\n"); return -EINVAL; } priv->map_cnt = ret / 2; priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps), GFP_KERNEL); if (!priv->maps) return -ENOMEM; for (i = 0; i < priv->map_cnt; i++) { u32 pin, hwirq; of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin); of_property_read_u32_index(np, 
"qcom,mpm-pin-map", i * 2 + 1, &hwirq); if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) { dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n", pin, hwirq); continue; } priv->maps[i].pin = pin; priv->maps[i].hwirq = hwirq; } raw_spin_lock_init(&priv->lock); priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); for (i = 0; i < priv->reg_stride; i++) { qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0); qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0); qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0); qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0); qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; genpd = &priv->genpd; genpd->flags = GENPD_FLAG_IRQ_SAFE; genpd->power_off = mpm_pd_power_off; genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev)); if (!genpd->name) return -ENOMEM; ret = pm_genpd_init(genpd, NULL, false); if (ret) { dev_err(dev, "failed to init genpd: %d\n", ret); return ret; } ret = of_genpd_add_provider_simple(np, genpd); if (ret) { dev_err(dev, "failed to add genpd provider: %d\n", ret); goto remove_genpd; } priv->mbox_client.dev = dev; priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0); if (IS_ERR(priv->mbox_chan)) { ret = PTR_ERR(priv->mbox_chan); dev_err(dev, "failed to acquire IPC channel: %d\n", ret); return ret; } parent_domain = irq_find_host(parent); if (!parent_domain) { dev_err(dev, "failed to find MPM parent domain\n"); ret = -ENXIO; goto free_mbox; } priv->domain = irq_domain_create_hierarchy(parent_domain, IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt, of_node_to_fwnode(np), &qcom_mpm_ops, priv); if (!priv->domain) { dev_err(dev, "failed to create MPM domain\n"); ret = -ENOMEM; goto free_mbox; } irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP); ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND, "qcom_mpm", priv); if (ret) { dev_err(dev, "failed to request irq: %d\n", ret); goto remove_domain; } return 0; remove_domain: irq_domain_remove(priv->domain); free_mbox: mbox_free_channel(priv->mbox_chan); remove_genpd: pm_genpd_remove(genpd); return ret; } IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm) IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init) IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm) MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager"); MODULE_LICENSE("GPL v2");
linux-master
drivers/irqchip/irq-qcom-mpm.c
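/*
 * Standalone userspace sketch (not kernel code): mirrors the vMPM register
 * offset arithmetic used by qcom_mpm_read()/qcom_mpm_write() in
 * irq-qcom-mpm.c above, and the pin -> (index, shift) split used by the
 * mask/unmask/set_type paths. The pin count and pin number below are made-up
 * example values, not taken from any real platform.
 */
#include <stdio.h>

enum { MPM_REG_ENABLE, MPM_REG_FALLING_EDGE, MPM_REG_RISING_EDGE,
       MPM_REG_POLARITY, MPM_REG_STATUS };

static unsigned int mpm_offset(unsigned int reg, unsigned int index,
                               unsigned int reg_stride)
{
        /* Same formula as the driver: 4-byte words, first two words skipped */
        return (reg * reg_stride + index + 2) * 4;
}

int main(void)
{
        unsigned int pin_cnt = 96;                      /* hypothetical */
        unsigned int reg_stride = (pin_cnt + 31) / 32;  /* DIV_ROUND_UP(pin_cnt, 32) */
        unsigned int pin = 37;                          /* hypothetical */
        unsigned int index = pin / 32, shift = pin % 32;

        printf("reg_stride=%u index=%u shift=%u\n", reg_stride, index, shift);
        printf("ENABLE[%u] at byte offset 0x%x\n",
               index, mpm_offset(MPM_REG_ENABLE, index, reg_stride));
        printf("STATUS[%u] at byte offset 0x%x\n",
               index, mpm_offset(MPM_REG_STATUS, index, reg_stride));
        return 0;
}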
// SPDX-License-Identifier: GPL-2.0-only // // Author: Steve Chen <[email protected]> // Copyright (C) 2008-2009, MontaVista Software, Inc. <[email protected]> // Author: Bartosz Golaszewski <[email protected]> // Copyright (C) 2019, Texas Instruments // // TI Common Platform Interrupt Controller (cp_intc) driver #include <linux/export.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/irq-davinci-cp-intc.h> #include <linux/irqdomain.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/exception.h> #define DAVINCI_CP_INTC_CTRL 0x04 #define DAVINCI_CP_INTC_HOST_CTRL 0x0c #define DAVINCI_CP_INTC_GLOBAL_ENABLE 0x10 #define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR 0x24 #define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET 0x28 #define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR 0x2c #define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET 0x34 #define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR 0x38 #define DAVINCI_CP_INTC_PRIO_IDX 0x80 #define DAVINCI_CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2)) #define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2)) #define DAVINCI_CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2)) #define DAVINCI_CP_INTC_SYS_POLARITY(n) (0x0d00 + (n << 2)) #define DAVINCI_CP_INTC_SYS_TYPE(n) (0x0d80 + (n << 2)) #define DAVINCI_CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2)) #define DAVINCI_CP_INTC_PRI_INDX_MASK GENMASK(9, 0) #define DAVINCI_CP_INTC_GPIR_NONE BIT(31) static void __iomem *davinci_cp_intc_base; static struct irq_domain *davinci_cp_intc_irq_domain; static inline unsigned int davinci_cp_intc_read(unsigned int offset) { return readl_relaxed(davinci_cp_intc_base + offset); } static inline void davinci_cp_intc_write(unsigned long value, unsigned int offset) { writel_relaxed(value, davinci_cp_intc_base + offset); } static void davinci_cp_intc_ack_irq(struct irq_data *d) { davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR); } static void davinci_cp_intc_mask_irq(struct irq_data *d) { /* XXX don't know why we need to disable nIRQ here... 
*/ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR); davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR); davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET); } static void davinci_cp_intc_unmask_irq(struct irq_data *d) { davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET); } static int davinci_cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type) { unsigned int reg, mask, polarity, type; reg = BIT_WORD(d->hwirq); mask = BIT_MASK(d->hwirq); polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg)); type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg)); switch (flow_type) { case IRQ_TYPE_EDGE_RISING: polarity |= mask; type |= mask; break; case IRQ_TYPE_EDGE_FALLING: polarity &= ~mask; type |= mask; break; case IRQ_TYPE_LEVEL_HIGH: polarity |= mask; type &= ~mask; break; case IRQ_TYPE_LEVEL_LOW: polarity &= ~mask; type &= ~mask; break; default: return -EINVAL; } davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg)); davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg)); return 0; } static struct irq_chip davinci_cp_intc_irq_chip = { .name = "cp_intc", .irq_ack = davinci_cp_intc_ack_irq, .irq_mask = davinci_cp_intc_mask_irq, .irq_unmask = davinci_cp_intc_unmask_irq, .irq_set_type = davinci_cp_intc_set_irq_type, .flags = IRQCHIP_SKIP_SET_WAKE, }; static asmlinkage void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs) { int gpir, irqnr, none; /* * The interrupt number is in first ten bits. The NONE field set to 1 * indicates a spurious irq. */ gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX); irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK; none = gpir & DAVINCI_CP_INTC_GPIR_NONE; if (unlikely(none)) { pr_err_once("%s: spurious irq!\n", __func__); return; } generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr); } static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw); irq_set_chip(virq, &davinci_cp_intc_irq_chip); irq_set_probe(virq); irq_set_handler(virq, handle_edge_irq); return 0; } static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = { .map = davinci_cp_intc_host_map, .xlate = irq_domain_xlate_onetwocell, }; static int __init davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config, struct device_node *node) { unsigned int num_regs = BITS_TO_LONGS(config->num_irqs); int offset, irq_base; void __iomem *req; req = request_mem_region(config->reg.start, resource_size(&config->reg), "davinci-cp-intc"); if (!req) { pr_err("%s: register range busy\n", __func__); return -EBUSY; } davinci_cp_intc_base = ioremap(config->reg.start, resource_size(&config->reg)); if (!davinci_cp_intc_base) { pr_err("%s: unable to ioremap register range\n", __func__); return -EINVAL; } davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE); /* Disable all host interrupts */ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0)); /* Disable system interrupts */ for (offset = 0; offset < num_regs; offset++) davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset)); /* Set to normal mode, no nesting, no priority hold */ davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL); davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL); /* Clear system interrupt status */ for (offset = 0; offset < num_regs; offset++) davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_STAT_CLR(offset)); /* Enable nIRQ (what about nFIQ?) 
*/ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET); /* Default all priorities to channel 7. */ num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */ for (offset = 0; offset < num_regs; offset++) davinci_cp_intc_write(0x07070707, DAVINCI_CP_INTC_CHAN_MAP(offset)); irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0); if (irq_base < 0) { pr_err("%s: unable to allocate interrupt descriptors: %d\n", __func__, irq_base); return irq_base; } davinci_cp_intc_irq_domain = irq_domain_add_legacy( node, config->num_irqs, irq_base, 0, &davinci_cp_intc_irq_domain_ops, NULL); if (!davinci_cp_intc_irq_domain) { pr_err("%s: unable to create an interrupt domain\n", __func__); return -EINVAL; } set_handle_irq(davinci_cp_intc_handle_irq); /* Enable global interrupt */ davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE); return 0; } int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config) { return davinci_cp_intc_do_init(config, NULL); } static int __init davinci_cp_intc_of_init(struct device_node *node, struct device_node *parent) { struct davinci_cp_intc_config config = { }; int ret; ret = of_address_to_resource(node, 0, &config.reg); if (ret) { pr_err("%s: unable to get the register range from device-tree\n", __func__); return ret; } ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs); if (ret) { pr_err("%s: unable to read the 'ti,intc-size' property\n", __func__); return ret; } return davinci_cp_intc_do_init(&config, node); } IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
linux-master
drivers/irqchip/irq-davinci-cp-intc.c
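/*
 * Standalone userspace sketch: how davinci_cp_intc_set_irq_type() in
 * irq-davinci-cp-intc.c above selects the SYS_POLARITY/SYS_TYPE register and
 * bit for a given hwirq. On the 32-bit DaVinci platforms BIT_WORD()/BIT_MASK()
 * operate on 32-bit words, matching the 32-interrupts-per-register layout.
 * hwirq 40 is an arbitrary illustrative value.
 */
#include <stdio.h>

int main(void)
{
        unsigned int hwirq = 40;                 /* arbitrary example */
        unsigned int reg = hwirq / 32;           /* BIT_WORD(hwirq) on 32-bit */
        unsigned int mask = 1u << (hwirq % 32);  /* BIT_MASK(hwirq) on 32-bit */

        printf("hwirq %u -> SYS_POLARITY(%u)/SYS_TYPE(%u), bit mask 0x%08x\n",
               hwirq, reg, reg, mask);
        printf("SYS_POLARITY(%u) byte offset: 0x%04x\n", reg, 0x0d00 + (reg << 2));
        printf("SYS_TYPE(%u)     byte offset: 0x%04x\n", reg, 0x0d80 + (reg << 2));

        /*
         * polarity=1,type=1: rising edge; polarity=0,type=1: falling edge;
         * polarity=1,type=0: level high;  polarity=0,type=0: level low
         */
        return 0;
}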
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Freescale Semiconductor, Inc. */ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/irqchip.h> #include <linux/syscore_ops.h> #define IMR_NUM 4 #define GPC_MAX_IRQS (IMR_NUM * 32) #define GPC_IMR1_CORE0 0x30 #define GPC_IMR1_CORE1 0x40 #define GPC_IMR1_CORE2 0x1c0 #define GPC_IMR1_CORE3 0x1d0 struct gpcv2_irqchip_data { struct raw_spinlock rlock; void __iomem *gpc_base; u32 wakeup_sources[IMR_NUM]; u32 saved_irq_mask[IMR_NUM]; u32 cpu2wakeup; }; static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init; static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i) { return cd->gpc_base + cd->cpu2wakeup + i * 4; } static int gpcv2_wakeup_source_save(void) { struct gpcv2_irqchip_data *cd; void __iomem *reg; int i; cd = imx_gpcv2_instance; if (!cd) return 0; for (i = 0; i < IMR_NUM; i++) { reg = gpcv2_idx_to_reg(cd, i); cd->saved_irq_mask[i] = readl_relaxed(reg); writel_relaxed(cd->wakeup_sources[i], reg); } return 0; } static void gpcv2_wakeup_source_restore(void) { struct gpcv2_irqchip_data *cd; int i; cd = imx_gpcv2_instance; if (!cd) return; for (i = 0; i < IMR_NUM; i++) writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i)); } static struct syscore_ops imx_gpcv2_syscore_ops = { .suspend = gpcv2_wakeup_source_save, .resume = gpcv2_wakeup_source_restore, }; static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on) { struct gpcv2_irqchip_data *cd = d->chip_data; unsigned int idx = d->hwirq / 32; unsigned long flags; u32 mask, val; raw_spin_lock_irqsave(&cd->rlock, flags); mask = BIT(d->hwirq % 32); val = cd->wakeup_sources[idx]; cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask); raw_spin_unlock_irqrestore(&cd->rlock, flags); /* * Do *not* call into the parent, as the GIC doesn't have any * wake-up facility... 
*/ return 0; } static void imx_gpcv2_irq_unmask(struct irq_data *d) { struct gpcv2_irqchip_data *cd = d->chip_data; void __iomem *reg; u32 val; raw_spin_lock(&cd->rlock); reg = gpcv2_idx_to_reg(cd, d->hwirq / 32); val = readl_relaxed(reg); val &= ~BIT(d->hwirq % 32); writel_relaxed(val, reg); raw_spin_unlock(&cd->rlock); irq_chip_unmask_parent(d); } static void imx_gpcv2_irq_mask(struct irq_data *d) { struct gpcv2_irqchip_data *cd = d->chip_data; void __iomem *reg; u32 val; raw_spin_lock(&cd->rlock); reg = gpcv2_idx_to_reg(cd, d->hwirq / 32); val = readl_relaxed(reg); val |= BIT(d->hwirq % 32); writel_relaxed(val, reg); raw_spin_unlock(&cd->rlock); irq_chip_mask_parent(d); } static struct irq_chip gpcv2_irqchip_data_chip = { .name = "GPCv2", .irq_eoi = irq_chip_eoi_parent, .irq_mask = imx_gpcv2_irq_mask, .irq_unmask = imx_gpcv2_irq_unmask, .irq_set_wake = imx_gpcv2_irq_set_wake, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif }; static int imx_gpcv2_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { if (is_of_node(fwspec->fwnode)) { if (fwspec->param_count != 3) return -EINVAL; /* No PPI should point to this domain */ if (fwspec->param[0] != 0) return -EINVAL; *hwirq = fwspec->param[1]; *type = fwspec->param[2]; return 0; } return -EINVAL; } static int imx_gpcv2_domain_alloc(struct irq_domain *domain, unsigned int irq, unsigned int nr_irqs, void *data) { struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; irq_hw_number_t hwirq; unsigned int type; int err; int i; err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type); if (err) return err; if (hwirq >= GPC_MAX_IRQS) return -EINVAL; for (i = 0; i < nr_irqs; i++) { irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, &gpcv2_irqchip_data_chip, domain->host_data); } parent_fwspec = *fwspec; parent_fwspec.fwnode = domain->parent->fwnode; return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_fwspec); } static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = { .translate = imx_gpcv2_domain_translate, .alloc = imx_gpcv2_domain_alloc, .free = irq_domain_free_irqs_common, }; static const struct of_device_id gpcv2_of_match[] = { { .compatible = "fsl,imx7d-gpc", .data = (const void *) 2 }, { .compatible = "fsl,imx8mq-gpc", .data = (const void *) 4 }, { /* END */ } }; static int __init imx_gpcv2_irqchip_init(struct device_node *node, struct device_node *parent) { struct irq_domain *parent_domain, *domain; struct gpcv2_irqchip_data *cd; const struct of_device_id *id; unsigned long core_num; int i; if (!parent) { pr_err("%pOF: no parent, giving up\n", node); return -ENODEV; } id = of_match_node(gpcv2_of_match, node); if (!id) { pr_err("%pOF: unknown compatibility string\n", node); return -ENODEV; } core_num = (unsigned long)id->data; parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("%pOF: unable to get parent domain\n", node); return -ENXIO; } cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL); if (!cd) return -ENOMEM; raw_spin_lock_init(&cd->rlock); cd->gpc_base = of_iomap(node, 0); if (!cd->gpc_base) { pr_err("%pOF: unable to map gpc registers\n", node); kfree(cd); return -ENOMEM; } domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, node, &gpcv2_irqchip_data_domain_ops, cd); if (!domain) { iounmap(cd->gpc_base); kfree(cd); return -ENOMEM; } irq_set_default_host(domain); /* Initially mask 
all interrupts */ for (i = 0; i < IMR_NUM; i++) { void __iomem *reg = cd->gpc_base + i * 4; switch (core_num) { case 4: writel_relaxed(~0, reg + GPC_IMR1_CORE2); writel_relaxed(~0, reg + GPC_IMR1_CORE3); fallthrough; case 2: writel_relaxed(~0, reg + GPC_IMR1_CORE0); writel_relaxed(~0, reg + GPC_IMR1_CORE1); } cd->wakeup_sources[i] = ~0; } /* Let CORE0 as the default CPU to wake up by GPC */ cd->cpu2wakeup = GPC_IMR1_CORE0; /* * Due to hardware design failure, need to make sure GPR * interrupt(#32) is unmasked during RUN mode to avoid entering * DSM by mistake. */ writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup); imx_gpcv2_instance = cd; register_syscore_ops(&imx_gpcv2_syscore_ops); /* * Clear the OF_POPULATED flag set in of_irq_init so that * later the GPC power domain driver will not be skipped. */ of_node_clear_flag(node, OF_POPULATED); fwnode_dev_initialized(domain->fwnode, false); return 0; } IRQCHIP_DECLARE(imx_gpcv2_imx7d, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init); IRQCHIP_DECLARE(imx_gpcv2_imx8mq, "fsl,imx8mq-gpc", imx_gpcv2_irqchip_init);
linux-master
drivers/irqchip/irq-imx-gpcv2.c
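/*
 * Standalone userspace sketch: the wakeup-source bookkeeping performed by
 * imx_gpcv2_irq_set_wake() in irq-imx-gpcv2.c above. A cleared bit in the
 * saved IMR word means the interrupt stays unmasked across suspend, i.e. it
 * can wake the system. The hwirq value is an arbitrary example.
 */
#include <stdio.h>

int main(void)
{
        unsigned int wakeup_sources[4] = { ~0u, ~0u, ~0u, ~0u }; /* all masked */
        unsigned int hwirq = 74, on = 1;                          /* arbitrary */
        unsigned int idx = hwirq / 32;
        unsigned int mask = 1u << (hwirq % 32);

        /* Same expression as the driver: "on" clears the IMR bit */
        wakeup_sources[idx] = on ? (wakeup_sources[idx] & ~mask)
                                 : (wakeup_sources[idx] | mask);

        printf("hwirq %u -> IMR word %u, bit %u, word now 0x%08x\n",
               hwirq, idx, hwirq % 32, wakeup_sources[idx]);
        return 0;
}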
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright (C) 2014 Oleksij Rempel <[email protected]> * Add Alphascale ASM9260 support. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/stmp_device.h> #include <asm/exception.h> #include "alphascale_asm9260-icoll.h" /* * this device provide 4 offsets for each register: * 0x0 - plain read write mode * 0x4 - set mode, OR logic. * 0x8 - clr mode, XOR logic. * 0xc - togle mode. */ #define SET_REG 4 #define CLR_REG 8 #define HW_ICOLL_VECTOR 0x0000 #define HW_ICOLL_LEVELACK 0x0010 #define HW_ICOLL_CTRL 0x0020 #define HW_ICOLL_STAT_OFFSET 0x0070 #define HW_ICOLL_INTERRUPT0 0x0120 #define HW_ICOLL_INTERRUPTn(n) ((n) * 0x10) #define BM_ICOLL_INTR_ENABLE BIT(2) #define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1 #define ICOLL_NUM_IRQS 128 enum icoll_type { ICOLL, ASM9260_ICOLL, }; struct icoll_priv { void __iomem *vector; void __iomem *levelack; void __iomem *ctrl; void __iomem *stat; void __iomem *intr; void __iomem *clear; enum icoll_type type; }; static struct icoll_priv icoll_priv; static struct irq_domain *icoll_domain; /* calculate bit offset depending on number of interrupt per register */ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit) { /* * mask lower part of hwirq to convert it * in 0, 1, 2 or 3 and then multiply it by 8 (or shift by 3) */ return bit << ((d->hwirq & 3) << 3); } /* calculate mem offset depending on number of interrupt per register */ static void __iomem *icoll_intr_reg(struct irq_data *d) { /* offset = hwirq / intr_per_reg * 0x10 */ return icoll_priv.intr + ((d->hwirq >> 2) * 0x10); } static void icoll_ack_irq(struct irq_data *d) { /* * The Interrupt Collector is able to prioritize irqs. * Currently only level 0 is used. So acking can use * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally. 
*/ __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, icoll_priv.levelack); } static void icoll_mask_irq(struct irq_data *d) { __raw_writel(BM_ICOLL_INTR_ENABLE, icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq)); } static void icoll_unmask_irq(struct irq_data *d) { __raw_writel(BM_ICOLL_INTR_ENABLE, icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq)); } static void asm9260_mask_irq(struct irq_data *d) { __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE), icoll_intr_reg(d) + CLR_REG); } static void asm9260_unmask_irq(struct irq_data *d) { __raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq), icoll_priv.clear + ASM9260_HW_ICOLL_CLEARn(d->hwirq)); __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE), icoll_intr_reg(d) + SET_REG); } static struct irq_chip mxs_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = icoll_mask_irq, .irq_unmask = icoll_unmask_irq, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; static struct irq_chip asm9260_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = asm9260_mask_irq, .irq_unmask = asm9260_unmask_irq, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) { u32 irqnr; irqnr = __raw_readl(icoll_priv.stat); __raw_writel(irqnr, icoll_priv.vector); generic_handle_domain_irq(icoll_domain, irqnr); } static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { struct irq_chip *chip; if (icoll_priv.type == ICOLL) chip = &mxs_icoll_chip; else chip = &asm9260_icoll_chip; irq_set_chip_and_handler(virq, chip, handle_level_irq); return 0; } static const struct irq_domain_ops icoll_irq_domain_ops = { .map = icoll_irq_domain_map, .xlate = irq_domain_xlate_onecell, }; static void __init icoll_add_domain(struct device_node *np, int num) { icoll_domain = irq_domain_add_linear(np, num, &icoll_irq_domain_ops, NULL); if (!icoll_domain) panic("%pOF: unable to create irq domain", np); } static void __iomem * __init icoll_init_iobase(struct device_node *np) { void __iomem *icoll_base; icoll_base = of_io_request_and_map(np, 0, np->name); if (IS_ERR(icoll_base)) panic("%pOF: unable to map resource", np); return icoll_base; } static int __init icoll_of_init(struct device_node *np, struct device_node *interrupt_parent) { void __iomem *icoll_base; icoll_priv.type = ICOLL; icoll_base = icoll_init_iobase(np); icoll_priv.vector = icoll_base + HW_ICOLL_VECTOR; icoll_priv.levelack = icoll_base + HW_ICOLL_LEVELACK; icoll_priv.ctrl = icoll_base + HW_ICOLL_CTRL; icoll_priv.stat = icoll_base + HW_ICOLL_STAT_OFFSET; icoll_priv.intr = icoll_base + HW_ICOLL_INTERRUPT0; icoll_priv.clear = NULL; /* * Interrupt Collector reset, which initializes the priority * for each irq to level 0. 
*/ stmp_reset_block(icoll_priv.ctrl); icoll_add_domain(np, ICOLL_NUM_IRQS); set_handle_irq(icoll_handle_irq); return 0; } IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init); static int __init asm9260_of_init(struct device_node *np, struct device_node *interrupt_parent) { void __iomem *icoll_base; int i; icoll_priv.type = ASM9260_ICOLL; icoll_base = icoll_init_iobase(np); icoll_priv.vector = icoll_base + ASM9260_HW_ICOLL_VECTOR; icoll_priv.levelack = icoll_base + ASM9260_HW_ICOLL_LEVELACK; icoll_priv.ctrl = icoll_base + ASM9260_HW_ICOLL_CTRL; icoll_priv.stat = icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET; icoll_priv.intr = icoll_base + ASM9260_HW_ICOLL_INTERRUPT0; icoll_priv.clear = icoll_base + ASM9260_HW_ICOLL_CLEAR0; writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE, icoll_priv.ctrl); /* * ASM9260 don't provide reset bit. So, we need to set level 0 * manually. */ for (i = 0; i < 16 * 0x10; i += 0x10) writel(0, icoll_priv.intr + i); icoll_add_domain(np, ASM9260_NUM_IRQS); set_handle_irq(icoll_handle_irq); return 0; } IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init);
linux-master
drivers/irqchip/irq-mxs.c
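/*
 * Standalone userspace sketch: the per-interrupt register/bit arithmetic used
 * by icoll_intr_reg() and icoll_intr_bitshift() in irq-mxs.c above for the
 * ASM9260 variant (four interrupt sources per 32-bit register, eight bits
 * each). hwirq 13 is an arbitrary example.
 */
#include <stdio.h>

#define BM_ICOLL_INTR_ENABLE (1u << 2)

int main(void)
{
        unsigned int hwirq = 13;                               /* arbitrary */
        unsigned int reg_off = (hwirq >> 2) * 0x10;            /* register block */
        unsigned int bit = BM_ICOLL_INTR_ENABLE << ((hwirq & 3) << 3);

        printf("hwirq %u -> INTERRUPT register at +0x%x, enable bit 0x%08x\n",
               hwirq, reg_off, bit);
        /* The SET and CLR views of that register live at +0x4 and +0x8 */
        return 0;
}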
/* * Allwinner A20/A31 SoCs NMI IRQ chip driver. * * Carlo Caione <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #define DRV_NAME "sunxi-nmi" #define pr_fmt(fmt) DRV_NAME ": " fmt #include <linux/bitops.h> #include <linux/device.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #define SUNXI_NMI_SRC_TYPE_MASK 0x00000003 #define SUNXI_NMI_IRQ_BIT BIT(0) /* * For deprecated sun6i-a31-sc-nmi compatible. */ #define SUN6I_NMI_CTRL 0x00 #define SUN6I_NMI_PENDING 0x04 #define SUN6I_NMI_ENABLE 0x34 #define SUN7I_NMI_CTRL 0x00 #define SUN7I_NMI_PENDING 0x04 #define SUN7I_NMI_ENABLE 0x08 #define SUN9I_NMI_CTRL 0x00 #define SUN9I_NMI_ENABLE 0x04 #define SUN9I_NMI_PENDING 0x08 enum { SUNXI_SRC_TYPE_LEVEL_LOW = 0, SUNXI_SRC_TYPE_EDGE_FALLING, SUNXI_SRC_TYPE_LEVEL_HIGH, SUNXI_SRC_TYPE_EDGE_RISING, }; struct sunxi_sc_nmi_reg_offs { u32 ctrl; u32 pend; u32 enable; }; static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = { .ctrl = SUN6I_NMI_CTRL, .pend = SUN6I_NMI_PENDING, .enable = SUN6I_NMI_ENABLE, }; static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = { .ctrl = SUN7I_NMI_CTRL, .pend = SUN7I_NMI_PENDING, .enable = SUN7I_NMI_ENABLE, }; static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = { .ctrl = SUN9I_NMI_CTRL, .pend = SUN9I_NMI_PENDING, .enable = SUN9I_NMI_ENABLE, }; static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val) { irq_reg_writel(gc, val, off); } static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) { return irq_reg_readl(gc, off); } static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); generic_handle_domain_irq(domain, 0); chained_irq_exit(chip, desc); } static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct irq_chip_type *ct = gc->chip_types; u32 src_type_reg; u32 ctrl_off = ct->regs.type; unsigned int src_type; unsigned int i; irq_gc_lock(gc); switch (flow_type & IRQF_TRIGGER_MASK) { case IRQ_TYPE_EDGE_FALLING: src_type = SUNXI_SRC_TYPE_EDGE_FALLING; break; case IRQ_TYPE_EDGE_RISING: src_type = SUNXI_SRC_TYPE_EDGE_RISING; break; case IRQ_TYPE_LEVEL_HIGH: src_type = SUNXI_SRC_TYPE_LEVEL_HIGH; break; case IRQ_TYPE_NONE: case IRQ_TYPE_LEVEL_LOW: src_type = SUNXI_SRC_TYPE_LEVEL_LOW; break; default: irq_gc_unlock(gc); pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq); return -EBADR; } irqd_set_trigger_type(data, flow_type); irq_setup_alt_chip(data, flow_type); for (i = 0; i < gc->num_ct; i++, ct++) if (ct->type & flow_type) ctrl_off = ct->regs.type; src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off); src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK; src_type_reg |= src_type; sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg); irq_gc_unlock(gc); return IRQ_SET_MASK_OK; } static int __init sunxi_sc_nmi_irq_init(struct device_node *node, const struct sunxi_sc_nmi_reg_offs *reg_offs) { struct irq_domain *domain; struct irq_chip_generic *gc; unsigned int irq; unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | 
IRQ_NOAUTOEN; int ret; domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); if (!domain) { pr_err("Could not register interrupt domain.\n"); return -ENOMEM; } ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME, handle_fasteoi_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); if (ret) { pr_err("Could not allocate generic interrupt chip.\n"); goto fail_irqd_remove; } irq = irq_of_parse_and_map(node, 0); if (irq <= 0) { pr_err("unable to parse irq\n"); ret = -EINVAL; goto fail_irqd_remove; } gc = irq_get_domain_generic_chip(domain, 0); gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(gc->reg_base)) { pr_err("unable to map resource\n"); ret = PTR_ERR(gc->reg_base); goto fail_irqd_remove; } gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit; gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type; gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED; gc->chip_types[0].regs.ack = reg_offs->pend; gc->chip_types[0].regs.mask = reg_offs->enable; gc->chip_types[0].regs.type = reg_offs->ctrl; gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type; gc->chip_types[1].regs.ack = reg_offs->pend; gc->chip_types[1].regs.mask = reg_offs->enable; gc->chip_types[1].regs.type = reg_offs->ctrl; gc->chip_types[1].handler = handle_edge_irq; /* Disable any active interrupts */ sunxi_sc_nmi_write(gc, reg_offs->enable, 0); /* Clear any pending NMI interrupts */ sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT); irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain); return 0; fail_irqd_remove: irq_domain_remove(domain); return ret; } static int __init sun6i_sc_nmi_irq_init(struct device_node *node, struct device_node *parent) { return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs); } IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init); static int __init sun7i_sc_nmi_irq_init(struct device_node *node, struct device_node *parent) { return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs); } IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init); static int __init sun9i_nmi_irq_init(struct device_node *node, struct device_node *parent) { return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs); } IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
linux-master
drivers/irqchip/irq-sunxi-nmi.c
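/*
 * Standalone userspace sketch: the read-modify-write of the 2-bit NMI
 * source-type field performed by sunxi_sc_nmi_set_type() in irq-sunxi-nmi.c
 * above. The starting CTRL register value is invented for illustration.
 */
#include <stdio.h>

#define SUNXI_NMI_SRC_TYPE_MASK 0x3u

enum { SRC_LEVEL_LOW, SRC_EDGE_FALLING, SRC_LEVEL_HIGH, SRC_EDGE_RISING };

int main(void)
{
        unsigned int ctrl = 0x00000002;          /* pretend current CTRL value */
        unsigned int src_type = SRC_EDGE_FALLING;

        ctrl &= ~SUNXI_NMI_SRC_TYPE_MASK;
        ctrl |= src_type;

        printf("CTRL is now 0x%08x (source type %u)\n", ctrl, src_type);
        return 0;
}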
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <[email protected]> */ #include <linux/acpi_iort.h> #include <linux/device.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_irq.h> static struct irq_chip its_pmsi_irq_chip = { .name = "ITS-pMSI", }; static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, u32 *dev_id) { int ret, index = 0; /* Suck the DeviceID out of the msi-parent property */ do { struct of_phandle_args args; ret = of_parse_phandle_with_args(dev->of_node, "msi-parent", "#msi-cells", index, &args); if (args.np == irq_domain_get_of_node(domain)) { if (WARN_ON(args.args_count != 1)) return -EINVAL; *dev_id = args.args[0]; break; } index++; } while (!ret); return ret; } int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) { return -1; } static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { struct msi_domain_info *msi_info; u32 dev_id; int ret; msi_info = msi_get_domain_info(domain->parent); if (dev->of_node) ret = of_pmsi_get_dev_id(domain, dev, &dev_id); else ret = iort_pmsi_get_dev_id(dev, &dev_id); if (ret) return ret; /* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = dev_id; /* Allocate at least 32 MSIs, and always as a power of 2 */ nvec = max_t(int, 32, roundup_pow_of_two(nvec)); return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); } static struct msi_domain_ops its_pmsi_ops = { .msi_prepare = its_pmsi_prepare, }; static struct msi_domain_info its_pmsi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), .ops = &its_pmsi_ops, .chip = &its_pmsi_irq_chip, }; static const struct of_device_id its_device_id[] = { { .compatible = "arm,gic-v3-its", }, {}, }; static int __init its_pmsi_init_one(struct fwnode_handle *fwnode, const char *name) { struct irq_domain *parent; parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS); if (!parent || !msi_get_domain_info(parent)) { pr_err("%s: unable to locate ITS domain\n", name); return -ENXIO; } if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info, parent)) { pr_err("%s: unable to create platform domain\n", name); return -ENXIO; } pr_info("Platform MSI: %s domain created\n", name); return 0; } #ifdef CONFIG_ACPI static int __init its_pmsi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_translator *its_entry; struct fwnode_handle *domain_handle; const char *node_name; int err = -ENXIO; its_entry = (struct acpi_madt_generic_translator *)header; node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx", (long)its_entry->base_address); domain_handle = iort_find_domain_token(its_entry->translation_id); if (!domain_handle) { pr_err("%s: Unable to locate ITS domain handle\n", node_name); goto out; } err = its_pmsi_init_one(domain_handle, node_name); out: kfree(node_name); return err; } static void __init its_pmsi_acpi_init(void) { acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, its_pmsi_parse_madt, 0); } #else static inline void its_pmsi_acpi_init(void) { } #endif static void __init its_pmsi_of_init(void) { struct device_node *np; for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { if (!of_device_is_available(np)) continue; if (!of_property_read_bool(np, "msi-controller")) continue; its_pmsi_init_one(of_node_to_fwnode(np), np->full_name); } } static int __init 
its_pmsi_init(void) { its_pmsi_of_init(); its_pmsi_acpi_init(); return 0; } early_initcall(its_pmsi_init);
linux-master
drivers/irqchip/irq-gic-v3-its-platform-msi.c
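/*
 * Standalone userspace sketch: the vector-count rounding done by
 * its_pmsi_prepare() in irq-gic-v3-its-platform-msi.c above (allocate at
 * least 32 MSIs, always a power of two). The sample request counts are
 * arbitrary.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        int samples[] = { 1, 5, 32, 33, 100 };   /* arbitrary requests */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned int nvec = roundup_pow_of_two_u(samples[i]);

                if (nvec < 32)
                        nvec = 32;
                printf("requested %d -> allocated %u\n", samples[i], nvec);
        }
        return 0;
}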
// SPDX-License-Identifier: GPL-2.0 /* * JZ47xx SoCs TCU IRQ driver * Copyright (C) 2019 Paul Cercueil <[email protected]> */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/mfd/ingenic-tcu.h> #include <linux/mfd/syscon.h> #include <linux/of_irq.h> #include <linux/regmap.h> struct ingenic_tcu { struct regmap *map; struct clk *clk; struct irq_domain *domain; unsigned int nb_parent_irqs; u32 parent_irqs[3]; }; static void ingenic_tcu_intc_cascade(struct irq_desc *desc) { struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data); struct irq_domain *domain = irq_desc_get_handler_data(desc); struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); struct regmap *map = gc->private; uint32_t irq_reg, irq_mask; unsigned long bits; unsigned int i; regmap_read(map, TCU_REG_TFR, &irq_reg); regmap_read(map, TCU_REG_TMR, &irq_mask); chained_irq_enter(irq_chip, desc); irq_reg &= ~irq_mask; bits = irq_reg; for_each_set_bit(i, &bits, 32) generic_handle_domain_irq(domain, i); chained_irq_exit(irq_chip, desc); } static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct regmap *map = gc->private; u32 mask = d->mask; irq_gc_lock(gc); regmap_write(map, ct->regs.ack, mask); regmap_write(map, ct->regs.enable, mask); *ct->mask_cache |= mask; irq_gc_unlock(gc); } static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct regmap *map = gc->private; u32 mask = d->mask; irq_gc_lock(gc); regmap_write(map, ct->regs.disable, mask); *ct->mask_cache &= ~mask; irq_gc_unlock(gc); } static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct regmap *map = gc->private; u32 mask = d->mask; irq_gc_lock(gc); regmap_write(map, ct->regs.ack, mask); regmap_write(map, ct->regs.disable, mask); irq_gc_unlock(gc); } static int __init ingenic_tcu_irq_init(struct device_node *np, struct device_node *parent) { struct irq_chip_generic *gc; struct irq_chip_type *ct; struct ingenic_tcu *tcu; struct regmap *map; unsigned int i; int ret, irqs; map = device_node_to_regmap(np); if (IS_ERR(map)) return PTR_ERR(map); tcu = kzalloc(sizeof(*tcu), GFP_KERNEL); if (!tcu) return -ENOMEM; tcu->map = map; irqs = of_property_count_elems_of_size(np, "interrupts", sizeof(u32)); if (irqs < 0 || irqs > ARRAY_SIZE(tcu->parent_irqs)) { pr_crit("%s: Invalid 'interrupts' property\n", __func__); ret = -EINVAL; goto err_free_tcu; } tcu->nb_parent_irqs = irqs; tcu->domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL); if (!tcu->domain) { ret = -ENOMEM; goto err_free_tcu; } ret = irq_alloc_domain_generic_chips(tcu->domain, 32, 1, "TCU", handle_level_irq, 0, IRQ_NOPROBE | IRQ_LEVEL, 0); if (ret) { pr_crit("%s: Invalid 'interrupts' property\n", __func__); goto out_domain_remove; } gc = irq_get_domain_generic_chip(tcu->domain, 0); ct = gc->chip_types; gc->wake_enabled = IRQ_MSK(32); gc->private = tcu->map; ct->regs.disable = TCU_REG_TMSR; ct->regs.enable = TCU_REG_TMCR; ct->regs.ack = TCU_REG_TFCR; ct->chip.irq_unmask = ingenic_tcu_gc_unmask_enable_reg; ct->chip.irq_mask = ingenic_tcu_gc_mask_disable_reg; ct->chip.irq_mask_ack = 
ingenic_tcu_gc_mask_disable_reg_and_ack; ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; /* Mask all IRQs by default */ regmap_write(tcu->map, TCU_REG_TMSR, IRQ_MSK(32)); /* * On JZ4740, timer 0 and timer 1 have their own interrupt line; * timers 2-7 share one interrupt. * On SoCs >= JZ4770, timer 5 has its own interrupt line; * timers 0-4 and 6-7 share one single interrupt. * * To keep things simple, we just register the same handler to * all parent interrupts. The handler will properly detect which * channel fired the interrupt. */ for (i = 0; i < irqs; i++) { tcu->parent_irqs[i] = irq_of_parse_and_map(np, i); if (!tcu->parent_irqs[i]) { ret = -EINVAL; goto out_unmap_irqs; } irq_set_chained_handler_and_data(tcu->parent_irqs[i], ingenic_tcu_intc_cascade, tcu->domain); } return 0; out_unmap_irqs: for (; i > 0; i--) irq_dispose_mapping(tcu->parent_irqs[i - 1]); out_domain_remove: irq_domain_remove(tcu->domain); err_free_tcu: kfree(tcu); return ret; } IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
linux-master
drivers/irqchip/irq-ingenic-tcu.c
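/*
 * Standalone userspace sketch: how ingenic_tcu_intc_cascade() in
 * irq-ingenic-tcu.c above derives the set of TCU channels to service from the
 * flag (TFR) and mask (TMR) registers. The register values are invented for
 * illustration.
 */
#include <stdio.h>

int main(void)
{
        unsigned int tfr = 0x00000026;   /* pretend pending flags  */
        unsigned int tmr = 0x00000004;   /* pretend masked sources */
        unsigned int pending = tfr & ~tmr;
        int i;

        for (i = 0; i < 32; i++)
                if (pending & (1u << i))
                        printf("would dispatch TCU channel %d\n", i);
        return 0;
}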
// SPDX-License-Identifier: GPL-2.0-only /* * Freescale MU used as MSI controller * * Copyright (c) 2018 Pengutronix, Oleksij Rempel <[email protected]> * Copyright 2022 NXP * Frank Li <[email protected]> * Peng Fan <[email protected]> * * Based on drivers/mailbox/imx-mailbox.c */ #include <linux/clk.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/spinlock.h> #define IMX_MU_CHANS 4 enum imx_mu_xcr { IMX_MU_GIER, IMX_MU_GCR, IMX_MU_TCR, IMX_MU_RCR, IMX_MU_xCR_MAX, }; enum imx_mu_xsr { IMX_MU_SR, IMX_MU_GSR, IMX_MU_TSR, IMX_MU_RSR, IMX_MU_xSR_MAX }; enum imx_mu_type { IMX_MU_V2 = BIT(1), }; /* Receive Interrupt Enable */ #define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x)))) #define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x)))) struct imx_mu_dcfg { enum imx_mu_type type; u32 xTR; /* Transmit Register0 */ u32 xRR; /* Receive Register0 */ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */ }; struct imx_mu_msi { raw_spinlock_t lock; struct irq_domain *msi_domain; void __iomem *regs; phys_addr_t msiir_addr; const struct imx_mu_dcfg *cfg; unsigned long used; struct clk *clk; }; static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs) { iowrite32(val, msi_data->regs + offs); } static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs) { return ioread32(msi_data->regs + offs); } static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr) { unsigned long flags; u32 val; raw_spin_lock_irqsave(&msi_data->lock, flags); val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]); val &= ~clr; val |= set; imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]); raw_spin_unlock_irqrestore(&msi_data->lock, flags); return val; } static void imx_mu_msi_parent_mask_irq(struct irq_data *data) { struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq)); } static void imx_mu_msi_parent_unmask_irq(struct irq_data *data) { struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0); } static void imx_mu_msi_parent_ack_irq(struct irq_data *data) { struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4); } static struct irq_chip imx_mu_msi_irq_chip = { .name = "MU-MSI", .irq_ack = irq_chip_ack_parent, }; static struct msi_domain_ops imx_mu_msi_irq_ops = { }; static struct msi_domain_info imx_mu_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), .ops = &imx_mu_msi_irq_ops, .chip = &imx_mu_msi_irq_chip, }; static void imx_mu_msi_parent_compose_msg(struct irq_data *data, struct msi_msg *msg) { struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); u64 addr = msi_data->msiir_addr + 4 * data->hwirq; msg->address_hi = upper_32_bits(addr); msg->address_lo = lower_32_bits(addr); msg->data = data->hwirq; } static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { return -EINVAL; } static struct irq_chip imx_mu_msi_parent_chip = { .name = "MU", 
.irq_mask = imx_mu_msi_parent_mask_irq, .irq_unmask = imx_mu_msi_parent_unmask_irq, .irq_ack = imx_mu_msi_parent_ack_irq, .irq_compose_msi_msg = imx_mu_msi_parent_compose_msg, .irq_set_affinity = imx_mu_msi_parent_set_affinity, }; static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct imx_mu_msi *msi_data = domain->host_data; unsigned long flags; int pos, err = 0; WARN_ON(nr_irqs != 1); raw_spin_lock_irqsave(&msi_data->lock, flags); pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS); if (pos < IMX_MU_CHANS) __set_bit(pos, &msi_data->used); else err = -ENOSPC; raw_spin_unlock_irqrestore(&msi_data->lock, flags); if (err) return err; irq_domain_set_info(domain, virq, pos, &imx_mu_msi_parent_chip, msi_data, handle_edge_irq, NULL, NULL); return 0; } static void imx_mu_msi_domain_irq_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d); unsigned long flags; raw_spin_lock_irqsave(&msi_data->lock, flags); __clear_bit(d->hwirq, &msi_data->used); raw_spin_unlock_irqrestore(&msi_data->lock, flags); } static const struct irq_domain_ops imx_mu_msi_domain_ops = { .alloc = imx_mu_msi_domain_irq_alloc, .free = imx_mu_msi_domain_irq_free, }; static void imx_mu_msi_irq_handler(struct irq_desc *desc) { struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 status; int i; status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]); chained_irq_enter(chip, desc); for (i = 0; i < IMX_MU_CHANS; i++) { if (status & IMX_MU_xSR_RFn(msi_data, i)) generic_handle_domain_irq(msi_data->msi_domain, i); } chained_irq_exit(chip, desc); } static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev) { struct fwnode_handle *fwnodes = dev_fwnode(dev); struct irq_domain *parent; /* Initialize MSI domain parent */ parent = irq_domain_create_linear(fwnodes, IMX_MU_CHANS, &imx_mu_msi_domain_ops, msi_data); if (!parent) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes, &imx_mu_msi_domain_info, parent); if (!msi_data->msi_domain) { dev_err(dev, "failed to create MSI domain\n"); irq_domain_remove(parent); return -ENOMEM; } irq_domain_set_pm_device(msi_data->msi_domain, dev); return 0; } /* Register offset of different version MU IP */ static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = { .type = 0, .xTR = 0x0, .xRR = 0x10, .xSR = { [IMX_MU_SR] = 0x20, [IMX_MU_GSR] = 0x20, [IMX_MU_TSR] = 0x20, [IMX_MU_RSR] = 0x20, }, .xCR = { [IMX_MU_GIER] = 0x24, [IMX_MU_GCR] = 0x24, [IMX_MU_TCR] = 0x24, [IMX_MU_RCR] = 0x24, }, }; static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = { .type = 0, .xTR = 0x20, .xRR = 0x40, .xSR = { [IMX_MU_SR] = 0x60, [IMX_MU_GSR] = 0x60, [IMX_MU_TSR] = 0x60, [IMX_MU_RSR] = 0x60, }, .xCR = { [IMX_MU_GIER] = 0x64, [IMX_MU_GCR] = 0x64, [IMX_MU_TCR] = 0x64, [IMX_MU_RCR] = 0x64, }, }; static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = { .type = IMX_MU_V2, .xTR = 0x200, .xRR = 0x280, .xSR = { [IMX_MU_SR] = 0xC, [IMX_MU_GSR] = 0x118, [IMX_MU_TSR] = 0x124, [IMX_MU_RSR] = 0x12C, }, .xCR = { [IMX_MU_GIER] = 0x110, [IMX_MU_GCR] = 0x114, [IMX_MU_TCR] = 0x120, [IMX_MU_RCR] = 0x128 }, }; static int __init imx_mu_of_init(struct device_node *dn, struct device_node *parent, const struct imx_mu_dcfg 
*cfg) { struct platform_device *pdev = of_find_device_by_node(dn); struct device_link *pd_link_a; struct device_link *pd_link_b; struct imx_mu_msi *msi_data; struct resource *res; struct device *pd_a; struct device *pd_b; struct device *dev; int ret; int irq; dev = &pdev->dev; msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); if (!msi_data) return -ENOMEM; msi_data->cfg = cfg; msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side"); if (IS_ERR(msi_data->regs)) { dev_err(&pdev->dev, "failed to initialize 'regs'\n"); return PTR_ERR(msi_data->regs); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side"); if (!res) return -EIO; msi_data->msiir_addr = res->start + msi_data->cfg->xTR; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; platform_set_drvdata(pdev, msi_data); msi_data->clk = devm_clk_get(dev, NULL); if (IS_ERR(msi_data->clk)) return PTR_ERR(msi_data->clk); pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side"); if (IS_ERR(pd_a)) return PTR_ERR(pd_a); pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side"); if (IS_ERR(pd_b)) return PTR_ERR(pd_b); pd_link_a = device_link_add(dev, pd_a, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!pd_link_a) { dev_err(dev, "Failed to add device_link to mu a.\n"); goto err_pd_a; } pd_link_b = device_link_add(dev, pd_b, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!pd_link_b) { dev_err(dev, "Failed to add device_link to mu a.\n"); goto err_pd_b; } ret = imx_mu_msi_domains_init(msi_data, dev); if (ret) goto err_dm_init; pm_runtime_enable(dev); irq_set_chained_handler_and_data(irq, imx_mu_msi_irq_handler, msi_data); return 0; err_dm_init: device_link_remove(dev, pd_b); err_pd_b: device_link_remove(dev, pd_a); err_pd_a: return -EINVAL; } static int __maybe_unused imx_mu_runtime_suspend(struct device *dev) { struct imx_mu_msi *priv = dev_get_drvdata(dev); clk_disable_unprepare(priv->clk); return 0; } static int __maybe_unused imx_mu_runtime_resume(struct device *dev) { struct imx_mu_msi *priv = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(priv->clk); if (ret) dev_err(dev, "failed to enable clock\n"); return ret; } static const struct dev_pm_ops imx_mu_pm_ops = { SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend, imx_mu_runtime_resume, NULL) }; static int __init imx_mu_imx7ulp_of_init(struct device_node *dn, struct device_node *parent) { return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp); } static int __init imx_mu_imx6sx_of_init(struct device_node *dn, struct device_node *parent) { return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx); } static int __init imx_mu_imx8ulp_of_init(struct device_node *dn, struct device_node *parent) { return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp); } IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi) IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init) IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init) IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init) IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops) MODULE_AUTHOR("Frank Li <[email protected]>"); MODULE_DESCRIPTION("Freescale MU MSI controller driver"); MODULE_LICENSE("GPL");
linux-master
drivers/irqchip/irq-imx-mu-msi.c
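/*
 * Standalone userspace sketch: the receive-interrupt-enable bit layout
 * selected by the IMX_MU_xCR_RIEn() macro in irq-imx-mu-msi.c above. The MU v2
 * layout uses bit x directly, while the older layout places channel x at
 * bit 24 + (3 - x).
 */
#include <stdio.h>

static unsigned int rie_bit(int is_v2, unsigned int chan)
{
        return is_v2 ? (1u << chan) : (1u << (24 + (3 - chan)));
}

int main(void)
{
        unsigned int chan;

        for (chan = 0; chan < 4; chan++)
                printf("channel %u: v1 bit 0x%08x, v2 bit 0x%08x\n",
                       chan, rie_bit(0, chan), rie_bit(1, chan));
        return 0;
}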
// SPDX-License-Identifier: GPL-2.0 // Copyright 2017 NXP /* INTMUX Block Diagram * * ________________ * interrupt source # 0 +---->| | * | | | * interrupt source # 1 +++-->| | * ... | | | channel # 0 |--------->interrupt out # 0 * ... | | | | * ... | | | | * interrupt source # X-1 +++-->|________________| * | | | * | | | * | | | ________________ * +---->| | * | | | | | * | +-->| | * | | | | channel # 1 |--------->interrupt out # 1 * | | +>| | * | | | | | * | | | |________________| * | | | * | | | * | | | ... * | | | ... * | | | * | | | ________________ * +---->| | * | | | | * +-->| | * | | channel # N |--------->interrupt out # N * +>| | * | | * |________________| * * * N: Interrupt Channel Instance Number (N=7) * X: Interrupt Source Number for each channel (X=32) * * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 * interrupt sources and generates 1 interrupt output. * */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/pm_runtime.h> #define CHANIER(n) (0x10 + (0x40 * n)) #define CHANIPR(n) (0x20 + (0x40 * n)) #define CHAN_MAX_NUM 0x8 struct intmux_irqchip_data { u32 saved_reg; int chanidx; int irq; struct irq_domain *domain; }; struct intmux_data { raw_spinlock_t lock; void __iomem *regs; struct clk *ipg_clk; int channum; struct intmux_irqchip_data irqchip_data[]; }; static void imx_intmux_irq_mask(struct irq_data *d) { struct intmux_irqchip_data *irqchip_data = d->chip_data; int idx = irqchip_data->chanidx; struct intmux_data *data = container_of(irqchip_data, struct intmux_data, irqchip_data[idx]); unsigned long flags; void __iomem *reg; u32 val; raw_spin_lock_irqsave(&data->lock, flags); reg = data->regs + CHANIER(idx); val = readl_relaxed(reg); /* disable the interrupt source of this channel */ val &= ~BIT(d->hwirq); writel_relaxed(val, reg); raw_spin_unlock_irqrestore(&data->lock, flags); } static void imx_intmux_irq_unmask(struct irq_data *d) { struct intmux_irqchip_data *irqchip_data = d->chip_data; int idx = irqchip_data->chanidx; struct intmux_data *data = container_of(irqchip_data, struct intmux_data, irqchip_data[idx]); unsigned long flags; void __iomem *reg; u32 val; raw_spin_lock_irqsave(&data->lock, flags); reg = data->regs + CHANIER(idx); val = readl_relaxed(reg); /* enable the interrupt source of this channel */ val |= BIT(d->hwirq); writel_relaxed(val, reg); raw_spin_unlock_irqrestore(&data->lock, flags); } static struct irq_chip imx_intmux_irq_chip __ro_after_init = { .name = "intmux", .irq_mask = imx_intmux_irq_mask, .irq_unmask = imx_intmux_irq_unmask, }; static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, irq_hw_number_t hwirq) { struct intmux_irqchip_data *data = h->host_data; irq_set_chip_data(irq, data); irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); return 0; } static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { struct intmux_irqchip_data *irqchip_data = d->host_data; int idx = irqchip_data->chanidx; struct intmux_data *data = container_of(irqchip_data, struct intmux_data, irqchip_data[idx]); /* * two cells needed in interrupt specifier: * the 1st cell: hw interrupt number * the 2nd cell: channel index */ if 
(WARN_ON(intsize != 2)) return -EINVAL; if (WARN_ON(intspec[1] >= data->channum)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = IRQ_TYPE_LEVEL_HIGH; return 0; } static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token) { struct intmux_irqchip_data *irqchip_data = d->host_data; /* Not for us */ if (fwspec->fwnode != d->fwnode) return false; return irqchip_data->chanidx == fwspec->param[1]; } static const struct irq_domain_ops imx_intmux_domain_ops = { .map = imx_intmux_irq_map, .xlate = imx_intmux_irq_xlate, .select = imx_intmux_irq_select, }; static void imx_intmux_irq_handler(struct irq_desc *desc) { struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); int idx = irqchip_data->chanidx; struct intmux_data *data = container_of(irqchip_data, struct intmux_data, irqchip_data[idx]); unsigned long irqstat; int pos; chained_irq_enter(irq_desc_get_chip(desc), desc); /* read the interrupt source pending status of this channel */ irqstat = readl_relaxed(data->regs + CHANIPR(idx)); for_each_set_bit(pos, &irqstat, 32) generic_handle_domain_irq(irqchip_data->domain, pos); chained_irq_exit(irq_desc_get_chip(desc), desc); } static int imx_intmux_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct irq_domain *domain; struct intmux_data *data; int channum; int i, ret; channum = platform_irq_count(pdev); if (channum == -EPROBE_DEFER) { return -EPROBE_DEFER; } else if (channum > CHAN_MAX_NUM) { dev_err(&pdev->dev, "supports up to %d multiplex channels\n", CHAN_MAX_NUM); return -EINVAL; } data = devm_kzalloc(&pdev->dev, struct_size(data, irqchip_data, channum), GFP_KERNEL); if (!data) return -ENOMEM; data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { dev_err(&pdev->dev, "failed to initialize reg\n"); return PTR_ERR(data->regs); } data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(data->ipg_clk)) return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk), "failed to get ipg clk\n"); data->channum = channum; raw_spin_lock_init(&data->lock); pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = clk_prepare_enable(data->ipg_clk); if (ret) { dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); return ret; } for (i = 0; i < channum; i++) { data->irqchip_data[i].chanidx = i; data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); if (data->irqchip_data[i].irq <= 0) { ret = -EINVAL; dev_err(&pdev->dev, "failed to get irq\n"); goto out; } domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, &data->irqchip_data[i]); if (!domain) { ret = -ENOMEM; dev_err(&pdev->dev, "failed to create IRQ domain\n"); goto out; } data->irqchip_data[i].domain = domain; irq_domain_set_pm_device(domain, &pdev->dev); /* disable all interrupt sources of this channel firstly */ writel_relaxed(0, data->regs + CHANIER(i)); irq_set_chained_handler_and_data(data->irqchip_data[i].irq, imx_intmux_irq_handler, &data->irqchip_data[i]); } platform_set_drvdata(pdev, data); /* * Let pm_runtime_put() disable clock. * If CONFIG_PM is not enabled, the clock will stay powered. 
*/ pm_runtime_put(&pdev->dev); return 0; out: clk_disable_unprepare(data->ipg_clk); return ret; } static int imx_intmux_remove(struct platform_device *pdev) { struct intmux_data *data = platform_get_drvdata(pdev); int i; for (i = 0; i < data->channum; i++) { /* disable all interrupt sources of this channel */ writel_relaxed(0, data->regs + CHANIER(i)); irq_set_chained_handler_and_data(data->irqchip_data[i].irq, NULL, NULL); irq_domain_remove(data->irqchip_data[i].domain); } pm_runtime_disable(&pdev->dev); return 0; } #ifdef CONFIG_PM static int imx_intmux_runtime_suspend(struct device *dev) { struct intmux_data *data = dev_get_drvdata(dev); struct intmux_irqchip_data *irqchip_data; int i; for (i = 0; i < data->channum; i++) { irqchip_data = &data->irqchip_data[i]; irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i)); } clk_disable_unprepare(data->ipg_clk); return 0; } static int imx_intmux_runtime_resume(struct device *dev) { struct intmux_data *data = dev_get_drvdata(dev); struct intmux_irqchip_data *irqchip_data; int ret, i; ret = clk_prepare_enable(data->ipg_clk); if (ret) { dev_err(dev, "failed to enable ipg clk: %d\n", ret); return ret; } for (i = 0; i < data->channum; i++) { irqchip_data = &data->irqchip_data[i]; writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i)); } return 0; } #endif static const struct dev_pm_ops imx_intmux_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend, imx_intmux_runtime_resume, NULL) }; static const struct of_device_id imx_intmux_id[] = { { .compatible = "fsl,imx-intmux", }, { /* sentinel */ }, }; static struct platform_driver imx_intmux_driver = { .driver = { .name = "imx-intmux", .of_match_table = imx_intmux_id, .pm = &imx_intmux_pm_ops, }, .probe = imx_intmux_probe, .remove = imx_intmux_remove, }; builtin_platform_driver(imx_intmux_driver);
linux-master
drivers/irqchip/irq-imx-intmux.c
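Editor's illustration (not part of the kernel tree): a minimal host-buildable C sketch of the INTMUX register layout described above, using the driver's CHANIER()/CHANIPR() offsets against a fake register array instead of the real ioremap()ed block. The unmask() helper mirrors the read-modify-write done by imx_intmux_irq_unmask(); the sample channel and source numbers are arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* register offsets copied from the driver above */
#define CHANIER(n)	(0x10 + (0x40 * n))	/* channel n interrupt enable */
#define CHANIPR(n)	(0x20 + (0x40 * n))	/* channel n interrupt pending */
#define CHAN_MAX_NUM	8

/* fake register window standing in for the ioremap()ed INTMUX block */
static uint32_t regs[0x200];

static uint32_t reg_read(unsigned int off)		{ return regs[off / 4]; }
static void reg_write(unsigned int off, uint32_t v)	{ regs[off / 4] = v; }

/* mirrors imx_intmux_irq_unmask(): set one source bit in the channel's CHANIER */
static void unmask(int chan, int hwirq)
{
	reg_write(CHANIER(chan), reg_read(CHANIER(chan)) | (1u << hwirq));
}

int main(void)
{
	/* each of the 8 channels owns a 0x40-byte register stride */
	assert(CHANIER(0) == 0x10 && CHANIPR(0) == 0x20);
	assert(CHANIER(CHAN_MAX_NUM - 1) == 0x1d0 && CHANIPR(CHAN_MAX_NUM - 1) == 0x1e0);

	unmask(3, 5);
	printf("CHANIER(3) at %#x reads %#x after unmasking source 5\n",
	       CHANIER(3), reg_read(CHANIER(3)));
	return 0;
}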
// SPDX-License-Identifier: GPL-2.0 /* * Renesas RZ/G2L IRQC Driver * * Copyright (C) 2022 Renesas Electronics Corporation. * * Author: Lad Prabhakar <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/spinlock.h> #define IRQC_IRQ_START 1 #define IRQC_IRQ_COUNT 8 #define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT) #define IRQC_TINT_COUNT 32 #define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT) #define ISCR 0x10 #define IITSR 0x14 #define TSCR 0x20 #define TITSR0 0x24 #define TITSR1 0x28 #define TITSR0_MAX_INT 16 #define TITSEL_WIDTH 0x2 #define TSSR(n) (0x30 + ((n) * 4)) #define TIEN BIT(7) #define TSSEL_SHIFT(n) (8 * (n)) #define TSSEL_MASK GENMASK(7, 0) #define IRQ_MASK 0x3 #define TSSR_OFFSET(n) ((n) % 4) #define TSSR_INDEX(n) ((n) / 4) #define TITSR_TITSEL_EDGE_RISING 0 #define TITSR_TITSEL_EDGE_FALLING 1 #define TITSR_TITSEL_LEVEL_HIGH 2 #define TITSR_TITSEL_LEVEL_LOW 3 #define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2)) #define IITSR_IITSEL_LEVEL_LOW 0 #define IITSR_IITSEL_EDGE_FALLING 1 #define IITSR_IITSEL_EDGE_RISING 2 #define IITSR_IITSEL_EDGE_BOTH 3 #define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3) #define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) #define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) struct rzg2l_irqc_priv { void __iomem *base; struct irq_fwspec fwspec[IRQC_NUM_IRQ]; raw_spinlock_t lock; }; static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data) { return data->domain->host_data; } static void rzg2l_irq_eoi(struct irq_data *d) { unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); u32 bit = BIT(hw_irq); u32 reg; reg = readl_relaxed(priv->base + ISCR); if (reg & bit) writel_relaxed(reg & ~bit, priv->base + ISCR); } static void rzg2l_tint_eoi(struct irq_data *d) { unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START; struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); u32 bit = BIT(hw_irq); u32 reg; reg = readl_relaxed(priv->base + TSCR); if (reg & bit) writel_relaxed(reg & ~bit, priv->base + TSCR); } static void rzg2l_irqc_eoi(struct irq_data *d) { struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); unsigned int hw_irq = irqd_to_hwirq(d); raw_spin_lock(&priv->lock); if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) rzg2l_irq_eoi(d); else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) rzg2l_tint_eoi(d); raw_spin_unlock(&priv->lock); irq_chip_eoi_parent(d); } static void rzg2l_irqc_irq_disable(struct irq_data *d) { unsigned int hw_irq = irqd_to_hwirq(d); if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); u32 offset = hw_irq - IRQC_TINT_START; u32 tssr_offset = TSSR_OFFSET(offset); u8 tssr_index = TSSR_INDEX(offset); u32 reg; raw_spin_lock(&priv->lock); reg = readl_relaxed(priv->base + TSSR(tssr_index)); reg &= ~(TSSEL_MASK << tssr_offset); writel_relaxed(reg, priv->base + TSSR(tssr_index)); raw_spin_unlock(&priv->lock); } irq_chip_disable_parent(d); } static void rzg2l_irqc_irq_enable(struct irq_data *d) { unsigned int hw_irq = irqd_to_hwirq(d); if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); unsigned long tint = (uintptr_t)d->chip_data; u32 offset = hw_irq - IRQC_TINT_START; 
u32 tssr_offset = TSSR_OFFSET(offset); u8 tssr_index = TSSR_INDEX(offset); u32 reg; raw_spin_lock(&priv->lock); reg = readl_relaxed(priv->base + TSSR(tssr_index)); reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset); writel_relaxed(reg, priv->base + TSSR(tssr_index)); raw_spin_unlock(&priv->lock); } irq_chip_enable_parent(d); } static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) { unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); u16 sense, tmp; switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_LEVEL_LOW: sense = IITSR_IITSEL_LEVEL_LOW; break; case IRQ_TYPE_EDGE_FALLING: sense = IITSR_IITSEL_EDGE_FALLING; break; case IRQ_TYPE_EDGE_RISING: sense = IITSR_IITSEL_EDGE_RISING; break; case IRQ_TYPE_EDGE_BOTH: sense = IITSR_IITSEL_EDGE_BOTH; break; default: return -EINVAL; } raw_spin_lock(&priv->lock); tmp = readl_relaxed(priv->base + IITSR); tmp &= ~IITSR_IITSEL_MASK(hw_irq); tmp |= IITSR_IITSEL(hw_irq, sense); writel_relaxed(tmp, priv->base + IITSR); raw_spin_unlock(&priv->lock); return 0; } static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) { struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); unsigned int hwirq = irqd_to_hwirq(d); u32 titseln = hwirq - IRQC_TINT_START; u32 offset; u8 sense; u32 reg; switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: sense = TITSR_TITSEL_EDGE_RISING; break; case IRQ_TYPE_EDGE_FALLING: sense = TITSR_TITSEL_EDGE_FALLING; break; default: return -EINVAL; } offset = TITSR0; if (titseln >= TITSR0_MAX_INT) { titseln -= TITSR0_MAX_INT; offset = TITSR1; } raw_spin_lock(&priv->lock); reg = readl_relaxed(priv->base + offset); reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH)); reg |= sense << (titseln * TITSEL_WIDTH); writel_relaxed(reg, priv->base + offset); raw_spin_unlock(&priv->lock); return 0; } static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type) { unsigned int hw_irq = irqd_to_hwirq(d); int ret = -EINVAL; if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) ret = rzg2l_irq_set_type(d, type); else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) ret = rzg2l_tint_set_edge(d, type); if (ret) return ret; return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); } static const struct irq_chip irqc_chip = { .name = "rzg2l-irqc", .irq_eoi = rzg2l_irqc_eoi, .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_disable = rzg2l_irqc_irq_disable, .irq_enable = rzg2l_irqc_irq_enable, .irq_get_irqchip_state = irq_chip_get_parent_state, .irq_set_irqchip_state = irq_chip_set_parent_state, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = rzg2l_irqc_set_type, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE, }; static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct rzg2l_irqc_priv *priv = domain->host_data; unsigned long tint = 0; irq_hw_number_t hwirq; unsigned int type; int ret; ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type); if (ret) return ret; /* * For TINT interrupts ie where pinctrl driver is child of irqc domain * the hwirq and TINT are encoded in fwspec->param[0]. * hwirq for TINT range from 9-40, hwirq is embedded 0-15 bits and TINT * from 16-31 bits. TINT from the pinctrl driver needs to be programmed * in IRQC registers to enable a given gpio pin as interrupt. 
*/ if (hwirq > IRQC_IRQ_COUNT) { tint = TINT_EXTRACT_GPIOINT(hwirq); hwirq = TINT_EXTRACT_HWIRQ(hwirq); if (hwirq < IRQC_TINT_START) return -EINVAL; } if (hwirq > (IRQC_NUM_IRQ - 1)) return -EINVAL; ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip, (void *)(uintptr_t)tint); if (ret) return ret; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]); } static const struct irq_domain_ops rzg2l_irqc_domain_ops = { .alloc = rzg2l_irqc_alloc, .free = irq_domain_free_irqs_common, .translate = irq_domain_translate_twocell, }; static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv, struct device_node *np) { struct of_phandle_args map; unsigned int i; int ret; for (i = 0; i < IRQC_NUM_IRQ; i++) { ret = of_irq_parse_one(np, i, &map); if (ret) return ret; of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]); } return 0; } static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent) { struct irq_domain *irq_domain, *parent_domain; struct platform_device *pdev; struct reset_control *resetn; struct rzg2l_irqc_priv *priv; int ret; pdev = of_find_device_by_node(node); if (!pdev) return -ENODEV; parent_domain = irq_find_host(parent); if (!parent_domain) { dev_err(&pdev->dev, "cannot find parent domain\n"); return -ENODEV; } priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); ret = rzg2l_irqc_parse_interrupts(priv, node); if (ret) { dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret); return ret; } resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(resetn)) return PTR_ERR(resetn); ret = reset_control_deassert(resetn); if (ret) { dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret); return ret; } pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret); goto pm_disable; } raw_spin_lock_init(&priv->lock); irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ, node, &rzg2l_irqc_domain_ops, priv); if (!irq_domain) { dev_err(&pdev->dev, "failed to add irq domain\n"); ret = -ENOMEM; goto pm_put; } return 0; pm_put: pm_runtime_put(&pdev->dev); pm_disable: pm_runtime_disable(&pdev->dev); reset_control_assert(resetn); return ret; } IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc) IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init) IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc) MODULE_AUTHOR("Lad Prabhakar <[email protected]>"); MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
linux-master
drivers/irqchip/irq-renesas-rzg2l.c
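Editor's illustration (not part of the kernel tree): a standalone sketch of the TINT encoding explained in the rzg2l_irqc_alloc() comment, where the pinctrl driver packs the hwirq into bits 15:0 and the GPIOINT number into bits 31:16 of one cell. The FIELD_GET()-based macros are replaced by plain shift/mask stand-ins, and the GPIOINT/hwirq values are made up for the example; the TSSR field arithmetic matches what rzg2l_irqc_irq_enable() programs.

#include <assert.h>
#include <stdio.h>

/* userspace stand-ins for the GENMASK()/FIELD_GET() based macros in the driver */
#define TINT_EXTRACT_HWIRQ(x)	((x) & 0xffffu)		/* bits 15:0  */
#define TINT_EXTRACT_GPIOINT(x)	(((x) >> 16) & 0xffffu)	/* bits 31:16 */

#define IRQC_IRQ_START	1
#define IRQC_IRQ_COUNT	8
#define IRQC_TINT_START	(IRQC_IRQ_START + IRQC_IRQ_COUNT)	/* 9 */

#define TIEN		(1u << 7)
#define TSSEL_SHIFT(n)	(8 * (n))
#define TSSR_OFFSET(n)	((n) % 4)
#define TSSR_INDEX(n)	((n) / 4)

int main(void)
{
	/* hypothetical cell: GPIOINT 5 routed to TINT hwirq 12, both packed by pinctrl */
	unsigned int cell = (5u << 16) | 12u;
	unsigned int tint = TINT_EXTRACT_GPIOINT(cell);
	unsigned int hwirq = TINT_EXTRACT_HWIRQ(cell);
	unsigned int offset, field;

	assert(tint == 5 && hwirq == 12);

	/* which TSSR register and byte field rzg2l_irqc_irq_enable() would program */
	offset = hwirq - IRQC_TINT_START;	/* 3 */
	field = TSSR_OFFSET(offset);
	printf("TSSR index %u, byte field %u, value %#x\n",
	       TSSR_INDEX(offset), field, (TIEN | tint) << TSSEL_SHIFT(field));
	return 0;
}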
// SPDX-License-Identifier: GPL-2.0 /* * Renesas INTC External IRQ Pin Driver * * Copyright (C) 2013 Magnus Damm */ #include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_runtime.h> #define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */ #define INTC_IRQPIN_REG_SENSE 0 /* ICRn */ #define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */ #define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */ #define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */ #define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */ #define INTC_IRQPIN_REG_NR_MANDATORY 5 #define INTC_IRQPIN_REG_IRLM 5 /* ICR0 with IRLM bit (optional) */ #define INTC_IRQPIN_REG_NR 6 /* INTC external IRQ PIN hardware register access: * * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*) * PRIO is read-write 32-bit with 4-bits per IRQ (**) * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***) * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***) * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***) * * (*) May be accessed by more than one driver instance - lock needed * (**) Read-modify-write access by one driver instance - lock needed * (***) Accessed by one driver instance only - no locking needed */ struct intc_irqpin_iomem { void __iomem *iomem; unsigned long (*read)(void __iomem *iomem); void (*write)(void __iomem *iomem, unsigned long data); int width; }; struct intc_irqpin_irq { int hw_irq; int requested_irq; int domain_irq; struct intc_irqpin_priv *p; }; struct intc_irqpin_priv { struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR]; struct intc_irqpin_irq irq[INTC_IRQPIN_MAX]; unsigned int sense_bitfield_width; struct platform_device *pdev; struct irq_chip irq_chip; struct irq_domain *irq_domain; atomic_t wakeup_path; unsigned shared_irqs:1; u8 shared_irq_mask; }; struct intc_irqpin_config { int irlm_bit; /* -1 if non-existent */ }; static unsigned long intc_irqpin_read32(void __iomem *iomem) { return ioread32(iomem); } static unsigned long intc_irqpin_read8(void __iomem *iomem) { return ioread8(iomem); } static void intc_irqpin_write32(void __iomem *iomem, unsigned long data) { iowrite32(data, iomem); } static void intc_irqpin_write8(void __iomem *iomem, unsigned long data) { iowrite8(data, iomem); } static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p, int reg) { struct intc_irqpin_iomem *i = &p->iomem[reg]; return i->read(i->iomem); } static inline void intc_irqpin_write(struct intc_irqpin_priv *p, int reg, unsigned long data) { struct intc_irqpin_iomem *i = &p->iomem[reg]; i->write(i->iomem, data); } static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p, int reg, int hw_irq) { return BIT((p->iomem[reg].width - 1) - hw_irq); } static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p, int reg, int hw_irq) { intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq)); } static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p, int reg, int shift, int width, int value) { unsigned long flags; unsigned long tmp; raw_spin_lock_irqsave(&intc_irqpin_lock, flags); tmp = intc_irqpin_read(p, reg); tmp &= ~(((1 << width) - 1) << shift); tmp |= value << shift; intc_irqpin_write(p, reg, tmp); 
raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags); } static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, int irq, int do_mask) { /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */ int bitfield_width = 4; int shift = 32 - (irq + 1) * bitfield_width; intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, shift, bitfield_width, do_mask ? 0 : (1 << bitfield_width) - 1); } static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) { /* The SENSE register is assumed to be 32-bit. */ int bitfield_width = p->sense_bitfield_width; int shift = 32 - (irq + 1) * bitfield_width; dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); if (value >= (1 << bitfield_width)) return -EINVAL; intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift, bitfield_width, value); return 0; } static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str) { dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n", str, i->requested_irq, i->hw_irq, i->domain_irq); } static void intc_irqpin_irq_enable(struct irq_data *d) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int hw_irq = irqd_to_hwirq(d); intc_irqpin_dbg(&p->irq[hw_irq], "enable"); intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq); } static void intc_irqpin_irq_disable(struct irq_data *d) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int hw_irq = irqd_to_hwirq(d); intc_irqpin_dbg(&p->irq[hw_irq], "disable"); intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq); } static void intc_irqpin_shared_irq_enable(struct irq_data *d) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int hw_irq = irqd_to_hwirq(d); intc_irqpin_dbg(&p->irq[hw_irq], "shared enable"); intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq); p->shared_irq_mask &= ~BIT(hw_irq); } static void intc_irqpin_shared_irq_disable(struct irq_data *d) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int hw_irq = irqd_to_hwirq(d); intc_irqpin_dbg(&p->irq[hw_irq], "shared disable"); intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq); p->shared_irq_mask |= BIT(hw_irq); } static void intc_irqpin_irq_enable_force(struct irq_data *d) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int irq = p->irq[irqd_to_hwirq(d)].requested_irq; intc_irqpin_irq_enable(d); /* enable interrupt through parent interrupt controller, * assumes non-shared interrupt with 1:1 mapping * needed for busted IRQs on some SoCs like sh73a0 */ irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq)); } static void intc_irqpin_irq_disable_force(struct irq_data *d) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int irq = p->irq[irqd_to_hwirq(d)].requested_irq; /* disable interrupt through parent interrupt controller, * assumes non-shared interrupt with 1:1 mapping * needed for busted IRQs on some SoCs like sh73a0 */ irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq)); intc_irqpin_irq_disable(d); } #define INTC_IRQ_SENSE_VALID 0x10 #define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID) static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = { [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00), [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01), [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02), [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03), [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04), }; static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type) { unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK]; struct intc_irqpin_priv *p = 
irq_data_get_irq_chip_data(d); if (!(value & INTC_IRQ_SENSE_VALID)) return -EINVAL; return intc_irqpin_set_sense(p, irqd_to_hwirq(d), value ^ INTC_IRQ_SENSE_VALID); } static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on) { struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d); int hw_irq = irqd_to_hwirq(d); irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); if (on) atomic_inc(&p->wakeup_path); else atomic_dec(&p->wakeup_path); return 0; } static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id) { struct intc_irqpin_irq *i = dev_id; struct intc_irqpin_priv *p = i->p; unsigned long bit; intc_irqpin_dbg(i, "demux1"); bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq); if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) { intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit); intc_irqpin_dbg(i, "demux2"); generic_handle_irq(i->domain_irq); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id) { struct intc_irqpin_priv *p = dev_id; unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE); irqreturn_t status = IRQ_NONE; int k; for (k = 0; k < 8; k++) { if (reg_source & BIT(7 - k)) { if (BIT(k) & p->shared_irq_mask) continue; status |= intc_irqpin_irq_handler(irq, &p->irq[k]); } } return status; } /* * This lock class tells lockdep that INTC External IRQ Pin irqs are in a * different category than their parents, so it won't report false recursion. */ static struct lock_class_key intc_irqpin_irq_lock_class; /* And this is for the request mutex */ static struct lock_class_key intc_irqpin_irq_request_class; static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct intc_irqpin_priv *p = h->host_data; p->irq[hw].domain_irq = virq; p->irq[hw].hw_irq = hw; intc_irqpin_dbg(&p->irq[hw], "map"); irq_set_chip_data(virq, h->host_data); irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class, &intc_irqpin_irq_request_class); irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); return 0; } static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { .map = intc_irqpin_irq_domain_map, .xlate = irq_domain_xlate_twocell, }; static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = { .irlm_bit = 23, /* ICR0.IRLM0 */ }; static const struct intc_irqpin_config intc_irqpin_rmobile = { .irlm_bit = -1, }; static const struct of_device_id intc_irqpin_dt_ids[] = { { .compatible = "renesas,intc-irqpin", }, { .compatible = "renesas,intc-irqpin-r8a7778", .data = &intc_irqpin_irlm_r8a777x }, { .compatible = "renesas,intc-irqpin-r8a7779", .data = &intc_irqpin_irlm_r8a777x }, { .compatible = "renesas,intc-irqpin-r8a7740", .data = &intc_irqpin_rmobile }, { .compatible = "renesas,intc-irqpin-sh73a0", .data = &intc_irqpin_rmobile }, {}, }; MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); static int intc_irqpin_probe(struct platform_device *pdev) { const struct intc_irqpin_config *config; struct device *dev = &pdev->dev; struct intc_irqpin_priv *p; struct intc_irqpin_iomem *i; struct resource *io[INTC_IRQPIN_REG_NR]; struct irq_chip *irq_chip; void (*enable_fn)(struct irq_data *d); void (*disable_fn)(struct irq_data *d); const char *name = dev_name(dev); bool control_parent; unsigned int nirqs; int ref_irq; int ret; int k; p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; /* deal with driver instance configuration */ of_property_read_u32(dev->of_node, "sense-bitfield-width", &p->sense_bitfield_width); control_parent = 
of_property_read_bool(dev->of_node, "control-parent"); if (!p->sense_bitfield_width) p->sense_bitfield_width = 4; /* default to 4 bits */ p->pdev = pdev; platform_set_drvdata(pdev, p); config = of_device_get_match_data(dev); pm_runtime_enable(dev); pm_runtime_get_sync(dev); /* get hold of register banks */ memset(io, 0, sizeof(io)); for (k = 0; k < INTC_IRQPIN_REG_NR; k++) { io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k); if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) { dev_err(dev, "not enough IOMEM resources\n"); ret = -EINVAL; goto err0; } } /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */ for (k = 0; k < INTC_IRQPIN_MAX; k++) { ret = platform_get_irq_optional(pdev, k); if (ret == -ENXIO) break; if (ret < 0) goto err0; p->irq[k].p = p; p->irq[k].requested_irq = ret; } nirqs = k; if (nirqs < 1) { dev_err(dev, "not enough IRQ resources\n"); ret = -EINVAL; goto err0; } /* ioremap IOMEM and setup read/write callbacks */ for (k = 0; k < INTC_IRQPIN_REG_NR; k++) { i = &p->iomem[k]; /* handle optional registers */ if (!io[k]) continue; switch (resource_size(io[k])) { case 1: i->width = 8; i->read = intc_irqpin_read8; i->write = intc_irqpin_write8; break; case 4: i->width = 32; i->read = intc_irqpin_read32; i->write = intc_irqpin_write32; break; default: dev_err(dev, "IOMEM size mismatch\n"); ret = -EINVAL; goto err0; } i->iomem = devm_ioremap(dev, io[k]->start, resource_size(io[k])); if (!i->iomem) { dev_err(dev, "failed to remap IOMEM\n"); ret = -ENXIO; goto err0; } } /* configure "individual IRQ mode" where needed */ if (config && config->irlm_bit >= 0) { if (io[INTC_IRQPIN_REG_IRLM]) intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM, config->irlm_bit, 1, 1); else dev_warn(dev, "unable to select IRLM mode\n"); } /* mask all interrupts using priority */ for (k = 0; k < nirqs; k++) intc_irqpin_mask_unmask_prio(p, k, 1); /* clear all pending interrupts */ intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0); /* scan for shared interrupt lines */ ref_irq = p->irq[0].requested_irq; p->shared_irqs = 1; for (k = 1; k < nirqs; k++) { if (ref_irq != p->irq[k].requested_irq) { p->shared_irqs = 0; break; } } /* use more severe masking method if requested */ if (control_parent) { enable_fn = intc_irqpin_irq_enable_force; disable_fn = intc_irqpin_irq_disable_force; } else if (!p->shared_irqs) { enable_fn = intc_irqpin_irq_enable; disable_fn = intc_irqpin_irq_disable; } else { enable_fn = intc_irqpin_shared_irq_enable; disable_fn = intc_irqpin_shared_irq_disable; } irq_chip = &p->irq_chip; irq_chip->name = "intc-irqpin"; irq_chip->irq_mask = disable_fn; irq_chip->irq_unmask = enable_fn; irq_chip->irq_set_type = intc_irqpin_irq_set_type; irq_chip->irq_set_wake = intc_irqpin_irq_set_wake; irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0, &intc_irqpin_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; dev_err(dev, "cannot initialize irq domain\n"); goto err0; } irq_domain_set_pm_device(p->irq_domain, dev); if (p->shared_irqs) { /* request one shared interrupt */ if (devm_request_irq(dev, p->irq[0].requested_irq, intc_irqpin_shared_irq_handler, IRQF_SHARED, name, p)) { dev_err(dev, "failed to request low IRQ\n"); ret = -ENOENT; goto err1; } } else { /* request interrupts one by one */ for (k = 0; k < nirqs; k++) { if (devm_request_irq(dev, p->irq[k].requested_irq, intc_irqpin_irq_handler, 0, name, &p->irq[k])) { dev_err(dev, "failed to request low IRQ\n"); ret = -ENOENT; goto err1; } } } /* unmask all interrupts on prio level */ 
for (k = 0; k < nirqs; k++) intc_irqpin_mask_unmask_prio(p, k, 0); dev_info(dev, "driving %d irqs\n", nirqs); return 0; err1: irq_domain_remove(p->irq_domain); err0: pm_runtime_put(dev); pm_runtime_disable(dev); return ret; } static int intc_irqpin_remove(struct platform_device *pdev) { struct intc_irqpin_priv *p = platform_get_drvdata(pdev); irq_domain_remove(p->irq_domain); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static int __maybe_unused intc_irqpin_suspend(struct device *dev) { struct intc_irqpin_priv *p = dev_get_drvdata(dev); if (atomic_read(&p->wakeup_path)) device_set_wakeup_path(dev); return 0; } static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL); static struct platform_driver intc_irqpin_device_driver = { .probe = intc_irqpin_probe, .remove = intc_irqpin_remove, .driver = { .name = "renesas_intc_irqpin", .of_match_table = intc_irqpin_dt_ids, .pm = &intc_irqpin_pm_ops, } }; static int __init intc_irqpin_init(void) { return platform_driver_register(&intc_irqpin_device_driver); } postcore_initcall(intc_irqpin_init); static void __exit intc_irqpin_exit(void) { platform_driver_unregister(&intc_irqpin_device_driver); } module_exit(intc_irqpin_exit); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
linux-master
drivers/irqchip/irq-renesas-intc-irqpin.c
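Editor's illustration (not part of the kernel tree): a small sketch of the bit ordering used by this driver, where intc_irqpin_hwirq_mask() places IRQ 0 in the most significant bit of the SOURCE/MASK/CLEAR register, and the PRIO/SENSE fields are packed from bit 31 downwards in 4-bit (or sense_bitfield_width-bit) slots. The helpers below reproduce only the arithmetic, with arbitrary sample IRQ numbers.

#include <assert.h>
#include <stdio.h>

/* mirrors intc_irqpin_hwirq_mask(): bit 0 of the register belongs to the last IRQ */
static unsigned long hwirq_mask(int width, int hw_irq)
{
	return 1ul << ((width - 1) - hw_irq);
}

/* mirrors the shift used by intc_irqpin_mask_unmask_prio() and set_sense() */
static int field_shift(int irq, int bitfield_width)
{
	return 32 - (irq + 1) * bitfield_width;
}

int main(void)
{
	/* 8-bit SOURCE/MASK/CLEAR register: IRQ 0 is the MSB */
	assert(hwirq_mask(8, 0) == 0x80);
	assert(hwirq_mask(8, 7) == 0x01);

	/* 32-bit PRIO register, 4-bit fields: IRQ 0 occupies bits 31:28 */
	assert(field_shift(0, 4) == 28);
	assert(field_shift(7, 4) == 0);

	printf("IRQ2 mask in a 32-bit register: %#lx, PRIO shift: %d\n",
	       hwirq_mask(32, 2), field_shift(2, 4));
	return 0;
}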
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015 Endless Mobile, Inc. * Author: Carlo Caione <[email protected]> * Copyright (c) 2016 BayLibre, SAS. * Author: Jerome Brunet <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/io.h> #include <linux/module.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/irqchip.h> #include <linux/of.h> #include <linux/of_address.h> #define MAX_NUM_CHANNEL 64 #define MAX_INPUT_MUX 256 #define REG_EDGE_POL 0x00 #define REG_PIN_03_SEL 0x04 #define REG_PIN_47_SEL 0x08 #define REG_FILTER_SEL 0x0c /* use for A1 like chips */ #define REG_PIN_A1_SEL 0x04 /* Used for s4 chips */ #define REG_EDGE_POL_S4 0x1c /* * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by * bits 24 to 31. Tests on the actual HW show that these bits are * stuck at 0. Bits 8 to 15 are responsive and have the expected * effect. */ #define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x)) #define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x)) #define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x)) #define REG_EDGE_POL_MASK(params, x) ( \ REG_EDGE_POL_EDGE(params, x) | \ REG_EDGE_POL_LOW(params, x) | \ REG_BOTH_EDGE(params, x)) #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) struct meson_gpio_irq_controller; static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq); static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl); static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq); static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, unsigned int type, u32 *channel_hwirq); static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, unsigned int type, u32 *channel_hwirq); struct irq_ctl_ops { void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq); void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl, unsigned int type, u32 *channel_hwirq); }; struct meson_gpio_irq_params { unsigned int nr_hwirq; unsigned int nr_channels; bool support_edge_both; unsigned int edge_both_offset; unsigned int edge_single_offset; unsigned int pol_low_offset; unsigned int pin_sel_mask; struct irq_ctl_ops ops; }; #define INIT_MESON_COMMON(irqs, init, sel, type) \ .nr_hwirq = irqs, \ .ops = { \ .gpio_irq_init = init, \ .gpio_irq_sel_pin = sel, \ .gpio_irq_set_type = type, \ }, #define INIT_MESON8_COMMON_DATA(irqs) \ INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ meson8_gpio_irq_sel_pin, \ meson8_gpio_irq_set_type) \ .edge_single_offset = 0, \ .pol_low_offset = 16, \ .pin_sel_mask = 0xff, \ .nr_channels = 8, \ #define INIT_MESON_A1_COMMON_DATA(irqs) \ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ meson_a1_gpio_irq_sel_pin, \ meson8_gpio_irq_set_type) \ .support_edge_both = true, \ .edge_both_offset = 16, \ .edge_single_offset = 8, \ .pol_low_offset = 0, \ .pin_sel_mask = 0x7f, \ .nr_channels = 8, \ #define INIT_MESON_S4_COMMON_DATA(irqs) \ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ meson_a1_gpio_irq_sel_pin, \ meson_s4_gpio_irq_set_type) \ .support_edge_both = true, \ .edge_both_offset = 0, \ .edge_single_offset = 12, \ .pol_low_offset = 0, \ .pin_sel_mask = 0xff, \ 
.nr_channels = 12, \ static const struct meson_gpio_irq_params meson8_params = { INIT_MESON8_COMMON_DATA(134) }; static const struct meson_gpio_irq_params meson8b_params = { INIT_MESON8_COMMON_DATA(119) }; static const struct meson_gpio_irq_params gxbb_params = { INIT_MESON8_COMMON_DATA(133) }; static const struct meson_gpio_irq_params gxl_params = { INIT_MESON8_COMMON_DATA(110) }; static const struct meson_gpio_irq_params axg_params = { INIT_MESON8_COMMON_DATA(100) }; static const struct meson_gpio_irq_params sm1_params = { INIT_MESON8_COMMON_DATA(100) .support_edge_both = true, .edge_both_offset = 8, }; static const struct meson_gpio_irq_params a1_params = { INIT_MESON_A1_COMMON_DATA(62) }; static const struct meson_gpio_irq_params s4_params = { INIT_MESON_S4_COMMON_DATA(82) }; static const struct meson_gpio_irq_params c3_params = { INIT_MESON_S4_COMMON_DATA(55) }; static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = { { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params }, { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params }, { .compatible = "amlogic,c3-gpio-intc", .data = &c3_params }, { } }; struct meson_gpio_irq_controller { const struct meson_gpio_irq_params *params; void __iomem *base; u32 channel_irqs[MAX_NUM_CHANNEL]; DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); spinlock_t lock; }; static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, unsigned int reg, u32 mask, u32 val) { unsigned long flags; u32 tmp; spin_lock_irqsave(&ctl->lock, flags); tmp = readl_relaxed(ctl->base + reg); tmp &= ~mask; tmp |= val; writel_relaxed(tmp, ctl->base + reg); spin_unlock_irqrestore(&ctl->lock, flags); } static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) { } static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq) { unsigned int reg_offset; unsigned int bit_offset; reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; bit_offset = REG_PIN_SEL_SHIFT(channel); meson_gpio_irq_update_bits(ctl, reg_offset, ctl->params->pin_sel_mask << bit_offset, hwirq << bit_offset); } static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq) { unsigned int reg_offset; unsigned int bit_offset; bit_offset = ((channel % 2) == 0) ? 
0 : 16; reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2); meson_gpio_irq_update_bits(ctl, reg_offset, ctl->params->pin_sel_mask << bit_offset, hwirq << bit_offset); } /* For a1 or later chips like a1 there is a switch to enable/disable irq */ static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl) { meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31)); } static int meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, unsigned long hwirq, u32 **channel_hwirq) { unsigned long flags; unsigned int idx; spin_lock_irqsave(&ctl->lock, flags); /* Find a free channel */ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); if (idx >= ctl->params->nr_channels) { spin_unlock_irqrestore(&ctl->lock, flags); pr_err("No channel available\n"); return -ENOSPC; } /* Mark the channel as used */ set_bit(idx, ctl->channel_map); spin_unlock_irqrestore(&ctl->lock, flags); /* * Setup the mux of the channel to route the signal of the pad * to the appropriate input of the GIC */ ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq); /* * Get the hwirq number assigned to this channel through * a pointer the channel_irq table. The added benefit of this * method is that we can also retrieve the channel index with * it, using the table base. */ *channel_hwirq = &(ctl->channel_irqs[idx]); pr_debug("hwirq %lu assigned to channel %d - irq %u\n", hwirq, idx, **channel_hwirq); return 0; } static unsigned int meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl, u32 *channel_hwirq) { return channel_hwirq - ctl->channel_irqs; } static void meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl, u32 *channel_hwirq) { unsigned int idx; idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); clear_bit(idx, ctl->channel_map); } static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, unsigned int type, u32 *channel_hwirq) { u32 val = 0; unsigned int idx; const struct meson_gpio_irq_params *params; params = ctl->params; idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); /* * The controller has a filter block to operate in either LEVEL or * EDGE mode, then signal is sent to the GIC. To enable LEVEL_LOW and * EDGE_FALLING support (which the GIC does not support), the filter * block is also able to invert the input signal it gets before * providing it to the GIC. */ type &= IRQ_TYPE_SENSE_MASK; /* * New controller support EDGE_BOTH trigger. 
This setting takes * precedence over the other edge/polarity settings */ if (type == IRQ_TYPE_EDGE_BOTH) { if (!params->support_edge_both) return -EINVAL; val |= REG_BOTH_EDGE(params, idx); } else { if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) val |= REG_EDGE_POL_EDGE(params, idx); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) val |= REG_EDGE_POL_LOW(params, idx); } meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, REG_EDGE_POL_MASK(params, idx), val); return 0; } /* * gpio irq relative registers for s4 * -PADCTRL_GPIO_IRQ_CTRL0 * bit[31]: enable/disable all the irq lines * bit[12-23]: single edge trigger * bit[0-11]: polarity trigger * * -PADCTRL_GPIO_IRQ_CTRL[X] * bit[0-16]: 7 bits to choose gpio source for irq line 2*[X] - 2 * bit[16-22]:7 bits to choose gpio source for irq line 2*[X] - 1 * where X = 1-6 * * -PADCTRL_GPIO_IRQ_CTRL[7] * bit[0-11]: both edge trigger */ static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, unsigned int type, u32 *channel_hwirq) { u32 val = 0; unsigned int idx; idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); type &= IRQ_TYPE_SENSE_MASK; meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0); if (type == IRQ_TYPE_EDGE_BOTH) { val |= BIT(ctl->params->edge_both_offset + idx); meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(ctl->params->edge_both_offset + idx), val); return 0; } if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) val |= BIT(ctl->params->pol_low_offset + idx); if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) val |= BIT(ctl->params->edge_single_offset + idx); meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(idx) | BIT(12 + idx), val); return 0; }; static unsigned int meson_gpio_irq_type_output(unsigned int type) { unsigned int sense = type & IRQ_TYPE_SENSE_MASK; type &= ~IRQ_TYPE_SENSE_MASK; /* * The polarity of the signal provided to the GIC should always * be high. 
*/ if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) type |= IRQ_TYPE_LEVEL_HIGH; else type |= IRQ_TYPE_EDGE_RISING; return type; } static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type) { struct meson_gpio_irq_controller *ctl = data->domain->host_data; u32 *channel_hwirq = irq_data_get_irq_chip_data(data); int ret; ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq); if (ret) return ret; return irq_chip_set_type_parent(data, meson_gpio_irq_type_output(type)); } static struct irq_chip meson_gpio_irq_chip = { .name = "meson-gpio-irqchip", .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, .irq_set_type = meson_gpio_irq_set_type, .irq_retrigger = irq_chip_retrigger_hierarchy, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif .flags = IRQCHIP_SET_TYPE_MASKED, }; static int meson_gpio_irq_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) { *hwirq = fwspec->param[0]; *type = fwspec->param[1]; return 0; } return -EINVAL; } static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain, unsigned int virq, u32 hwirq, unsigned int type) { struct irq_fwspec fwspec; fwspec.fwnode = domain->parent->fwnode; fwspec.param_count = 3; fwspec.param[0] = 0; /* SPI */ fwspec.param[1] = hwirq; fwspec.param[2] = meson_gpio_irq_type_output(type); return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); } static int meson_gpio_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *data) { struct irq_fwspec *fwspec = data; struct meson_gpio_irq_controller *ctl = domain->host_data; unsigned long hwirq; u32 *channel_hwirq; unsigned int type; int ret; if (WARN_ON(nr_irqs != 1)) return -EINVAL; ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type); if (ret) return ret; ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq); if (ret) return ret; ret = meson_gpio_irq_allocate_gic_irq(domain, virq, *channel_hwirq, type); if (ret < 0) { pr_err("failed to allocate gic irq %u\n", *channel_hwirq); meson_gpio_irq_release_channel(ctl, channel_hwirq); return ret; } irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &meson_gpio_irq_chip, channel_hwirq); return 0; } static void meson_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct meson_gpio_irq_controller *ctl = domain->host_data; struct irq_data *irq_data; u32 *channel_hwirq; if (WARN_ON(nr_irqs != 1)) return; irq_domain_free_irqs_parent(domain, virq, 1); irq_data = irq_domain_get_irq_data(domain, virq); channel_hwirq = irq_data_get_irq_chip_data(irq_data); meson_gpio_irq_release_channel(ctl, channel_hwirq); } static const struct irq_domain_ops meson_gpio_irq_domain_ops = { .alloc = meson_gpio_irq_domain_alloc, .free = meson_gpio_irq_domain_free, .translate = meson_gpio_irq_domain_translate, }; static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl) { const struct of_device_id *match; int ret; match = of_match_node(meson_irq_gpio_matches, node); if (!match) return -ENODEV; ctl->params = match->data; ret = of_property_read_variable_u32_array(node, "amlogic,channel-interrupts", ctl->channel_irqs, ctl->params->nr_channels, ctl->params->nr_channels); if (ret < 0) { pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels); return ret; } 
ctl->params->ops.gpio_irq_init(ctl); return 0; } static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent) { struct irq_domain *domain, *parent_domain; struct meson_gpio_irq_controller *ctl; int ret; if (!parent) { pr_err("missing parent interrupt node\n"); return -ENODEV; } parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("unable to obtain parent domain\n"); return -ENXIO; } ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); if (!ctl) return -ENOMEM; spin_lock_init(&ctl->lock); ctl->base = of_iomap(node, 0); if (!ctl->base) { ret = -ENOMEM; goto free_ctl; } ret = meson_gpio_irq_parse_dt(node, ctl); if (ret) goto free_channel_irqs; domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->params->nr_hwirq, of_node_to_fwnode(node), &meson_gpio_irq_domain_ops, ctl); if (!domain) { pr_err("failed to add domain\n"); ret = -ENODEV; goto free_channel_irqs; } pr_info("%d to %d gpio interrupt mux initialized\n", ctl->params->nr_hwirq, ctl->params->nr_channels); return 0; free_channel_irqs: iounmap(ctl->base); free_ctl: kfree(ctl); return ret; } IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc) IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init) IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc) MODULE_AUTHOR("Jerome Brunet <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:meson-gpio-intc");
linux-master
drivers/irqchip/irq-meson-gpio.c
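Editor's illustration (not part of the kernel tree): a sketch of how the per-SoC offsets in the params tables place the edge, polarity and both-edge bits in REG_EDGE_POL, following the note that on SM1 the working both-edge bits sit at offset 8 rather than 24. The offsets below are copied from sm1_params (meson8 layout plus edge_both_offset = 8) and the A1 common data; the channel index is arbitrary.

#include <stdint.h>
#include <stdio.h>

struct params {
	const char *name;
	unsigned int edge_both_offset;
	unsigned int edge_single_offset;
	unsigned int pol_low_offset;
};

/* same bit placement as REG_EDGE_POL_EDGE/LOW and REG_BOTH_EDGE in the driver */
static uint32_t edge_bit(const struct params *p, unsigned int ch) { return 1u << (p->edge_single_offset + ch); }
static uint32_t low_bit(const struct params *p, unsigned int ch)  { return 1u << (p->pol_low_offset + ch); }
static uint32_t both_bit(const struct params *p, unsigned int ch) { return 1u << (p->edge_both_offset + ch); }

int main(void)
{
	const struct params tab[] = {
		{ .name = "sm1", .edge_both_offset = 8,  .edge_single_offset = 0, .pol_low_offset = 16 },
		{ .name = "a1",  .edge_both_offset = 16, .edge_single_offset = 8, .pol_low_offset = 0  },
	};
	unsigned int ch = 3;	/* illustrative channel index */

	for (unsigned int i = 0; i < 2; i++) {
		const struct params *p = &tab[i];

		printf("%s ch%u: edge %#x, pol-low %#x, both-edge %#x\n",
		       p->name, ch, edge_bit(p, ch), low_bit(p, ch), both_bit(p, ch));
	}
	return 0;
}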
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020, Jiaxun Yang <[email protected]> * Loongson HTPIC IRQ support */ #include <linux/init.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <asm/i8259.h> #define HTPIC_MAX_PARENT_IRQ 4 #define HTINT_NUM_VECTORS 8 #define HTINT_EN_OFF 0x20 struct loongson_htpic { void __iomem *base; struct irq_domain *domain; }; static struct loongson_htpic *htpic; static void htpic_irq_dispatch(struct irq_desc *desc) { struct loongson_htpic *priv = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); uint32_t pending; chained_irq_enter(chip, desc); pending = readl(priv->base); /* Ack all IRQs at once, otherwise IRQ flood might happen */ writel(pending, priv->base); if (!pending) spurious_interrupt(); while (pending) { int bit = __ffs(pending); if (unlikely(bit > 15)) { spurious_interrupt(); break; } generic_handle_domain_irq(priv->domain, bit); pending &= ~BIT(bit); } chained_irq_exit(chip, desc); } static void htpic_reg_init(void) { int i; for (i = 0; i < HTINT_NUM_VECTORS; i++) { /* Disable all HT Vectors */ writel(0x0, htpic->base + HTINT_EN_OFF + i * 0x4); /* Read back to force write */ (void) readl(htpic->base + i * 0x4); /* Ack all possible pending IRQs */ writel(GENMASK(31, 0), htpic->base + i * 0x4); } /* Enable 16 vectors for PIC */ writel(0xffff, htpic->base + HTINT_EN_OFF); } static void htpic_resume(void) { htpic_reg_init(); } struct syscore_ops htpic_syscore_ops = { .resume = htpic_resume, }; static int __init htpic_of_init(struct device_node *node, struct device_node *parent) { unsigned int parent_irq[4]; int i, err; int num_parents = 0; if (htpic) { pr_err("loongson-htpic: Only one HTPIC is allowed in the system\n"); return -ENODEV; } htpic = kzalloc(sizeof(*htpic), GFP_KERNEL); if (!htpic) return -ENOMEM; htpic->base = of_iomap(node, 0); if (!htpic->base) { err = -ENODEV; goto out_free; } htpic->domain = __init_i8259_irqs(node); if (!htpic->domain) { pr_err("loongson-htpic: Failed to initialize i8259 IRQs\n"); err = -ENOMEM; goto out_iounmap; } /* Interrupt may come from any of the 4 interrupt line */ for (i = 0; i < HTPIC_MAX_PARENT_IRQ; i++) { parent_irq[i] = irq_of_parse_and_map(node, i); if (parent_irq[i] <= 0) break; num_parents++; } if (!num_parents) { pr_err("loongson-htpic: Failed to get parent irqs\n"); err = -ENODEV; goto out_remove_domain; } htpic_reg_init(); for (i = 0; i < num_parents; i++) { irq_set_chained_handler_and_data(parent_irq[i], htpic_irq_dispatch, htpic); } register_syscore_ops(&htpic_syscore_ops); return 0; out_remove_domain: irq_domain_remove(htpic->domain); out_iounmap: iounmap(htpic->base); out_free: kfree(htpic); return err; } IRQCHIP_DECLARE(loongson_htpic, "loongson,htpic-1.0", htpic_of_init);
linux-master
drivers/irqchip/irq-loongson-htpic.c
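Editor's illustration (not part of the kernel tree): a standalone sketch of the dispatch loop in htpic_irq_dispatch(), which walks the pending word lowest bit first and treats anything above bit 15 (the 16 i8259 vectors) as spurious. POSIX ffs() stands in for the kernel's __ffs(), and the sample pending value is invented.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* mirrors the loop in htpic_irq_dispatch(): lowest pending bit first,
 * anything above bit 15 is not a valid i8259 vector */
static void dispatch(unsigned int pending)
{
	while (pending) {
		int bit = ffs(pending) - 1;	/* userspace stand-in for __ffs() */

		if (bit > 15) {
			printf("spurious (bit %d)\n", bit);
			break;
		}
		printf("handle i8259 vector %d\n", bit);
		pending &= ~(1u << bit);
	}
}

int main(void)
{
	dispatch(0x00010012);	/* vectors 1 and 4 pending, plus a bogus bit 16 */
	return 0;
}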
// SPDX-License-Identifier: GPL-2.0 /* * Freescale Management Complex (MC) bus driver MSI support * * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. * Author: German Rivera <[email protected]> * */ #include <linux/acpi.h> #include <linux/acpi_iort.h> #include <linux/irq.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/fsl/mc.h> static struct irq_chip its_msi_irq_chip = { .name = "ITS-fMSI", .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = msi_domain_set_affinity }; static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain, struct fsl_mc_device *mc_dev) { struct device_node *of_node; u32 out_id; of_node = irq_domain_get_of_node(domain); out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) : iort_msi_map_id(&mc_dev->dev, mc_dev->icid); return out_id; } static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, struct device *dev, int nvec, msi_alloc_info_t *info) { struct fsl_mc_device *mc_bus_dev; struct msi_domain_info *msi_info; if (!dev_is_fsl_mc(dev)) return -EINVAL; mc_bus_dev = to_fsl_mc_device(dev); if (!(mc_bus_dev->flags & FSL_MC_IS_DPRC)) return -EINVAL; /* * Set the device Id to be passed to the GIC-ITS: * * NOTE: This device id corresponds to the IOMMU stream ID * associated with the DPRC object (ICID). */ info->scratchpad[0].ul = fsl_mc_msi_domain_get_msi_id(msi_domain, mc_bus_dev); msi_info = msi_get_domain_info(msi_domain->parent); /* Allocate at least 32 MSIs, and always as a power of 2 */ nvec = max_t(int, 32, roundup_pow_of_two(nvec)); return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info); } static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = { .msi_prepare = its_fsl_mc_msi_prepare, }; static struct msi_domain_info its_fsl_mc_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), .ops = &its_fsl_mc_msi_ops, .chip = &its_msi_irq_chip, }; static const struct of_device_id its_device_id[] = { { .compatible = "arm,gic-v3-its", }, {}, }; static void __init its_fsl_mc_msi_init_one(struct fwnode_handle *handle, const char *name) { struct irq_domain *parent; struct irq_domain *mc_msi_domain; parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS); if (!parent || !msi_get_domain_info(parent)) { pr_err("%s: unable to locate ITS domain\n", name); return; } mc_msi_domain = fsl_mc_msi_create_irq_domain(handle, &its_fsl_mc_msi_domain_info, parent); if (!mc_msi_domain) { pr_err("%s: unable to create fsl-mc domain\n", name); return; } pr_info("fsl-mc MSI: %s domain created\n", name); } #ifdef CONFIG_ACPI static int __init its_fsl_mc_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_translator *its_entry; struct fwnode_handle *dom_handle; const char *node_name; int err = 0; its_entry = (struct acpi_madt_generic_translator *)header; node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx", (long)its_entry->base_address); dom_handle = iort_find_domain_token(its_entry->translation_id); if (!dom_handle) { pr_err("%s: Unable to locate ITS domain handle\n", node_name); err = -ENXIO; goto out; } its_fsl_mc_msi_init_one(dom_handle, node_name); out: kfree(node_name); return err; } static void __init its_fsl_mc_acpi_msi_init(void) { acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, its_fsl_mc_msi_parse_madt, 0); } #else static inline void its_fsl_mc_acpi_msi_init(void) { } #endif static void __init its_fsl_mc_of_msi_init(void) { struct 
device_node *np; for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { if (!of_device_is_available(np)) continue; if (!of_property_read_bool(np, "msi-controller")) continue; its_fsl_mc_msi_init_one(of_node_to_fwnode(np), np->full_name); } } static int __init its_fsl_mc_msi_init(void) { its_fsl_mc_of_msi_init(); its_fsl_mc_acpi_msi_init(); return 0; } early_initcall(its_fsl_mc_msi_init);
linux-master
drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
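Editor's illustration (not part of the kernel tree): a sketch of the vector-count policy in its_fsl_mc_msi_prepare(), which asks the parent ITS domain for at least 32 MSIs and always rounds the request up to a power of two. roundup_pow_of_two() is reimplemented here as a plain loop for a host build.

#include <assert.h>
#include <stdio.h>

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* same policy as its_fsl_mc_msi_prepare(): at least 32, always a power of 2 */
static unsigned int msi_vectors(unsigned int nvec)
{
	unsigned int rounded = roundup_pow_of_two(nvec);

	return rounded < 32 ? 32 : rounded;
}

int main(void)
{
	assert(msi_vectors(1) == 32);
	assert(msi_vectors(33) == 64);
	assert(msi_vectors(64) == 64);
	printf("nvec 20 -> %u, nvec 100 -> %u\n", msi_vectors(20), msi_vectors(100));
	return 0;
}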
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2010-2011 Jonas Bonn <[email protected]> * Copyright (C) 2014 Stefan Kristansson <[email protected]> */ #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> /* OR1K PIC implementation */ struct or1k_pic_dev { struct irq_chip chip; irq_flow_handler_t handle; unsigned long flags; }; /* * We're a couple of cycles faster than the generic implementations with * these 'fast' versions. */ static void or1k_pic_mask(struct irq_data *data) { mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq)); } static void or1k_pic_unmask(struct irq_data *data) { mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->hwirq)); } static void or1k_pic_ack(struct irq_data *data) { mtspr(SPR_PICSR, (1UL << data->hwirq)); } static void or1k_pic_mask_ack(struct irq_data *data) { mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq)); mtspr(SPR_PICSR, (1UL << data->hwirq)); } /* * There are two oddities with the OR1200 PIC implementation: * i) LEVEL-triggered interrupts are latched and need to be cleared * ii) the interrupt latch is cleared by writing a 0 to the bit, * as opposed to a 1 as mandated by the spec */ static void or1k_pic_or1200_ack(struct irq_data *data) { mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq)); } static void or1k_pic_or1200_mask_ack(struct irq_data *data) { mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq)); mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq)); } static struct or1k_pic_dev or1k_pic_level = { .chip = { .name = "or1k-PIC-level", .irq_unmask = or1k_pic_unmask, .irq_mask = or1k_pic_mask, }, .handle = handle_level_irq, .flags = IRQ_LEVEL | IRQ_NOPROBE, }; static struct or1k_pic_dev or1k_pic_edge = { .chip = { .name = "or1k-PIC-edge", .irq_unmask = or1k_pic_unmask, .irq_mask = or1k_pic_mask, .irq_ack = or1k_pic_ack, .irq_mask_ack = or1k_pic_mask_ack, }, .handle = handle_edge_irq, .flags = IRQ_LEVEL | IRQ_NOPROBE, }; static struct or1k_pic_dev or1k_pic_or1200 = { .chip = { .name = "or1200-PIC", .irq_unmask = or1k_pic_unmask, .irq_mask = or1k_pic_mask, .irq_ack = or1k_pic_or1200_ack, .irq_mask_ack = or1k_pic_or1200_mask_ack, }, .handle = handle_level_irq, .flags = IRQ_LEVEL | IRQ_NOPROBE, }; static struct irq_domain *root_domain; static inline int pic_get_irq(int first) { int hwirq; hwirq = ffs(mfspr(SPR_PICSR) >> first); if (!hwirq) return NO_IRQ; else hwirq = hwirq + first - 1; return hwirq; } static void or1k_pic_handle_irq(struct pt_regs *regs) { int irq = -1; while ((irq = pic_get_irq(irq + 1)) != NO_IRQ) generic_handle_domain_irq(root_domain, irq); } static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct or1k_pic_dev *pic = d->host_data; irq_set_chip_and_handler(irq, &pic->chip, pic->handle); irq_set_status_flags(irq, pic->flags); return 0; } static const struct irq_domain_ops or1k_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, .map = or1k_map, }; /* * This sets up the IRQ domain for the PIC built in to the OpenRISC * 1000 CPU. This is the "root" domain as these are the interrupts * that directly trigger an exception in the CPU. 
*/ static int __init or1k_pic_init(struct device_node *node, struct or1k_pic_dev *pic) { /* Disable all interrupts until explicitly requested */ mtspr(SPR_PICMR, (0UL)); root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops, pic); set_handle_irq(or1k_pic_handle_irq); return 0; } static int __init or1k_pic_or1200_init(struct device_node *node, struct device_node *parent) { return or1k_pic_init(node, &or1k_pic_or1200); } IRQCHIP_DECLARE(or1k_pic_or1200, "opencores,or1200-pic", or1k_pic_or1200_init); IRQCHIP_DECLARE(or1k_pic, "opencores,or1k-pic", or1k_pic_or1200_init); static int __init or1k_pic_level_init(struct device_node *node, struct device_node *parent) { return or1k_pic_init(node, &or1k_pic_level); } IRQCHIP_DECLARE(or1k_pic_level, "opencores,or1k-pic-level", or1k_pic_level_init); static int __init or1k_pic_edge_init(struct device_node *node, struct device_node *parent) { return or1k_pic_init(node, &or1k_pic_edge); } IRQCHIP_DECLARE(or1k_pic_edge, "opencores,or1k-pic-edge", or1k_pic_edge_init);
linux-master
drivers/irqchip/irq-or1k-pic.c
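Editor's illustration (not part of the kernel tree): a sketch of the PICSR scan done by pic_get_irq() and or1k_pic_handle_irq(), which repeatedly finds the next set bit at or above the previous hwirq. The PICSR value is a made-up sample, ffs() replaces the SPR read, and NO_IRQ is assumed to be -1 as on OpenRISC.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NO_IRQ	(-1)	/* assumed sentinel; the driver relies on the arch definition */

/* mirrors pic_get_irq(): find the first pending hwirq at or above 'first' */
static int pic_get_irq(unsigned int picsr, int first)
{
	int hwirq = ffs(picsr >> first);

	return hwirq ? hwirq + first - 1 : NO_IRQ;
}

int main(void)
{
	unsigned int picsr = 0x00000a00;	/* sample: hwirqs 9 and 11 pending */
	int irq = -1;

	/* same scan order as or1k_pic_handle_irq() */
	while ((irq = pic_get_irq(picsr, irq + 1)) != NO_IRQ)
		printf("dispatch hwirq %d\n", irq);
	return 0;
}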
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/irq.h>

#define INTC_IRQS		64

#define CK_INTC_ICR		0x00
#define CK_INTC_PEN31_00	0x14
#define CK_INTC_PEN63_32	0x2c
#define CK_INTC_NEN31_00	0x10
#define CK_INTC_NEN63_32	0x28
#define CK_INTC_SOURCE		0x40
#define CK_INTC_DUAL_BASE	0x100

#define GX_INTC_PEN31_00	0x00
#define GX_INTC_PEN63_32	0x04
#define GX_INTC_NEN31_00	0x40
#define GX_INTC_NEN63_32	0x44
#define GX_INTC_NMASK31_00	0x50
#define GX_INTC_NMASK63_32	0x54
#define GX_INTC_SOURCE		0x60

static void __iomem *reg_base;
static struct irq_domain *root_domain;

static int nr_irq = INTC_IRQS;

/*
 * When the controller supports pulse signals, the PEN register keeps the
 * signal latched even without a software trigger.
 *
 * So, to support pulse signals we also need to clear the IFR register;
 * the IFR offset is the NEN offset minus 8.
 */
static void irq_ck_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	unsigned long ifr = ct->regs.mask - 8;
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr);
	irq_gc_unlock(gc);
}

static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,
			     u32 mask_reg, u32 irq_base)
{
	struct irq_chip_generic *gc;

	gc = irq_get_domain_generic_chip(root_domain, irq_base);
	gc->reg_base = reg_base;
	gc->chip_types[0].regs.mask = mask_reg;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	if (of_property_read_bool(node, "csky,support-pulse-signal"))
		gc->chip_types[0].chip.irq_unmask = irq_ck_mask_set_bit;
}

static inline u32 build_channel_val(u32 idx, u32 magic)
{
	u32 res;

	/*
	 * Set the same index for each channel.
	 */
	res = idx | (idx << 8) | (idx << 16) | (idx << 24);

	/*
	 * Set the channel magic number in descending order.
	 * The magic is 0x00010203 for ck-intc.
	 * The magic is 0x03020100 for gx6605s-intc.
	 */
	return res | magic;
}

static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
{
	u32 i;

	/* Set up the 64 channel slots */
	for (i = 0; i < INTC_IRQS; i += 4)
		writel(build_channel_val(i, magic), reg_addr + i);
}

static int __init ck_intc_init_comm(struct device_node *node,
				    struct device_node *parent)
{
	int ret;

	if (parent) {
		pr_err("C-SKY Intc not a root irq controller\n");
		return -EINVAL;
	}

	reg_base = of_iomap(node, 0);
	if (!reg_base) {
		pr_err("C-SKY Intc unable to map: %p.\n", node);
		return -EINVAL;
	}

	root_domain = irq_domain_add_linear(node, nr_irq,
					    &irq_generic_chip_ops, NULL);
	if (!root_domain) {
		pr_err("C-SKY Intc irq_domain_add failed.\n");
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(root_domain, 32, 1,
					     "csky_intc", handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0, 0);
	if (ret) {
		pr_err("C-SKY Intc irq_alloc_gc failed.\n");
		return -ENOMEM;
	}

	return 0;
}

static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
				     u32 irq_base)
{
	if (hwirq == 0)
		return false;

	generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq));

	return true;
}

/* gx6605s: interrupt controller with 64 irqs */
static void gx_irq_handler(struct pt_regs *regs)
{
	bool ret;

retry:
	ret = handle_irq_perbit(regs,
			readl(reg_base + GX_INTC_PEN63_32), 32);
	if (ret)
		goto retry;

	ret = handle_irq_perbit(regs,
			readl(reg_base + GX_INTC_PEN31_00), 0);
	if (ret)
		goto retry;
}

static int __init gx_intc_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	ret = ck_intc_init_comm(node, parent);
	if (ret)
		return ret;

	/*
	 * Initialize the enable registers to disable all interrupts.
	 */
	writel(0x0, reg_base + GX_INTC_NEN31_00);
	writel(0x0, reg_base + GX_INTC_NEN63_32);

	/*
	 * Initialize the mask registers with everything unmasked, because
	 * we only use the enable registers.
	 */
	writel(0x0, reg_base + GX_INTC_NMASK31_00);
	writel(0x0, reg_base + GX_INTC_NMASK63_32);

	setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);

	ck_set_gc(node, reg_base, GX_INTC_NEN31_00, 0);
	ck_set_gc(node, reg_base, GX_INTC_NEN63_32, 32);

	set_handle_irq(gx_irq_handler);

	return 0;
}
IRQCHIP_DECLARE(csky_gx6605s_intc, "csky,gx6605s-intc", gx_intc_init);

/*
 * C-SKY simple interrupt controller with 64 irqs; two controllers chained
 * together can support 128 irqs.
 */
static void ck_irq_handler(struct pt_regs *regs)
{
	bool ret;
	void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
	void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;

retry:
	/* handle 0 - 63 irqs */
	ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
	if (ret)
		goto retry;

	ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
	if (ret)
		goto retry;

	if (nr_irq == INTC_IRQS)
		return;

	/* handle 64 - 127 irqs */
	ret = handle_irq_perbit(regs,
			readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
	if (ret)
		goto retry;

	ret = handle_irq_perbit(regs,
			readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
	if (ret)
		goto retry;
}

static int __init ck_intc_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	ret = ck_intc_init_comm(node, parent);
	if (ret)
		return ret;

	/* Initialize the enable registers to disable all interrupts */
	writel(0, reg_base + CK_INTC_NEN31_00);
	writel(0, reg_base + CK_INTC_NEN63_32);

	/* Enable the irq intc */
	writel(BIT(31), reg_base + CK_INTC_ICR);

	ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
	ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);

	setup_irq_channel(0x00010203, reg_base + CK_INTC_SOURCE);

	set_handle_irq(ck_irq_handler);

	return 0;
}
IRQCHIP_DECLARE(ck_intc, "csky,apb-intc", ck_intc_init);

static int __init ck_dual_intc_init(struct device_node *node,
				    struct device_node *parent)
{
	int ret;

	/* dual-apb-intc supports up to 128 irq sources */
	nr_irq = INTC_IRQS * 2;

	ret = ck_intc_init(node, parent);
	if (ret)
		return ret;

	/* Initialize the enable registers to disable all interrupts */
	writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
	writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);

	ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
	ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);

	setup_irq_channel(0x00010203,
			  reg_base + CK_INTC_SOURCE + CK_INTC_DUAL_BASE);

	return 0;
}
IRQCHIP_DECLARE(ck_dual_intc, "csky,dual-apb-intc", ck_dual_intc_init);
linux-master
drivers/irqchip/irq-csky-apb-intc.c
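A quick way to see what setup_irq_channel() in the driver above actually writes into the SOURCE registers is to evaluate build_channel_val() for a couple of slots. The standalone sketch below is an illustrative assumption, not part of the driver: the hosted-C printf harness is mine, only the arithmetic mirrors the kernel helper. It prints the 32-bit word programmed for the first two 4-channel slots with both magics, showing that each byte carries one channel index and that the two magics only differ in byte order.

/* Standalone sketch, not kernel code: shows the words that
 * setup_irq_channel() would write for the first two slots.
 * Assumes a hosted C environment; the harness is illustrative only. */
#include <stdio.h>
#include <stdint.h>

static uint32_t build_channel_val(uint32_t idx, uint32_t magic)
{
	/* Same arithmetic as the kernel helper: replicate the slot index
	 * into all four bytes, then OR in the per-controller magic. */
	uint32_t res = idx | (idx << 8) | (idx << 16) | (idx << 24);

	return res | magic;
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 8; i += 4)
		printf("slot 0x%02x: ck=0x%08x gx=0x%08x\n",
		       (unsigned int)i,
		       (unsigned int)build_channel_val(i, 0x00010203),
		       (unsigned int)build_channel_val(i, 0x03020100));

	return 0;
}

For slot 0x04 this prints ck=0x04050607 and gx=0x07060504: the same four channel numbers, just in opposite byte order.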
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2015 Toradex AG
 * Author: Stefan Agner <[email protected]>
 *
 * IRQ chip driver for the MSCM interrupt router available on Vybrid SoCs.
 * The interrupt router sits between the CPU's interrupt controller and the
 * peripherals. The router allows routing the peripheral interrupts to
 * one of the two available CPUs on Vybrid VF6xx SoCs (Cortex-A5 or
 * Cortex-M4). The router is configured transparently on an IRQ
 * request.
 *
 * o All peripheral interrupts of the Vybrid SoC can be routed to
 *   CPU 0, CPU 1 or both. The routing is useful for dual-core
 *   variants of the Vybrid SoC such as VF6xx. This driver routes the
 *   requested interrupt to the CPU it is currently running on.
 *
 * o The interrupt router must be set up even on single-core
 *   variants of Vybrid.
 */

#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/regmap.h>

#define MSCM_CPxNUM		0x4

#define MSCM_IRSPRC(n)		(0x80 + 2 * (n))
#define MSCM_IRSPRC_CPEN_MASK	0x3

#define MSCM_IRSPRC_NUM		112

struct vf610_mscm_ir_chip_data {
	void __iomem *mscm_ir_base;
	u16 cpu_mask;
	u16 saved_irsprc[MSCM_IRSPRC_NUM];
	bool is_nvic;
};

static struct vf610_mscm_ir_chip_data *mscm_ir_data;

static inline void vf610_mscm_ir_save(struct vf610_mscm_ir_chip_data *data)
{
	int i;

	for (i = 0; i < MSCM_IRSPRC_NUM; i++)
		data->saved_irsprc[i] =
			readw_relaxed(data->mscm_ir_base + MSCM_IRSPRC(i));
}

static inline void vf610_mscm_ir_restore(struct vf610_mscm_ir_chip_data *data)
{
	int i;

	for (i = 0; i < MSCM_IRSPRC_NUM; i++)
		writew_relaxed(data->saved_irsprc[i],
			       data->mscm_ir_base + MSCM_IRSPRC(i));
}

static int vf610_mscm_ir_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		vf610_mscm_ir_save(mscm_ir_data);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		vf610_mscm_ir_restore(mscm_ir_data);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block mscm_ir_notifier_block = {
	.notifier_call = vf610_mscm_ir_notifier,
};

static void vf610_mscm_ir_enable(struct irq_data *data)
{
	irq_hw_number_t hwirq = data->hwirq;
	struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
	u16 irsprc;

	irsprc = readw_relaxed(chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
	irsprc &= MSCM_IRSPRC_CPEN_MASK;

	WARN_ON(irsprc & ~chip_data->cpu_mask);

	writew_relaxed(chip_data->cpu_mask,
		       chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));

	irq_chip_enable_parent(data);
}

static void vf610_mscm_ir_disable(struct irq_data *data)
{
	irq_hw_number_t hwirq = data->hwirq;
	struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;

	writew_relaxed(0x0, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));

	irq_chip_disable_parent(data);
}

static struct irq_chip vf610_mscm_ir_irq_chip = {
	.name			= "mscm-ir",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_enable		= vf610_mscm_ir_enable,
	.irq_disable		= vf610_mscm_ir_disable,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};

static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain,
				      unsigned int virq,
				      unsigned int nr_irqs, void *arg)
{
	int i;
	irq_hw_number_t hwirq;
	struct irq_fwspec *fwspec = arg;
	struct irq_fwspec parent_fwspec;

	if (!irq_domain_get_of_node(domain->parent))
		return -EINVAL;

	if (fwspec->param_count != 2)
		return -EINVAL;

	hwirq = fwspec->param[0];
	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &vf610_mscm_ir_irq_chip,
					      domain->host_data);

	parent_fwspec.fwnode = domain->parent->fwnode;

	if (mscm_ir_data->is_nvic) {
		parent_fwspec.param_count = 1;
		parent_fwspec.param[0] = fwspec->param[0];
	} else {
		parent_fwspec.param_count = 3;
		parent_fwspec.param[0] = GIC_SPI;
		parent_fwspec.param[1] = fwspec->param[0];
		parent_fwspec.param[2] = fwspec->param[1];
	}

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static int vf610_mscm_ir_domain_translate(struct irq_domain *d,
					  struct irq_fwspec *fwspec,
					  unsigned long *hwirq,
					  unsigned int *type)
{
	if (WARN_ON(fwspec->param_count < 2))
		return -EINVAL;
	*hwirq = fwspec->param[0];
	*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

static const struct irq_domain_ops mscm_irq_domain_ops = {
	.translate = vf610_mscm_ir_domain_translate,
	.alloc = vf610_mscm_ir_domain_alloc,
	.free = irq_domain_free_irqs_common,
};

static int __init vf610_mscm_ir_of_init(struct device_node *node,
					struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	struct regmap *mscm_cp_regmap;
	int ret, cpuid;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("vf610_mscm_ir: interrupt-parent not found\n");
		return -EINVAL;
	}

	mscm_ir_data = kzalloc(sizeof(*mscm_ir_data), GFP_KERNEL);
	if (!mscm_ir_data)
		return -ENOMEM;

	mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir");
	if (IS_ERR(mscm_ir_data->mscm_ir_base)) {
		pr_err("vf610_mscm_ir: unable to map mscm register\n");
		ret = PTR_ERR(mscm_ir_data->mscm_ir_base);
		goto out_free;
	}

	mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg");
	if (IS_ERR(mscm_cp_regmap)) {
		ret = PTR_ERR(mscm_cp_regmap);
		pr_err("vf610_mscm_ir: regmap lookup for cpucfg failed\n");
		goto out_unmap;
	}

	regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
	mscm_ir_data->cpu_mask = 0x1 << cpuid;

	domain = irq_domain_add_hierarchy(domain_parent, 0,
					  MSCM_IRSPRC_NUM, node,
					  &mscm_irq_domain_ops, mscm_ir_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	if (of_device_is_compatible(irq_domain_get_of_node(domain->parent),
				    "arm,armv7m-nvic"))
		mscm_ir_data->is_nvic = true;

	cpu_pm_register_notifier(&mscm_ir_notifier_block);

	return 0;

out_unmap:
	iounmap(mscm_ir_data->mscm_ir_base);
out_free:
	kfree(mscm_ir_data);
	return ret;
}
IRQCHIP_DECLARE(vf610_mscm_ir, "fsl,vf610-mscm-ir", vf610_mscm_ir_of_init);
linux-master
drivers/irqchip/irq-vf610-mscm-ir.c
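The routing itself in irq-vf610-mscm-ir.c is a single 16-bit write per interrupt: the IRSPRC register for a hwirq receives the mask of CPUs that may see it, and disabling the line clears the register again. The standalone sketch below is an illustrative assumption, not part of the driver: the fake_irsprc array, route_enable()/route_disable() names and the main() harness are mine. It models the enable/disable writes against a plain array so the mask handling and the WARN_ON() condition are easy to follow in isolation.

/* Standalone sketch, not kernel code: models the IRSPRC routing done by
 * vf610_mscm_ir_enable()/vf610_mscm_ir_disable() against a plain array
 * instead of the real MSCM register block. */
#include <stdio.h>
#include <stdint.h>

#define MSCM_IRSPRC_NUM		112
#define MSCM_IRSPRC_CPEN_MASK	0x3

static uint16_t fake_irsprc[MSCM_IRSPRC_NUM];

/* Route hwirq to the CPUs in cpu_mask (bit 0 = CPU 0, bit 1 = CPU 1). */
static void route_enable(unsigned int hwirq, uint16_t cpu_mask)
{
	uint16_t irsprc = fake_irsprc[hwirq] & MSCM_IRSPRC_CPEN_MASK;

	/* Mirrors the WARN_ON(): the line should not already belong to a
	 * CPU outside of cpu_mask. */
	if (irsprc & ~cpu_mask)
		printf("hwirq %u already routed elsewhere (0x%x)\n",
		       hwirq, (unsigned int)irsprc);

	fake_irsprc[hwirq] = cpu_mask;
}

static void route_disable(unsigned int hwirq)
{
	fake_irsprc[hwirq] = 0;
}

int main(void)
{
	uint16_t cpu_mask = 1 << 0;	/* pretend MSCM_CPxNUM reported CPU 0 */

	route_enable(42, cpu_mask);
	printf("IRSPRC[42] = 0x%04x\n", (unsigned int)fake_irsprc[42]);

	route_disable(42);
	printf("IRSPRC[42] = 0x%04x\n", (unsigned int)fake_irsprc[42]);

	return 0;
}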
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct gic_clk_data {
	unsigned int num_clocks;
	const char *const *clocks;
};

struct gic_chip_pm {
	struct gic_chip_data *chip_data;
	const struct gic_clk_data *clk_data;
	struct clk_bulk_data *clks;
};

static int gic_runtime_resume(struct device *dev)
{
	struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
	struct gic_chip_data *gic = chip_pm->chip_data;
	const struct gic_clk_data *data = chip_pm->clk_data;
	int ret;

	ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks);
	if (ret)
		return ret;

	/*
	 * On the very first resume, the pointer to chip_pm->chip_data
	 * will be NULL and this is intentional, because we do not
	 * want to restore the GIC on the very first resume. So if
	 * the pointer is not valid just return.
	 */
	if (!gic)
		return 0;

	gic_dist_restore(gic);
	gic_cpu_restore(gic);

	return 0;
}

static int gic_runtime_suspend(struct device *dev)
{
	struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
	struct gic_chip_data *gic = chip_pm->chip_data;
	const struct gic_clk_data *data = chip_pm->clk_data;

	gic_dist_save(gic);
	gic_cpu_save(gic);

	clk_bulk_disable_unprepare(data->num_clocks, chip_pm->clks);

	return 0;
}

static int gic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct gic_clk_data *data;
	struct gic_chip_pm *chip_pm;
	int ret, irq, i;

	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		dev_err(&pdev->dev, "no device match found\n");
		return -ENODEV;
	}

	chip_pm = devm_kzalloc(dev, sizeof(*chip_pm), GFP_KERNEL);
	if (!chip_pm)
		return -ENOMEM;

	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		dev_err(dev, "no parent interrupt found!\n");
		return -EINVAL;
	}

	chip_pm->clks = devm_kcalloc(dev, data->num_clocks,
				     sizeof(*chip_pm->clks), GFP_KERNEL);
	if (!chip_pm->clks)
		return -ENOMEM;

	for (i = 0; i < data->num_clocks; i++)
		chip_pm->clks[i].id = data->clocks[i];

	ret = devm_clk_bulk_get(dev, data->num_clocks, chip_pm->clks);
	if (ret)
		goto irq_dispose;

	chip_pm->clk_data = data;
	dev_set_drvdata(dev, chip_pm);

	pm_runtime_enable(dev);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto rpm_disable;

	ret = gic_of_init_child(dev, &chip_pm->chip_data, irq);
	if (ret)
		goto rpm_put;

	pm_runtime_put(dev);

	dev_info(dev, "GIC IRQ controller registered\n");

	return 0;

rpm_put:
	pm_runtime_put_sync(dev);
rpm_disable:
	pm_runtime_disable(dev);
irq_dispose:
	irq_dispose_mapping(irq);

	return ret;
}

static const struct dev_pm_ops gic_pm_ops = {
	SET_RUNTIME_PM_OPS(gic_runtime_suspend,
			   gic_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};

static const char * const gic400_clocks[] = {
	"clk",
};

static const struct gic_clk_data gic400_data = {
	.num_clocks = ARRAY_SIZE(gic400_clocks),
	.clocks = gic400_clocks,
};

static const struct of_device_id gic_match[] = {
	{ .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
	{},
};
MODULE_DEVICE_TABLE(of, gic_match);

static struct platform_driver gic_driver = {
	.probe		= gic_probe,
	.driver		= {
		.name	= "gic",
		.of_match_table	= gic_match,
		.pm	= &gic_pm_ops,
	}
};

builtin_platform_driver(gic_driver);
linux-master
drivers/irqchip/irq-gic-pm.c
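The PM callbacks in irq-gic-pm.c rely on a strict ordering: clocks must be running before the GIC state is touched, state is saved before the clocks are gated, and nothing is restored on the very first resume because chip_data is still NULL at that point. The standalone sketch below is an illustrative assumption, not real kernel code: clocks_enable(), gic_state_save() and the other helpers are stand-ins of my own naming that encode that ordering so it can be read, or compiled and run, in isolation.

/* Standalone sketch, not kernel code: illustrates the ordering contract of
 * gic_runtime_suspend()/gic_runtime_resume(). All helpers are stand-ins. */
#include <stdio.h>
#include <stdbool.h>

static bool clocks_on;
static bool state_saved;

static void clocks_enable(void)
{
	clocks_on = true;
	puts("clocks on");
}

static void clocks_disable(void)
{
	clocks_on = false;
	puts("clocks off");
}

static void gic_state_save(void)
{
	/* Registers are only accessible while the clocks run, which is why
	 * the driver saves state before clk_bulk_disable_unprepare(). */
	if (!clocks_on)
		puts("BUG: saving GIC state with clocks off");
	state_saved = true;
	puts("GIC state saved");
}

static void gic_state_restore(bool first_resume)
{
	/* Mirrors the NULL chip_data check: nothing to restore on the very
	 * first resume. */
	if (first_resume || !state_saved)
		return;
	puts("GIC state restored");
}

int main(void)
{
	/* first resume: clocks first, no restore yet */
	clocks_enable();
	gic_state_restore(true);

	/* suspend: save while the clocks still run, then gate them */
	gic_state_save();
	clocks_disable();

	/* later resume: clocks first, then restore */
	clocks_enable();
	gic_state_restore(false);

	return 0;
}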